Merge branch 'develop' into release/3.15.x

Jakub Trllo 2022-10-20 15:11:40 +02:00
commit 258acdfca1
119 changed files with 8827 additions and 2033 deletions


@@ -37,27 +37,27 @@ jobs:
echo ::set-output name=next_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version_type.outputs.type != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
unreleasedLabel: ${{ steps.version.outputs.next_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'


@@ -33,27 +33,27 @@ jobs:
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: 💾 Commit and Tag
id: git_commit

.gitignore (vendored): 2 changes

@@ -110,3 +110,5 @@ tools/run_eventserver.*
# Developer tools
tools/dev_*
.github_changelog_generator

File diff suppressed because it is too large

HISTORY.md: 1814 changes

File diff suppressed because it is too large


@@ -11,7 +11,6 @@ from .lib import (
PypeLogger,
Logger,
Anatomy,
config,
execute,
run_subprocess,
version_up,
@@ -72,7 +71,6 @@ __all__ = [
"PypeLogger",
"Logger",
"Anatomy",
"config",
"execute",
"get_default_components",
"ApplicationManager",


@@ -277,6 +277,13 @@ def projectmanager():
PypeCommands().launch_project_manager()
@main.command(context_settings={"ignore_unknown_options": True})
def publish_report_viewer():
from openpype.tools.publisher.publish_report_viewer import main
sys.exit(main())
@main.command()
@click.argument("output_path")
@click.option("--project", help="Define project context")


@@ -3,7 +3,7 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@@ -3,14 +3,15 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"


@@ -3,14 +3,15 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"


@@ -3,7 +3,7 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@@ -4,7 +4,7 @@ import mathutils
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@@ -1,7 +1,7 @@
"""Host API required Work Files tool"""
import os
from openpype.api import Logger
from openpype.lib import Logger
# from .. import (
# get_project_manager,
# get_current_project


@@ -3,16 +3,17 @@ import json
import tempfile
import contextlib
import socket
from pprint import pformat
from openpype.lib import (
PreLaunchHook,
get_openpype_username
get_openpype_username,
run_subprocess,
)
from openpype.lib.applications import (
ApplicationLaunchFailed
)
from openpype.hosts import flame as opflame
import openpype
from pprint import pformat
class FlamePrelaunch(PreLaunchHook):
@@ -127,7 +128,6 @@ class FlamePrelaunch(PreLaunchHook):
except OSError as exc:
self.log.warning("Not able to open files: {}".format(exc))
def _get_flame_fps(self, fps_num):
fps_table = {
float(23.976): "23.976 fps",
@@ -179,7 +179,7 @@
"env": self.launch_context.env
}
openpype.api.run_subprocess(args, **process_kwargs)
run_subprocess(args, **process_kwargs)
# process returned json file to pass launch args
return_json_data = open(tmp_json_path).read()


@@ -44,11 +44,26 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class FusionLogHandler(logging.Handler):
# Keep a reference to fusion's Print function (Remote Object)
_print = None
@property
def print(self):
if self._print is not None:
# Use cached
return self._print
_print = getattr(sys.modules["__main__"], "fusion").Print
if _print is None:
# Backwards compatibility: Print method on Fusion instance was
# added around Fusion 17.4 and wasn't available on PyRemote Object
# before
_print = get_current_comp().Print
self._print = _print
return _print
def emit(self, record):
entry = self.format(record)
self._print(entry)
self.print(entry)
def install():

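Below is a minimal sketch (not part of this commit) of how a logging.Handler subclass such as FusionLogHandler is typically wired into standard Python logging; the logger name is illustrative:

    import logging

    # Route ordinary logging records to Fusion's console via the handler,
    # which forwards each formatted entry to Fusion's Print function.
    logger = logging.getLogger("openpype.fusion")
    logger.setLevel(logging.INFO)
    logger.addHandler(FusionLogHandler())
    logger.info("Logging now appears in the Fusion console")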

@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import openpype.api
import pyblish.api
import hou
from openpype.pipeline.publish import RepairAction
class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
"""Validate workfile paths so they are absolute."""
@@ -11,7 +12,7 @@ class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
families = ["workfile"]
hosts = ["houdini"]
label = "Validate Workfile Paths"
actions = [openpype.api.RepairAction]
actions = [RepairAction]
optional = True
node_types = ["file", "alembic"]


@@ -2459,182 +2459,120 @@ def bake_to_world_space(nodes,
def load_capture_preset(data=None):
"""Convert OpenPype Extract Playblast settings to `capture` arguments
Input data is the settings from:
`project_settings/maya/publish/ExtractPlayblast/capture_preset`
Args:
data (dict): Capture preset settings from OpenPype settings
Returns:
dict: `capture.capture` compatible keyword arguments
"""
import capture
preset = data
options = dict()
viewport_options = dict()
viewport2_options = dict()
camera_options = dict()
# CODEC
id = 'Codec'
for key in preset[id]:
options[str(key)] = preset[id][key]
# Straight key-value match from settings to capture arguments
options.update(data["Codec"])
options.update(data["Generic"])
options.update(data["Resolution"])
# GENERIC
id = 'Generic'
for key in preset[id]:
options[str(key)] = preset[id][key]
# RESOLUTION
id = 'Resolution'
options['height'] = preset[id]['height']
options['width'] = preset[id]['width']
camera_options.update(data['Camera Options'])
viewport_options.update(data["Renderer"])
# DISPLAY OPTIONS
id = 'Display Options'
disp_options = {}
for key in preset[id]:
for key, value in data['Display Options'].items():
if key.startswith('background'):
disp_options[key] = preset['Display Options'][key]
if len(disp_options[key]) == 4:
disp_options[key][0] = (float(disp_options[key][0])/255)
disp_options[key][1] = (float(disp_options[key][1])/255)
disp_options[key][2] = (float(disp_options[key][2])/255)
disp_options[key].pop()
# Convert background, backgroundTop, backgroundBottom colors
if len(value) == 4:
# Ignore alpha + convert RGB to float
value = [
float(value[0]) / 255,
float(value[1]) / 255,
float(value[2]) / 255
]
disp_options[key] = value
else:
disp_options['displayGradient'] = True
options['display_options'] = disp_options
# VIEWPORT OPTIONS
temp_options = {}
id = 'Renderer'
for key in preset[id]:
temp_options[str(key)] = preset[id][key]
# Viewport Options has a mixture of Viewport2 Options and Viewport Options
# to pass along to capture. So we'll need to differentiate between the two
VIEWPORT2_OPTIONS = {
"textureMaxResolution",
"renderDepthOfField",
"ssaoEnable",
"ssaoSamples",
"ssaoAmount",
"ssaoRadius",
"ssaoFilterRadius",
"hwFogStart",
"hwFogEnd",
"hwFogAlpha",
"hwFogFalloff",
"hwFogColorR",
"hwFogColorG",
"hwFogColorB",
"hwFogDensity",
"motionBlurEnable",
"motionBlurSampleCount",
"motionBlurShutterOpenFraction",
"lineAAEnable"
}
for key, value in data['Viewport Options'].items():
temp_options2 = {}
id = 'Viewport Options'
for key in preset[id]:
# There are some keys we want to ignore
if key in {"override_viewport_options", "high_quality"}:
continue
# First handle special cases where we do value conversion to
# separate option values
if key == 'textureMaxResolution':
if preset[id][key] > 0:
temp_options2['textureMaxResolution'] = preset[id][key]
temp_options2['enableTextureMaxRes'] = True
temp_options2['textureMaxResMode'] = 1
viewport2_options['textureMaxResolution'] = value
if value > 0:
viewport2_options['enableTextureMaxRes'] = True
viewport2_options['textureMaxResMode'] = 1
else:
temp_options2['textureMaxResolution'] = preset[id][key]
temp_options2['enableTextureMaxRes'] = False
temp_options2['textureMaxResMode'] = 0
viewport2_options['enableTextureMaxRes'] = False
viewport2_options['textureMaxResMode'] = 0
if key == 'multiSample':
if preset[id][key] > 0:
temp_options2['multiSampleEnable'] = True
temp_options2['multiSampleCount'] = preset[id][key]
elif key == 'multiSample':
viewport2_options['multiSampleEnable'] = value > 0
viewport2_options['multiSampleCount'] = value
elif key == 'alphaCut':
viewport2_options['transparencyAlgorithm'] = 5
viewport2_options['transparencyQuality'] = 1
elif key == 'hwFogFalloff':
# Settings enum value string to integer
viewport2_options['hwFogFalloff'] = int(value)
# Then handle Viewport 2.0 Options
elif key in VIEWPORT2_OPTIONS:
viewport2_options[key] = value
# Then assume remainder is Viewport Options
else:
temp_options2['multiSampleEnable'] = False
temp_options2['multiSampleCount'] = preset[id][key]
viewport_options[key] = value
if key == 'renderDepthOfField':
temp_options2['renderDepthOfField'] = preset[id][key]
if key == 'ssaoEnable':
if preset[id][key] is True:
temp_options2['ssaoEnable'] = True
else:
temp_options2['ssaoEnable'] = False
if key == 'ssaoSamples':
temp_options2['ssaoSamples'] = preset[id][key]
if key == 'ssaoAmount':
temp_options2['ssaoAmount'] = preset[id][key]
if key == 'ssaoRadius':
temp_options2['ssaoRadius'] = preset[id][key]
if key == 'hwFogDensity':
temp_options2['hwFogDensity'] = preset[id][key]
if key == 'ssaoFilterRadius':
temp_options2['ssaoFilterRadius'] = preset[id][key]
if key == 'alphaCut':
temp_options2['transparencyAlgorithm'] = 5
temp_options2['transparencyQuality'] = 1
if key == 'headsUpDisplay':
temp_options['headsUpDisplay'] = True
if key == 'fogging':
temp_options['fogging'] = preset[id][key] or False
if key == 'hwFogStart':
temp_options2['hwFogStart'] = preset[id][key]
if key == 'hwFogEnd':
temp_options2['hwFogEnd'] = preset[id][key]
if key == 'hwFogAlpha':
temp_options2['hwFogAlpha'] = preset[id][key]
if key == 'hwFogFalloff':
temp_options2['hwFogFalloff'] = int(preset[id][key])
if key == 'hwFogColorR':
temp_options2['hwFogColorR'] = preset[id][key]
if key == 'hwFogColorG':
temp_options2['hwFogColorG'] = preset[id][key]
if key == 'hwFogColorB':
temp_options2['hwFogColorB'] = preset[id][key]
if key == 'motionBlurEnable':
if preset[id][key] is True:
temp_options2['motionBlurEnable'] = True
else:
temp_options2['motionBlurEnable'] = False
if key == 'motionBlurSampleCount':
temp_options2['motionBlurSampleCount'] = preset[id][key]
if key == 'motionBlurShutterOpenFraction':
temp_options2['motionBlurShutterOpenFraction'] = preset[id][key]
if key == 'lineAAEnable':
if preset[id][key] is True:
temp_options2['lineAAEnable'] = True
else:
temp_options2['lineAAEnable'] = False
else:
temp_options[str(key)] = preset[id][key]
for key in ['override_viewport_options',
'high_quality',
'alphaCut',
'gpuCacheDisplayFilter',
'multiSample',
'ssaoEnable',
'ssaoSamples',
'ssaoAmount',
'ssaoFilterRadius',
'ssaoRadius',
'hwFogStart',
'hwFogEnd',
'hwFogAlpha',
'hwFogFalloff',
'hwFogColorR',
'hwFogColorG',
'hwFogColorB',
'hwFogDensity',
'textureMaxResolution',
'motionBlurEnable',
'motionBlurSampleCount',
'motionBlurShutterOpenFraction',
'lineAAEnable',
'renderDepthOfField'
]:
temp_options.pop(key, None)
options['viewport_options'] = temp_options
options['viewport2_options'] = temp_options2
options['viewport_options'] = viewport_options
options['viewport2_options'] = viewport2_options
options['camera_options'] = camera_options
# use active sound track
scene = capture.parse_active_scene()
options['sound'] = scene['sound']
# options['display_options'] = temp_options
return options
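A hedged usage sketch of the refactored function (not shown in this diff): the returned dict is meant to be passed straight to capture.capture inside a Maya session; the import path and the minimal preset dict below are assumptions for illustration:

    import capture
    from openpype.hosts.maya.api.lib import load_capture_preset

    # Minimal stand-in for the settings normally read from
    # project_settings/maya/publish/ExtractPlayblast/capture_preset
    capture_preset = {
        "Codec": {}, "Generic": {}, "Resolution": {"width": 1920, "height": 1080},
        "Camera Options": {}, "Renderer": {}, "Display Options": {},
        "Viewport Options": {},
    }

    options = load_capture_preset(data=capture_preset)
    capture.capture(camera="persp", filename="/tmp/playblast", **options)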


@@ -80,7 +80,7 @@ IMAGE_PREFIXES = {
"mayahardware2": "defaultRenderGlobals.imageFilePrefix"
}
RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"
RENDERMAN_IMAGE_DIR = "<scene>/<layer>"
def has_tokens(string, tokens):
@@ -260,20 +260,20 @@ class ARenderProducts:
"""
try:
file_prefix_attr = IMAGE_PREFIXES[self.renderer]
prefix_attr = IMAGE_PREFIXES[self.renderer]
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer)
)
file_prefix = self._get_attr(file_prefix_attr)
prefix = self._get_attr(prefix_attr)
if not file_prefix:
if not prefix:
# Fall back to scene name by default
log.debug("Image prefix not set, using <Scene>")
file_prefix = "<Scene>"
return file_prefix
return prefix
def get_render_attribute(self, attribute):
"""Get attribute from render options.
@@ -730,13 +730,16 @@ class RenderProductsVray(ARenderProducts):
"""Get image prefix for V-Ray.
This overrides :func:`ARenderProducts.get_renderer_prefix()` as
we must add `<aov>` token manually.
we must add `<aov>` token manually. This is done only for
non-multipart outputs, where `<aov>` token doesn't make sense.
See also:
:func:`ARenderProducts.get_renderer_prefix()`
"""
prefix = super(RenderProductsVray, self).get_renderer_prefix()
if self.multipart:
return prefix
aov_separator = self._get_aov_separator()
prefix = "{}{}<aov>".format(prefix, aov_separator)
return prefix
@@ -974,15 +977,18 @@ class RenderProductsRedshift(ARenderProducts):
"""Get image prefix for Redshift.
This overrides :func:`ARenderProducts.get_renderer_prefix()` as
we must add `<aov>` token manually.
we must add `<aov>` token manually. This is done only for
non-multipart outputs, where `<aov>` token doesn't make sense.
See also:
:func:`ARenderProducts.get_renderer_prefix()`
"""
file_prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
separator = self.extract_separator(file_prefix)
prefix = "{}{}<aov>".format(file_prefix, separator or "_")
prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
if self.multipart:
return prefix
separator = self.extract_separator(prefix)
prefix = "{}{}<aov>".format(prefix, separator or "_")
return prefix
def get_render_products(self):


@@ -29,7 +29,7 @@ class RenderSettings(object):
_image_prefixes = {
'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"], # noqa
'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"], # noqa
'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
'renderman': '<Scene>/<layer>/<layer>{aov_separator}<aov>',
'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"] # noqa
}


@@ -12,6 +12,7 @@ class CreateAnimation(plugin.Creator):
family = "animation"
icon = "male"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateAnimation, self).__init__(*args, **kwargs)
@@ -24,7 +25,7 @@ class CreateAnimation(plugin.Creator):
# Write vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False
self.data["writeFaceSets"] = self.write_face_sets
# Include only renderable visible shapes.
# Skips locators and empty transforms


@@ -9,13 +9,14 @@ class CreateModel(plugin.Creator):
family = "model"
icon = "cube"
defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
# Vertex colors with the geometry
self.data["writeColorSets"] = False
self.data["writeFaceSets"] = False
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = self.write_face_sets
# Include attributes by attribute name or prefix
self.data["attr"] = ""


@@ -12,6 +12,7 @@ class CreatePointCache(plugin.Creator):
family = "pointcache"
icon = "gears"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)
@@ -21,7 +22,8 @@ class CreatePointCache(plugin.Creator):
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False # Vertex colors with the geometry.
# Vertex colors with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
self.data["renderableOnly"] = False # Only renderable visible shapes
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups


@@ -13,22 +13,14 @@ from openpype.settings import (
get_system_settings,
get_project_settings,
)
from openpype.lib import requests_get
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api import (
lib,
lib_rendersettings,
plugin
)
from openpype.lib import requests_get
from openpype.api import (
get_system_settings,
get_project_settings)
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.pipeline import (
CreatorError,
legacy_io,
)
from openpype.pipeline.context_tools import get_current_project_asset
class CreateRender(plugin.Creator):


@@ -34,14 +34,15 @@ class ExtractLayout(publish.Extractor):
for asset in cmds.sets(str(instance), query=True):
# Find the container
grp_name = asset.split(':')[0]
containers = cmds.ls(f"{grp_name}*_CON")
containers = cmds.ls("{}*_CON".format(grp_name))
assert len(containers) == 1, \
f"More than one container found for {asset}"
"More than one container found for {}".format(asset)
container = containers[0]
representation_id = cmds.getAttr(f"{container}.representation")
representation_id = cmds.getAttr(
"{}.representation".format(container))
representation = get_representation_by_id(
project_name,
@@ -56,7 +57,8 @@
json_element = {
"family": family,
"instance_name": cmds.getAttr(f"{container}.name"),
"instance_name": cmds.getAttr(
"{}.namespace".format(container)),
"representation": str(representation_id),
"version": str(version_id)
}


@@ -77,8 +77,10 @@ class ExtractPlayblast(publish.Extractor):
preset['height'] = asset_height
preset['start_frame'] = start
preset['end_frame'] = end
camera_option = preset.get("camera_option", {})
camera_option["depthOfField"] = cmds.getAttr(
# Enforce persisting camera depth of field
camera_options = preset.setdefault("camera_options", {})
camera_options["depthOfField"] = cmds.getAttr(
"{0}.depthOfField".format(camera))
stagingdir = self.staging_dir(instance)


@@ -1,5 +1,6 @@
import os
import glob
import tempfile
import capture
@@ -81,9 +82,17 @@ class ExtractThumbnail(publish.Extractor):
elif asset_width and asset_height:
preset['width'] = asset_width
preset['height'] = asset_height
stagingDir = self.staging_dir(instance)
# Create temp directory for thumbnail
# - this is to avoid "override" of source file
dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
self.log.debug(
"Create temp directory {} for thumbnail".format(dst_staging)
)
# Store new staging to cleanup paths
instance.context.data["cleanupFullPaths"].append(dst_staging)
filename = "{0}".format(instance.name)
path = os.path.join(stagingDir, filename)
path = os.path.join(dst_staging, filename)
self.log.info("Outputting images to %s" % path)
@@ -137,7 +146,7 @@
'name': 'thumbnail',
'ext': 'jpg',
'files': thumbnail,
"stagingDir": stagingDir,
"stagingDir": dst_staging,
"thumbnail": True
}
instance.data["representations"].append(representation)


@@ -118,7 +118,7 @@ def preview_fname(folder, scene, layer, padding, ext):
"""
# Following hardcoded "<Scene>/<Scene>_<Layer>/<Layer>"
output = "maya/{scene}/{layer}/{layer}.{number}.{ext}".format(
output = "{scene}/{layer}/{layer}.{number}.{ext}".format(
scene=scene,
layer=layer,
number="#" * padding,


@@ -22,10 +22,10 @@ def get_redshift_image_format_labels():
class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""Validates the global render settings
* File Name Prefix must start with: `maya/<Scene>`
* File Name Prefix must start with: `<Scene>`
all other tokens are customizable, but sane values for Arnold are:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
`<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
<Camera> token is supported also, useful for multiple renderable
cameras per render layer.
@@ -64,12 +64,12 @@
}
ImagePrefixTokens = {
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'vray': 'maya/<Scene>/<Layer>/<Layer>',
'mentalray': '<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'arnold': '<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'redshift': '<Scene>/<RenderLayer>/<RenderLayer>',
'vray': '<Scene>/<Layer>/<Layer>',
'renderman': '<layer>{aov_separator}<aov>.<f4>.<ext>',
'mayahardware2': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'mayahardware2': '<Scene>/<RenderLayer>/<RenderLayer>',
}
_aov_chars = {
@@ -80,7 +80,7 @@
redshift_AOV_prefix = "<BeautyPath>/<BeautyFile>{aov_separator}<RenderPass>" # noqa: E501
renderman_dir_prefix = "maya/<scene>/<layer>"
renderman_dir_prefix = "<scene>/<layer>"
R_AOV_TOKEN = re.compile(
r'%a|<aov>|<renderpass>', re.IGNORECASE)
@@ -90,8 +90,8 @@
R_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
DEFAULT_PADDING = 4
VRAY_PREFIX = "maya/<Scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
VRAY_PREFIX = "<Scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):
@@ -123,7 +123,6 @@
prefix = prefix.replace(
"{aov_separator}", instance.data.get("aovSeparator", "_"))
required_prefix = "maya/<scene>"
default_prefix = cls.ImagePrefixTokens[renderer]
if not anim_override:
@@ -131,15 +130,6 @@
cls.log.error("Animation needs to be enabled. Use the same "
"frame for start and end to render single frame")
if renderer != "renderman" and not prefix.lower().startswith(
required_prefix):
invalid = True
cls.log.error(
("Wrong image prefix [ {} ] "
" - doesn't start with: '{}'").format(
prefix, required_prefix)
)
if not re.search(cls.R_LAYER_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "


@@ -1,22 +1,24 @@
#### Basic setup
## Basic setup
- Install [latest DaVinci Resolve](https://sw.blackmagicdesign.com/DaVinciResolve/v16.2.8/DaVinci_Resolve_Studio_16.2.8_Windows.zip?Key-Pair-Id=APKAJTKA3ZJMJRQITVEA&Signature=EcFuwQFKHZIBu2zDj5LTCQaQDXcKOjhZY7Fs07WGw24xdDqfwuALOyKu+EVzDX2Tik0cWDunYyV0r7hzp+mHmczp9XP4YaQXHdyhD/2BGWDgiMsiTQbNkBgbfy5MsAMFY8FHCl724Rxm8ke1foWeUVyt/Cdkil+ay+9sL72yFhaSV16sncko1jCIlCZeMkHhbzqPwyRuqLGmxmp8ey9KgBhI3wGFFPN201VMaV+RHrpX+KAfaR6p6dwo3FrPbRHK9TvMI1RA/1lJ3fVtrkDW69LImIKAWmIxgcStUxR9/taqLOD66FNiflHd1tufHv3FBa9iYQsjb3VLMPx7OCwLyg==&Expires=1608308139)
- add an absolute path to ffmpeg into the OpenPype settings
![image](https://user-images.githubusercontent.com/40640033/102630786-43294f00-414d-11eb-98de-f0ae51f62077.png)
- install Python 3.6 into `%LOCALAPPDATA%/Programs/Python/Python36` (only respected path by Resolve)
- install OpenTimelineIO for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/`. I was building it on Win10 machine with Visual Studio Community 2019 and
- Currently supported versions go up to v18
- install Python 3.6.2 (latest tested with v17) or up to 3.9.13 (latest tested with v18)
- pip install PySide2:
- Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install PySide2`
- pip install OpenTimelineIO:
- Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install OpenTimelineIO`
- Python 3.6: open terminal and go to python.exe directory, then `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. I built it on a Win10 machine with Visual Studio Community 2019 and
![image](https://user-images.githubusercontent.com/40640033/102792588-ffcb1c80-43a8-11eb-9c6b-bf2114ed578e.png) with installed CMake in PATH.
- install PySide2 for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2`
- make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6
![image](https://user-images.githubusercontent.com/40640033/102631545-280b0f00-414e-11eb-89fc-98ac268d209d.png)
- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add the platform-specific Python 3 path to `RESOLVE_PYTHON3_HOME`.
#### Editorial setup
## Editorial setup
This is how it looks on my testing project timeline
![image](https://user-images.githubusercontent.com/40640033/102637638-96ec6600-4156-11eb-9656-6e8e3ce4baf8.png)
Notice I renamed the tracks to `main` (holding metadata markers) and `review` (used for generating review data with ffmpeg conversion to a jpg sequence).
1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/**__OpenPype_Menu__**
1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__**
2. then select any clips in `main` track and change their color to `Chocolate`
3. in OpenPype Menu select `Create`
4. in Creator select `Create Publishable Clip [New]` (temporary name)


@@ -1,189 +0,0 @@
Updated as of 08 March 2019
--------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import modules for scripting access (DaVinciResolve.py) and some representative examples.
Overview
--------
As with Blackmagic Design Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page, or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when allowing scripting access from outside of the Resolve application.
Using a script
--------------
DaVinci Resolve needs to be running for a script to be invoked.
For a Resolve script to be executed from an external folder, the script needs to know of the API location.
You may need to set these environment variables to allow your Python installation to pick up the appropriate dependencies, as shown below:
Mac OS X:
RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting/"
RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so"
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
Windows:
RESOLVE_SCRIPT_API="%PROGRAMDATA%\\Blackmagic Design\\DaVinci Resolve\\Support\\Developer\\Scripting\\"
RESOLVE_SCRIPT_LIB="C:\\Program Files\\Blackmagic Design\\DaVinci Resolve\\fusionscript.dll"
PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\\Modules\\"
Linux:
RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting/"
RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so"
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
(Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve)
As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console.
On startup, DaVinci Resolve scans the Utility Scripts directory and enumerates the scripts found in the Script application menu. Placing your script in this folder and invoking it from this menu is the easiest way to use scripts. The Utility Scripts folder is located in:
Mac OS X: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp/
Windows: %APPDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts\Comp\
Linux: /opt/resolve/Fusion/Scripts/Comp/ (or /home/resolve/Fusion/Scripts/Comp/ depending on installation)
The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6 and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual.
This example Python script creates a simple project:
#!/usr/bin/env python
import DaVinciResolveScript as dvr_script
resolve = dvr_script.scriptapp("Resolve")
fusion = resolve.Fusion()
projectManager = resolve.GetProjectManager()
projectManager.CreateProject("Hello World")
The resolve object is the fundamental starting point for scripting via Resolve. As a native object, it can be inspected for further scriptable properties - using table iteration and `getmetatable` in Lua and dir, help etc in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality.
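For instance, continuing the example script above, the API surface can be explored interactively (illustrative only):

    print(dir(resolve))                 # list scriptable properties and methods
    help(projectManager.CreateProject)  # built-in Python help for any call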
Running DaVinci Resolve in headless mode
----------------------------------------
DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled. However, the various scripting APIs will continue to work as expected.
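For example, on Windows this could look like the following; the executable path is an assumption based on the default install location mentioned above:

    "C:\Program Files\Blackmagic Design\DaVinci Resolve\Resolve.exe" -nogui

Scripts can then connect to the running instance exactly as in the example above.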
Basic Resolve API
-----------------
Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions.
Resolve
Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
GetMediaStorage() --> MediaStorage # Returns media storage object to query and act on media locations.
GetProjectManager() --> ProjectManager # Returns project manager object for currently open database.
OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "edit", "fusion", "color", "fairlight", "deliver").
ProjectManager
CreateProject(projectName) --> Project # Creates and returns a project if projectName (text) is unique, and None if it is not.
LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (text) if there is a match found, and None if there is no matching Project.
GetCurrentProject() --> Project # Returns the currently loaded Resolve project.
SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful.
CreateFolder(folderName) --> Bool # Creates a folder if folderName (text) is unique.
GetProjectsInCurrentFolder() --> [project names...] # Returns an array of project names in current folder.
GetFoldersInCurrentFolder() --> [folder names...] # Returns an array of folder names in current folder.
GotoRootFolder() --> Bool # Opens root folder in database.
GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
OpenFolder(folderName) --> Bool # Opens folder under given name.
ImportProject(filePath) --> Bool # Imports a project under given file path. Returns true in case of success.
ExportProject(projectName, filePath) --> Bool # Exports a project based on given name into provided file path. Returns true in case of success.
RestoreProject(filePath) --> Bool # Restores a project under given backup file path. Returns true in case of success.
Project
GetMediaPool() --> MediaPool # Returns the Media Pool object.
GetTimelineCount() --> int # Returns the number of timelines currently present in the project.
GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
GetName() --> string # Returns project name.
SetName(projectName) --> Bool # Sets project name if given projectname (text) is unique.
GetPresets() --> [presets...] # Returns a table of presets and their information.
SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
GetRenderJobs() --> [render jobs...] # Returns a table of render jobs and their information.
GetRenderPresets() --> [presets...] # Returns a table of render presets and their information.
StartRendering(index1, index2, ...) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs.
StartRendering([idxs...]) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs.
StopRendering() --> None # Stops rendering for all render jobs.
IsRenderingInProgress() --> Bool # Returns True if rendering is in progress.
AddRenderJob() --> Bool # Adds render job to render queue.
DeleteRenderJobByIndex(idx) --> Bool # Deletes render job based on given job index (int).
DeleteAllRenderJobs() --> Bool # Deletes all render jobs.
LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (text) exists.
SaveAsNewRenderPreset(presetName) --> Bool # Creates a new render preset by given name if presetName(text) is unique.
SetRenderSettings([settings map]) --> Bool # Sets given settings for rendering. Settings map is a map, keys of map are: "SelectAllFrames", "MarkIn", "MarkOut", "TargetDir", "CustomName".
GetRenderJobStatus(idx) --> [status info] # Returns job status and completion rendering percentage of the job by given job index (int).
GetSetting(settingName) --> string # Returns setting value by given settingName (string) if the setting exists. With an empty settingName the function returns a full list of settings.
SetSetting(settingName, settingValue) --> Bool # Sets project setting base on given name (string) and value (string).
GetRenderFormats() --> [render formats...]# Returns a list of available render formats.
GetRenderCodecs(renderFormat) --> [render codecs...] # Returns a list of available codecs for given render format (string).
GetCurrentRenderFormatAndCodec() --> [format, codec] # Returns currently selected render format and render codec.
SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering.
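Taken together, a hedged sketch of queueing and running a render using only the calls listed above (the project name and render settings are placeholders):

    projectManager = resolve.GetProjectManager()
    project = projectManager.LoadProject("Hello World")

    # Point the output at a target directory, queue one job and start it
    project.SetRenderSettings({"TargetDir": "/tmp/renders", "CustomName": "demo"})
    project.AddRenderJob()
    project.StartRendering()

    while project.IsRenderingInProgress():
        pass  # poll until all queued jobs have finished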
MediaStorage
GetMountedVolumes() --> [paths...] # Returns an array of folder paths corresponding to mounted volumes displayed in Resolve's Media Storage.
GetSubFolders(folderPath) --> [paths...] # Returns an array of folder paths in the given absolute folder path.
GetFiles(folderPath) --> [paths...] # Returns an array of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
RevealInStorage(path) --> None # Expands and displays a given file/folder path in Resolve's Media Storage.
AddItemsToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is one or more file/folder paths.
AddItemsToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is an array of file/folder paths.
MediaPool
GetRootFolder() --> Folder # Returns the root Folder of Media Pool
AddSubFolder(folder, name) --> Folder # Adds a new subfolder under specified Folder object with the given name.
CreateEmptyTimeline(name) --> Timeline # Adds a new timeline with given name.
AppendToTimeline(clip1, clip2...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
CreateTimelineFromClips(name, clip1, clip2, ...)--> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects.
ImportTimelineFromFile(filePath) --> Timeline # Creates timeline based on parameters within given file.
GetCurrentFolder() --> Folder # Returns currently selected Folder.
SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
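A short sketch combining the MediaStorage and MediaPool calls above (the source path is a placeholder):

    mediaStorage = resolve.GetMediaStorage()
    mediaPool = project.GetMediaPool()

    # Import footage from disk, then build a timeline from the new clips
    clips = mediaStorage.AddItemsToMediaPool("/path/to/footage")
    timeline = mediaPool.CreateTimelineFromClips("Review Timeline", clips)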
Folder
GetClips() --> [clips...] # Returns a list of clips (items) within the folder.
GetName() --> string # Returns user-defined name of the folder.
GetSubFolders() --> [folders...] # Returns a list of subfolders in the folder.
MediaPoolItem
GetMetadata(metadataType) --> [[types],[values]] # Returns a value of metadataType. If parameter is not specified returns all set metadata parameters.
SetMetadata(metadataType, metadataValue) --> Bool # Sets metadata by given type and value. Returns True if successful.
GetMediaId() --> string # Returns a unique ID name related to MediaPoolItem.
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
AddFlag(color) --> Bool # Adds a flag with given color (text).
GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item.
GetClipColor() --> string # Returns an item color as a string.
GetClipProperty(propertyName) --> [[types],[values]] # Returns property value related to the item based on given propertyName (string). if propertyName is empty then it returns a full list of properties.
SetClipProperty(propertyName, propertyValue) --> Bool # Sets into given propertyName (string) propertyValue (string).
Timeline
GetName() --> string # Returns user-defined name of the timeline.
SetName(timelineName) --> Bool # Sets timeline name if timelineName (text) is unique.
GetStartFrame() --> int # Returns frame number at the start of timeline.
GetEndFrame() --> int # Returns frame number at the end of timeline.
GetTrackCount(trackType) --> int # Returns a number of track based on specified track type ("audio", "video" or "subtitle").
GetItemsInTrack(trackType, index) --> [items...] # Returns an array of Timeline items on the video or audio track (based on trackType) at specified index. 1 <= index <= GetTrackCount(trackType).
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
GetCurrentTimecode() --> string # Returns a string representing a timecode for current position of the timeline, while on Cut, Edit, Color and Deliver page.
GetCurrentVideoItem() --> item # Returns current video timeline item.
GetCurrentClipThumbnailImage() --> [width, height, format, data] # Returns raw thumbnail image data (This image data is encoded in base 64 format and the image format is RGB 8 bit) for the current media in the Color Page in the format of dictionary (in Python) and table (in Lua). Information returned includes "width", "height", "format" and "data". Example is provided in 6_get_current_media_thumbnail.py in Example folder.
TimelineItem
GetName() --> string # Returns a name of the item.
GetDuration() --> int # Returns a duration of item.
GetEnd() --> int # Returns a position of end frame.
GetFusionCompCount() --> int # Returns the number of Fusion compositions associated with the timeline item.
GetFusionCompByIndex(compIndex) --> fusionComp # Returns Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount()
GetFusionCompNames() --> [names...] # Returns a list of Fusion composition names associated with the timeline item.
GetFusionCompByName(compName) --> fusionComp # Returns Fusion composition object based on given name.
GetLeftOffset() --> int # Returns a maximum extension by frame for clip from left side.
GetRightOffset() --> int # Returns a maximum extension by frame for clip from right side.
GetStart() --> int # Returns a position of first frame.
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item.
GetClipColor() --> string # Returns an item color as a string.
AddFusionComp() --> fusionComp # Adds a new Fusion composition associated with the timeline item.
ImportFusionComp(path) --> fusionComp # Imports Fusion composition from given file path by creating and adding a new composition for the item.
ExportFusionComp(path, compIndex) --> Bool # Exports Fusion composition based on given index into provided file name path.
DeleteFusionCompByName(compName) --> Bool # Deletes Fusion composition by provided name.
LoadFusionCompByName(compName) --> fusionComp # Loads Fusion composition by provided name and sets it as active composition.
RenameFusionCompByName(oldName, newName) --> Bool # Renames Fusion composition by provided name with new given name.
AddVersion(versionName, versionType) --> Bool # Adds a new Version associated with the timeline item. versionType: 0 - local, 1 - remote.
DeleteVersionByName(versionName, versionType) --> Bool # Deletes Version by provided name. versionType: 0 - local, 1 - remote.
LoadVersionByName(versionName, versionType) --> Bool # Loads Version by provided name and sets it as active Version. versionType: 0 - local, 1 - remote.
RenameVersionByName(oldName, newName, versionType)--> Bool # Renames Version by provided name with new given name. versionType: 0 - local, 1 - remote.
GetMediaPoolItem() --> MediaPoolItem # Returns a corresponding to the timeline item media pool item if it exists.
GetVersionNames(versionType) --> [strings...] # Returns a list of version names by provided versionType: 0 - local, 1 - remote.
GetStereoConvergenceValues() --> [offset, value] # Returns a table of keyframe offsets and respective convergence values
GetStereoLeftFloatingWindowParams() --> [offset, value] # For the LEFT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values
GetStereoRightFloatingWindowParams() --> [offset, value] # For the RIGHT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values
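Finally, a sketch of the marker calls shared by Timeline and TimelineItem above (the frame offset and color name are arbitrary examples):

    timeline = project.GetCurrentTimeline()

    # Add a one-frame marker 86 frames into the current timeline
    timeline.AddMarker(86, "Blue", "Intro", "start of cut", 1)
    print(timeline.GetMarkers())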


@@ -1,5 +1,5 @@
Updated as of 20 October 2020
-----------------------------
Updated as of 9 May 2022
----------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import
modules for scripting access (DaVinciResolve.py) and some representative examples.
@@ -89,12 +89,25 @@ Resolve
Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations.
GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database.
OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None).
GetProductName() --> string # Returns product name.
GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format.
GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format.
LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'.
UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout.
ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'.
DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'.
SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'.
ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename.
Quit() --> None # Quits the Resolve App.
ProjectManager
ArchiveProject(projectName,
filePath,
isArchiveSrcMedia=True,
isArchiveRenderCache=True,
isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments
CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not.
DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded
LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project.
@@ -109,9 +122,9 @@ ProjectManager
GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
GetCurrentFolder() --> string # Returns the current folder name.
OpenFolder(folderName) --> Bool # Opens folder under given name.
ImportProject(filePath) --> Bool # Imports a project from the file path provided. Returns True if successful.
ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful.
ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success.
RestoreProject(filePath) --> Bool # Restores a project from the file path provided. Returns True if successful.
RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful.
GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection
GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve
SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project.
@@ -125,8 +138,9 @@ Project
GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
GetGallery() --> Gallery # Returns the Gallery object.
GetName() --> string # Returns project name.
SetName(projectName) --> Bool # Sets project name if given projectname (string) is unique.
SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique.
GetPresetList() --> [presets...] # Returns a list of presets and their information.
SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job.
@@ -144,27 +158,7 @@ Project
LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists.
SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName(string) is unique.
SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys:
# "SelectAllFrames": Bool
# "MarkIn": int
# "MarkOut": int
# "TargetDir": string
# "CustomName": string
# "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
# "ExportVideo": Bool
# "ExportAudio": Bool
# "FormatWidth": int
# "FormatHeight": int
# "FrameRate": float (examples: 23.976, 24)
# "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
# "VideoQuality" possible values for current codec (if applicable):
# 0 (int) - will set quality to automatic
# [1 -> MAX] (int) - will set input bit rate
# ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level
# "AudioCodec": string (example: "aac")
# "AudioBitDepth": int
# "AudioSampleRate": int
# "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign")
# "GammaTag" : string (example: "Same as Project", "ACEScct")
# Refer to "Looking up render settings" section for information for supported settings
GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string).
GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information.
SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). Check the section below for more information.
@@ -176,12 +170,13 @@ Project
SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip.
GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height".
RefreshLUTList() --> Bool # Refreshes LUT List
GetUniqueId() --> string # Returns a unique ID for the project item
MediaStorage
GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolve's Media Storage.
GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path.
GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
RevealInStorage(path) --> None # Expands and displays given file/folder path in Resolve's Media Storage.
RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolve's Media Storage.
AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created.
AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful.
@ -190,10 +185,11 @@ MediaStorage
MediaPool
GetRootFolder() --> Folder # Returns root Folder of Media Pool
AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name.
RefreshFolders() --> Bool # Updates the folders in collaboration mode
CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name.
AppendToTimeline(clip1, clip2, ...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([{clipInfo}, ...]) --> Bool # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int).
AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only). Returns the list of appended timelineItems.
CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int).
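As a minimal Python sketch of the append calls above (assuming `resolve` was obtained from the scripting environment and the current project contains at least two clips; all indices and frame values are placeholders):

    project = resolve.GetProjectManager().GetCurrentProject()
    media_pool = project.GetMediaPool()
    clips = media_pool.GetRootFolder().GetClipList()

    # Append a whole clip, then a trimmed range of another clip.
    appended = media_pool.AppendToTimeline([clips[0]])
    appended += media_pool.AppendToTimeline([
        {"mediaPoolItem": clips[1], "startFrame": 0, "endFrame": 23}
    ])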
@ -202,6 +198,8 @@ MediaPool
# "importSourceClips": Bool, specifies whether source clips should be imported, True by default
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True
# "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False
# "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. valid only for AAF import
DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool.
GetCurrentFolder() --> Folder # Returns currently selected Folder.
SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool
@ -214,19 +212,26 @@ MediaPool
RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path.
UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips.
ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created.
# Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on.
# Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx".
ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format.
# If no clips are specified, all clips from media pool will be used.
GetUniqueId() --> string # Returns a unique ID for the media pool
Folder
GetClipList() --> [clips...] # Returns a list of clips (items) within the folder.
GetName() --> string # Returns the media folder name.
GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder.
GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise
GetUniqueId() --> string # Returns a unique ID for the media pool folder
MediaPoolItem
GetName() --> string # Returns the clip name.
GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'.
# If no argument is specified, a dict of all set metadata properties is returned.
SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful.
SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful.
GetMediaId() --> string # Returns the unique ID for the MediaPoolItem.
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
customData)
@ -248,15 +253,18 @@ MediaPoolItem
GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'.
# If no argument is specified, a dict of all clip properties is returned. Check the section below for more information.
SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information.
LinkProxyMedia(propertyName) --> Bool # Links proxy media (absolute path) with the current clip.
LinkProxyMedia(proxyMediaFilePath) --> Bool # Links proxy media located at path specified by arg 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be absolute clip path.
UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip.
ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path.
GetUniqueId() --> string # Returns a unique ID for the media pool item
Timeline
GetName() --> string # Returns the timeline name.
SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful.
GetStartFrame() --> int # Returns the frame number at the start of timeline.
GetEndFrame() --> int # Returns the frame number at the end of timeline.
SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise.
GetStartTimecode() --> string # Returns the start timecode for the timeline.
GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle").
GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType).
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
@ -271,7 +279,8 @@ Timeline
DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color and Deliver pages.
GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages.
SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages.
GetCurrentVideoItem() --> item # Returns the current video timeline item.
GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page.
# An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder.
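A short, hedged sketch of decoding the returned thumbnail data in Python (assuming the Color page is open and `timeline` is the current Timeline; the 3-bytes-per-pixel check follows from the RGB 8-bit format noted above):

    import base64

    thumb = timeline.GetCurrentClipThumbnailImage()
    if thumb:
        raw = base64.b64decode(thumb["data"])
        # For 8-bit RGB this should be width * height * 3 bytes.
        print(thumb["width"], thumb["height"], thumb["format"], len(raw))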
@ -280,37 +289,30 @@ Timeline
DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success.
CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item.
CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item.
ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys:
# "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default
# "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default
# "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default
# "useSizingInfo": Bool, specifies if sizing information should be used, False by default
# "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default
# "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default
# "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True
# "sourceClipsFolders": string, list of Media Pool folder objects to search for source clips if the media is not present in current folder
Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format.
# exportType can be one of the following constants:
# resolve.EXPORT_AAF
# resolve.EXPORT_DRT
# resolve.EXPORT_EDL
# resolve.EXPORT_FCP_7_XML
# resolve.EXPORT_FCPXML_1_3
# resolve.EXPORT_FCPXML_1_4
# resolve.EXPORT_FCPXML_1_5
# resolve.EXPORT_FCPXML_1_6
# resolve.EXPORT_FCPXML_1_7
# resolve.EXPORT_FCPXML_1_8
# resolve.EXPORT_HDR_10_PROFILE_A
# resolve.EXPORT_HDR_10_PROFILE_B
# resolve.EXPORT_TEXT_CSV
# resolve.EXPORT_TEXT_TAB
# resolve.EXPORT_DOLBY_VISION_VER_2_9
# resolve.EXPORT_DOLBY_VISION_VER_4_0
# exportSubtype can be one of the following enums:
# resolve.EXPORT_NONE
# resolve.EXPORT_AAF_NEW
# resolve.EXPORT_AAF_EXISTING
# resolve.EXPORT_CDL
# resolve.EXPORT_SDL
# resolve.EXPORT_MISSING_CLIPS
# Please note that exportSubtype is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the rest of the exportTypes, exportSubtype is ignored.
# When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
# When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
# Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used.
# Refer to section "Looking up timeline exports properties" for information on the parameters.
GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information.
SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information.
InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline.
InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline.
InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline.
InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline.
InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline.
InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline.
GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object.
GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects.
GetUniqueId() --> string # Returns a unique ID for the timeline
TimelineItem
GetName() --> string # Returns the item name.
@ -323,6 +325,10 @@ TimelineItem
GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side.
GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side.
GetStart() --> int # Returns the start frame position on the timeline.
SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue"
# Refer to "Looking up Timeline item properties" for more information
GetProperty(propertyKey) --> int/[key:value] # returns the value of the specified key
# if no key is specified, the method returns a dictionary(python) or table(lua) for all supported keys
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
customData)
GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
@ -345,7 +351,8 @@ TimelineItem
DeleteFusionCompByName(compName) --> Bool # Deletes the named Fusion composition.
LoadFusionCompByName(compName) --> fusionComp # Loads the named Fusion composition as the active composition.
RenameFusionCompByName(oldName, newName) --> Bool # Renames the Fusion composition identified by oldName.
AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clipbased on versionType (0 - local, 1 - remote).
AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote).
GetCurrentVersion() --> {versionName...} # Returns the current version of the video clip. The returned value will have the keys versionName and versionType(0 - local, 1 - remote).
DeleteVersionByName(versionName, versionType) --> Bool # Deletes a color version by name and versionType (0 - local, 1 - remote).
LoadVersionByName(versionName, versionType) --> Bool # Loads a named color version as the active version. versionType: 0 - local, 1 - remote.
RenameVersionByName(oldName, newName, versionType)--> Bool # Renames the color version identified by oldName and versionType (0 - local, 1 - remote).
@ -354,12 +361,14 @@ TimelineItem
GetStereoConvergenceValues() --> {keyframes...} # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
GetStereoLeftFloatingWindowParams() --> {keyframes...} # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
GetStereoRightFloatingWindowParams() --> {keyframes...} # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
GetNumNodes() --> int # Returns the number of nodes in the current graph for the timeline item
SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
# The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
# The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
SetCDL([CDL map]) --> Bool # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
# Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
AddTake(mediaPoolItem, startFrame=0, endFrame=0) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the whole clip is added. startFrame and endFrame can be specified as extents.
AddTake(mediaPoolItem, startFrame, endFrame) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents is added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
GetSelectedTakeIndex() --> int # Returns the index of the currently selected take, or 0 if the clip is not a take selector.
GetTakesCount() --> int # Returns the number of takes in take selector, or 0 if the clip is not a take selector.
GetTakeByIndex(idx) --> {takeInfo...} # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for specified index.
@ -367,7 +376,24 @@ TimelineItem
SelectTakeByIndex(idx) --> Bool # Selects a take by index, 1 <= idx <= number of takes.
FinalizeTake() --> Bool # Finalizes take selection.
CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred.
UpdateSidecar() --> Bool # Updates sidecar file for BRAW clips or RMD file for R3D clips.
GetUniqueId() --> string # Returns a unique ID for the timeline item
Gallery
GetAlbumName(galleryStillAlbum) --> string # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
SetAlbumName(galleryStillAlbum, albumName) --> Bool # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'.
GetCurrentStillAlbum() --> galleryStillAlbum # Returns current album as a GalleryStillAlbum object.
SetCurrentStillAlbum(galleryStillAlbum) --> Bool # Sets current album to GalleryStillAlbum object 'galleryStillAlbum'.
GetGalleryStillAlbums() --> [galleryStillAlbum] # Returns the gallery albums as a list of GalleryStillAlbum objects.
GalleryStillAlbum
GetStills() --> [galleryStill] # Returns the list of GalleryStill objects in the album.
GetLabel(galleryStill) --> string # Returns the label of the galleryStill.
SetLabel(galleryStill, label) --> Bool # Sets the new 'label' to GalleryStill object 'galleryStill'.
ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm).
DeleteStills([galleryStill]) --> Bool # Deletes specified list of GalleryStill objects '[galleryStill]'.
GalleryStill # This class does not provide any API functions but the object type is used by functions in other classes.
List and Dict Data Structures
-----------------------------
@ -375,7 +401,6 @@ Beside primitive data types, Resolve's Python API mainly uses list and dict data
As Lua does not support list and dict data structures, the Lua API implements "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }.
Similarly the Lua API implements "dict" as a table with the dictionary key as first element, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }.
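For illustration, the same structures seen from Python (a sketch; `timeline` is assumed to be the current Timeline object, and the marker info keys shown are taken from the AddMarker/GetMarkers descriptions above):

    markers = timeline.GetMarkers()  # dict: frameId -> {marker information}
    for frame_id, info in markers.items():
        print(frame_id, info["name"], info["color"])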
Looking up Project and Clip properties
--------------------------------------
This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and
@ -412,6 +437,179 @@ Affects:
• x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x)
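A brief Python sketch of the round-trip mentioned above (assuming `project` is the current Project and `clip` is a MediaPoolItem; setting and property values are handled as strings):

    # Clip property round-trip, as in the 'Super Scale' note above.
    value = clip.GetClipProperty("Super Scale")
    clip.SetClipProperty("Super Scale", value)

    # The analogous project setting getter/setter pair.
    fps = project.GetSetting("timelineFrameRate")
    project.SetSetting("timelineFrameRate", fps)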
Looking up Render Settings
--------------------------
This section covers the supported settings for the method SetRenderSettings({settings})
The 'settings' parameter is a dictionary containing the following keys:
- "SelectAllFrames": Bool (when set True, the settings MarkIn and MarkOut are ignored)
- "MarkIn": int
- "MarkOut": int
- "TargetDir": string
- "CustomName": string
- "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
- "ExportVideo": Bool
- "ExportAudio": Bool
- "FormatWidth": int
- "FormatHeight": int
- "FrameRate": float (examples: 23.976, 24)
- "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
- "VideoQuality" possible values for current codec (if applicable):
- 0 (int) - will set quality to automatic
- [1 -> MAX] (int) - will set input bit rate
- ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level
- "AudioCodec": string (example: "aac")
- "AudioBitDepth": int
- "AudioSampleRate": int
- "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign")
- "GammaTag" : string (example: "Same as Project", "ACEScct")
- "ExportAlpha": Bool
- "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265.
- "MultiPassEncode": Bool. Can only be set for H.264.
- "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true.
- "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats.
Looking up timeline export properties
-------------------------------------
This section covers the parameters for the argument Export(fileName, exportType, exportSubtype).
exportType can be one of the following constants:
- resolve.EXPORT_AAF
- resolve.EXPORT_DRT
- resolve.EXPORT_EDL
- resolve.EXPORT_FCP_7_XML
- resolve.EXPORT_FCPXML_1_3
- resolve.EXPORT_FCPXML_1_4
- resolve.EXPORT_FCPXML_1_5
- resolve.EXPORT_FCPXML_1_6
- resolve.EXPORT_FCPXML_1_7
- resolve.EXPORT_FCPXML_1_8
- resolve.EXPORT_FCPXML_1_9
- resolve.EXPORT_FCPXML_1_10
- resolve.EXPORT_HDR_10_PROFILE_A
- resolve.EXPORT_HDR_10_PROFILE_B
- resolve.EXPORT_TEXT_CSV
- resolve.EXPORT_TEXT_TAB
- resolve.EXPORT_DOLBY_VISION_VER_2_9
- resolve.EXPORT_DOLBY_VISION_VER_4_0
exportSubtype can be one of the following enums:
- resolve.EXPORT_NONE
- resolve.EXPORT_AAF_NEW
- resolve.EXPORT_AAF_EXISTING
- resolve.EXPORT_CDL
- resolve.EXPORT_SDL
- resolve.EXPORT_MISSING_CLIPS
Please note that exportSubtype is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the rest of the exportTypes, exportSubtype is ignored.
When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used.
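For instance, a sketch of exporting the current timeline (file paths are placeholders; `resolve` is the scripting module instance as noted above):

    timeline = project.GetCurrentTimeline()
    ok = timeline.Export("/tmp/cut.aaf", resolve.EXPORT_AAF, resolve.EXPORT_AAF_NEW)
    # EDL exports also require a subtype, e.g. resolve.EXPORT_NONE.
    ok = timeline.Export("/tmp/cut.edl", resolve.EXPORT_EDL, resolve.EXPORT_NONE)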
Looking up Timeline item properties
-----------------------------------
This section covers additional notes for the functions "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set the properties listed below.
The supported keys with their accepted values are:
"Pan" : floating point values from -4.0*width to 4.0*width
"Tilt" : floating point values from -4.0*height to 4.0*height
"ZoomX" : floating point values from 0.0 to 100.0
"ZoomY" : floating point values from 0.0 to 100.0
"ZoomGang" : a boolean value
"RotationAngle" : floating point values from -360.0 to 360.0
"AnchorPointX" : floating point values from -4.0*width to 4.0*width
"AnchorPointY" : floating point values from -4.0*height to 4.0*height
"Pitch" : floating point values from -1.5 to 1.5
"Yaw" : floating point values from -1.5 to 1.5
"FlipX" : boolean value for flipping horizontally
"FlipY" : boolean value for flipping vertically
"CropLeft" : floating point values from 0.0 to width
"CropRight" : floating point values from 0.0 to width
"CropTop" : floating point values from 0.0 to height
"CropBottom" : floating point values from 0.0 to height
"CropSoftness" : floating point values from -100.0 to 100.0
"CropRetain" : boolean value for "Retain Image Position" checkbox
"DynamicZoomEase" : A value from the following constants
- DYNAMIC_ZOOM_EASE_LINEAR = 0
- DYNAMIC_ZOOM_EASE_IN
- DYNAMIC_ZOOM_EASE_OUT
- DYNAMIC_ZOOM_EASE_IN_AND_OUT
"CompositeMode" : A value from the following constants
- COMPOSITE_NORMAL = 0
- COMPOSITE_ADD
- COMPOSITE_SUBTRACT
- COMPOSITE_DIFF
- COMPOSITE_MULTIPLY
- COMPOSITE_SCREEN
- COMPOSITE_OVERLAY
- COMPOSITE_HARDLIGHT
- COMPOSITE_SOFTLIGHT
- COMPOSITE_DARKEN
- COMPOSITE_LIGHTEN
- COMPOSITE_COLOR_DODGE
- COMPOSITE_COLOR_BURN
- COMPOSITE_EXCLUSION
- COMPOSITE_HUE
- COMPOSITE_SATURATE
- COMPOSITE_COLORIZE
- COMPOSITE_LUMA_MASK
- COMPOSITE_DIVIDE
- COMPOSITE_LINEAR_DODGE
- COMPOSITE_LINEAR_BURN
- COMPOSITE_LINEAR_LIGHT
- COMPOSITE_VIVID_LIGHT
- COMPOSITE_PIN_LIGHT
- COMPOSITE_HARD_MIX
- COMPOSITE_LIGHTER_COLOR
- COMPOSITE_DARKER_COLOR
- COMPOSITE_FOREGROUND
- COMPOSITE_ALPHA
- COMPOSITE_INVERTED_ALPHA
- COMPOSITE_LUM
- COMPOSITE_INVERTED_LUM
"Opacity" : floating point value from 0.0 to 100.0
"Distortion" : floating point value from -1.0 to 1.0
"RetimeProcess" : A value from the following constants
- RETIME_USE_PROJECT = 0
- RETIME_NEAREST
- RETIME_FRAME_BLEND
- RETIME_OPTICAL_FLOW
"MotionEstimation" : A value from the following constants
- MOTION_EST_USE_PROJECT = 0
- MOTION_EST_STANDARD_FASTER
- MOTION_EST_STANDARD_BETTER
- MOTION_EST_ENHANCED_FASTER
- MOTION_EST_ENHANCED_BETTER
- MOTION_EST_SPEED_WRAP
"Scaling" : A value from the following constants
- SCALE_USE_PROJECT = 0
- SCALE_CROP
- SCALE_FIT
- SCALE_FILL
- SCALE_STRETCH
"ResizeFilter" : A value from the following constants
- RESIZE_FILTER_USE_PROJECT = 0
- RESIZE_FILTER_SHARPER
- RESIZE_FILTER_SMOOTHER
- RESIZE_FILTER_BICUBIC
- RESIZE_FILTER_BILINEAR
- RESIZE_FILTER_BESSEL
- RESIZE_FILTER_BOX
- RESIZE_FILTER_CATMULL_ROM
- RESIZE_FILTER_CUBIC
- RESIZE_FILTER_GAUSSIAN
- RESIZE_FILTER_LANCZOS
- RESIZE_FILTER_MITCHELL
- RESIZE_FILTER_NEAREST_NEIGHBOR
- RESIZE_FILTER_QUADRATIC
- RESIZE_FILTER_SINC
- RESIZE_FILTER_LINEAR
Values beyond the accepted range will be clipped.
width and height are the same as the UI max limits.
The arguments can be passed as a key and value pair, or they can be grouped together into a dictionary (for Python) or table (for Lua) and passed as a single argument.
Getting the value for a key that uses constants will return the number stored in the constant.
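A short sketch of both calling styles (assuming `item` is a TimelineItem; the numeric composite value follows the zero-based constants listed above):

    item.SetProperty("ZoomX", 2.0)                 # single key and value pair
    item.SetProperty({"Pan": 10.0, "Tilt": -5.0})  # grouped into a dict
    item.SetProperty("CompositeMode", 5)           # 5 == COMPOSITE_SCREEN
    pan = item.GetProperty("Pan")                  # value for one key
    all_props = item.GetProperty()                 # no key -> dict of all supported keys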
Deprecated Resolve API Functions
--------------------------------
The following API functions are deprecated.
@ -450,12 +648,12 @@ TimelineItem
Unsupported Resolve API Functions
---------------------------------
The following API (functions and paraameters) are no longer supported.
The following API (functions and parameters) are no longer supported. Use job IDs instead of indices.
Project
StartRendering(index1, index2, ...) --> Bool # Please use unique job ids (string) instead of indices.
StartRendering([idxs...]) --> Bool # Please use unique job ids (string) instead of indices.
DeleteRenderJobByIndex(idx) --> Bool # Please use unique job ids (string) instead of indices.
GetRenderJobStatus(idx) --> {status info} # Please use unique job ids (string) instead of indices.
GetSetting and SetSetting --> {} # settingName "videoMonitorUseRec601For422SDI" is no longer supported.
# Please use "videoMonitorUseMatrixOverrideFor422SDI" and "videoMonitorMatrixOverrideFor422SDI" instead.
GetSetting and SetSetting --> {} # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI.
# settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available.
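For reference, a hedged sketch of the job-id workflow that replaces the index-based calls (the status dict keys shown are assumptions based on the GetRenderJobStatus description):

    job_id = project.AddRenderJob()          # unique job id (string)
    project.StartRendering(job_id)           # job ids instead of indices
    status = project.GetRenderJobStatus(job_id)
    print(status.get("JobStatus"), status.get("CompletionPercentage"))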
View file
@ -1,10 +1,6 @@
"""
resolve api
"""
bmdvr = None
bmdvf = None
from .utils import (
get_resolve_module
)
@ -70,6 +66,9 @@ from .workio import (
from .testing_utils import TestGUI
bmdvr = None
bmdvf = None
__all__ = [
"bmdvr",
"bmdvf",
View file
@ -54,15 +54,15 @@ class OpenPypeMenu(QtWidgets.QWidget):
)
self.setWindowTitle("OpenPype")
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
inventory_btn = QtWidgets.QPushButton("Inventory...", self)
subsetm_btn = QtWidgets.QPushButton("Subset Manager...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)
publish_btn = QtWidgets.QPushButton("Publish ...", self)
load_btn = QtWidgets.QPushButton("Load ...", self)
inventory_btn = QtWidgets.QPushButton("Manager ...", self)
subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self)
libload_btn = QtWidgets.QPushButton("Library ...", self)
experimental_btn = QtWidgets.QPushButton(
"Experimental tools...", self
"Experimental tools ...", self
)
# rename_btn = QtWidgets.QPushButton("Rename", self)
# set_colorspace_btn = QtWidgets.QPushButton(
View file
@ -244,7 +244,7 @@ def on_pyblish_instance_toggled(instance, old_value, new_value):
log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
instance, old_value, new_value))
from openpype.hosts.resolve import (
from openpype.hosts.resolve.api import (
set_publish_attribute
)
View file
@ -4,13 +4,15 @@ import uuid
import qargparse
from Qt import QtWidgets, QtCore
from openpype.settings import get_current_project_settings
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
)
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.hosts import resolve
from . import lib
from .menu import load_stylesheet
class CreatorWidget(QtWidgets.QDialog):
@ -86,7 +88,7 @@ class CreatorWidget(QtWidgets.QDialog):
ok_btn.clicked.connect(self._on_ok_clicked)
cancel_btn.clicked.connect(self._on_cancel_clicked)
stylesheet = resolve.api.menu.load_stylesheet()
stylesheet = load_stylesheet()
self.setStyleSheet(stylesheet)
def _on_ok_clicked(self):
@ -438,7 +440,7 @@ class ClipLoader:
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
resolve.swap_clips(
lib.swap_clips(
timeline_item,
media_pool_item,
source_in,
@ -504,7 +506,7 @@ class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
from openpype.settings import get_current_project_settings
resolve_p_settings = get_current_project_settings().get("resolve")
self.presets = {}
if resolve_p_settings:
@ -512,13 +514,13 @@ class Creator(LegacyCreator):
self.__class__.__name__, {})
# adding basic current context resolve objects
self.project = resolve.get_current_project()
self.timeline = resolve.get_current_timeline()
self.project = lib.get_current_project()
self.timeline = lib.get_current_timeline()
if (self.options or {}).get("useSelection"):
self.selected = resolve.get_current_timeline_items(filter=True)
self.selected = lib.get_current_timeline_items(filter=True)
else:
self.selected = resolve.get_current_timeline_items(filter=False)
self.selected = lib.get_current_timeline_items(filter=False)
self.widget = CreatorWidget
View file
@ -1,31 +0,0 @@
#!/usr/bin/env python
import time
from openpype.hosts.resolve.utils import get_resolve_module
from openpype.lib import Logger
log = Logger.get_logger(__name__)
wait_delay = 2.5
wait = 0.00
ready = None
while True:
try:
# Create project and set parameters:
resolve = get_resolve_module()
pm = resolve.GetProjectManager()
if pm:
ready = None
else:
ready = True
except AttributeError:
pass
if ready is None:
time.sleep(wait_delay)
log.info(f"Waiting {wait}s for Resolve to have opened Project Manager")
wait += wait_delay
else:
print(f"Preloaded variables: \n\n\tResolve module: "
f"`resolve` > {type(resolve)} \n\tProject manager: "
f"`pm` > {type(pm)}")
break
View file
@ -1,5 +1,5 @@
import os
import platform
from openpype.lib import PreLaunchHook
from openpype.hosts.resolve.utils import setup
@ -14,35 +14,91 @@ class ResolvePrelaunch(PreLaunchHook):
app_groups = ["resolve"]
def execute(self):
current_platform = platform.system().lower()
PROGRAMDATA = self.launch_context.env.get("PROGRAMDATA", "")
RESOLVE_SCRIPT_API_ = {
"windows": (
f"{PROGRAMDATA}/Blackmagic Design/"
"DaVinci Resolve/Support/Developer/Scripting"
),
"darwin": (
"/Library/Application Support/Blackmagic Design"
"/DaVinci Resolve/Developer/Scripting"
),
"linux": "/opt/resolve/Developer/Scripting"
}
RESOLVE_SCRIPT_API = os.path.normpath(
RESOLVE_SCRIPT_API_[current_platform])
self.launch_context.env["RESOLVE_SCRIPT_API"] = RESOLVE_SCRIPT_API
RESOLVE_SCRIPT_LIB_ = {
"windows": (
"C:/Program Files/Blackmagic Design"
"/DaVinci Resolve/fusionscript.dll"
),
"darwin": (
"/Applications/DaVinci Resolve/DaVinci Resolve.app"
"/Contents/Libraries/Fusion/fusionscript.so"
),
"linux": "/opt/resolve/libs/Fusion/fusionscript.so"
}
RESOLVE_SCRIPT_LIB = os.path.normpath(
RESOLVE_SCRIPT_LIB_[current_platform])
self.launch_context.env["RESOLVE_SCRIPT_LIB"] = RESOLVE_SCRIPT_LIB
# TODO: add OTIO installation from `openpype/requirements.py`
# making sure python 3.6 is installed at provided path
py36_dir = os.path.normpath(
self.launch_context.env.get("PYTHON36_RESOLVE", ""))
assert os.path.isdir(py36_dir), (
"Python 3.6 is not installed at the provided folder path. Either "
# making sure python <3.9.* is installed at provided path
python3_home = os.path.normpath(
self.launch_context.env.get("RESOLVE_PYTHON3_HOME", ""))
assert os.path.isdir(python3_home), (
"Python 3 is not installed at the provided folder path. Either "
"make sure the `environments\resolve.json` is having correctly "
"set `PYTHON36_RESOLVE` or make sure Python 3.6 is installed "
f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`"
"set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed "
f"in given path. \nRESOLVE_PYTHON3_HOME: `{python3_home}`"
)
self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...")
self.launch_context.env["PYTHONHOME"] = python3_home
self.log.info(f"Path to Resolve Python folder: `{python3_home}`...")
# add to the python path to path
env_path = self.launch_context.env["PATH"]
self.launch_context.env["PATH"] = os.pathsep.join([
python3_home,
os.path.join(python3_home, "Scripts")
] + env_path.split(os.pathsep))
self.log.debug(f"PATH: {self.launch_context.env['PATH']}")
# add to the PYTHONPATH
env_pythonpath = self.launch_context.env["PYTHONPATH"]
self.launch_context.env["PYTHONPATH"] = os.pathsep.join([
os.path.join(python3_home, "Lib", "site-packages"),
os.path.join(RESOLVE_SCRIPT_API, "Modules"),
] + env_pythonpath.split(os.pathsep))
self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}")
RESOLVE_UTILITY_SCRIPTS_DIR_ = {
"windows": (
f"{PROGRAMDATA}/Blackmagic Design"
"/DaVinci Resolve/Fusion/Scripts/Comp"
),
"darwin": (
"/Library/Application Support/Blackmagic Design"
"/DaVinci Resolve/Fusion/Scripts/Comp"
),
"linux": "/opt/resolve/Fusion/Scripts/Comp"
}
RESOLVE_UTILITY_SCRIPTS_DIR = os.path.normpath(
RESOLVE_UTILITY_SCRIPTS_DIR_[current_platform]
)
# setting utility scripts dir for scripts syncing
us_dir = os.path.normpath(
self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
)
assert os.path.isdir(us_dir), (
"Resolve utility script dir does not exists. Either make sure "
"the `environments\resolve.json` is having correctly set "
"`RESOLVE_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n"
f"RESOLVE_UTILITY_SCRIPTS_DIR: `{us_dir}`"
)
self.log.debug(f"-- us_dir: `{us_dir}`")
self.launch_context.env["RESOLVE_UTILITY_SCRIPTS_DIR"] = (
RESOLVE_UTILITY_SCRIPTS_DIR)
# correctly format path for pre python script
pre_py_sc = os.path.normpath(
self.launch_context.env.get("PRE_PYTHON_SCRIPT", ""))
self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...")
# remove terminal coloring tags
self.launch_context.env["OPENPYPE_LOG_NO_COLORS"] = "True"
# Resolve Setup integration
setup(self.launch_context.env)
View file
@ -9,7 +9,8 @@ def setup(env):
log = Logger.get_logger("ResolveSetup")
scripts = {}
us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR")
us_dir = env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
us_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"]
us_paths = [os.path.join(
RESOLVE_ROOT_DIR,
"utility_scripts"
View file
@ -188,6 +188,7 @@ class BatchMovieCreator(TrayPublishCreator):
folders=False,
single_item=False,
extensions=self.extensions,
allow_sequences=False,
label="Filepath"
),
BoolDef(
View file
@ -35,12 +35,12 @@ class CollectMovieBatch(
"stagingDir": os.path.dirname(file_url),
"tags": []
}
instance.data["representations"].append(repre)
if creator_attributes["add_review_family"]:
repre["tags"].append("review")
instance.data["families"].append("review")
instance.data["representations"].append(repre)
instance.data["thumbnailSource"] = file_url
instance.data["source"] = file_url
View file
@ -1,5 +1,6 @@
import os
import tempfile
from pathlib import Path
import clique
import pyblish.api
@ -69,9 +70,17 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
repre_names,
representation_files_mapping
)
source_filepaths = list(set(source_filepaths))
instance.data["source"] = source
instance.data["sourceFilepaths"] = list(set(source_filepaths))
instance.data["sourceFilepaths"] = source_filepaths
# NOTE: Missing filepaths should not cause crashes (at least not here)
# - if filepaths are required they should crash on validation
if source_filepaths:
# NOTE: Original basename is not handling sequences
# - we should maybe not fill the key when sequence is used?
origin_basename = Path(source_filepaths[0]).stem
instance.data["originalBasename"] = origin_basename
self.log.debug(
(
@ -148,8 +157,11 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
))
return
item_dir = review_file_item["directory"]
first_filepath = os.path.join(item_dir, filenames[0])
filepaths = {
os.path.join(review_file_item["directory"], filename)
os.path.join(item_dir, filename)
for filename in filenames
}
source_filepaths.extend(filepaths)
@ -176,6 +188,8 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
if "review" not in instance.data["families"]:
instance.data["families"].append("review")
instance.data["thumbnailSource"] = first_filepath
review_representation["tags"].append("review")
self.log.debug("Representation {} was marked for review. {}".format(
review_representation["name"], review_path
View file
@ -0,0 +1,173 @@
"""Create instance thumbnail from "thumbnailSource" on 'instance.data'.
Output is a new representation with "thumbnail" name on the instance. If the instance
already has such a representation, the process is skipped.
This way a collector can point to a file from which the thumbnail should be
generated. This is a different approach than what the global plugin for thumbnails
does. The global plugin has specific logic which does not support this.
Todos:
No size handling. Size of input is used for output thumbnail which can
cause issues.
"""
import os
import tempfile
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_path,
get_oiio_tools_path,
is_oiio_supported,
run_subprocess,
)
class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
"""Create jpg thumbnail for instance based on 'thumbnailSource'.
Thumbnail source must be a single image or video filepath.
"""
label = "Extract Thumbnail (from source)"
# Before 'ExtractThumbnail' in global plugins
order = pyblish.api.ExtractorOrder - 0.00001
hosts = ["traypublisher"]
def process(self, instance):
subset_name = instance.data["subset"]
self.log.info(
"Processing instance with subset name {}".format(subset_name)
)
thumbnail_source = instance.data.get("thumbnailSource")
if not thumbnail_source:
self.log.debug("Thumbnail source not filled. Skipping.")
return
elif not os.path.exists(thumbnail_source):
self.log.debug(
"Thumbnail source file was not found {}. Skipping.".format(
thumbnail_source))
return
# Check if already has thumbnail created
if self._already_has_thumbnail(instance):
self.log.info("Thumbnail representation already present.")
return
# Create temp directory for thumbnail
# - this is to avoid overwriting the source file
dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
self.log.debug(
"Create temp directory {} for thumbnail".format(dst_staging)
)
# Store new staging to cleanup paths
instance.context.data["cleanupFullPaths"].append(dst_staging)
thumbnail_created = False
oiio_supported = is_oiio_supported()
self.log.info("Thumbnail source: {}".format(thumbnail_source))
src_basename = os.path.basename(thumbnail_source)
dst_filename = os.path.splitext(src_basename)[0] + ".jpg"
full_output_path = os.path.join(dst_staging, dst_filename)
if oiio_supported:
self.log.info("Trying to convert with OIIO")
# If the input can be read by OIIO then use the OIIO method for
# conversion, otherwise use ffmpeg
thumbnail_created = self.create_thumbnail_oiio(
thumbnail_source, full_output_path
)
# Try to use FFMPEG if OIIO is not supported or for cases when
# oiiotool isn't available
if not thumbnail_created:
if oiio_supported:
self.log.info((
"Converting with FFMPEG because input"
" can't be read by OIIO."
))
thumbnail_created = self.create_thumbnail_ffmpeg(
thumbnail_source, full_output_path
)
# Skip representation and try next one if wasn't created
if not thumbnail_created:
self.log.warning("Thumbanil has not been created.")
return
new_repre = {
"name": "thumbnail",
"ext": "jpg",
"files": dst_filename,
"stagingDir": dst_staging,
"thumbnail": True,
"tags": ["thumbnail"]
}
# adding representation
self.log.debug(
"Adding thumbnail representation: {}".format(new_repre)
)
instance.data["representations"].append(new_repre)
def _already_has_thumbnail(self, instance):
if "representations" not in instance.data:
self.log.warning(
"Instance does not have 'representations' key filled"
)
instance.data["representations"] = []
for repre in instance.data["representations"]:
if repre["name"] == "thumbnail":
return True
return False
def create_thumbnail_oiio(self, src_path, dst_path):
self.log.info("outputting {}".format(dst_path))
oiio_tool_path = get_oiio_tools_path()
oiio_cmd = [
oiio_tool_path,
"-a", src_path,
"-o", dst_path
]
self.log.info("Running: {}".format(" ".join(oiio_cmd)))
try:
run_subprocess(oiio_cmd, logger=self.log)
return True
except Exception:
self.log.warning(
"Failed to create thubmnail using oiiotool",
exc_info=True
)
return False
def create_thumbnail_ffmpeg(self, src_path, dst_path):
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
max_int = str(2147483647)
ffmpeg_cmd = [
ffmpeg_path,
"-y",
"-analyzeduration", max_int,
"-probesize", max_int,
"-i", src_path,
"-vframes", "1",
dst_path
]
self.log.info("Running: {}".format(" ".join(ffmpeg_cmd)))
try:
run_subprocess(ffmpeg_cmd, logger=self.log)
return True
except Exception:
self.log.warning(
"Failed to create thubmnail using ffmpeg",
exc_info=True
)
return False
View file
@ -1,47 +1,11 @@
from .communication_server import CommunicationWrapper
from . import lib
from . import launch_script
from . import workio
from . import pipeline
from . import plugin
from .pipeline import (
install,
maintained_selection,
remove_instance,
list_instances,
ls
)
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root,
TVPaintHost,
)
__all__ = (
"CommunicationWrapper",
"lib",
"launch_script",
"workio",
"pipeline",
"plugin",
"install",
"maintained_selection",
"remove_instance",
"list_instances",
"ls",
# Workfiles API
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root"
"TVPaintHost",
)
View file
@ -10,10 +10,10 @@ from Qt import QtWidgets, QtCore, QtGui
from openpype import style
from openpype.pipeline import install_host
from openpype.hosts.tvpaint.api.communication_server import (
CommunicationWrapper
from openpype.hosts.tvpaint.api import (
TVPaintHost,
CommunicationWrapper,
)
from openpype.hosts.tvpaint import api as tvpaint_host
log = logging.getLogger(__name__)
@ -30,6 +30,7 @@ def main(launch_args):
# - QApplicaiton is also main thread/event loop of the server
qt_app = QtWidgets.QApplication([])
tvpaint_host = TVPaintHost()
# Execute pipeline installation
install_host(tvpaint_host)
View file
@ -2,7 +2,7 @@ import os
import logging
import tempfile
from . import CommunicationWrapper
from .communication_server import CommunicationWrapper
log = logging.getLogger(__name__)
View file
@ -1,6 +1,5 @@
import os
import json
import contextlib
import tempfile
import logging
@ -9,7 +8,8 @@ import requests
import pyblish.api
from openpype.client import get_project, get_asset_by_name
from openpype.hosts import tvpaint
from openpype.host import HostBase, IWorkfileHost, ILoadHost
from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR
from openpype.settings import get_current_project_settings
from openpype.lib import register_event_callback
from openpype.pipeline import (
@ -26,11 +26,6 @@ from .lib import (
log = logging.getLogger(__name__)
HOST_DIR = os.path.dirname(os.path.abspath(tvpaint.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
METADATA_SECTION = "avalon"
SECTION_NAME_CONTEXT = "context"
@ -63,7 +58,10 @@ instances=2
"""
def install():
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
name = "tvpaint"
def install(self):
"""Install TVPaint-specific functionality."""
log.info("OpenPype - Installing TVPaint integration")
@ -74,19 +72,138 @@ def install():
if not os.path.exists(workdir):
os.makedirs(workdir)
plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins")
publish_dir = os.path.join(plugins_dir, "publish")
load_dir = os.path.join(plugins_dir, "load")
create_dir = os.path.join(plugins_dir, "create")
pyblish.api.register_host("tvpaint")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
pyblish.api.register_plugin_path(publish_dir)
register_loader_plugin_path(load_dir)
register_creator_plugin_path(create_dir)
registered_callbacks = (
pyblish.api.registered_callbacks().get("instanceToggled") or []
)
if on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback("instanceToggled", on_instance_toggle)
if self.on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback(
"instanceToggled", self.on_instance_toggle
)
register_event_callback("application.launched", initial_launch)
register_event_callback("application.exit", application_exit)
register_event_callback("application.launched", self.initial_launch)
register_event_callback("application.exit", self.application_exit)
def open_workfile(self, filepath):
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath.replace("\\", "/")
)
return execute_george_through_file(george_script)
def save_workfile(self, filepath=None):
if not filepath:
filepath = self.get_current_workfile()
context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
}
save_current_workfile_context(context)
# Execute george script to save workfile.
george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
return execute_george(george_script)
def work_root(self, session):
return session["AVALON_WORKDIR"]
def get_current_workfile(self):
return execute_george("tv_GetProjectName")
def workfile_has_unsaved_changes(self):
return None
def get_workfile_extensions(self):
return [".tvpp"]
def get_containers(self):
return get_containers()
def initial_launch(self):
# Set up project settings if it's the template that's launched.
# TODO also check for template creation when it's possible to define
# templates
last_workfile = os.environ.get("AVALON_LAST_WORKFILE")
if not last_workfile or os.path.exists(last_workfile):
return
log.info("Setting up project...")
set_context_settings()
def remove_instance(self, instance):
"""Remove instance from current workfile metadata.
Implementation for Subset manager tool.
"""
current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
instance_id = instance.get("uuid")
found_idx = None
if instance_id:
for idx, _inst in enumerate(current_instances):
if _inst["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
current_instances.pop(found_idx)
write_instances(current_instances)
def application_exit(self):
"""Logic related to TimerManager.
Todo:
This should be handled out of TVPaint integration logic.
"""
data = get_current_project_settings()
stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
if not stop_timer:
return
# Stop application timer.
webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
requests.post(rest_api_url)
def on_instance_toggle(self, instance, old_value, new_value):
"""Update instance data in workfile on publish toggle."""
# Review may not have a real instance in workfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = list_instances()
for idx, workfile_instance in enumerate(current_instances):
if workfile_instance["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
if "active" in current_instances[found_idx]:
current_instances[found_idx]["active"] = new_value
self.write_instances(current_instances)
def list_instances(self):
"""List all created instances from current workfile."""
return list_instances()
def write_instances(self, data):
return write_instances(data)
def containerise(
@ -116,7 +233,7 @@ def containerise(
"representation": str(context["representation"]["_id"])
}
if current_containers is None:
current_containers = ls()
current_containers = get_containers()
# Add container to containers list
current_containers.append(container_data)
@ -127,15 +244,6 @@ def containerise(
return container_data
@contextlib.contextmanager
def maintained_selection():
# TODO implement logic
try:
yield
finally:
pass
def split_metadata_string(text, chunk_length=None):
"""Split string by length.
@ -333,23 +441,6 @@ def save_current_workfile_context(context):
return write_workfile_metadata(SECTION_NAME_CONTEXT, context)
def remove_instance(instance):
"""Remove instance from current workfile metadata."""
current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
instance_id = instance.get("uuid")
found_idx = None
if instance_id:
for idx, _inst in enumerate(current_instances):
if _inst["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
current_instances.pop(found_idx)
write_instances(current_instances)
def list_instances():
"""List all created instances from current workfile."""
return get_workfile_metadata(SECTION_NAME_INSTANCES)
@ -359,12 +450,7 @@ def write_instances(data):
return write_workfile_metadata(SECTION_NAME_INSTANCES, data)
# Backwards compatibility
def _write_instances(*args, **kwargs):
return write_instances(*args, **kwargs)
def ls():
def get_containers():
output = get_workfile_metadata(SECTION_NAME_CONTAINERS)
if output:
for item in output:
@ -376,53 +462,6 @@ def ls():
return output
def on_instance_toggle(instance, old_value, new_value):
"""Update instance data in workfile on publish toggle."""
# Review may not have a real instance in workfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = list_instances()
for idx, workfile_instance in enumerate(current_instances):
if workfile_instance["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
if "active" in current_instances[found_idx]:
current_instances[found_idx]["active"] = new_value
write_instances(current_instances)
def initial_launch():
# Set up project settings if it's the template that's launched.
# TODO also check for template creation when it's possible to define
# templates
last_workfile = os.environ.get("AVALON_LAST_WORKFILE")
if not last_workfile or os.path.exists(last_workfile):
return
log.info("Setting up project...")
set_context_settings()
def application_exit():
data = get_current_project_settings()
stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
if not stop_timer:
return
# Stop application timer.
webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
requests.post(rest_api_url)
def set_context_settings(asset_doc=None):
"""Set workfile settings by asset document data.
View file
@ -4,11 +4,11 @@ import uuid
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
registered_host,
)
from openpype.hosts.tvpaint.api import (
pipeline,
lib
)
from .lib import get_layers_data
from .pipeline import get_current_workfile_context
class Creator(LegacyCreator):
@ -22,7 +22,7 @@ class Creator(LegacyCreator):
dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs)
# Change asset and name by current workfile context
workfile_context = pipeline.get_current_workfile_context()
workfile_context = get_current_workfile_context()
asset_name = workfile_context.get("asset")
task_name = workfile_context.get("task")
if "asset" not in dynamic_data and asset_name:
@ -67,10 +67,12 @@ class Creator(LegacyCreator):
self.log.debug(
"Storing instance data to workfile. {}".format(str(data))
)
return pipeline.write_instances(data)
host = registered_host()
return host.write_instances(data)
def process(self):
data = pipeline.list_instances()
host = registered_host()
data = host.list_instances()
data.append(self.data)
self.write_instances(data)
@ -108,7 +110,7 @@ class Loader(LoaderPlugin):
counter_regex = re.compile(r"_(\d{3})$")
higher_counter = 0
for layer in lib.get_layers_data():
for layer in get_layers_data():
layer_name = layer["name"]
if not layer_name.startswith(layer_name_base):
continue
View file
@ -1,58 +0,0 @@
"""Host API required for Work Files.
# TODO @iLLiCiT implement functions:
has_unsaved_changes
"""
from openpype.pipeline import (
HOST_WORKFILE_EXTENSIONS,
legacy_io,
)
from .lib import (
execute_george,
execute_george_through_file
)
from .pipeline import save_current_workfile_context
def open_file(filepath):
"""Open the scene file in Blender."""
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath.replace("\\", "/")
)
return execute_george_through_file(george_script)
def save_file(filepath):
"""Save the open scene file."""
# Store context to workfile before save
context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
}
save_current_workfile_context(context)
# Execute george script to save workfile.
george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
return execute_george(george_script)
def current_file():
"""Return the path of the open scene file."""
george_script = "tv_GetProjectName"
return execute_george(george_script)
def has_unsaved_changes():
"""Does the open scene file have unsaved changes?"""
return False
def file_extensions():
"""Return the supported file extensions for Blender scene files."""
return HOST_WORKFILE_EXTENSIONS["tvpaint"]
def work_root(session):
"""Return the default root to browse for work files."""
return session["AVALON_WORKDIR"]

View file

@@ -1,11 +1,15 @@
from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.pipeline import CreatorError
from openpype.hosts.tvpaint.api import (
plugin,
pipeline,
lib,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import (
get_layers_data,
get_groups_data,
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderlayer(plugin.Creator):
@@ -63,7 +67,7 @@ class CreateRenderlayer(plugin.Creator):
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = lib.get_layers_data()
layers_data = get_layers_data()
selected_layers = [
layer
@@ -81,8 +85,8 @@ class CreateRenderlayer(plugin.Creator):
def process(self):
self.log.debug("Query data from workfile.")
instances = pipeline.list_instances()
layers_data = lib.get_layers_data()
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking for selection groups.")
# Collect group ids from selection
@@ -109,7 +113,7 @@ class CreateRenderlayer(plugin.Creator):
self.log.debug(f"Selected group id is \"{group_id}\".")
self.data["group_id"] = group_id
group_data = lib.get_groups_data()
group_data = get_groups_data()
group_name = None
for group in group_data:
if group["group_id"] == group_id:
@@ -176,7 +180,7 @@ class CreateRenderlayer(plugin.Creator):
return
self.log.debug("Querying groups data from workfile.")
groups_data = lib.get_groups_data()
groups_data = get_groups_data()
self.log.debug("Changing name of the group.")
selected_group = None
@@ -195,7 +199,7 @@ class CreateRenderlayer(plugin.Creator):
b=selected_group["blue"],
name=new_group_name
)
lib.execute_george_through_file(rename_script)
execute_george_through_file(rename_script)
self.log.info(
f"Name of group with index {group_id}"

View file

@@ -2,10 +2,10 @@ from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.hosts.tvpaint.api import (
plugin,
pipeline,
lib,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import get_layers_data
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderPass(plugin.Creator):
@@ -54,7 +54,7 @@ class CreateRenderPass(plugin.Creator):
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = lib.layers_data()
layers_data = get_layers_data()
selected_layers = [
layer
@@ -72,8 +72,8 @@ class CreateRenderPass(plugin.Creator):
def process(self):
self.log.debug("Query data from workfile.")
instances = pipeline.list_instances()
layers_data = lib.layers_data()
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking selection.")
# Get all selected layers and their group ids

View file

@@ -1,5 +1,6 @@
import qargparse
from openpype.hosts.tvpaint.api import lib, plugin
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import execute_george_through_file
class ImportImage(plugin.Loader):
@@ -79,4 +80,4 @@ class ImportImage(plugin.Loader):
layer_name,
load_options_str
)
return lib.execute_george_through_file(george_script)
return execute_george_through_file(george_script)

View file

@@ -1,7 +1,21 @@
import collections
import qargparse
from openpype.pipeline import get_representation_context
from openpype.hosts.tvpaint.api import lib, pipeline, plugin
from openpype.pipeline import (
get_representation_context,
registered_host,
)
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import (
get_layers_data,
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import (
write_workfile_metadata,
SECTION_NAME_CONTAINERS,
containerise,
)
class LoadImage(plugin.Loader):
@@ -79,10 +93,10 @@ class LoadImage(plugin.Loader):
load_options_str
)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
loaded_layer = None
layers = lib.layers_data()
layers = get_layers_data()
for layer in layers:
if layer["name"] == layer_name:
loaded_layer = layer
@@ -95,7 +109,7 @@ class LoadImage(plugin.Loader):
layer_names = [loaded_layer["name"]]
namespace = namespace or layer_name
return pipeline.containerise(
return containerise(
name=name,
namespace=namespace,
members=layer_names,
@@ -109,7 +123,7 @@ class LoadImage(plugin.Loader):
return
if layers is None:
layers = lib.layers_data()
layers = get_layers_data()
available_ids = set(layer["layer_id"] for layer in layers)
@@ -152,14 +166,15 @@ class LoadImage(plugin.Loader):
line = "tv_layerkill {}".format(layer_id)
george_script_lines.append(line)
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
def _remove_container(self, container, members=None):
if not container:
return
representation = container["representation"]
members = self.get_members_from_container(container)
current_containers = pipeline.ls()
host = registered_host()
current_containers = host.get_containers()
pop_idx = None
for idx, cur_con in enumerate(current_containers):
cur_members = self.get_members_from_container(cur_con)
@@ -179,8 +194,8 @@ class LoadImage(plugin.Loader):
return
current_containers.pop(pop_idx)
pipeline.write_workfile_metadata(
pipeline.SECTION_NAME_CONTAINERS, current_containers
write_workfile_metadata(
SECTION_NAME_CONTAINERS, current_containers
)
def remove(self, container):
@@ -214,7 +229,7 @@ class LoadImage(plugin.Loader):
break
old_layers = []
layers = lib.layers_data()
layers = get_layers_data()
previous_layer_ids = set(layer["layer_id"] for layer in layers)
if old_layers_are_ids:
for layer in layers:
@@ -263,7 +278,7 @@ class LoadImage(plugin.Loader):
new_container = self.load(context, name, namespace, {})
new_layer_names = self.get_members_from_container(new_container)
layers = lib.layers_data()
layers = get_layers_data()
new_layers = []
for layer in layers:
@@ -304,4 +319,4 @@ class LoadImage(plugin.Loader):
# Execute george scripts if there are any
if george_script_lines:
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)

View file

@@ -1,6 +1,9 @@
import os
import tempfile
from openpype.hosts.tvpaint.api import lib, plugin
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import (
execute_george_through_file,
)
class ImportSound(plugin.Loader):
@@ -64,7 +67,7 @@ class ImportSound(plugin.Loader):
)
self.log.info("*** George script:\n{}\n***".format(george_script))
# Execute george script
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
# Read output file
lines = []

View file

@@ -11,7 +11,13 @@ from openpype.pipeline.workfile import (
get_last_workfile_with_version,
)
from openpype.pipeline.template_data import get_template_data_with_names
from openpype.hosts.tvpaint.api import lib, pipeline, plugin
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import (
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import (
get_current_workfile_context,
)
class LoadWorkfile(plugin.Loader):
@@ -26,9 +32,9 @@ class LoadWorkfile(plugin.Loader):
# Load context of current workfile as first thing
# - which context and extension has
host = registered_host()
current_file = host.current_file()
current_file = host.get_current_workfile()
context = pipeline.get_current_workfile_context()
context = get_current_workfile_context()
filepath = self.fname.replace("\\", "/")
@@ -40,7 +46,7 @@ class LoadWorkfile(plugin.Loader):
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath
)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
# Save workfile.
host_name = "tvpaint"
@@ -69,12 +75,13 @@ class LoadWorkfile(plugin.Loader):
file_template = anatomy.templates[template_key]["file"]
# Define saving file extension
extensions = host.get_workfile_extensions()
if current_file:
# Match the extension of current file
_, extension = os.path.splitext(current_file)
else:
# Fall back to the first extension supported for this host.
extension = host.file_extensions()[0]
extension = extensions[0]
data["ext"] = extension
@@ -83,7 +90,7 @@ class LoadWorkfile(plugin.Loader):
folder_template, data
)
version = get_last_workfile_with_version(
work_root, file_template, data, host.file_extensions()
work_root, file_template, data, extensions
)[1]
if version is None:
@@ -97,4 +104,4 @@ class LoadWorkfile(plugin.Loader):
file_template, data
)
path = os.path.join(work_root, filename)
host.save_file(path)
host.save_workfile(path)

View file

@@ -5,7 +5,22 @@ import tempfile
import pyblish.api
from openpype.pipeline import legacy_io
from openpype.hosts.tvpaint.api import pipeline, lib
from openpype.hosts.tvpaint.api.lib import (
execute_george,
execute_george_through_file,
get_layers_data,
get_groups_data,
)
from openpype.hosts.tvpaint.api.pipeline import (
SECTION_NAME_CONTEXT,
SECTION_NAME_INSTANCES,
SECTION_NAME_CONTAINERS,
get_workfile_metadata_string,
write_workfile_metadata,
get_current_workfile_context,
list_instances,
)
class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
@@ -15,12 +30,12 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
def process(self, context, plugin):
metadata_keys = {
pipeline.SECTION_NAME_CONTEXT: {},
pipeline.SECTION_NAME_INSTANCES: [],
pipeline.SECTION_NAME_CONTAINERS: []
SECTION_NAME_CONTEXT: {},
SECTION_NAME_INSTANCES: [],
SECTION_NAME_CONTAINERS: []
}
for metadata_key, default in metadata_keys.items():
json_string = pipeline.get_workfile_metadata_string(metadata_key)
json_string = get_workfile_metadata_string(metadata_key)
if not json_string:
continue
@@ -35,7 +50,7 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
).format(metadata_key, default, json_string),
exc_info=True
)
pipeline.write_workfile_metadata(metadata_key, default)
write_workfile_metadata(metadata_key, default)
class CollectWorkfileData(pyblish.api.ContextPlugin):
@@ -45,8 +60,8 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
actions = [ResetTVPaintWorkfileMetadata]
def process(self, context):
current_project_id = lib.execute_george("tv_projectcurrentid")
lib.execute_george("tv_projectselect {}".format(current_project_id))
current_project_id = execute_george("tv_projectcurrentid")
execute_george("tv_projectselect {}".format(current_project_id))
# Collect and store current context to have reference
current_context = {
@@ -60,7 +75,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect context from workfile metadata
self.log.info("Collecting workfile context")
workfile_context = pipeline.get_current_workfile_context()
workfile_context = get_current_workfile_context()
# Store workfile context to pyblish context
context.data["workfile_context"] = workfile_context
if workfile_context:
@@ -96,7 +111,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect instances
self.log.info("Collecting instance data from workfile")
instance_data = pipeline.list_instances()
instance_data = list_instances()
context.data["workfileInstances"] = instance_data
self.log.debug(
"Instance data:\"{}".format(json.dumps(instance_data, indent=4))
@@ -104,7 +119,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect information about layers
self.log.info("Collecting layers data from workfile")
layers_data = lib.layers_data()
layers_data = get_layers_data()
layers_by_name = {}
for layer in layers_data:
layer_name = layer["name"]
@@ -120,14 +135,14 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect information about groups
self.log.info("Collecting groups data from workfile")
group_data = lib.groups_data()
group_data = get_groups_data()
context.data["groupsData"] = group_data
self.log.debug(
"Group data:\"{}".format(json.dumps(group_data, indent=4))
)
self.log.info("Collecting scene data from workfile")
workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ")
workfile_info_parts = execute_george("tv_projectinfo").split(" ")
# Project frame start - not used
workfile_info_parts.pop(-1)
@@ -139,10 +154,10 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
workfile_path = " ".join(workfile_info_parts).replace("\"", "")
# Marks return as "{frame - 1} {state} ", example "0 set".
result = lib.execute_george("tv_markin")
result = execute_george("tv_markin")
mark_in_frame, mark_in_state, _ = result.split(" ")
result = lib.execute_george("tv_markout")
result = execute_george("tv_markout")
mark_out_frame, mark_out_state, _ = result.split(" ")
scene_data = {
@@ -156,7 +171,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
"sceneMarkInState": mark_in_state == "set",
"sceneMarkOut": int(mark_out_frame),
"sceneMarkOutState": mark_out_state == "set",
"sceneStartFrame": int(lib.execute_george("tv_startframe")),
"sceneStartFrame": int(execute_george("tv_startframe")),
"sceneBgColor": self._get_bg_color()
}
self.log.debug(
@@ -188,7 +203,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
]
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
with open(output_filepath, "r") as stream:
data = stream.read()

View file

@@ -5,7 +5,13 @@ import tempfile
from PIL import Image
import pyblish.api
from openpype.hosts.tvpaint.api import lib
from openpype.hosts.tvpaint.api.lib import (
execute_george,
execute_george_through_file,
get_layers_pre_post_behavior,
get_layers_exposure_frames,
)
from openpype.hosts.tvpaint.lib import (
calculate_layers_extraction_data,
get_frame_filename_template,
@@ -61,7 +67,7 @@ class ExtractSequence(pyblish.api.Extractor):
# different way when Start Frame is not `0`
# NOTE It will be set back after rendering
scene_start_frame = instance.context.data["sceneStartFrame"]
lib.execute_george("tv_startframe 0")
execute_george("tv_startframe 0")
# Frame start/end may be stored as float
frame_start = int(instance.data["frameStart"])
@@ -113,7 +119,7 @@ class ExtractSequence(pyblish.api.Extractor):
output_filepaths_by_frame_idx, thumbnail_fullpath = result
# Change scene frame Start back to previous value
lib.execute_george("tv_startframe {}".format(scene_start_frame))
execute_george("tv_startframe {}".format(scene_start_frame))
# Sequence of one frame
if not output_filepaths_by_frame_idx:
@@ -241,7 +247,7 @@ class ExtractSequence(pyblish.api.Extractor):
george_script_lines.append(" ".join(orig_color_command))
lib.execute_george_through_file("\n".join(george_script_lines))
execute_george_through_file("\n".join(george_script_lines))
first_frame_filepath = None
output_filepaths_by_frame_idx = {}
@@ -304,8 +310,8 @@ class ExtractSequence(pyblish.api.Extractor):
return [], None
self.log.debug("Collecting pre/post behavior of individual layers.")
behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids)
exposure_frames_by_layer_id = lib.get_layers_exposure_frames(
behavior_by_layer_id = get_layers_pre_post_behavior(layer_ids)
exposure_frames_by_layer_id = get_layers_exposure_frames(
layer_ids, layers
)
extraction_data_by_layer_id = calculate_layers_extraction_data(
@@ -410,7 +416,7 @@ class ExtractSequence(pyblish.api.Extractor):
",".join(frames_to_render), layer_id, layer["name"]
))
# Let TVPaint render layer's image
lib.execute_george_through_file("\n".join(george_script_lines))
execute_george_through_file("\n".join(george_script_lines))
# Fill frames between `frame_start_index` and `frame_end_index`
self.log.debug("Filling frames not rendered frames.")

View file

@@ -1,7 +1,7 @@
import pyblish.api
from openpype.api import version_up
from openpype.hosts.tvpaint.api import workio
from openpype.lib import version_up
from openpype.pipeline import registered_host
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
@@ -17,6 +17,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
assert all(result["success"] for result in context.data["results"]), (
"Publishing not successful so version is not increased.")
host = registered_host()
path = context.data["currentFile"]
workio.save_file(version_up(path))
host.save_workfile(version_up(path))
self.log.info('Incrementing workfile version')
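A minimal sketch of what the save-and-increment above relies on, assuming `version_up` bumps the trailing version token in a workfile path (the example path is hypothetical):

    from openpype.lib import version_up

    path = "/work/sh010_compositing_v012.tvpp"
    print(version_up(path))  # -> /work/sh010_compositing_v013.tvpp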

View file

@@ -1,6 +1,9 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import pipeline
from openpype.hosts.tvpaint.api.pipeline import (
list_instances,
write_instances,
)
class FixAssetNames(pyblish.api.Action):
@@ -15,7 +18,7 @@ class FixAssetNames(pyblish.api.Action):
def process(self, context, plugin):
context_asset_name = context.data["asset"]
old_instance_items = pipeline.list_instances()
old_instance_items = list_instances()
new_instance_items = []
for instance_item in old_instance_items:
instance_asset_name = instance_item.get("asset")
@@ -25,7 +28,7 @@ class FixAssetNames(pyblish.api.Action):
):
instance_item["asset"] = context_asset_name
new_instance_items.append(instance_item)
pipeline._write_instances(new_instance_items)
write_instances(new_instance_items)
class ValidateAssetNames(pyblish.api.ContextPlugin):

View file

@@ -2,7 +2,7 @@ import json
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
from openpype.hosts.tvpaint.api.lib import execute_george
class ValidateMarksRepair(pyblish.api.Action):
@@ -15,10 +15,10 @@ class ValidateMarksRepair(pyblish.api.Action):
def process(self, context, plugin):
expected_data = ValidateMarks.get_expected_data(context)
lib.execute_george(
execute_george(
"tv_markin {} set".format(expected_data["markIn"])
)
lib.execute_george(
execute_george(
"tv_markout {} set".format(expected_data["markOut"])
)

View file

@@ -1,6 +1,6 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
from openpype.hosts.tvpaint.api.lib import execute_george
class RepairStartFrame(pyblish.api.Action):
@@ -11,7 +11,7 @@ class RepairStartFrame(pyblish.api.Action):
on = "failed"
def process(self, context, plugin):
lib.execute_george("tv_startframe 0")
execute_george("tv_startframe 0")
class ValidateStartFrame(pyblish.api.ContextPlugin):
@@ -24,7 +24,7 @@ class ValidateStartFrame(pyblish.api.ContextPlugin):
optional = True
def process(self, context):
start_frame = lib.execute_george("tv_startframe")
start_frame = execute_george("tv_startframe")
if start_frame == 0:
return

View file

@@ -1,6 +1,5 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import save_file
from openpype.pipeline import PublishXmlValidationError, registered_host
class ValidateWorkfileMetadataRepair(pyblish.api.Action):
@@ -13,8 +12,9 @@ class ValidateWorkfileMetadataRepair(pyblish.api.Action):
def process(self, context, _plugin):
"""Save current workfile which should trigger storing of metadata."""
current_file = context.data["currentFile"]
host = registered_host()
# Save file should trigger
save_file(current_file)
host.save_workfile(current_file)
class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):

View file

@@ -1,10 +1,8 @@
# -*- coding: utf-8 -*-
"""Unreal Editor OpenPype host API."""
from .plugin import (
Loader,
Creator
)
from .plugin import Loader
from .pipeline import (
install,
uninstall,
@@ -25,7 +23,6 @@ from .pipeline import (
__all__ = [
"install",
"uninstall",
"Creator",
"Loader",
"ls",
"publish",

View file

@@ -1,16 +1,7 @@
# -*- coding: utf-8 -*-
from abc import ABC
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
)
class Creator(LegacyCreator):
"""This serves as skeleton for future OpenPype specific functionality"""
defaults = ['Main']
maintain_selection = False
from openpype.pipeline import LoaderPlugin
class Loader(LoaderPlugin, ABC):

View file

@@ -8,8 +8,8 @@ from openpype.lib import (
PreLaunchHook,
ApplicationLaunchFailed,
ApplicationNotFound,
get_workfile_template_key
)
from openpype.pipeline.workfile import get_workfile_template_key
import openpype.hosts.unreal.lib as unreal_lib

View file

@@ -1,5 +1,5 @@
#include "OpenPype.h"
#include "OpenPypeStyle.h"
#include "OpenPype.h"
#include "Framework/Application/SlateApplication.h"
#include "Styling/SlateStyleRegistry.h"
#include "Slate/SlateGameResources.h"

View file

@@ -2,11 +2,11 @@ import unreal
from unreal import EditorAssetLibrary as eal
from unreal import EditorLevelLibrary as ell
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api.pipeline import instantiate
from openpype.pipeline import LegacyCreator
class CreateCamera(plugin.Creator):
class CreateCamera(LegacyCreator):
"""Layout output for character rigs"""
name = "layoutMain"

View file

@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
from unreal import EditorLevelLibrary
from openpype.hosts.unreal.api import plugin
from openpype.pipeline import LegacyCreator
from openpype.hosts.unreal.api.pipeline import instantiate
class CreateLayout(plugin.Creator):
class CreateLayout(LegacyCreator):
"""Layout output for character rigs."""
name = "layoutMain"

View file

@@ -2,9 +2,10 @@
"""Create look in Unreal."""
import unreal # noqa
from openpype.hosts.unreal.api import pipeline, plugin
from openpype.pipeline import LegacyCreator
class CreateLook(plugin.Creator):
class CreateLook(LegacyCreator):
"""Shader connections defining shape look."""
name = "unrealLook"

View file

@@ -1,10 +1,10 @@
import unreal
from openpype.hosts.unreal.api import pipeline
from openpype.hosts.unreal.api.plugin import Creator
from openpype.pipeline import LegacyCreator
class CreateRender(Creator):
class CreateRender(LegacyCreator):
"""Create instance for sequence for rendering"""
name = "unrealRender"

View file

@@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
"""Create Static Meshes as FBX geometry."""
import unreal # noqa
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api.pipeline import (
instantiate,
)
from openpype.pipeline import LegacyCreator
class CreateStaticMeshFBX(plugin.Creator):
class CreateStaticMeshFBX(LegacyCreator):
"""Static FBX geometry."""
name = "unrealStaticMeshMain"

View file

@@ -20,15 +20,11 @@ class StaticMeshAlembicLoader(plugin.Loader):
icon = "cube"
color = "orange"
def get_task(self, filename, asset_dir, asset_name, replace):
@staticmethod
def get_task(filename, asset_dir, asset_name, replace, default_conversion):
task = unreal.AssetImportTask()
options = unreal.AbcImportSettings()
sm_settings = unreal.AbcStaticMeshSettings()
conversion_settings = unreal.AbcConversionSettings(
preset=unreal.AbcConversionPreset.CUSTOM,
flip_u=False, flip_v=False,
rotation=[0.0, 0.0, 0.0],
scale=[1.0, 1.0, 1.0])
task.set_editor_property('filename', filename)
task.set_editor_property('destination_path', asset_dir)
@@ -44,13 +40,20 @@ class StaticMeshAlembicLoader(plugin.Loader):
sm_settings.set_editor_property('merge_meshes', True)
options.static_mesh_settings = sm_settings
if not default_conversion:
conversion_settings = unreal.AbcConversionSettings(
preset=unreal.AbcConversionPreset.CUSTOM,
flip_u=False, flip_v=False,
rotation=[0.0, 0.0, 0.0],
scale=[1.0, 1.0, 1.0])
options.conversion_settings = conversion_settings
options.static_mesh_settings = sm_settings
task.options = options
return task
def load(self, context, name, namespace, data):
def load(self, context, name, namespace, options):
"""Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
@@ -82,6 +85,10 @@ class StaticMeshAlembicLoader(plugin.Loader):
asset_name = "{}".format(name)
version = context.get('version').get('name')
default_conversion = False
if options.get("default_conversion"):
default_conversion = options.get("default_conversion")
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_v{version:03d}", suffix="")
@@ -91,7 +98,8 @@ class StaticMeshAlembicLoader(plugin.Loader):
if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir):
unreal.EditorAssetLibrary.make_directory(asset_dir)
task = self.get_task(self.fname, asset_dir, asset_name, False)
task = self.get_task(
self.fname, asset_dir, asset_name, False, default_conversion)
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501

View file

@@ -0,0 +1,418 @@
import json
from pathlib import Path
import unreal
from unreal import EditorLevelLibrary
from bson.objectid import ObjectId
from openpype import pipeline
from openpype.pipeline import (
discover_loader_plugins,
loaders_from_representation,
load_container,
get_representation_path,
AVALON_CONTAINER_ID,
legacy_io,
)
from openpype.api import get_current_project_settings
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as upipeline
class ExistingLayoutLoader(plugin.Loader):
"""
Load Layout for an existing scene, and match the existing assets.
"""
families = ["layout"]
representations = ["json"]
label = "Load Layout on Existing Scene"
icon = "code-fork"
color = "orange"
ASSET_ROOT = "/Game/OpenPype"
@staticmethod
def _create_container(
asset_name, asset_dir, asset, representation, parent, family
):
container_name = f"{asset_name}_CON"
container = None
if not unreal.EditorAssetLibrary.does_asset_exist(
f"{asset_dir}/{container_name}"
):
container = upipeline.create_container(container_name, asset_dir)
else:
ar = unreal.AssetRegistryHelpers.get_asset_registry()
obj = ar.get_asset_by_object_path(
f"{asset_dir}/{container_name}.{container_name}")
container = obj.get_asset()
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
# "loader": str(self.__class__.__name__),
"representation": representation,
"parent": parent,
"family": family
}
upipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
return container.get_path_name()
@staticmethod
def _get_current_level():
ue_version = unreal.SystemLibrary.get_engine_version().split('.')
ue_major = ue_version[0]
if ue_major == '4':
return EditorLevelLibrary.get_editor_world()
elif ue_major == '5':
return unreal.LevelEditorSubsystem().get_current_level()
raise NotImplementedError(
f"Unreal version {ue_major} not supported")
def _get_transform(self, ext, import_data, lasset):
conversion = unreal.Matrix.IDENTITY.transform()
fbx_tuning = unreal.Matrix.IDENTITY.transform()
basis = unreal.Matrix(
lasset.get('basis')[0],
lasset.get('basis')[1],
lasset.get('basis')[2],
lasset.get('basis')[3]
).transform()
transform = unreal.Matrix(
lasset.get('transform_matrix')[0],
lasset.get('transform_matrix')[1],
lasset.get('transform_matrix')[2],
lasset.get('transform_matrix')[3]
).transform()
# Check for the conversion settings. We cannot access
# the alembic conversion settings, so we assume that
# the maya ones have been applied.
if ext == '.fbx':
loc = import_data.import_translation
rot = import_data.import_rotation.to_vector()
scale = import_data.import_uniform_scale
conversion = unreal.Transform(
location=[loc.x, loc.y, loc.z],
rotation=[rot.x, rot.y, rot.z],
scale=[-scale, scale, scale]
)
fbx_tuning = unreal.Transform(
rotation=[180.0, 0.0, 90.0],
scale=[1.0, 1.0, 1.0]
)
elif ext == '.abc':
# This is the standard conversion settings for
# alembic files from Maya.
conversion = unreal.Transform(
location=[0.0, 0.0, 0.0],
rotation=[0.0, 0.0, 0.0],
scale=[1.0, -1.0, 1.0]
)
new_transform = (basis.inverse() * transform * basis)
return fbx_tuning * conversion.inverse() * new_transform
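The heart of the method above is a change of basis: the transform stored in the layout JSON is conjugated by the layout's basis matrix before the per-format conversion is applied. A standalone sketch of the same conjugation, using numpy as a stand-in for unreal.Matrix (the literal matrices are illustrative assumptions, and the correspondence to Unreal's Transform composition is assumed):

    import numpy as np

    basis = np.eye(4)                          # 4x4 basis from the layout JSON
    transform = np.diag([2.0, 2.0, 2.0, 1.0])  # 4x4 transform in source space
    # Express the transform in the level's basis, as in _get_transform:
    new_transform = np.linalg.inv(basis) @ transform @ basis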
def _spawn_actor(self, obj, lasset):
actor = EditorLevelLibrary.spawn_actor_from_object(
obj, unreal.Vector(0.0, 0.0, 0.0)
)
actor.set_actor_label(lasset.get('instance_name'))
smc = actor.get_editor_property('static_mesh_component')
mesh = smc.get_editor_property('static_mesh')
import_data = mesh.get_editor_property('asset_import_data')
filename = import_data.get_first_filename()
path = Path(filename)
transform = self._get_transform(
path.suffix, import_data, lasset)
actor.set_actor_transform(transform, False, True)
@staticmethod
def _get_fbx_loader(loaders, family):
name = ""
if family == 'rig':
name = "SkeletalMeshFBXLoader"
elif family == 'model' or family == 'staticMesh':
name = "StaticMeshFBXLoader"
elif family == 'camera':
name = "CameraLoader"
if name == "":
return None
for loader in loaders:
if loader.__name__ == name:
return loader
return None
@staticmethod
def _get_abc_loader(loaders, family):
name = ""
if family == 'rig':
name = "SkeletalMeshAlembicLoader"
elif family == 'model':
name = "StaticMeshAlembicLoader"
if name == "":
return None
for loader in loaders:
if loader.__name__ == name:
return loader
return None
def _load_asset(self, representation, version, instance_name, family):
valid_formats = ['fbx', 'abc']
repr_data = legacy_io.find_one({
"type": "representation",
"parent": ObjectId(version),
"name": {"$in": valid_formats}
})
repr_format = repr_data.get('name')
all_loaders = discover_loader_plugins()
loaders = loaders_from_representation(
all_loaders, representation)
loader = None
if repr_format == 'fbx':
loader = self._get_fbx_loader(loaders, family)
elif repr_format == 'abc':
loader = self._get_abc_loader(loaders, family)
if not loader:
self.log.error(f"No valid loader found for {representation}")
return []
# This option is necessary to avoid importing the assets with a
# different conversion compared to the other assets. For ABC files,
# it is in fact impossible to access the conversion settings. So,
# we must assume that the Maya conversion settings have been applied.
options = {
"default_conversion": True
}
assets = load_container(
loader,
representation,
namespace=instance_name,
options=options
)
return assets
def _process(self, lib_path):
data = get_current_project_settings()
delete_unmatched = data["unreal"]["delete_unmatched_assets"]
ar = unreal.AssetRegistryHelpers.get_asset_registry()
actors = EditorLevelLibrary.get_all_level_actors()
with open(lib_path, "r") as fp:
data = json.load(fp)
layout_data = []
# Get all the representations in the JSON from the database.
for element in data:
if element.get('representation'):
layout_data.append((
pipeline.legacy_io.find_one({
"_id": ObjectId(element.get('representation'))
}),
element
))
containers = []
actors_matched = []
for (repr_data, lasset) in layout_data:
if not repr_data:
raise AssertionError("Representation not found")
if not (repr_data.get('data') and
        repr_data.get('data').get('path')):
raise AssertionError("Representation does not have path")
if not repr_data.get('context'):
raise AssertionError("Representation does not have context")
# For every actor in the scene, check if it has a representation in
# those we got from the JSON. If so, create a container for it.
# Otherwise, remove it from the scene.
found = False
for actor in actors:
if not actor.get_class().get_name() == 'StaticMeshActor':
continue
if actor in actors_matched:
continue
# Get the original path of the file from which the asset has
# been imported.
smc = actor.get_editor_property('static_mesh_component')
mesh = smc.get_editor_property('static_mesh')
import_data = mesh.get_editor_property('asset_import_data')
filename = import_data.get_first_filename()
path = Path(filename)
if (not path.name or
path.name not in repr_data.get('data').get('path')):
continue
actor.set_actor_label(lasset.get('instance_name'))
mesh_path = Path(mesh.get_path_name()).parent.as_posix()
# Create the container for the asset.
asset = repr_data.get('context').get('asset')
subset = repr_data.get('context').get('subset')
container = self._create_container(
f"{asset}_{subset}", mesh_path, asset,
repr_data.get('_id'), repr_data.get('parent'),
repr_data.get('context').get('family')
)
containers.append(container)
# Set the transform for the actor.
transform = self._get_transform(
path.suffix, import_data, lasset)
actor.set_actor_transform(transform, False, True)
actors_matched.append(actor)
found = True
break
# If an actor has not been found for this representation,
# we check if it has been loaded already by checking all the
# loaded containers. If so, we add it to the scene. Otherwise,
# we load it.
if found:
continue
all_containers = upipeline.ls()
loaded = False
for container in all_containers:
repr = container.get('representation')
if not repr == str(repr_data.get('_id')):
continue
asset_dir = container.get('namespace')
filter = unreal.ARFilter(
class_names=["StaticMesh"],
package_paths=[asset_dir],
recursive_paths=False)
assets = ar.get_assets(filter)
for asset in assets:
obj = asset.get_asset()
self._spawn_actor(obj, lasset)
loaded = True
break
# If the asset has not been loaded yet, we load it.
if loaded:
continue
assets = self._load_asset(
lasset.get('representation'),
lasset.get('version'),
lasset.get('instance_name'),
lasset.get('family')
)
for asset in assets:
obj = ar.get_asset_by_object_path(asset).get_asset()
if not obj.get_class().get_name() == 'StaticMesh':
continue
self._spawn_actor(obj, lasset)
break
# Check if an actor was not matched to a representation.
# If so, remove it from the scene.
for actor in actors:
if not actor.get_class().get_name() == 'StaticMeshActor':
continue
if actor not in actors_matched:
self.log.warning(f"Actor {actor.get_name()} not matched.")
if delete_unmatched:
EditorLevelLibrary.destroy_actor(actor)
return containers
def load(self, context, name, namespace, options):
print("Loading Layout and Match Assets")
asset = context.get('asset').get('name')
asset_name = f"{asset}_{name}" if asset else name
container_name = f"{asset}_{name}_CON"
curr_level = self._get_current_level()
if not curr_level:
raise AssertionError("Current level not saved")
containers = self._process(self.fname)
curr_level_path = Path(
curr_level.get_outer().get_path_name()).parent.as_posix()
if not unreal.EditorAssetLibrary.does_asset_exist(
f"{curr_level_path}/{container_name}"
):
upipeline.create_container(
container=container_name, path=curr_level_path)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"asset": asset,
"namespace": curr_level_path,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"],
"loaded_assets": containers
}
upipeline.imprint(f"{curr_level_path}/{container_name}", data)
def update(self, container, representation):
asset_dir = container.get('namespace')
source_path = get_representation_path(representation)
containers = self._process(source_path)
data = {
"representation": str(representation["_id"]),
"parent": str(representation["parent"]),
"loaded_assets": containers
}
upipeline.imprint(
"{}/{}".format(asset_dir, container.get('container_name')), data)

View file

@@ -203,19 +203,6 @@ from .path_tools import (
get_project_basic_paths,
)
from .editorial import (
is_overlapping_otio_ranges,
otio_range_to_frame_range,
otio_range_with_handles,
get_media_range_with_retimes,
convert_to_padded_path,
trim_media_range,
range_from_frames,
frames_to_secons,
frames_to_timecode,
make_sequence_collection
)
from .openpype_version import (
op_version_control_available,
get_openpype_version,
@@ -383,16 +370,6 @@ __all__ = [
"validate_mongo_connection",
"OpenPypeMongoConnection",
"is_overlapping_otio_ranges",
"otio_range_with_handles",
"convert_to_padded_path",
"otio_range_to_frame_range",
"get_media_range_with_retimes",
"trim_media_range",
"range_from_frames",
"frames_to_secons",
"frames_to_timecode",
"make_sequence_collection",
"create_project_folders",
"create_workdir_extra_folders",
"get_project_basic_paths",

View file

@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
"""Content was moved to 'openpype.pipeline.publish.abstract_collect_render'.
Please change your imports as soon as possible.
File will probably be removed in OpenPype 3.14.*
"""
import warnings
from openpype.pipeline.publish import AbstractCollectRender, RenderInstance
class CollectRenderDeprecated(DeprecationWarning):
pass
warnings.simplefilter("always", CollectRenderDeprecated)
warnings.warn(
(
"Content of 'abstract_collect_render' was moved."
"\nUsing deprecated source of 'abstract_collect_render'. Content was"
" move to 'openpype.pipeline.publish.abstract_collect_render'."
" Please change your imports as soon as possible."
),
category=CollectRenderDeprecated,
stacklevel=4
)
__all__ = (
"AbstractCollectRender",
"RenderInstance"
)

View file

@@ -1,32 +0,0 @@
# -*- coding: utf-8 -*-
"""Content was moved to 'openpype.pipeline.publish.abstract_expected_files'.
Please change your imports as soon as possible.
File will probably be removed in OpenPype 3.14.*
"""
import warnings
from openpype.pipeline.publish import ExpectedFiles
class ExpectedFilesDeprecated(DeprecationWarning):
pass
warnings.simplefilter("always", ExpectedFilesDeprecated)
warnings.warn(
(
"Content of 'abstract_expected_files' was moved."
"\nUsing deprecated source of 'abstract_expected_files'. Content was"
" move to 'openpype.pipeline.publish.abstract_expected_files'."
" Please change your imports as soon as possible."
),
category=ExpectedFilesDeprecated,
stacklevel=4
)
__all__ = (
"ExpectedFiles",
)

View file

@@ -1,35 +0,0 @@
"""Content was moved to 'openpype.pipeline.publish.publish_plugins'.
Please change your imports as soon as possible.
File will probably be removed in OpenPype 3.14.*
"""
import warnings
from openpype.pipeline.publish import (
AbstractMetaInstancePlugin,
AbstractMetaContextPlugin
)
class MetaPluginsDeprecated(DeprecationWarning):
pass
warnings.simplefilter("always", MetaPluginsDeprecated)
warnings.warn(
(
"Content of 'abstract_metaplugins' was moved."
"\nUsing deprecated source of 'abstract_metaplugins'. Content was"
" moved to 'openpype.pipeline.publish.publish_plugins'."
" Please change your imports as soon as possible."
),
category=MetaPluginsDeprecated,
stacklevel=4
)
__all__ = (
"AbstractMetaInstancePlugin",
"AbstractMetaContextPlugin",
)

View file

@@ -3,11 +3,33 @@ import re
import collections
import uuid
import json
from abc import ABCMeta, abstractmethod
from abc import ABCMeta, abstractmethod, abstractproperty
import six
import clique
# Global variable which stores attribute definitions by type
# - default types are registered on import
_attr_defs_by_type = {}
def register_attr_def_class(cls):
"""Register attribute definition.
Registered definitions are currently used to deserialize data to objects.

Args:
cls (AbtractAttrDef): Non-abstract class to be registered with unique
'type' attribute.
Raises:
KeyError: When type was already registered.
"""
if cls.type in _attr_defs_by_type:
raise KeyError("Type \"{}\" was already registered".format(cls.type))
_attr_defs_by_type[cls.type] = cls
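For illustration, registering a hypothetical custom definition might look like the sketch below; the ColorDef subclass is invented here and is not part of this change:

    class ColorDef(AbtractAttrDef):
        """Hypothetical color attribute definition."""
        type = "color"

        def convert_value(self, value):
            return value if isinstance(value, str) else self.default

    register_attr_def_class(ColorDef)  # "color" data is now deserializable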
def get_attributes_keys(attribute_definitions):
"""Collect keys from list of attribute definitions.
@@ -90,6 +112,8 @@ class AbtractAttrDef:
next to value input or ahead.
"""
type_attributes = []
is_value_def = True
def __init__(
@@ -115,6 +139,16 @@ class AbtractAttrDef:
return False
return self.key == other.key
@abstractproperty
def type(self):
"""Attribute definition type also used as identifier of class.
Returns:
str: Type of attribute definition.
"""
pass
@abstractmethod
def convert_value(self, value):
"""Convert value to a valid one.
@@ -125,6 +159,35 @@ class AbtractAttrDef:
pass
def serialize(self):
"""Serialize object to data so it's possible to recreate it.
Returns:
Dict[str, Any]: Serialized object that can be passed to
'deserialize' method.
"""
data = {
"type": self.type,
"key": self.key,
"label": self.label,
"tooltip": self.tooltip,
"default": self.default,
"is_label_horizontal": self.is_label_horizontal
}
for attr in self.type_attributes:
data[attr] = getattr(self, attr)
return data
@classmethod
def deserialize(cls, data):
"""Recreate object from data.
Data can be received using 'serialize' method.
"""
return cls(**data)
# -----------------------------------------
# UI attribute definitions won't hold value
@@ -141,10 +204,12 @@ class UIDef(AbtractAttrDef):
class UISeparatorDef(UIDef):
pass
type = "separator"
class UILabelDef(UIDef):
type = "label"
def __init__(self, label):
super(UILabelDef, self).__init__(label=label)
@@ -160,6 +225,8 @@ class UnknownDef(AbtractAttrDef):
have known definition of type.
"""
type = "unknown"
def __init__(self, key, default=None, **kwargs):
kwargs["default"] = default
super(UnknownDef, self).__init__(key, **kwargs)
@@ -181,6 +248,13 @@ class NumberDef(AbtractAttrDef):
default(int, float): Default value for conversion.
"""
type = "number"
type_attributes = [
"minimum",
"maximum",
"decimals"
]
def __init__(
self, key, minimum=None, maximum=None, decimals=None, default=None,
**kwargs
@@ -252,6 +326,12 @@ class TextDef(AbtractAttrDef):
default(str, None): Default value. Empty string used when not defined.
"""
type = "text"
type_attributes = [
"multiline",
"placeholder",
]
def __init__(
self, key, multiline=None, regex=None, placeholder=None, default=None,
**kwargs
@@ -290,6 +370,11 @@ class TextDef(AbtractAttrDef):
return value
return self.default
def serialize(self):
data = super(TextDef, self).serialize()
data["regex"] = self.regex.pattern
return data
class EnumDef(AbtractAttrDef):
"""Enumeration of single item from items.
@@ -301,6 +386,8 @@ class EnumDef(AbtractAttrDef):
default: Default value. Must be one key(value) from passed items.
"""
type = "enum"
def __init__(self, key, items, default=None, **kwargs):
if not items:
raise ValueError((
@@ -335,6 +422,11 @@ class EnumDef(AbtractAttrDef):
return value
return self.default
def serialize(self):
data = super(EnumDef, self).serialize()
data["items"] = list(self.items)
return data
class BoolDef(AbtractAttrDef):
"""Boolean representation.
@@ -343,6 +435,8 @@ class BoolDef(AbtractAttrDef):
default(bool): Default value. Set to `False` if not defined.
"""
type = "bool"
def __init__(self, key, default=None, **kwargs):
if default is None:
default = False
@@ -585,6 +679,15 @@ class FileDef(AbtractAttrDef):
default(str, List[str]): Default value.
"""
type = "path"
type_attributes = [
"single_item",
"folders",
"extensions",
"allow_sequences",
"extensions_label",
]
def __init__(
self, key, single_item=True, folders=None, extensions=None,
allow_sequences=True, extensions_label=None, default=None, **kwargs
@@ -675,3 +778,71 @@ class FileDef(AbtractAttrDef):
if self.single_item:
return FileDefItem.create_empty_item().to_dict()
return []
def serialize_attr_def(attr_def):
"""Serialize attribute definition to data.
Args:
attr_def (AbtractAttrDef): Attribute definition to serialize.
Returns:
Dict[str, Any]: Serialized data.
"""
return attr_def.serialize()
def serialize_attr_defs(attr_defs):
"""Serialize attribute definitions to data.
Args:
attr_defs (List[AbtractAttrDef]): Attribute definitions to serialize.
Returns:
List[Dict[str, Any]]: Serialized data.
"""
return [
serialize_attr_def(attr_def)
for attr_def in attr_defs
]
def deserialize_attr_def(attr_def_data):
"""Deserialize attribute definition from data.
Args:
attr_def (Dict[str, Any]): Attribute definition data to deserialize.
"""
attr_type = attr_def_data.pop("type")
cls = _attr_defs_by_type[attr_type]
return cls.deserialize(attr_def_data)
def deserialize_attr_defs(attr_defs_data):
"""Deserialize attribute definitions.
Args:
List[Dict[str, Any]]: List of attribute definitions.
"""
return [
deserialize_attr_def(attr_def_data)
for attr_def_data in attr_defs_data
]
# Register attribute definitions
for _attr_class in (
UISeparatorDef,
UILabelDef,
UnknownDef,
NumberDef,
TextDef,
EnumDef,
BoolDef,
FileDef
):
register_attr_def_class(_attr_class)
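A hedged round-trip sketch of the serializers above (the module path openpype.lib.attribute_definitions is an assumption):

    from openpype.lib.attribute_definitions import (
        NumberDef, serialize_attr_defs, deserialize_attr_defs
    )

    attr_defs = [
        NumberDef("frame_start", minimum=0, maximum=10000, default=1001)
    ]
    data = serialize_attr_defs(attr_defs)    # JSON-serializable dicts
    restored = deserialize_attr_defs(data)   # back to attribute definitions
    assert restored[0].key == "frame_start"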

View file

@@ -1,41 +0,0 @@
import warnings
import functools
class ConfigDeprecatedWarning(DeprecationWarning):
pass
def deprecated(func):
"""Mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", ConfigDeprecatedWarning)
warnings.warn(
(
"Deprecated import of function '{}'."
" Class was moved to 'openpype.lib.dateutils.{}'."
" Please change your imports."
).format(func.__name__),
category=ConfigDeprecatedWarning
)
return func(*args, **kwargs)
return new_func
@deprecated
def get_datetime_data(datetime_obj=None):
from .dateutils import get_datetime_data
return get_datetime_data(datetime_obj)
@deprecated
def get_formatted_current_time():
from .dateutils import get_formatted_current_time
return get_formatted_current_time()

View file

@@ -1,102 +0,0 @@
"""Code related to editorial utility functions was moved
to 'openpype.pipeline.editorial'. Please change your imports as soon as
possible. File will probably be removed in OpenPype 3.14.*
"""
import warnings
import functools
class EditorialDeprecatedWarning(DeprecationWarning):
pass
def editorial_deprecated(func):
"""Mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", EditorialDeprecatedWarning)
warnings.warn(
(
"Call to deprecated function '{}'."
" Function was moved to 'openpype.pipeline.editorial'."
).format(func.__name__),
category=EditorialDeprecatedWarning,
stacklevel=2
)
return func(*args, **kwargs)
return new_func
@editorial_deprecated
def otio_range_to_frame_range(*args, **kwargs):
from openpype.pipeline.editorial import otio_range_to_frame_range
return otio_range_to_frame_range(*args, **kwargs)
@editorial_deprecated
def otio_range_with_handles(*args, **kwargs):
from openpype.pipeline.editorial import otio_range_with_handles
return otio_range_with_handles(*args, **kwargs)
@editorial_deprecated
def is_overlapping_otio_ranges(*args, **kwargs):
from openpype.pipeline.editorial import is_overlapping_otio_ranges
return is_overlapping_otio_ranges(*args, **kwargs)
@editorial_deprecated
def convert_to_padded_path(*args, **kwargs):
from openpype.pipeline.editorial import convert_to_padded_path
return convert_to_padded_path(*args, **kwargs)
@editorial_deprecated
def trim_media_range(*args, **kwargs):
from openpype.pipeline.editorial import trim_media_range
return trim_media_range(*args, **kwargs)
@editorial_deprecated
def range_from_frames(*args, **kwargs):
from openpype.pipeline.editorial import range_from_frames
return range_from_frames(*args, **kwargs)
@editorial_deprecated
def frames_to_secons(*args, **kwargs):
from openpype.pipeline.editorial import frames_to_seconds
return frames_to_seconds(*args, **kwargs)
@editorial_deprecated
def frames_to_timecode(*args, **kwargs):
from openpype.pipeline.editorial import frames_to_timecode
return frames_to_timecode(*args, **kwargs)
@editorial_deprecated
def make_sequence_collection(*args, **kwargs):
from openpype.pipeline.editorial import make_sequence_collection
return make_sequence_collection(*args, **kwargs)
@editorial_deprecated
def get_media_range_with_retimes(*args, **kwargs):
from openpype.pipeline.editorial import get_media_range_with_retimes
return get_media_range_with_retimes(*args, **kwargs)

View file

@@ -1,6 +1,7 @@
"""Events holding data about specific event."""
import os
import re
import copy
import inspect
import logging
import weakref
@@ -207,6 +208,12 @@ class Event(object):
@property
def source(self):
"""Event's source used for triggering callbacks.
Returns:
Union[str, None]: Source string or None. Source is optional.
"""
return self._source
@property
@@ -215,6 +222,12 @@ class Event(object):
@property
def topic(self):
"""Event's topic used for triggering callbacks.
Returns:
str: Topic string.
"""
return self._topic
def emit(self):
@@ -227,6 +240,42 @@ class Event(object):
)
self._event_system.emit_event(self)
def to_data(self):
"""Convert Event object to data.
Returns:
Dict[str, Any]: Event data.
"""
return {
"id": self.id,
"topic": self.topic,
"source": self.source,
"data": copy.deepcopy(self.data)
}
@classmethod
def from_data(cls, event_data, event_system=None):
"""Create event from data.
Args:
event_data (Dict[str, Any]): Event data with defined keys. Can be
created using 'to_data' method.
event_system (EventSystem): System to which the event belongs.
Returns:
Event: Event with attributes from passed data.
"""
obj = cls(
event_data["topic"],
event_data["data"],
event_data["source"],
event_system
)
obj._id = event_data["id"]
return obj
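A round-trip sketch of the two methods above, assuming Event can be constructed directly with (topic, data, source) and that the module is importable as openpype.lib.events:

    from openpype.lib.events import Event

    event = Event("workfile.saved", {"path": "scene_v001.tvpp"}, "tray")
    data = event.to_data()         # plain dict, safe to serialize or queue
    clone = Event.from_data(data)  # keeps the original id, topic and data
    assert clone.id == event.id and clone.topic == event.topic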
class EventSystem(object):
"""Encapsulate event handling into an object.

View file

@@ -1,10 +1,33 @@
import os
import logging
import platform
import subprocess
log = logging.getLogger("Vendor utils")
class CachedToolPaths:
"""Cache already used and discovered tools and their executables.
Discovering path can take some time and can trigger subprocesses so it's
better to cache the paths on first get.
"""
_cached_paths = {}
@classmethod
def is_tool_cached(cls, tool):
return tool in cls._cached_paths
@classmethod
def get_executable_path(cls, tool):
return cls._cached_paths.get(tool)
@classmethod
def cache_executable_path(cls, tool, path):
cls._cached_paths[tool] = path
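The cache is a simple class-level mapping; a sketch of the intended pattern, mirroring the getters further below (the tool name and path literal are hypothetical):

    if not CachedToolPaths.is_tool_cached("ffprobe"):
        CachedToolPaths.cache_executable_path("ffprobe", "/usr/bin/ffprobe")
    print(CachedToolPaths.get_executable_path("ffprobe"))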
def is_file_executable(filepath):
"""Filepath lead to executable file.
@@ -98,6 +121,7 @@ def get_vendor_bin_path(bin_app):
Returns:
str: Path to vendorized binaries folder.
"""
return os.path.join(
os.environ["OPENPYPE_ROOT"],
"vendor",
@@ -107,6 +131,123 @@
)
def find_tool_in_custom_paths(paths, tool, validation_func=None):
"""Find a tool executable in custom paths.
Args:
paths (Iterable[str]): Iterable of paths where to look for tool.
tool (str): Name of tool (binary file) to find in passed paths.
validation_func (Function): Custom validation function of path.
Function must expect one argument which is path to executable.
If not passed, only 'find_executable' is used to identify
whether the path is valid.

Returns:
Union[str, None]: Path to validated executable or None if was not
found.
"""
for path in paths:
# Skip empty strings
if not path:
continue
# Handle cases when path is just an executable
# - it allows using an executable from PATH
# - basename must match 'tool' value (without extension)
extless_path, ext = os.path.splitext(path)
if extless_path == tool:
executable_path = find_executable(tool)
if executable_path and (
validation_func is None
or validation_func(executable_path)
):
return executable_path
continue
# Normalize path because it should be a path and check if exists
normalized = os.path.normpath(path)
if not os.path.exists(normalized):
continue
# Note: Path can be both file and directory
# If path is a file validate it
if os.path.isfile(normalized):
basename, ext = os.path.splitext(os.path.basename(path))
# Check if the filename actually has the same basename as 'tool'
if basename == tool:
executable_path = find_executable(normalized)
if executable_path and (
validation_func is None
or validation_func(executable_path)
):
return executable_path
# Check if path is a directory and look for tool inside the dir
if os.path.isdir(normalized):
executable_path = find_executable(os.path.join(normalized, tool))
if executable_path and (
validation_func is None
or validation_func(executable_path)
):
return executable_path
return None
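A usage sketch mirroring the callers further below: feed the helper paths from an environment variable and an optional validator (the variable name matches the oiio lookup; the rest is illustrative):

    custom_paths_str = os.environ.get("OPENPYPE_OIIO_PATHS") or ""
    executable = find_tool_in_custom_paths(
        custom_paths_str.split(os.pathsep),
        "oiiotool",
        _oiio_executable_validation
    )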
def _check_args_returncode(args):
try:
# Python 2 compatibility where DEVNULL is not available
if hasattr(subprocess, "DEVNULL"):
proc = subprocess.Popen(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
proc.wait()
else:
with open(os.devnull, "w") as devnull:
proc = subprocess.Popen(
args, stdout=devnull, stderr=devnull,
)
proc.wait()
except Exception:
return False
return proc.returncode == 0
def _oiio_executable_validation(filepath):
"""Validate oiio tool executable if can be executed.
Validation has 2 steps. First is using 'find_executable' to fill possible
missing extension or fill directory then launch executable and validate
that it can be executed. For that is used '--help' argument which is fast
and does not need any other inputs.
Any possible crash from missing libraries or an invalid build should be
caught. The main reason is to validate that the executable can be run on
the current OS, which can be an issue on Linux machines.
Note:
It does not validate whether the executable is really an oiio tool.
Args:
filepath (str): Path to executable.
Returns:
bool: Filepath is valid executable.
"""
filepath = find_executable(filepath)
if not filepath:
return False
return _check_args_returncode([filepath, "--help"])
def get_oiio_tools_path(tool="oiiotool"):
"""Path to vendorized OpenImageIO tool executables.
@@ -117,10 +258,62 @@ def get_oiio_tools_path(tool="oiiotool"):
Default is "oiiotool".
"""
if CachedToolPaths.is_tool_cached(tool):
return CachedToolPaths.get_executable_path(tool)
custom_paths_str = os.environ.get("OPENPYPE_OIIO_PATHS") or ""
tool_executable_path = find_tool_in_custom_paths(
custom_paths_str.split(os.pathsep),
tool,
_oiio_executable_validation
)
if not tool_executable_path:
oiio_dir = get_vendor_bin_path("oiio")
if platform.system().lower() == "linux":
oiio_dir = os.path.join(oiio_dir, "bin")
return find_executable(os.path.join(oiio_dir, tool))
default_path = os.path.join(oiio_dir, tool)
if _oiio_executable_validation(default_path):
tool_executable_path = default_path
# Look to PATH for the tool
if not tool_executable_path:
from_path = find_executable(tool)
if from_path and _oiio_executable_validation(from_path):
tool_executable_path = from_path
CachedToolPaths.cache_executable_path(tool, tool_executable_path)
return tool_executable_path
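The lookup order above is: custom paths from OPENPYPE_OIIO_PATHS, then the vendored binaries, then PATH, with the winner cached. A hedged sketch (the override path is hypothetical):

    os.environ["OPENPYPE_OIIO_PATHS"] = "/opt/oiio/bin/oiiotool"
    first = get_oiio_tools_path()   # discovers, validates and caches
    second = get_oiio_tools_path()  # served from CachedToolPaths
    assert first == second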
def _ffmpeg_executable_validation(filepath):
"""Validate ffmpeg tool executable if can be executed.
Validation has 2 steps. First is using 'find_executable' to fill possible
missing extension or fill directory then launch executable and validate
that it can be executed. For that is used '-version' argument which is fast
and does not need any other inputs.
Any possible crash from missing libraries or an invalid build should be
caught. The main reason is to validate that the executable can be run on
the current OS, which can be an issue on Linux machines.
Note:
It does not validate whether the executable is really an ffmpeg tool.
Args:
filepath (str): Path to executable.
Returns:
bool: Filepath is valid executable.
"""
filepath = find_executable(filepath)
if not filepath:
return False
return _check_args_returncode([filepath, "-version"])
def get_ffmpeg_tool_path(tool="ffmpeg"):
@@ -133,10 +326,33 @@ def get_ffmpeg_tool_path(tool="ffmpeg"):
Returns:
str: Full path to ffmpeg executable.
"""
if CachedToolPaths.is_tool_cached(tool):
return CachedToolPaths.get_executable_path(tool)
custom_paths_str = os.environ.get("OPENPYPE_FFMPEG_PATHS") or ""
tool_executable_path = find_tool_in_custom_paths(
custom_paths_str.split(os.pathsep),
tool,
_ffmpeg_executable_validation
)
if not tool_executable_path:
ffmpeg_dir = get_vendor_bin_path("ffmpeg")
if platform.system().lower() == "windows":
ffmpeg_dir = os.path.join(ffmpeg_dir, "bin")
return find_executable(os.path.join(ffmpeg_dir, tool))
tool_path = find_executable(os.path.join(ffmpeg_dir, tool))
if tool_path and _ffmpeg_executable_validation(tool_path):
tool_executable_path = tool_path
# Look to PATH for the tool
if not tool_executable_path:
from_path = find_executable(tool)
if from_path and _ffmpeg_executable_validation(from_path):
tool_executable_path = from_path
CachedToolPaths.cache_executable_path(tool, tool_executable_path)
return tool_executable_path
def is_oiio_supported():

View file

@@ -32,6 +32,9 @@ from maya import cmds
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api.lib_rendersettings import RenderSettings
from openpype.hosts.maya.api.lib import get_attr_in_layer
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@@ -498,9 +501,10 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
job_info.AssetDependency += self.scene_path
# Get layer prefix
render_products = self._instance.data["renderProducts"]
layer_metadata = render_products.layer_data
layer_prefix = layer_metadata.filePrefix
renderlayer = self._instance.data["setMembers"]
renderer = self._instance.data["renderer"]
layer_prefix_attr = RenderSettings.get_image_prefix_attr(renderer)
layer_prefix = get_attr_in_layer(layer_prefix_attr, layer=renderlayer)
plugin_info = copy.deepcopy(self.plugin_info)
plugin_info.update({
@@ -762,10 +766,10 @@ def _format_tiles(
Example::
Image prefix is:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
`<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
Result for tile 0 for 4x4 will be:
`maya/<Scene>/<RenderLayer>/_tile_1x1_4x4_<RenderLayer>_<RenderPass>`
`<Scene>/<RenderLayer>/_tile_1x1_4x4_<RenderLayer>_<RenderPass>`
Calculating coordinates is tricky as in Job they are defined as top,
left, bottom, right with zero being in top-left corner. But Assembler


@@ -169,7 +169,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
thumbnail_item["thumbnail"] = True
# Create copy of item before setting location
if "delete" not in repre["tags"]:
if "delete" not in repre.get("tags", []):
src_components_to_add.append(copy.deepcopy(thumbnail_item))
# Create copy of first thumbnail
if first_thumbnail_component is None:
@@ -284,7 +284,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
not_first_components.append(review_item)
# Create copy of item before setting location
if "delete" not in repre["tags"]:
if "delete" not in repre.get("tags", []):
src_components_to_add.append(copy.deepcopy(review_item))
# Set location
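The switch to '.get' matters for representations that carry no "tags" key at all; a tiny illustration:
repre = {"name": "thumbnail"}                  # no "tags" key
# repre["tags"] would raise KeyError here
assert "delete" not in repre.get("tags", [])   # safe: falls back to []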


@@ -13,10 +13,9 @@ import functools
import itertools
import distutils.version
import hashlib
import tempfile
import appdirs
import threading
import atexit
import warnings
import requests
import requests.auth
@@ -241,7 +240,7 @@ class Session(object):
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub in (None, True):
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
@@ -252,9 +251,7 @@
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = (
auto_connect_event_hub is None
)
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
@@ -271,8 +268,9 @@
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
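With this change the schema cache location can be redirected per machine; a hypothetical override set before creating a Session:
import os
os.environ["FTRACK_API_SCHEMA_CACHE_PATH"] = "/var/tmp/ftrack_schemas"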


@@ -0,0 +1,125 @@
import webbrowser
from openpype.pipeline import LauncherAction
from openpype.modules import ModulesManager
from openpype.client import get_project, get_asset_by_name
class ShowInKitsu(LauncherAction):
name = "showinkitsu"
label = "Show in Kitsu"
icon = "external-link-square"
color = "#e0e1e1"
order = 10
@staticmethod
def get_kitsu_module():
return ModulesManager().modules_by_name.get("kitsu")
def is_compatible(self, session):
if not session.get("AVALON_PROJECT"):
return False
return True
def process(self, session, **kwargs):
# Context inputs
project_name = session["AVALON_PROJECT"]
asset_name = session.get("AVALON_ASSET", None)
task_name = session.get("AVALON_TASK", None)
project = get_project(project_name=project_name,
fields=["data.zou_id"])
if not project:
raise RuntimeError(f"Project {project_name} not found.")
project_zou_id = project["data"].get("zou_id")
if not project_zou_id:
raise RuntimeError(f"Project {project_name} has no "
f"connected kitsu id.")
asset_zou_name = None
asset_zou_id = None
asset_zou_type = 'Assets'
task_zou_id = None
zou_sub_type = ['AssetType', 'Sequence']
if asset_name:
asset_zou_name = asset_name
asset_fields = ["data.zou.id", "data.zou.type"]
if task_name:
asset_fields.append(f"data.tasks.{task_name}.zou.id")
asset = get_asset_by_name(project_name,
asset_name=asset_name,
fields=asset_fields)
asset_zou_data = asset["data"].get("zou")
if asset_zou_data:
asset_zou_type = asset_zou_data["type"]
if asset_zou_type not in zou_sub_type:
asset_zou_id = asset_zou_data["id"]
else:
asset_zou_type = asset_name
if task_name:
task_data = asset["data"]["tasks"][task_name]
task_zou_data = task_data.get("zou", {})
if not task_zou_data:
self.log.debug(f"No zou task data for task: {task_name}")
task_zou_id = task_zou_data.get("id")
# Define URL
url = self.get_url(project_id=project_zou_id,
asset_name=asset_zou_name,
asset_id=asset_zou_id,
asset_type=asset_zou_type,
task_id=task_zou_id)
# Open URL in webbrowser
self.log.info(f"Opening URL: {url}")
webbrowser.open(url,
# Try in new tab
new=2)
def get_url(self,
project_id,
asset_name=None,
asset_id=None,
asset_type=None,
task_id=None):
shots_url = {'Shots', 'Sequence', 'Shot'}
sub_type = {'AssetType', 'Sequence'}
kitsu_module = self.get_kitsu_module()
# Get kitsu url with /api stripped
kitsu_url = kitsu_module.server_url
if kitsu_url.endswith("/api"):
kitsu_url = kitsu_url[:-len("/api")]
sub_url = f"/productions/{project_id}"
asset_type_url = "Shots" if asset_type in shots_url else "Assets"
if task_id:
# Go to task page
# /productions/{project-id}/{asset_type}/tasks/{task_id}
sub_url += f"/{asset_type_url}/tasks/{task_id}"
elif asset_id:
# Go to asset or shot page
# /productions/{project-id}/assets/{entity_id}
# /productions/{project-id}/shots/{entity_id}
sub_url += f"/{asset_type_url}/{asset_id}"
else:
# Go to project page
# Project page must end with a view
# /productions/{project-id}/assets/
# Add search method if is a sub_type
sub_url += f"/{asset_type_url}"
if asset_type in sub_type:
sub_url += f'?search={asset_name}'
return f"{kitsu_url}{sub_url}"


@@ -89,7 +89,10 @@ class KitsuModule(OpenPypeModule, IPluginPaths, ITrayAction):
"""Implementation of abstract method for `IPluginPaths`."""
current_dir = os.path.dirname(os.path.abspath(__file__))
return {"publish": [os.path.join(current_dir, "plugins", "publish")]}
return {
"publish": [os.path.join(current_dir, "plugins", "publish")],
"actions": [os.path.join(current_dir, "actions")]
}
def cli(self, click_group):
click_group.add_command(cli_main)


@@ -115,7 +115,9 @@ def update_op_assets(
item_data["frameStart"] = frame_in
# Frames duration, fallback on 0
try:
frames_duration = int(item_data.pop("nb_frames", 0))
# NOTE nb_frames is stored directly in item
# because of zou's legacy design
frames_duration = int(item.get("nb_frames", 0))
except (TypeError, ValueError):
frames_duration = 0
# Frame out, fallback on frame_in + duration or project's value or 1001
@@ -170,7 +172,7 @@
# Substitute item type for general classification (assets or shots)
if item_type in ["Asset", "AssetType"]:
entity_root_asset_name = "Assets"
elif item_type in ["Episode", "Sequence"]:
elif item_type in ["Episode", "Sequence", "Shot"]:
entity_root_asset_name = "Shots"
# Root parent folder if exist
@@ -276,11 +278,13 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
match_res = re.match(r"(\d+)x(\d+)", project["resolution"])
if match_res:
project_data['resolutionWidth'] = int(match_res.group(1))
project_data['resolutionHeight'] = int(match_res.group(2))
project_data["resolutionWidth"] = int(match_res.group(1))
project_data["resolutionHeight"] = int(match_res.group(2))
else:
log.warning(f"\'{project['resolution']}\' does not match the expected"
" format for the resolution, for example: 1920x1080")
log.warning(
f"'{project['resolution']}' does not match the expected"
" format for the resolution, for example: 1920x1080"
)
return UpdateOne(
{"_id": project_doc["_id"]},


@@ -6,8 +6,17 @@ import collections
import numbers
import six
import time
from openpype.settings.lib import get_anatomy_settings
from openpype.settings.lib import (
get_project_settings,
get_local_settings,
)
from openpype.settings.constants import (
DEFAULT_PROJECT_KEY
)
from openpype.client import get_project
from openpype.lib.path_templates import (
TemplateUnsolved,
TemplateResult,
@@ -39,34 +48,23 @@ class RootCombinationError(Exception):
super(RootCombinationError, self).__init__(msg)
class Anatomy:
class BaseAnatomy(object):
"""Anatomy module helps to keep project settings.
Wraps key project specifications, AnatomyTemplates and Roots.
Args:
project_name (str): Project name to look on overrides.
"""
root_key_regex = re.compile(r"{(root?[^}]+)}")
root_name_regex = re.compile(r"root\[([^]]+)\]")
def __init__(self, project_name=None, site_name=None):
if not project_name:
project_name = os.environ.get("AVALON_PROJECT")
if not project_name:
raise ProjectNotSet((
"Implementation bug: Project name is not set. Anatomy requires"
" to load data for specific project."
))
def __init__(self, project_doc, local_settings, site_name):
project_name = project_doc["name"]
self.project_name = project_name
self._data = self._prepare_anatomy_data(
get_anatomy_settings(project_name, site_name)
)
self._site_name = site_name
self._data = self._prepare_anatomy_data(
project_doc, local_settings, site_name
)
self._templates_obj = AnatomyTemplates(self)
self._roots_obj = Roots(self)
@@ -87,12 +85,14 @@ class Anatomy:
def items(self):
return copy.deepcopy(self._data).items()
@staticmethod
def _prepare_anatomy_data(anatomy_data):
def _prepare_anatomy_data(self, project_doc, local_settings, site_name):
"""Prepare anatomy data for further processing.
Method added to replace `{task}` with `{task[name]}` in templates.
"""
project_name = project_doc["name"]
anatomy_data = self._project_doc_to_anatomy_data(project_doc)
templates_data = anatomy_data.get("templates")
if templates_data:
# Replace `{task}` with `{task[name]}` in templates
@@ -103,23 +103,13 @@ class Anatomy:
if not isinstance(item, dict):
continue
for key in tuple(item.keys()):
value = item[key]
if isinstance(value, dict):
value_queue.append(value)
self._apply_local_settings_on_anatomy_data(anatomy_data,
local_settings,
project_name,
site_name)
elif isinstance(value, six.string_types):
item[key] = value.replace("{task}", "{task[name]}")
return anatomy_data
def reset(self):
"""Reset values of cached data in templates and roots objects."""
self._data = self._prepare_anatomy_data(
get_anatomy_settings(self.project_name, self._site_name)
)
self.templates_obj.reset()
self.roots_obj.reset()
@property
def templates(self):
"""Wrap property `templates` of Anatomy's AnatomyTemplates instance."""
@@ -338,6 +328,161 @@ class Anatomy:
data = self.root_environmets_fill_data(template)
return rootless_path.format(**data)
def _project_doc_to_anatomy_data(self, project_doc):
"""Convert project document to anatomy data.
Probably should fill missing keys and values.
"""
output = copy.deepcopy(project_doc["config"])
output["attributes"] = copy.deepcopy(project_doc["data"])
return output
def _apply_local_settings_on_anatomy_data(
self, anatomy_data, local_settings, project_name, site_name
):
"""Apply local settings on anatomy data.
ATM local settings can modify project roots. Project name is required
as local settings store data by project name.
Local settings override root values in this order:
1.) Check if local settings contain overrides for the default project and
apply its values on roots if there are any.
2.) If the passed `project_name` is not None, check project specific
overrides in local settings for the project and apply its values on
roots if there are any.
NOTE: Root values of default project from local settings are always
applied if they are set.
Args:
anatomy_data (dict): Data for anatomy.
local_settings (dict): Data of local settings.
project_name (str): Name of project for which anatomy data are.
"""
if not local_settings:
return
local_project_settings = local_settings.get("projects") or {}
# Check for roots existence in local settings first
roots_project_locals = (
local_project_settings
.get(project_name, {})
)
roots_default_locals = (
local_project_settings
.get(DEFAULT_PROJECT_KEY, {})
)
# Skip rest of processing if roots are not set
if not roots_project_locals and not roots_default_locals:
return
# Combine roots from local settings
roots_locals = roots_default_locals.get(site_name) or {}
roots_locals.update(roots_project_locals.get(site_name) or {})
# Skip processing if roots for current active site are not available in
# local settings
if not roots_locals:
return
current_platform = platform.system().lower()
root_data = anatomy_data["roots"]
for root_name, path in roots_locals.items():
if root_name not in root_data:
continue
anatomy_data["roots"][root_name][current_platform] = (
path
)
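A sketch of the local settings shape this override logic expects; the site, project, and root names are made up:
local_settings = {
    "projects": {
        DEFAULT_PROJECT_KEY: {"studio": {"work": "/mnt/work"}},         # step 1.)
        "MyProject": {"studio": {"work": "/mnt/projects/my_project"}},  # step 2.)
    }
}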
class Anatomy(BaseAnatomy):
_project_cache = {}
_site_cache = {}
def __init__(self, project_name=None, site_name=None):
if not project_name:
project_name = os.environ.get("AVALON_PROJECT")
if not project_name:
raise ProjectNotSet((
"Implementation bug: Project name is not set. Anatomy requires"
" to load data for specific project."
))
project_doc = self.get_project_doc_from_cache(project_name)
local_settings = get_local_settings()
if not site_name:
site_name = self.get_site_name_from_cache(
project_name, local_settings
)
super(Anatomy, self).__init__(
project_doc,
local_settings,
site_name
)
@classmethod
def get_project_doc_from_cache(cls, project_name):
project_cache = cls._project_cache.get(project_name)
if project_cache is not None:
if time.time() - project_cache["start"] > 10:
cls._project_cache.pop(project_name)
project_cache = None
if project_cache is None:
project_cache = {
"project_doc": get_project(project_name),
"start": time.time()
}
cls._project_cache[project_name] = project_cache
return copy.deepcopy(
cls._project_cache[project_name]["project_doc"]
)
@classmethod
def get_site_name_from_cache(cls, project_name, local_settings):
site_cache = cls._site_cache.get(project_name)
if site_cache is not None:
if time.time() - site_cache["start"] > 10:
cls._site_cache.pop(project_name)
site_cache = None
if site_cache:
return site_cache["site_name"]
local_project_settings = local_settings.get("projects")
if not local_project_settings:
return
project_locals = local_project_settings.get(project_name) or {}
default_locals = local_project_settings.get(DEFAULT_PROJECT_KEY) or {}
active_site = (
project_locals.get("active_site")
or default_locals.get("active_site")
)
if not active_site:
project_settings = get_project_settings(project_name)
active_site = (
project_settings
["global"]
["sync_server"]
["config"]
["active_site"]
)
cls._site_cache[project_name] = {
"site_name": active_site,
"start": time.time()
}
return active_site
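Usage sketch with a hypothetical project name; constructions within the 10 second window reuse the cached project document and site name instead of re-querying:
anatomy_a = Anatomy("MyProject")  # queries the project document
anatomy_b = Anatomy("MyProject")  # served from the 10s caches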
class AnatomyTemplateUnsolved(TemplateUnsolved):
"""Exception for unsolved template when strict is set to True."""


@@ -200,6 +200,16 @@ class AttributeValues:
def changes(self):
return self.calculate_changes(self._data, self._origin_data)
def apply_changes(self, changes):
for key, item in changes.items():
old_value, new_value = item
if new_value is None:
if key in self:
self.pop(key)
elif self.get(key) != new_value:
self[key] = new_value
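The expected payload mirrors what 'changes()' produces, a mapping of key to (old, new) pairs; for illustration:
values.apply_changes({
    "frameStart": (1001, 1010),  # differing value -> set to 1010
    "stale_key": ("x", None),    # new value None -> popped if present
})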
class CreatorAttributeValues(AttributeValues):
"""Creator specific attribute values of an instance.
@@ -333,6 +343,21 @@ class PublishAttributes:
changes[key] = (value, None)
return changes
def apply_changes(self, changes):
for key, item in changes.items():
if isinstance(item, dict):
self._data[key].apply_changes(item)
continue
old_value, new_value = item
if new_value is not None:
raise ValueError(
"Unexpected type \"{}\" expected None".format(
str(type(new_value))
)
)
self.pop(key)
def set_publish_plugins(self, attr_plugins):
"""Set publish plugins attribute definitions."""
@@ -730,6 +755,97 @@ class CreatedInstance:
if member not in self._members:
self._members.append(member)
def serialize_for_remote(self):
return {
"data": self.data_to_store(),
"orig_data": copy.deepcopy(self._orig_data)
}
@classmethod
def deserialize_on_remote(cls, serialized_data, creator_items):
"""Convert instance data to CreatedInstance.
This is a fake instance in a remote process, e.g. in the UI process. The
creator is not a full creator and should not be used for calling methods
when the instance is created from this method (depends on implementation).
Args:
serialized_data (Dict[str, Any]): Serialized data for remote
recreating. Should contain 'data' and 'orig_data'.
creator_items (Dict[str, Any]): Mapping of creator identifiers to
objects that behave like a creator for most attribute access.
"""
instance_data = copy.deepcopy(serialized_data["data"])
creator_identifier = instance_data["creator_identifier"]
creator_item = creator_items[creator_identifier]
family = instance_data.get("family", None)
if family is None:
family = creator_item.family
subset_name = instance_data.get("subset", None)
obj = cls(
family, subset_name, instance_data, creator_item, new=False
)
obj._orig_data = serialized_data["orig_data"]
return obj
def remote_changes(self):
"""Prepare serializable changes on remote side.
Returns:
Dict[str, Any]: Prepared changes that can be sent to the client side.
"""
return {
"changes": self.changes(),
"asset_is_valid": self._asset_is_valid,
"task_is_valid": self._task_is_valid,
}
def update_from_remote(self, remote_changes):
"""Apply changes from remote side on client side.
Args:
remote_changes (Dict[str, Any]): Changes created on remote side.
"""
self._asset_is_valid = remote_changes["asset_is_valid"]
self._task_is_valid = remote_changes["task_is_valid"]
changes = remote_changes["changes"]
creator_attributes = changes.pop("creator_attributes", None) or {}
publish_attributes = changes.pop("publish_attributes", None) or {}
if changes:
self.apply_changes(changes)
if creator_attributes:
self.creator_attributes.apply_changes(creator_attributes)
if publish_attributes:
self.publish_attributes.apply_changes(publish_attributes)
def apply_changes(self, changes):
"""Apply changes created via 'changes'.
Args:
changes (Dict[str, Tuple[Any, Any]]): Instance changes to apply. Same
values are kept untouched.
"""
for key, item in changes.items():
old_value, new_value = item
if new_value is None:
if key in self:
self.pop(key)
else:
current_value = self.get(key)
if current_value != new_value:
self[key] = new_value
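A round-trip sketch of the remote workflow; the 'creator_items' mapping is assumed to exist on the remote side:
payload = instance.serialize_for_remote()                # host process
remote = CreatedInstance.deserialize_on_remote(payload, creator_items)
remote["subset"] = "renderMain"                          # edited remotely
instance.update_from_remote(remote.remote_changes())     # applied on host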
class CreateContext:
"""Context of instance creation.
@@ -817,6 +933,10 @@ class CreateContext:
def instances(self):
return self._instances_by_id.values()
@property
def instances_by_id(self):
return self._instances_by_id
@property
def publish_attributes(self):
"""Access to global publish attributes."""
@@ -976,7 +1096,8 @@ class CreateContext:
and creator_class.host_name != self.host_name
):
self.log.info((
"Creator's host name is not supported for current host {}"
"Creator's host name \"{}\""
" is not supported for current host \"{}\""
).format(creator_class.host_name, self.host_name))
continue


@@ -9,7 +9,9 @@ import os
import logging
import collections
from openpype.lib import get_subset_name
from openpype.client import get_asset_by_id
from .subset_name import get_subset_name
class LegacyCreator(object):
@@ -147,11 +149,15 @@ class LegacyCreator(object):
variant, task_name, asset_id, project_name, host_name
)
asset_doc = get_asset_by_id(
project_name, asset_id, fields=["data.tasks"]
)
return get_subset_name(
cls.family,
variant,
task_name,
asset_id,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data


@@ -265,6 +265,10 @@ def get_last_workfile_with_version(
if not match:
continue
if not match.groups():
output_filenames.append(filename)
continue
file_version = int(match.group(1))
if version is None or file_version > version:
output_filenames[:] = []


@@ -29,7 +29,7 @@
"delivery": {},
"unreal": {
"folder": "{root[work]}/{project[name]}/unreal/{task[name]}",
"file": "{project[code]}_{asset}",
"file": "{project[code]}_{asset}.{ext}",
"path": "{@folder}/{@file}"
},
"others": {


@@ -21,7 +21,7 @@
"viewTransform": "sRGB gamma"
}
},
"mel_workspace": "workspace -fr \"shaders\" \"renderData/shaders\";\nworkspace -fr \"images\" \"renders\";\nworkspace -fr \"particles\" \"particles\";\nworkspace -fr \"mayaAscii\" \"\";\nworkspace -fr \"mayaBinary\" \"\";\nworkspace -fr \"scene\" \"\";\nworkspace -fr \"alembicCache\" \"cache/alembic\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\n",
"mel_workspace": "workspace -fr \"shaders\" \"renderData/shaders\";\nworkspace -fr \"images\" \"renders/maya\";\nworkspace -fr \"particles\" \"particles\";\nworkspace -fr \"mayaAscii\" \"\";\nworkspace -fr \"mayaBinary\" \"\";\nworkspace -fr \"scene\" \"\";\nworkspace -fr \"alembicCache\" \"cache/alembic\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\n",
"ext_mapping": {
"model": "ma",
"mayaAscii": "ma",
@@ -56,12 +56,12 @@
},
"RenderSettings": {
"apply_render_settings": true,
"default_render_image_folder": "renders",
"default_render_image_folder": "renders/maya",
"enable_all_lights": true,
"aov_separator": "underscore",
"reset_current_frame": false,
"arnold_renderer": {
"image_prefix": "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>",
"image_prefix": "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>",
"image_format": "exr",
"multilayer_exr": true,
"tiled": true,
@@ -69,14 +69,14 @@
"additional_options": []
},
"vray_renderer": {
"image_prefix": "maya/<scene>/<Layer>/<Layer>",
"image_prefix": "<scene>/<Layer>/<Layer>",
"engine": "1",
"image_format": "exr",
"aov_list": [],
"additional_options": []
},
"redshift_renderer": {
"image_prefix": "maya/<Scene>/<RenderLayer>/<RenderLayer>",
"image_prefix": "<Scene>/<RenderLayer>/<RenderLayer>",
"primary_gi_engine": "0",
"secondary_gi_engine": "0",
"image_format": "exr",
@@ -126,6 +126,7 @@
"CreateAnimation": {
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"defaults": [
"Main"
]
@@ -133,6 +134,7 @@
"CreatePointCache": {
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"defaults": [
"Main"
]
@@ -187,6 +189,8 @@
},
"CreateModel": {
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"defaults": [
"Main",
"Proxy",


@@ -1,5 +1,6 @@
{
"level_sequences_for_layouts": false,
"delete_unmatched_assets": false,
"project_setup": {
"dev_mode": true
}


@@ -782,41 +782,11 @@
"host_name": "resolve",
"environment": {
"RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [],
"RESOLVE_SCRIPT_API": {
"windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting",
"darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting",
"linux": "/opt/resolve/Developer/Scripting"
},
"RESOLVE_SCRIPT_LIB": {
"windows": "C:/Program Files/Blackmagic Design/DaVinci Resolve/fusionscript.dll",
"darwin": "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so",
"linux": "/opt/resolve/libs/Fusion/fusionscript.so"
},
"RESOLVE_UTILITY_SCRIPTS_DIR": {
"windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp",
"darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp",
"linux": "/opt/resolve/Fusion/Scripts/Comp"
},
"PYTHON36_RESOLVE": {
"RESOLVE_PYTHON3_HOME": {
"windows": "{LOCALAPPDATA}/Programs/Python/Python36",
"darwin": "~/Library/Python/3.6/bin",
"linux": "/opt/Python/3.6/bin"
},
"PYTHONPATH": [
"{PYTHON36_RESOLVE}/Lib/site-packages",
"{VIRTUAL_ENV}/Lib/site-packages",
"{PYTHONPATH}",
"{RESOLVE_SCRIPT_API}/Modules",
"{PYTHONPATH}"
],
"PATH": [
"{PYTHON36_RESOLVE}",
"{PYTHON36_RESOLVE}/Scripts",
"{PATH}"
],
"PRE_PYTHON_SCRIPT": "{OPENPYPE_REPOS_ROOT}/openpype/resolve/preload_console.py",
"OPENPYPE_LOG_NO_COLORS": "True",
"RESOLVE_DEV": "True"
}
},
"variants": {
"stable": {


@@ -10,6 +10,11 @@
"key": "level_sequences_for_layouts",
"label": "Generate level sequences when loading layouts"
},
{
"type": "boolean",
"key": "delete_unmatched_assets",
"label": "Delete assets that are not matched"
},
{
"type": "dict",
"collapsible": true,


@@ -127,6 +127,41 @@
"key": "write_color_sets",
"label": "Write Color Sets"
},
{
"type": "boolean",
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "list",
"key": "defaults",
"label": "Default Subsets",
"object_type": "text"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "CreateModel",
"label": "Create Model",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "write_color_sets",
"label": "Write Color Sets"
},
{
"type": "boolean",
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "list",
"key": "defaults",
@@ -152,6 +187,11 @@
"key": "write_color_sets",
"label": "Write Color Sets"
},
{
"type": "boolean",
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "list",
"key": "defaults",
@@ -197,10 +237,6 @@
"key": "CreateMayaScene",
"label": "Create Maya Scene"
},
{
"key": "CreateModel",
"label": "Create Model"
},
{
"key": "CreateRenderSetup",
"label": "Create Render Setup"


@@ -973,23 +973,22 @@ VariantInputsWidget QToolButton {
background: {color:bg};
border-radius: 0.3em;
}
#PublishInfoFrame[state="-1"] {
background: rgb(194, 226, 236);
}
#PublishInfoFrame[state="0"] {
background: {color:publisher:crash};
background: {color:publisher:success};
}
#PublishInfoFrame[state="1"] {
background: {color:publisher:success};
background: {color:publisher:crash};
}
#PublishInfoFrame[state="2"] {
background: {color:publisher:warning};
}
#PublishInfoFrame[state="3"], #PublishInfoFrame[state="4"] {
background: rgb(194, 226, 236);
}
#PublishInfoFrame QLabel {
color: black;
font-style: bold;
@@ -1086,7 +1085,7 @@ ValidationArtistMessage QLabel {
border-color: {color:publisher:error};
}
#PublishProgressBar[state="0"]::chunk {
#PublishProgressBar[state="1"]::chunk, #PublishProgressBar[state="4"]::chunk {
background: {color:bg-buttons};
}


@@ -1,7 +0,0 @@
from .app import show
from .window import PublisherWindow
__all__ = (
"show",
"PublisherWindow"
)

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff