Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-25 05:14:40 +01:00)

Commit 9106cd1297 — Merge branch '2.x/develop' into feature/tvpaint_creators

222 changed files with 406561 additions and 3243 deletions
.gitattributes (vendored) — new file, 3 lines

@ -0,0 +1,3 @@
* text=auto
*.js eol=lf
*.c eol=lf
@ -1,20 +0,0 @@
pype.aport package
==================

.. automodule:: pype.aport
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

pype.aport.api module
---------------------

.. automodule:: pype.aport.api
   :members:
   :undoc-members:
   :show-inheritance:
@ -11,7 +11,6 @@ Subpackages

 .. toctree::

-   pype.aport
    pype.avalon_apps
    pype.clockify
    pype.ftrack
@ -39,13 +39,9 @@ from .action import (
from .lib import (
    version_up,
    get_asset,
    get_project,
    get_hierarchy,
    get_subsets,
    get_version_from_path,
    get_last_version_from_path,
    modified_environ,
    add_tool_to_environment,
    source_hash,
    get_latest_version
)
@ -88,14 +84,10 @@ __all__ = [

    # get contextual data
    "version_up",
    "get_project",
    "get_hierarchy",
    "get_asset",
    "get_subsets",
    "get_version_from_path",
    "get_last_version_from_path",
    "modified_environ",
    "add_tool_to_environment",
    "source_hash",

    "subprocess",
pype/hosts/aftereffects/__init__.py — new file, 74 lines

@ -0,0 +1,74 @@
import os
import sys

from avalon import api, io
from avalon.vendor import Qt
from pype import lib
import pyblish.api


def check_inventory():
    if not lib.any_outdated():
        return

    host = api.registered_host()
    outdated_containers = []
    for container in host.ls():
        representation = container['representation']
        representation_doc = io.find_one(
            {
                "_id": io.ObjectId(representation),
                "type": "representation"
            },
            projection={"parent": True}
        )
        if representation_doc and not lib.is_latest(representation_doc):
            outdated_containers.append(container)

    # Warn about outdated containers.
    print("Starting new QApplication..")
    app = Qt.QtWidgets.QApplication(sys.argv)

    message_box = Qt.QtWidgets.QMessageBox()
    message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
    msg = "There are outdated containers in the scene."
    message_box.setText(msg)
    message_box.exec_()

    # Garbage collect QApplication.
    del app


def application_launch():
    check_inventory()


def install():
    print("Installing Pype config...")

    plugins_directory = os.path.join(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
        "plugins",
        "aftereffects"
    )

    pyblish.api.register_plugin_path(
        os.path.join(plugins_directory, "publish")
    )
    api.register_plugin_path(
        api.Loader, os.path.join(plugins_directory, "load")
    )
    api.register_plugin_path(
        api.Creator, os.path.join(plugins_directory, "create")
    )

    pyblish.api.register_callback(
        "instanceToggled", on_pyblish_instance_toggled
    )

    api.on("application.launched", application_launch)


def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle layer visibility on instance toggles."""
    instance[0].Visible = new_value
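For orientation, a host module like the one above is activated by handing it to Avalon's `install`; a minimal, hypothetical bootstrap sketch (assumes a configured Avalon session, not part of this commit):

```python
# Hypothetical bootstrap for the host integration above. avalon.api.install
# calls the module's install(), which registers the publish/load/create
# plugin paths and the "application.launched" callback.
import avalon.api
import pype.hosts.aftereffects as aftereffects

avalon.api.install(aftereffects)
```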
@ -2,7 +2,7 @@ import sys

from avalon.vendor.Qt import QtGui
import avalon.fusion

from avalon import io

self = sys.modules[__name__]
self._project = None
@ -59,3 +59,84 @@ def get_additional_data(container):
    return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
                                           tile_color["G"],
                                           tile_color["B"])}


def switch_item(container,
                asset_name=None,
                subset_name=None,
                representation_name=None):
    """Switch asset, subset or representation of a container by name.

    It'll always switch to the latest version - of course a different
    approach could be implemented.

    Args:
        container (dict): data of the item to switch with
        asset_name (str): name of the asset
        subset_name (str): name of the subset
        representation_name (str): name of the representation

    Returns:
        dict

    """

    if all(not x for x in [asset_name, subset_name, representation_name]):
        raise ValueError("Must have at least one change provided to switch.")

    # Collect any of current asset, subset and representation if not provided
    # so we can use the original name from those.
    if any(not x for x in [asset_name, subset_name, representation_name]):
        _id = io.ObjectId(container["representation"])
        representation = io.find_one({"type": "representation", "_id": _id})
        version, subset, asset, project = io.parenthood(representation)

        if asset_name is None:
            asset_name = asset["name"]

        if subset_name is None:
            subset_name = subset["name"]

        if representation_name is None:
            representation_name = representation["name"]

    # Find the new one
    asset = io.find_one({
        "name": asset_name,
        "type": "asset"
    })
    assert asset, ("Could not find asset in the database with the name "
                   "'%s'" % asset_name)

    subset = io.find_one({
        "name": subset_name,
        "type": "subset",
        "parent": asset["_id"]
    })
    assert subset, ("Could not find subset in the database with the name "
                    "'%s'" % subset_name)

    version = io.find_one(
        {
            "type": "version",
            "parent": subset["_id"]
        },
        sort=[('name', -1)]
    )

    assert version, "Could not find a version for {}.{}".format(
        asset_name, subset_name
    )

    representation = io.find_one({
        "name": representation_name,
        "type": "representation",
        "parent": version["_id"]}
    )

    assert representation, ("Could not find representation in the database "
                            "with the name '%s'" % representation_name)

    avalon.api.switch(container, representation)

    return representation
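A hedged usage sketch for `switch_item`; the module path `pype.hosts.fusion.lib` is assumed from the `fusion_lib` alias used in the next hunk, and the asset name is illustrative:

```python
# Switch every loaded Fusion container to another asset, keeping subset and
# representation names; containers come from the registered host's ls().
import avalon.api
from pype.hosts.fusion import lib as fusion_lib  # path assumed

host = avalon.api.registered_host()
for container in host.ls():
    fusion_lib.switch_item(container, asset_name="sh020")
```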
@ -234,7 +234,7 @@ def switch(asset_name, filepath=None, new=True):
     representations = []
     for container in containers:
         try:
-            representation = pype.switch_item(
+            representation = fusion_lib.switch_item(
                 container,
                 asset_name=asset_name)
             representations.append(representation)
@ -1,56 +1,45 @@
 # -*- coding: utf-8 -*-
 """Pype Harmony Host implementation."""
 import os
 import sys
+from pathlib import Path

 from avalon import api, io, harmony
 from avalon.vendor import Qt
 import avalon.tools.sceneinventory

 import pyblish.api

 from pype import lib
 from pype.api import config


 def set_scene_settings(settings):
     """Set correct scene settings in Harmony.
-    """
-    signature = harmony.signature("set_scene_settings")
-    func = """function %s(args)
-    {
-        if (args[0]["fps"])
-        {
-            scene.setFrameRate(args[0]["fps"]);
-        }
-        if (args[0]["frameStart"] && args[0]["frameEnd"])
-        {
-            var duration = args[0]["frameEnd"] - args[0]["frameStart"] + 1
-
-            if (frame.numberOf() < duration)
-            {
-                frame.insert(
-                    duration, duration - frame.numberOf()
-                );
-            }
-
-            scene.setStartFrame(1);
-            scene.setStopFrame(duration);
-        }
-        if (args[0]["resolutionWidth"] && args[0]["resolutionHeight"])
-        {
-            scene.setDefaultResolution(
-                args[0]["resolutionWidth"], args[0]["resolutionHeight"], 41.112
-            )
-        }
-    }
-    %s
-    """ % (signature, signature)
-    harmony.send({"function": func, "args": [settings]})
+
+    Args:
+        settings (dict): Scene settings.
+
+    Returns:
+        dict: Dictionary of settings to set.
+
+    """
+    harmony.send(
+        {"function": "PypeHarmony.setSceneSettings", "args": settings})


 def get_asset_settings():
     """Get settings on current asset from database.

     Returns:
         dict: Scene data.

     """
     asset_data = lib.get_asset()["data"]
     fps = asset_data.get("fps")
     frame_start = asset_data.get("frameStart")
     frame_end = asset_data.get("frameEnd")
     resolution_width = asset_data.get("resolutionWidth")
     resolution_height = asset_data.get("resolutionHeight")
     entity_type = asset_data.get("entityType")

     scene_data = {
         "fps": fps,

@ -63,17 +52,25 @@ def get_asset_settings():
     try:
         skip_resolution_check = \
             config.get_presets()["harmony"]["general"]["skip_resolution_check"]
         skip_timelines_check = \
             config.get_presets()["harmony"]["general"]["skip_timelines_check"]
     except KeyError:
         skip_resolution_check = []
         skip_timelines_check = []

     if os.getenv('AVALON_TASK') in skip_resolution_check:
         scene_data.pop("resolutionWidth")
         scene_data.pop("resolutionHeight")

     if entity_type in skip_timelines_check:
         scene_data.pop('frameStart', None)
         scene_data.pop('frameEnd', None)

     return scene_data


 def ensure_scene_settings():
     """Validate if Harmony scene has valid settings."""
     settings = get_asset_settings()

     invalid_settings = []

@ -86,23 +83,22 @@ def ensure_scene_settings():

     # Warn about missing attributes.
     if invalid_settings:
-        print("Starting new QApplication..")
-        app = Qt.QtWidgets.QApplication.instance()
-        if not app:
-            app = Qt.QtWidgets.QApplication(sys.argv)
-
-        message_box = Qt.QtWidgets.QMessageBox()
-        message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
         msg = "Missing attributes:"
         for item in invalid_settings:
             msg += f"\n{item}"
-        message_box.setText(msg)
-        message_box.exec_()
+        harmony.send(
+            {"function": "PypeHarmony.message", "args": msg})

     set_scene_settings(valid_settings)


 def check_inventory():
     """Check if the scene contains outdated containers.

     If it does, it will colorize outdated nodes and display a warning message
     in Harmony.
     """
     if not lib.any_outdated():
         return

@ -121,89 +117,51 @@ def check_inventory():
             outdated_containers.append(container)

     # Colour nodes.
-    sig = harmony.signature("set_color")
-    func = """function %s(args){
-
-        for( var i =0; i <= args[0].length - 1; ++i)
-        {
-            var red_color = new ColorRGBA(255, 0, 0, 255);
-            node.setColor(args[0][i], red_color);
-        }
-    }
-    %s
-    """ % (sig, sig)
     outdated_nodes = []
     for container in outdated_containers:
         if container["loader"] == "ImageSequenceLoader":
             outdated_nodes.append(
                 harmony.find_node_by_name(container["name"], "READ")
             )
-    harmony.send({"function": func, "args": [outdated_nodes]})
+    harmony.send({"function": "PypeHarmony.setColor", "args": outdated_nodes})

     # Warn about outdated containers.
-    print("Starting new QApplication..")
-    app = Qt.QtWidgets.QApplication(sys.argv)
-
-    message_box = Qt.QtWidgets.QMessageBox()
-    message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
     msg = "There are outdated containers in the scene."
-    message_box.setText(msg)
-    message_box.exec_()
-
-    # Garbage collect QApplication.
-    del app
+    harmony.send({"function": "PypeHarmony.message", "args": msg})


 def application_launch():
     """Event that is executed after Harmony is launched."""
-    # FIXME: This is breaking server <-> client communication.
-    # It is now moved so it is manually called.
-    # ensure_scene_settings()
-    # check_inventory()
-    pass
+    pype_harmony_path = Path(__file__).parent / "js" / "PypeHarmony.js"
+    pype_harmony_js = pype_harmony_path.read_text()
+
+    # go through js/creators, loaders and publish folders and load all scripts
+    script = ""
+    for item in ["creators", "loaders", "publish"]:
+        dir_to_scan = Path(__file__).parent / "js" / item
+        for child in dir_to_scan.iterdir():
+            script += child.read_text()
+
+    # send scripts to Harmony
+    harmony.send({"script": pype_harmony_js})
+    harmony.send({"script": script})


 def export_template(backdrops, nodes, filepath):
     """Export Template to file.
-    """
-    sig = harmony.signature("set_color")
-    func = """function %s(args)
-    {
-        var temp_node = node.add("Top", "temp_note", "NOTE", 0, 0, 0);
-        var template_group = node.createGroup(temp_node, "temp_group");
-        node.deleteNode( template_group + "/temp_note" );
-
-        selection.clearSelection();
-        for (var f = 0; f < args[1].length; f++)
-        {
-            selection.addNodeToSelection(args[1][f]);
-        }
-
-        Action.perform("copy()", "Node View");
-
-        selection.clearSelection();
-        selection.addNodeToSelection(template_group);
-        Action.perform("onActionEnterGroup()", "Node View");
-        Action.perform("paste()", "Node View");
-
-        // Recreate backdrops in group.
-        for (var i = 0 ; i < args[0].length; i++)
-        {
-            MessageLog.trace(args[0][i]);
-            Backdrop.addBackdrop(template_group, args[0][i]);
-        };
-
-        Action.perform( "selectAll()", "Node View" );
-        copyPaste.createTemplateFromSelection(args[2], args[3]);
-
-        // Unfocus the group in Node view, delete all nodes and backdrops
-        // created during the process.
-        Action.perform("onActionUpToParent()", "Node View");
-        node.deleteNode(template_group, true, true);
-    }
-    %s
-    """ % (sig, sig)
+
+    Args:
+        backdrops (list): List of backdrops to export.
+        nodes (list): List of nodes to export.
+        filepath (str): Path where to save Template.
+
+    """
     harmony.send({
-        "function": func,
+        "function": "PypeHarmony.exportTemplate",
         "args": [
             backdrops,
             nodes,

@ -214,7 +172,8 @@ def export_template(backdrops, nodes, filepath):


 def install():
-    print("Installing Pype config...")
+    """Install Pype as host config."""
+    print("Installing Pype config ...")

     plugins_directory = os.path.join(
         os.path.dirname(os.path.dirname(os.path.dirname(__file__))),

@ -242,17 +201,12 @@ def install():

 def on_pyblish_instance_toggled(instance, old_value, new_value):
     """Toggle node enabling on instance toggles."""
-
-    sig = harmony.signature("enable_node")
-    func = """function %s(args)
-    {
-        node.setEnable(args[0], args[1])
-    }
-    %s
-    """ % (sig, sig)
     try:
         harmony.send(
-            {"function": func, "args": [instance[0], new_value]}
+            {
+                "function": "PypeHarmony.toggleInstance",
+                "args": [instance[0], new_value]
+            }
         )
     except IndexError:
         print(f"Instance '{instance}' is missing node")
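The refactor above replaces inline JS injection (`harmony.signature` plus a `%s`-formatted function string) with named functions that live in PypeHarmony.js, so a call from Python is just the dotted function name plus an args payload. A minimal sketch, values illustrative:

```python
from avalon import harmony

# Calls PypeHarmony.setSceneSettings(settings) defined in the JS file below;
# the payload mirrors what set_scene_settings() sends.
harmony.send({
    "function": "PypeHarmony.setSceneSettings",
    "args": {"fps": 25, "frameStart": 1, "frameEnd": 100}
})
```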
pype/hosts/harmony/js/.eslintrc.json — new file, 117 lines

@ -0,0 +1,117 @@
{
    "env": {
        "browser": true
    },
    "extends": "eslint:recommended",
    "parserOptions": {
        "ecmaVersion": 3
    },
    "rules": {
        "indent": [
            "error",
            4
        ],
        "linebreak-style": [
            "error",
            "unix"
        ],
        "quotes": [
            "error",
            "single"
        ],
        "semi": [
            "error",
            "always"
        ]
    },
    "globals": {
        "$": "readonly",
        "Action": "readonly",
        "Backdrop": "readonly",
        "Button": "readonly",
        "Cel": "readonly",
        "Cel3d": "readonly",
        "CheckBox": "readonly",
        "ColorRGBA": "readonly",
        "ComboBox": "readonly",
        "DateEdit": "readonly",
        "DateEditEnum": "readonly",
        "Dialog": "readonly",
        "Dir": "readonly",
        "DirSpec": "readonly",
        "Drawing": "readonly",
        "DrawingToolParams": "readonly",
        "DrawingTools": "readonly",
        "EnvelopeCreator": "readonly",
        "ExportVideoDlg": "readonly",
        "File": "readonly",
        "FileAccess": "readonly",
        "FileDialog": "readonly",
        "GroupBox": "readonly",
        "ImportDrawingDlg": "readonly",
        "Input": "readonly",
        "KeyModifiers": "readonly",
        "Label": "readonly",
        "LayoutExports": "readonly",
        "LayoutExportsParams": "readonly",
        "LineEdit": "readonly",
        "Matrix4x4": "readonly",
        "MessageBox": "readonly",
        "MessageLog": "readonly",
        "Model3d": "readonly",
        "MovieImport": "readonly",
        "NumberEdit": "readonly",
        "PaletteManager": "readonly",
        "PaletteObjectManager": "readonly",
        "PermanentFile": "readonly",
        "Point2d": "readonly",
        "Point3d": "readonly",
        "Process": "readonly",
        "Process2": "readonly",
        "Quaternion": "readonly",
        "QuicktimeExporter": "readonly",
        "RadioButton": "readonly",
        "RemoteCmd": "readonly",
        "Scene": "readonly",
        "Settings": "readonly",
        "Slider": "readonly",
        "SpinBox": "readonly",
        "SubnodeData": "readonly",
        "System": "readonly",
        "TemporaryFile": "readonly",
        "TextEdit": "readonly",
        "TimeEdit": "readonly",
        "Timeline": "readonly",
        "ToolProperties": "readonly",
        "UiLoader": "readonly",
        "Vector2d": "readonly",
        "Vector3d": "readonly",
        "WebCCExporter": "readonly",
        "Workspaces": "readonly",
        "__scriptManager__": "readonly",
        "__temporaryFileContext__": "readonly",
        "about": "readonly",
        "column": "readonly",
        "compositionOrder": "readonly",
        "copyPaste": "readonly",
        "deformation": "readonly",
        "drawingExport": "readonly",
        "element": "readonly",
        "exporter": "readonly",
        "fileMapper": "readonly",
        "frame": "readonly",
        "func": "readonly",
        "library": "readonly",
        "node": "readonly",
        "preferences": "readonly",
        "render": "readonly",
        "scene": "readonly",
        "selection": "readonly",
        "sound": "readonly",
        "specialFolders": "readonly",
        "translator": "readonly",
        "view": "readonly",
        "waypoint": "readonly",
        "xsheet": "readonly"
    }
}
pype/hosts/harmony/js/PypeHarmony.js — new file, 197 lines

@ -0,0 +1,197 @@
// ***************************************************************************
// *                            Pype Harmony Host                            *
// ***************************************************************************


/**
 * @namespace
 * @classdesc PypeHarmony encapsulates all Pype related functions.
 * @property {Object} Loaders  Namespace for Loaders JS code.
 * @property {Object} Creators Namespace for Creators JS code.
 * @property {Object} Publish  Namespace for Publish plugins JS code.
 */
var PypeHarmony = {
    Loaders: {},
    Creators: {},
    Publish: {}
};


/**
 * Show message in Harmony.
 * @function
 * @param {string} message Argument containing message.
 */
PypeHarmony.message = function(message) {
    MessageBox.information(message);
};


/**
 * Set scene settings based on shot/asset settings.
 * @function
 * @param {obj} settings Scene settings.
 */
PypeHarmony.setSceneSettings = function(settings) {
    if (settings.fps) {
        scene.setFrameRate(settings.fps);
    }

    if (settings.frameStart && settings.frameEnd) {
        var duration = settings.frameEnd - settings.frameStart + 1;

        if (frame.numberOf() > duration) {
            frame.remove(duration, frame.numberOf() - duration);
        }

        if (frame.numberOf() < duration) {
            frame.insert(duration, duration - frame.numberOf());
        }

        scene.setStartFrame(1);
        scene.setStopFrame(duration);
    }
    if (settings.resolutionWidth && settings.resolutionHeight) {
        scene.setDefaultResolution(
            settings.resolutionWidth, settings.resolutionHeight, 41.112
        );
    }
};


/**
 * Get scene settings.
 * @function
 * @return {array} Scene settings.
 */
PypeHarmony.getSceneSettings = function() {
    return [
        about.getApplicationPath(),
        scene.currentProjectPath(),
        scene.currentScene(),
        scene.getFrameRate(),
        scene.getStartFrame(),
        scene.getStopFrame(),
        sound.getSoundtrackAll().path(),
        scene.defaultResolutionX(),
        scene.defaultResolutionY()
    ];
};


/**
 * Set color of nodes.
 * @function
 * @param {array} nodes List of nodes.
 * @param {array} rgba  Array of RGBA components of color.
 */
PypeHarmony.setColor = function(nodes, rgba) {
    for (var i = 0; i <= nodes.length - 1; ++i) {
        var color = PypeHarmony.color(rgba);
        node.setColor(nodes[i], color);
    }
};


/**
 * Extract Template into file.
 * @function
 * @param {array} args Arguments for template extraction.
 *
 * @example
 * // arguments are in this order:
 * var args = [backdrops, nodes, templateFilename, templateDir];
 *
 */
PypeHarmony.exportTemplate = function(args) {
    var tempNode = node.add('Top', 'temp_note', 'NOTE', 0, 0, 0);
    var templateGroup = node.createGroup(tempNode, 'temp_group');
    node.deleteNode(templateGroup + '/temp_note');

    selection.clearSelection();
    for (var f = 0; f < args[1].length; f++) {
        selection.addNodeToSelection(args[1][f]);
    }

    Action.perform('copy()', 'Node View');

    selection.clearSelection();
    selection.addNodeToSelection(templateGroup);
    Action.perform('onActionEnterGroup()', 'Node View');
    Action.perform('paste()', 'Node View');

    // Recreate backdrops in group.
    for (var i = 0; i < args[0].length; i++) {
        MessageLog.trace(args[0][i]);
        Backdrop.addBackdrop(templateGroup, args[0][i]);
    }

    Action.perform('selectAll()', 'Node View');
    copyPaste.createTemplateFromSelection(args[2], args[3]);

    // Unfocus the group in Node view, delete all nodes and backdrops
    // created during the process.
    Action.perform('onActionUpToParent()', 'Node View');
    node.deleteNode(templateGroup, true, true);
};


/**
 * Toggle instance in Harmony.
 * @function
 * @param {array} args Instance name and value.
 */
PypeHarmony.toggleInstance = function(args) {
    node.setEnable(args[0], args[1]);
};


/**
 * Delete node in Harmony.
 * @function
 * @param {string} _node Node name.
 */
PypeHarmony.deleteNode = function(_node) {
    node.deleteNode(_node, true, true);
};


/**
 * Copy file.
 * @function
 * @param {string} src Source file name.
 * @param {string} dst Destination file name.
 */
PypeHarmony.copyFile = function(src, dst) {
    var srcFile = new PermanentFile(src);
    var dstFile = new PermanentFile(dst);
    srcFile.copy(dstFile);
};


/**
 * Create RGBA color from array.
 * @function
 * @param  {array} rgba Array of RGBA values.
 * @return {ColorRGBA} ColorRGBA Harmony class.
 */
PypeHarmony.color = function(rgba) {
    return new ColorRGBA(rgba[0], rgba[1], rgba[2], rgba[3]);
};


/**
 * Get all dependencies for given node.
 * @function
 * @param  {string} _node Node path.
 * @return {array} List of dependent nodes.
 */
PypeHarmony.getDependencies = function(_node) {
    var target_node = _node;
    var numInput = node.numberOfInputPorts(target_node);
    var dependencies = [];
    for (var i = 0; i < numInput; i++) {
        dependencies.push(node.srcNode(target_node, i));
    }
    return dependencies;
};
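Because `setColor(nodes, rgba)` above takes two parameters, a Python-side call carries both in the args payload (a hedged sketch; values illustrative):

```python
from avalon import harmony

# Tint a read node red via PypeHarmony.setColor; the node path and the RGBA
# quadruple here are illustrative only.
harmony.send({
    "function": "PypeHarmony.setColor",
    "args": [["Top/read_node"], [255, 0, 0, 255]]
})
```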
pype/hosts/harmony/js/README.md — new file, 15 lines

@ -0,0 +1,15 @@
## Pype - ToonBoom Harmony integration

### Development

#### Setting up ESLint as linter for javascript code

You need [node.js](https://nodejs.org/en/) installed. All you need to do then
is to run:

```sh
npm install
```

in the **js** directory. This will install eslint and all requirements locally.

In [Atom](https://atom.io/) it is enough to install [linter-eslint](https://atom.io/packages/linter-eslint) and set the global *npm* prefix in its settings.
pype/hosts/harmony/js/creators/CreateRender.js — new file, 33 lines

@ -0,0 +1,33 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// *                              CreateRender                               *
// ***************************************************************************


// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
    var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
    include(PYPE_HARMONY_JS + '/pype_harmony.js');
}


/**
 * @namespace
 * @classdesc Code creating render containers in Harmony.
 */
var CreateRender = function() {};


/**
 * Create render instance.
 * @function
 * @param {array} args Arguments for instance.
 */
CreateRender.prototype.create = function(args) {
    node.setTextAttr(args[0], 'DRAWING_TYPE', 1, 'PNG4');
    node.setTextAttr(args[0], 'DRAWING_NAME', 1, args[1]);
    node.setTextAttr(args[0], 'MOVIE_PATH', 1, args[1]);
};

// add self to Pype Loaders
PypeHarmony.Creators.CreateRender = new CreateRender();
pype/hosts/harmony/js/loaders/ImageSequenceLoader.js — new file, 281 lines

@ -0,0 +1,281 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// *                          ImageSequenceLoader                            *
// ***************************************************************************


// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
    var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
    include(PYPE_HARMONY_JS + '/pype_harmony.js');
}


/**
 * @namespace
 * @classdesc Image Sequence loader JS code.
 */
var ImageSequenceLoader = function() {
    this.PNGTransparencyMode = 0; // Premultiplied with Black
    this.TGATransparencyMode = 0; // Premultiplied with Black
    this.SGITransparencyMode = 0; // Premultiplied with Black
    this.LayeredPSDTransparencyMode = 1; // Straight
    this.FlatPSDTransparencyMode = 2; // Premultiplied with White
};


/**
 * Get unique column name.
 * @function
 * @param  {string} columnPrefix Column name.
 * @return {string} Unique column name.
 */
ImageSequenceLoader.prototype.getUniqueColumnName = function(columnPrefix) {
    var suffix = 0;
    // finds if unique name for a column
    var columnName = columnPrefix;
    while (suffix < 2000) {
        if (!column.type(columnName)) {
            break;
        }

        suffix = suffix + 1;
        columnName = columnPrefix + '_' + suffix;
    }
    return columnName;
};


/**
 * Import file sequences into Harmony.
 * @function
 * @param  {object} args Arguments for import, see Example.
 * @return {string} Read node name.
 *
 * @example
 * // Arguments are in the following order:
 * var args = [
 *     files,      // Files in file sequences.
 *     asset,      // Asset name.
 *     subset,     // Subset name.
 *     startFrame, // Sequence starting frame.
 *     groupId     // Unique group ID (uuid4).
 * ];
 */
ImageSequenceLoader.prototype.importFiles = function(args) {
    var doc = $.scn;
    var files = args[0];
    var asset = args[1];
    var subset = args[2];
    var startFrame = args[3];
    var groupId = args[4];
    var vectorFormat = null;
    var extension = null;
    var filename = files[0];
    var pos = filename.lastIndexOf('.');
    if (pos < 0) {
        return null;
    }

    // Get the current group
    var nodeViewWidget = $.app.getWidgetByName('Node View');
    if (!nodeViewWidget) {
        $.alert('You must have a Node View open!', 'No Node View!', 'OK!');
        return;
    }

    nodeViewWidget.setFocus();
    var nodeView = view.currentView();
    var currentGroup = null;
    if (!nodeView) {
        currentGroup = doc.root;
    } else {
        currentGroup = doc.$node(view.group(nodeView));
    }

    // Get a unique iterative name for the container read node
    var num = 0;
    var name = '';
    do {
        name = asset + '_' + (num++) + '_' + subset;
    } while (currentGroup.getNodeByName(name) != null);

    extension = filename.substr(pos + 1).toLowerCase();
    if (extension == 'jpeg') {
        extension = 'jpg';
    }

    if (extension == 'tvg') {
        vectorFormat = 'TVG';
        extension = 'SCAN'; // element.add() will use this.
    }

    var elemId = element.add(
        name,
        'BW',
        scene.numberOfUnitsZ(),
        extension.toUpperCase(),
        vectorFormat
    );

    if (elemId == -1) {
        // hum, unknown file type most likely -- let's skip it.
        return null; // no read to add.
    }

    var uniqueColumnName = this.getUniqueColumnName(name);
    column.add(uniqueColumnName, 'DRAWING');
    column.setElementIdOfDrawing(uniqueColumnName, elemId);
    var read = node.add(currentGroup, name, 'READ', 0, 0, 0);
    var transparencyAttr = node.getAttr(
        read, frame.current(), 'READ_TRANSPARENCY'
    );
    var opacityAttr = node.getAttr(read, frame.current(), 'OPACITY');
    transparencyAttr.setValue(true);
    opacityAttr.setValue(true);
    var alignmentAttr = node.getAttr(read, frame.current(), 'ALIGNMENT_RULE');
    alignmentAttr.setValue('ASIS');
    var transparencyModeAttr = node.getAttr(
        read, frame.current(), 'applyMatteToColor'
    );
    if (extension === 'png') {
        transparencyModeAttr.setValue(this.PNGTransparencyMode);
    }
    if (extension === 'tga') {
        transparencyModeAttr.setValue(this.TGATransparencyMode);
    }
    if (extension === 'sgi') {
        transparencyModeAttr.setValue(this.SGITransparencyMode);
    }
    if (extension === 'psd') {
        transparencyModeAttr.setValue(this.FlatPSDTransparencyMode);
    }
    if (extension === 'jpg') {
        transparencyModeAttr.setValue(this.LayeredPSDTransparencyMode);
    }

    var drawingFilePath;
    var timing;
    node.linkAttr(read, 'DRAWING.ELEMENT', uniqueColumnName);
    if (files.length === 1) {
        // Create a drawing; 'true' indicates that the file exists.
        Drawing.create(elemId, 1, true);
        // Get the actual path, in tmp folder.
        drawingFilePath = Drawing.filename(elemId, '1');
        PypeHarmony.copyFile(files[0], drawingFilePath);
        // Expose the image for the entire frame range.
        for (var i = 0; i <= frame.numberOf() - 1; ++i) {
            timing = startFrame + i;
            column.setEntry(uniqueColumnName, 1, timing, '1');
        }
    } else {
        // Create a drawing for each file.
        for (var j = 0; j <= files.length - 1; ++j) {
            timing = startFrame + j;
            // Create a drawing; 'true' indicates that the file exists.
            Drawing.create(elemId, timing, true);
            // Get the actual path, in tmp folder.
            drawingFilePath = Drawing.filename(elemId, timing.toString());
            PypeHarmony.copyFile(files[j], drawingFilePath);
            column.setEntry(uniqueColumnName, 1, timing, timing.toString());
        }
    }
    var greenColor = new ColorRGBA(0, 255, 0, 255);
    node.setColor(read, greenColor);

    // Add uuid to attribute of the container read node
    node.createDynamicAttr(read, 'STRING', 'uuid', 'uuid', false);
    node.setTextAttr(read, 'uuid', 1.0, groupId);
    return read;
};


/**
 * Replace file sequences in Harmony.
 * @function
 * @param  {object} args Arguments for import, see Example.
 * @return {string} Read node name.
 *
 * @example
 * // Arguments are in the following order:
 * var args = [
 *     files,      // Files in file sequences.
 *     name,       // Node name.
 *     startFrame  // Sequence starting frame.
 * ];
 */
ImageSequenceLoader.prototype.replaceFiles = function(args) {
    var files = args[0];
    MessageLog.trace(files);
    MessageLog.trace(files.length);
    var _node = args[1];
    var startFrame = args[2];
    var _column = node.linkedColumn(_node, 'DRAWING.ELEMENT');
    var elemId = column.getElementIdOfDrawing(_column);
    // Delete existing drawings.
    var timings = column.getDrawingTimings(_column);
    for (var i = 0; i <= timings.length - 1; ++i) {
        column.deleteDrawingAt(_column, parseInt(timings[i]));
    }
    var filename = files[0];
    var pos = filename.lastIndexOf('.');
    if (pos < 0) {
        return null;
    }
    var extension = filename.substr(pos + 1).toLowerCase();
    if (extension === 'jpeg') {
        extension = 'jpg';
    }

    var transparencyModeAttr = node.getAttr(
        _node, frame.current(), 'applyMatteToColor'
    );
    if (extension === 'png') {
        transparencyModeAttr.setValue(this.PNGTransparencyMode);
    }
    if (extension === 'tga') {
        transparencyModeAttr.setValue(this.TGATransparencyMode);
    }
    if (extension === 'sgi') {
        transparencyModeAttr.setValue(this.SGITransparencyMode);
    }
    if (extension === 'psd') {
        transparencyModeAttr.setValue(this.FlatPSDTransparencyMode);
    }
    if (extension === 'jpg') {
        transparencyModeAttr.setValue(this.LayeredPSDTransparencyMode);
    }

    var drawingFilePath;
    var timing;
    if (files.length == 1) {
        // Create a drawing; 'true' indicates that the file exists.
        Drawing.create(elemId, 1, true);
        // Get the actual path, in tmp folder.
        drawingFilePath = Drawing.filename(elemId, '1');
        PypeHarmony.copyFile(files[0], drawingFilePath);
        MessageLog.trace(files[0]);
        MessageLog.trace(drawingFilePath);
        // Expose the image for the entire frame range.
        for (var k = 0; k <= frame.numberOf() - 1; ++k) {
            timing = startFrame + k;
            column.setEntry(_column, 1, timing, '1');
        }
    } else {
        // Create a drawing for each file.
        for (var l = 0; l <= files.length - 1; ++l) {
            timing = startFrame + l;
            // Create a drawing; 'true' indicates that the file exists.
            Drawing.create(elemId, timing, true);
            // Get the actual path, in tmp folder.
            drawingFilePath = Drawing.filename(elemId, timing.toString());
            PypeHarmony.copyFile(files[l], drawingFilePath);
            column.setEntry(_column, 1, timing, timing.toString());
        }
    }
    var greenColor = new ColorRGBA(0, 255, 0, 255);
    node.setColor(_node, greenColor);
};


// add self to Pype Loaders
PypeHarmony.Loaders.ImageSequenceLoader = new ImageSequenceLoader();
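From Python, the loader above would be driven roughly like this; the argument order follows the `@example` docstring, and the `["result"]` return-value convention is assumed from avalon-harmony's `send`:

```python
from uuid import uuid4
from avalon import harmony

# Hedged sketch: import a PNG sequence as a container read node. File list,
# asset and subset names are illustrative.
files = ["/renders/sh020/beauty.0001.png", "/renders/sh020/beauty.0002.png"]
read_node = harmony.send({
    "function": "PypeHarmony.Loaders.ImageSequenceLoader.importFiles",
    "args": [files, "character01", "renderMain", 1, str(uuid4())]
})["result"]
```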
pype/hosts/harmony/js/loaders/TemplateLoader.js — new file, 177 lines

@ -0,0 +1,177 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// *                             TemplateLoader                              *
// ***************************************************************************


// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
    var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
    include(PYPE_HARMONY_JS + '/pype_harmony.js');
}


/**
 * @namespace
 * @classdesc Template loader JS code.
 */
var TemplateLoader = function() {};


/**
 * Load template as container.
 * @function
 * @param  {array} args Arguments, see example.
 * @return {string} Name of container.
 *
 * @example
 * // arguments are in the following order:
 * var args = [
 *     templatePath, // Path to tpl file.
 *     assetName,    // Asset name.
 *     subsetName,   // Subset name.
 *     groupId       // unique ID (uuid4)
 * ];
 */
TemplateLoader.prototype.loadContainer = function(args) {
    var doc = $.scn;
    var templatePath = args[0];
    var assetName = args[1];
    var subset = args[2];
    var groupId = args[3];

    // Get the current group
    var nodeViewWidget = $.app.getWidgetByName('Node View');
    if (!nodeViewWidget) {
        $.alert('You must have a Node View open!', 'No Node View!', 'OK!');
        return;
    }

    nodeViewWidget.setFocus();
    var currentGroup;
    var nodeView = view.currentView();
    if (!nodeView) {
        currentGroup = doc.root;
    } else {
        currentGroup = doc.$node(view.group(nodeView));
    }

    // Get a unique iterative name for the container group
    var num = 0;
    var containerGroupName = '';
    do {
        containerGroupName = assetName + '_' + (num++) + '_' + subset;
    } while (currentGroup.getNodeByName(containerGroupName) != null);

    // import the template
    var tplNodes = currentGroup.importTemplate(templatePath);
    MessageLog.trace(tplNodes);
    // Create the container group
    var groupNode = currentGroup.addGroup(
        containerGroupName, false, false, tplNodes);

    // Add uuid to attribute of the container group
    node.createDynamicAttr(groupNode, 'STRING', 'uuid', 'uuid', false);
    node.setTextAttr(groupNode, 'uuid', 1.0, groupId);

    return String(groupNode);
};


/**
 * Replace existing node container.
 * @function
 * @param  {string}  dstNodePath Harmony path to destination Node.
 * @param  {string}  srcNodePath Harmony path to source Node.
 * @param  {string}  renameSrc ...
 * @param  {boolean} cloneSrc ...
 * @return {boolean} Success.
 * @todo   This is work in progress.
 */
TemplateLoader.prototype.replaceNode = function(
    dstNodePath, srcNodePath, renameSrc, cloneSrc) {
    var doc = $.scn;
    var srcNode = doc.$node(srcNodePath);
    var dstNode = doc.$node(dstNodePath);
    // var dstNodeName = dstNode.name;
    var replacementNode = srcNode;
    // var dstGroup = dstNode.group;
    $.beginUndo();
    if (cloneSrc) {
        replacementNode = doc.$node(
            $.nodeTools.copy_paste_node(
                srcNodePath, dstNode.name + '_CLONE', dstNode.group.path));
    } else {
        if (replacementNode.group.path != srcNode.group.path) {
            replacementNode.moveToGroup(dstNode);
        }
    }
    var inLinks = dstNode.getInLinks();
    var link, inNode, inPort, outPort, outNode, success;
    for (var l in inLinks) {
        if (Object.prototype.hasOwnProperty.call(inLinks, l)) {
            link = inLinks[l];
            inPort = Number(link.inPort);
            outPort = Number(link.outPort);
            outNode = link.outNode;
            success = replacementNode.linkInNode(outNode, inPort, outPort);
            if (success) {
                $.log('Successfully connected ' + outNode + ' : ' +
                      outPort + ' -> ' + replacementNode + ' : ' + inPort);
            } else {
                $.alert('Failed to connect ' + outNode + ' : ' +
                        outPort + ' -> ' + replacementNode + ' : ' + inPort);
            }
        }
    }

    var outLinks = dstNode.getOutLinks();
    for (l in outLinks) {
        if (Object.prototype.hasOwnProperty.call(outLinks, l)) {
            link = outLinks[l];
            inPort = Number(link.inPort);
            outPort = Number(link.outPort);
            inNode = link.inNode;
            // first we must disconnect the port from the node being
            // replaced to this link's inNode port
            inNode.unlinkInPort(inPort);
            success = replacementNode.linkOutNode(inNode, outPort, inPort);
            if (success) {
                $.log('Successfully connected ' + inNode + ' : ' +
                      inPort + ' <- ' + replacementNode + ' : ' + outPort);
            } else {
                if (inNode.type == 'MultiLayerWrite') {
                    $.log('Attempting standard api to connect the nodes...');
                    success = node.link(
                        replacementNode, outPort, inNode,
                        inPort, node.numberOfInputPorts(inNode) + 1);
                    if (success) {
                        $.log('Successfully connected ' + inNode + ' : ' +
                              inPort + ' <- ' + replacementNode + ' : ' +
                              outPort);
                    }
                }
            }
            if (!success) {
                $.alert('Failed to connect ' + inNode + ' : ' +
                        inPort + ' <- ' + replacementNode + ' : ' + outPort);
                return false;
            }
        }
    }
};


TemplateLoader.prototype.askForColumnsUpdate = function() {
    // Ask user if they want to also update columns and
    // linked attributes here
    return ($.confirm(
        'Would you like to update in place and reconnect all \n' +
        'ins/outs, attributes, and columns?',
        'Update & Replace?\n' +
        'If you choose No, the version will only be loaded.',
        'Yes',
        'No'));
};


// add self to Pype Loaders
PypeHarmony.Loaders.TemplateLoader = new TemplateLoader();
pype/hosts/harmony/js/package.json — new file, 19 lines

@ -0,0 +1,19 @@
{
    "name": "pype-harmony",
    "version": "1.0.0",
    "description": "Avalon Harmony Host integration",
    "keywords": [
        "Pype",
        "Avalon",
        "Harmony",
        "pipeline"
    ],
    "license": "MIT",
    "main": "PypeHarmony.js",
    "scripts": {
        "test": "echo \"Error: no test specified\" && exit 1"
    },
    "devDependencies": {
        "eslint": "^7.11.0"
    }
}
pype/hosts/harmony/js/publish/CollectCurrentFile.js — new file, 28 lines

@ -0,0 +1,28 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// *                           CollectCurrentFile                            *
// ***************************************************************************


// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
    var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
    include(PYPE_HARMONY_JS + '/pype_harmony.js');
}


/**
 * @namespace
 * @classdesc Collect current file.
 */
var CollectCurrentFile = function() {};

CollectCurrentFile.prototype.collect = function() {
    return (
        scene.currentProjectPath() + '/' +
        scene.currentVersionName() + '.xstage'
    );
};

// add self to Pype Loaders
PypeHarmony.Publish.CollectCurrentFile = new CollectCurrentFile();
pype/hosts/harmony/js/publish/CollectPalettes.js — new file, 33 lines

@ -0,0 +1,33 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// *                            CollectPalettes                              *
// ***************************************************************************


// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
    var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
    include(PYPE_HARMONY_JS + '/pype_harmony.js');
}


/**
 * @namespace
 * @classdesc Code for collecting palettes.
 */
var CollectPalettes = function() {};

CollectPalettes.prototype.getPalettes = function() {
    var palette_list = PaletteObjectManager.getScenePaletteList();

    var palettes = {};
    for (var i = 0; i < palette_list.numPalettes; ++i) {
        var palette = palette_list.getPaletteByIndex(i);
        palettes[palette.getName()] = palette.id;
    }

    return palettes;
};

// add self to Pype Loaders
PypeHarmony.Publish.CollectPalettes = new CollectPalettes();
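On the Python side, a publish collector would typically query the class above like this (a hedged sketch; the `["result"]` key follows the avalon-harmony `send` convention):

```python
from avalon import harmony

# Returns {palette_name: palette_id} as built by CollectPalettes.getPalettes.
palettes = harmony.send(
    {"function": "PypeHarmony.Publish.CollectPalettes.getPalettes"}
)["result"]
```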
pype/hosts/harmony/js/publish/ExtractPalette.js — new file, 38 lines

@ -0,0 +1,38 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// *                             ExtractPalette                              *
// ***************************************************************************


// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
    var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
    include(PYPE_HARMONY_JS + '/pype_harmony.js');
}


/**
 * @namespace
 * @classdesc Code for extracting palettes.
 */
var ExtractPalette = function() {};


/**
 * Get palette from Harmony.
 * @function
 * @param  {string} paletteId ID of palette to get.
 * @return {array} [paletteName, palettePath]
 */
ExtractPalette.prototype.getPalette = function(paletteId) {
    var palette_list = PaletteObjectManager.getScenePaletteList();
    var palette = palette_list.getPaletteById(paletteId);
    var palette_name = palette.getName();
    return [
        palette_name,
        (palette.getPath() + '/' + palette.getName() + '.plt')
    ];
};

// add self to Pype Loaders
PypeHarmony.Publish.ExtractPalette = new ExtractPalette();
pype/hosts/harmony/js/publish/ExtractTemplate.js — new file, 54 lines

@ -0,0 +1,54 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// *                            ExtractTemplate                              *
// ***************************************************************************


// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
    var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
    include(PYPE_HARMONY_JS + '/pype_harmony.js');
}


/**
 * @namespace
 * @classdesc Code for extracting templates.
 */
var ExtractTemplate = function() {};


/**
 * Get backdrops for given node.
 * @function
 * @param  {string} probeNode Node path to probe for backdrops.
 * @return {array} List of backdrops.
 */
ExtractTemplate.prototype.getBackdropsByNode = function(probeNode) {
    var backdrops = Backdrop.backdrops('Top');
    var valid_backdrops = [];
    for (var i = 0; i < backdrops.length; i++) {
        var position = backdrops[i].position;

        var x_valid = false;
        var node_x = node.coordX(probeNode);
        if (position.x < node_x && node_x < (position.x + position.w)) {
            x_valid = true;
        }

        var y_valid = false;
        var node_y = node.coordY(probeNode);
        if (position.y < node_y && node_y < (position.y + position.h)) {
            y_valid = true;
        }

        if (x_valid && y_valid) {
            valid_backdrops.push(backdrops[i]);
        }
    }
    return valid_backdrops;
};

// add self to Pype Loaders
PypeHarmony.Publish.ExtractTemplate = new ExtractTemplate();
@ -4,6 +4,7 @@ import sys
 import hiero
 import pyblish.api
 import avalon.api as avalon
+import avalon.io
 from avalon.vendor.Qt import (QtWidgets, QtGui)
 import pype.api as pype
 from pype.api import Logger, Anatomy

@ -58,7 +59,8 @@ def sync_avalon_data_to_workfile():
     project.setProjectRoot(active_project_root)

     # get project data from avalon db
-    project_data = pype.get_project()["data"]
+    project_doc = avalon.io.find_one({"type": "project"})
+    project_data = project_doc["data"]

     log.debug("project_data: {}".format(project_data))
@ -378,14 +378,8 @@ class AExpectedFiles:
             renderable = False
             if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
                 renderable = True
-
-            for override in self.get_layer_overrides(
-                "{}.renderable".format(cam), self.layer
-            ):
-                renderable = self.maya_is_true(override)

             if renderable:
                 renderable_cameras.append(cam)

         return renderable_cameras

     def maya_is_true(self, attr_val):

@ -564,6 +558,7 @@ class ExpectedFilesVray(AExpectedFiles):
         if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
             default_ext = "exr"
         layer_data["defaultExt"] = default_ext
+        layer_data["padding"] = cmds.getAttr("vraySettings.fileNamePadding")
         return layer_data

     def get_files(self):

@ -614,11 +609,14 @@ class ExpectedFilesVray(AExpectedFiles):
         if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
             default_ext = "exr"

         # filter all namespace prefixed AOVs - they are pulled in from
         # references and are not rendered.
         vr_aovs = [
             n
             for n in cmds.ls(
                 type=["VRayRenderElement", "VRayRenderElementSet"]
             )
             if len(n.split(":")) == 1
         ]

         for aov in vr_aovs:
@ -8,6 +8,7 @@ import math
 import bson
 import json
 import logging
+import itertools
 import contextlib
 from collections import OrderedDict, defaultdict
 from math import ceil

@ -122,6 +123,12 @@ def float_round(num, places=0, direction=ceil):
     return direction(num * (10**places)) / float(10**places)


+def pairwise(iterable):
+    """s -> (s0,s1), (s2,s3), (s4, s5), ..."""
+    a = iter(iterable)
+    return itertools.izip(a, a)
+
+
 def unique(name):
     assert isinstance(name, string_types), "`name` must be string"
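Note that `itertools.izip` exists only on Python 2; under Python 3 the same lazy pairing is the builtin `zip`. A version-agnostic variant of the helper above (an illustrative sketch, not part of the commit):

```python
import sys
import itertools


def pairwise(iterable):
    """s -> (s0, s1), (s2, s3), (s4, s5), ..."""
    a = iter(iterable)
    if sys.version_info[0] == 2:
        return itertools.izip(a, a)  # izip was removed in Python 3
    return zip(a, a)  # Python 3 zip is already lazy
```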
@ -419,12 +426,12 @@ def empty_sets(sets, force=False):
                 plugs=True,
                 connections=True) or []
             original_connections.extend(connections)
-            for dest, src in lib.pairwise(connections):
+            for dest, src in pairwise(connections):
                 cmds.disconnectAttr(src, dest)
         yield
     finally:

-        for dest, src in lib.pairwise(original_connections):
+        for dest, src in pairwise(original_connections):
             cmds.connectAttr(src, dest)

         # Restore original members
@ -1857,8 +1864,8 @@ def set_context_settings():
     """

     # Todo (Wijnand): apply renderer and resolution of project

-    project_data = lib.get_project()["data"]
+    project_doc = io.find_one({"type": "project"})
+    project_data = project_doc["data"]
     asset_data = lib.get_asset()["data"]

     # Set project fps
@ -195,7 +195,7 @@ def format_anatomy(data):
     if not version:
         file = script_name()
         data["version"] = pype.get_version_from_path(file)
-    project_document = pype.get_project()
+    project_document = io.find_one({"type": "project"})
     data.update({
         "subset": data["avalon"]["subset"],
         "asset": data["avalon"]["asset"],
@ -978,24 +978,30 @@ class WorkfileSettings(object):
         self.set_colorspace()

     def set_favorites(self):
-        anatomy = get_anatomy()
-        work_template = anatomy.templates["work"]["path"]
-        projects_root = anatomy.root_value_for_template(work_template)
         work_dir = os.getenv("AVALON_WORKDIR")
         asset = os.getenv("AVALON_ASSET")
         project = os.getenv("AVALON_PROJECT")
-        hierarchy = os.getenv("AVALON_HIERARCHY")
         favorite_items = OrderedDict()

         # project
-        favorite_items.update({"Project dir": os.path.join(
-            projects_root, project).replace("\\", "/")})
-        # shot
-        favorite_items.update({"Shot dir": os.path.join(
-            projects_root, project,
-            hierarchy, asset).replace("\\", "/")})
+        # get project's root and split to parts
+        projects_root = os.path.normpath(work_dir.split(
+            project)[0])
+        # add project name
+        project_dir = os.path.join(projects_root, project) + "/"
+        # add to favorites
+        favorite_items.update({"Project dir": project_dir.replace("\\", "/")})
+
+        # asset
+        asset_root = os.path.normpath(work_dir.split(
+            asset)[0])
+        # add asset name
+        asset_dir = os.path.join(asset_root, asset) + "/"
+        # add to favorites
+        favorite_items.update({"Shot dir": asset_dir.replace("\\", "/")})

         # workdir
-        favorite_items.update({"Work dir": work_dir})
+        favorite_items.update({"Work dir": work_dir.replace("\\", "/")})

         set_context_favorites(favorite_items)
@ -1388,8 +1394,18 @@ class ExporterReviewMov(ExporterReview):
         self.log.debug("Path: {}".format(self.path))
         write_node["file"].setValue(self.path)
         write_node["file_type"].setValue(self.ext)
-        write_node["meta_codec"].setValue("ap4h")
-        write_node["mov64_codec"].setValue("ap4h")
+
+        # Knobs `meta_codec` and `mov64_codec` are not available on centos.
+        # TODO change this to use conditions, if possible.
+        try:
+            write_node["meta_codec"].setValue("ap4h")
+        except Exception:
+            self.log.info("`meta_codec` knob was not found")
+
+        try:
+            write_node["mov64_codec"].setValue("ap4h")
+        except Exception:
+            self.log.info("`mov64_codec` knob was not found")
         write_node["mov64_write_timecode"].setValue(1)
         write_node["raw"].setValue(1)
         # connect
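The TODO in the hunk above suggests testing for the knob instead of catching the exception; in Nuke's Python API a node's knobs are exposed as a dict, so a hedged condition-based sketch would be:

```python
# Sketch of the condition-based variant hinted at by the TODO; assumes the
# Nuke convention that membership in node.knobs() tests knob existence.
for codec_knob in ("meta_codec", "mov64_codec"):
    if codec_knob in write_node.knobs():
        write_node[codec_knob].setValue("ap4h")
    else:
        self.log.info("`{}` knob was not found".format(codec_knob))
```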
pype/lib.py — 1904 lines changed. File diff suppressed because it is too large.
71
pype/lib/__init__.py
Normal file
71
pype/lib/__init__.py
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Pype lib module."""
|
||||
|
||||
from .deprecated import (
|
||||
get_avalon_database,
|
||||
set_io_database
|
||||
)
|
||||
|
||||
from .avalon_context import (
|
||||
is_latest,
|
||||
any_outdated,
|
||||
get_asset,
|
||||
get_hierarchy,
|
||||
get_linked_assets,
|
||||
get_latest_version,
|
||||
BuildWorkfile
|
||||
)
|
||||
|
||||
from .hooks import PypeHook, execute_hook
|
||||
|
||||
from .applications import (
|
||||
ApplicationLaunchFailed,
|
||||
launch_application,
|
||||
ApplicationAction,
|
||||
_subprocess
|
||||
)
|
||||
|
||||
from .plugin_tools import filter_pyblish_plugins, source_hash
|
||||
|
||||
from .path_tools import (
|
||||
version_up,
|
||||
get_version_from_path,
|
||||
get_last_version_from_path,
|
||||
get_paths_from_environ,
|
||||
get_ffmpeg_tool_path
|
||||
)
|
||||
|
||||
from .ffmpeg_utils import ffprobe_streams
|
||||
|
||||
__all__ = [
|
||||
"get_avalon_database",
|
||||
"set_io_database",
|
||||
|
||||
"is_latest",
|
||||
"any_outdated",
|
||||
"get_asset",
|
||||
"get_hierarchy",
|
||||
"get_linked_assets",
|
||||
"get_latest_version",
|
||||
"BuildWorkfile",
|
||||
|
||||
"PypeHook",
|
||||
"execute_hook",
|
||||
|
||||
"ApplicationLaunchFailed",
|
||||
"launch_application",
|
||||
"ApplicationAction",
|
||||
|
||||
"filter_pyblish_plugins",
|
||||
|
||||
"version_up",
|
||||
"get_version_from_path",
|
||||
"get_last_version_from_path",
|
||||
"get_paths_from_environ",
|
||||
"get_ffmpeg_tool_path",
|
||||
|
||||
"ffprobe_streams",
|
||||
|
||||
"source_hash",
|
||||
"_subprocess"
|
||||
]
|
||||
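
With this split, callers keep importing from the flat `pype.lib` namespace; a usage sketch, assuming a configured pype environment on `PYTHONPATH` (the path and names below are hypothetical):

# Submodule layout is an implementation detail; the public names
# are re-exported at package level.
from pype.lib import version_up, get_latest_version

new_workfile = version_up("/path/to/shot010_comp_v003.nk")
version_doc = get_latest_version("sh010", "renderCompMain")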
457  pype/lib/applications.py (new file)

@@ -0,0 +1,457 @@
import os
import sys
import getpass
import copy
import platform
import logging
import subprocess

import acre

import avalon.api
import avalon.lib

from ..api import Anatomy, Logger, config
from .hooks import execute_hook
from .deprecated import get_avalon_database

log = logging.getLogger(__name__)


class ApplicationLaunchFailed(Exception):
    pass


def launch_application(project_name, asset_name, task_name, app_name):
    """Launch host application and fill required environment variables.

    TODO(iLLiCiT): This should be split into more parts.
    """
    # `get_avalon_database` is in Pype 3 replaced with using `AvalonMongoDB`
    database = get_avalon_database()
    project_document = database[project_name].find_one({"type": "project"})
    asset_document = database[project_name].find_one({
        "type": "asset",
        "name": asset_name
    })

    asset_doc_parents = asset_document["data"].get("parents")
    hierarchy = "/".join(asset_doc_parents)

    app_def = avalon.lib.get_application(app_name)
    app_label = app_def.get("ftrack_label", app_def.get("label", app_name))

    host_name = app_def["application_dir"]
    # Workfile data collection may be special function?
    data = {
        "project": {
            "name": project_document["name"],
            "code": project_document["data"].get("code")
        },
        "task": task_name,
        "asset": asset_name,
        "app": host_name,
        "hierarchy": hierarchy
    }

    try:
        anatomy = Anatomy(project_name)
        anatomy_filled = anatomy.format(data)
        workdir = os.path.normpath(anatomy_filled["work"]["folder"])

    except Exception as exc:
        raise ApplicationLaunchFailed(
            "Error in anatomy.format: {}".format(str(exc))
        )

    try:
        os.makedirs(workdir)
    except FileExistsError:
        pass

    last_workfile_path = None
    extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(host_name)
    if extensions:
        # Find last workfile
        file_template = anatomy.templates["work"]["file"]
        data.update({
            "version": 1,
            "user": os.environ.get("PYPE_USERNAME") or getpass.getuser(),
            "ext": extensions[0]
        })

        last_workfile_path = avalon.api.last_workfile(
            workdir, file_template, data, extensions, True
        )

    # set environments for Avalon
    prep_env = copy.deepcopy(os.environ)
    prep_env.update({
        "AVALON_PROJECT": project_name,
        "AVALON_ASSET": asset_name,
        "AVALON_TASK": task_name,
        "AVALON_APP": host_name,
        "AVALON_APP_NAME": app_name,
        "AVALON_HIERARCHY": hierarchy,
        "AVALON_WORKDIR": workdir
    })

    start_last_workfile = avalon.api.should_start_last_workfile(
        project_name, host_name, task_name
    )
    # Store boolean as "0"(False) or "1"(True)
    prep_env["AVALON_OPEN_LAST_WORKFILE"] = (
        str(int(bool(start_last_workfile)))
    )

    if (
        start_last_workfile
        and last_workfile_path
        and os.path.exists(last_workfile_path)
    ):
        prep_env["AVALON_LAST_WORKFILE"] = last_workfile_path

    prep_env.update(anatomy.roots_obj.root_environments())

    # collect all the 'environment' attributes from parents
    tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]]
    tools_env = asset_document["data"].get("tools_env") or []
    tools_attr.extend(tools_env)

    tools_env = acre.get_tools(tools_attr)
    env = acre.compute(tools_env)
    env = acre.merge(env, current_env=dict(prep_env))

    # Get path to execute
    st_temp_path = os.environ["PYPE_CONFIG"]
    os_plat = platform.system().lower()

    # Path to folder with launchers
    path = os.path.join(st_temp_path, "launchers", os_plat)

    # Full path to executable launcher
    execfile = None

    launch_hook = app_def.get("launch_hook")
    if launch_hook:
        log.info("launching hook: {}".format(launch_hook))
        ret_val = execute_hook(launch_hook, env=env)
        if not ret_val:
            raise ApplicationLaunchFailed(
                "Hook didn't finish successfully {}".format(app_label)
            )

    if sys.platform == "win32":
        for ext in os.environ["PATHEXT"].split(os.pathsep):
            fpath = os.path.join(path.strip('"'), app_def["executable"] + ext)
            if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
                execfile = fpath
                break

        # Run the application only if an executable was found
        if execfile is None:
            raise ApplicationLaunchFailed(
                "We didn't find launcher for {}".format(app_label)
            )

        popen = avalon.lib.launch(
            executable=execfile, args=[], environment=env
        )

    elif (
        sys.platform.startswith("linux")
        or sys.platform.startswith("darwin")
    ):
        execfile = os.path.join(path.strip('"'), app_def["executable"])
        # Run the application only if an executable was found
        if execfile is None:
            raise ApplicationLaunchFailed(
                "We didn't find launcher for {}".format(app_label)
            )

        if not os.path.isfile(execfile):
            raise ApplicationLaunchFailed(
                "Launcher doesn't exist - {}".format(execfile)
            )

        try:
            fp = open(execfile)
        except PermissionError as perm_exc:
            raise ApplicationLaunchFailed(
                "Access denied on launcher {} - {}".format(execfile, perm_exc)
            )

        fp.close()
        # check executable permission
        if not os.access(execfile, os.X_OK):
            raise ApplicationLaunchFailed(
                "No executable permission - {}".format(execfile)
            )

        popen = avalon.lib.launch(  # noqa: F841
            "/usr/bin/env", args=["bash", execfile], environment=env
        )
    return popen


class ApplicationAction(avalon.api.Action):
    """Default application launcher

    This is a convenience application Action that can launch the application
    when "config" refers to a parsed application `.toml`.

    """
    _log = None
    config = None
    group = None
    variant = None
    required_session_keys = (
        "AVALON_PROJECT",
        "AVALON_ASSET",
        "AVALON_TASK"
    )

    @property
    def log(self):
        if self._log is None:
            self._log = Logger().get_logger(self.__class__.__name__)
        return self._log

    def is_compatible(self, session):
        for key in self.required_session_keys:
            if key not in session:
                return False
        return True

    def process(self, session, **kwargs):
        """Process the full Application action"""

        project_name = session["AVALON_PROJECT"]
        asset_name = session["AVALON_ASSET"]
        task_name = session["AVALON_TASK"]
        launch_application(
            project_name, asset_name, task_name, self.name
        )

        self._ftrack_after_launch_procedure(
            project_name, asset_name, task_name
        )

    def _ftrack_after_launch_procedure(
        self, project_name, asset_name, task_name
    ):
        # TODO move to launch hook
        required_keys = ("FTRACK_SERVER", "FTRACK_API_USER", "FTRACK_API_KEY")
        for key in required_keys:
            if not os.environ.get(key):
                self.log.debug((
                    "Missing required environment \"{}\""
                    " for Ftrack after launch procedure."
                ).format(key))
                return

        try:
            import ftrack_api
            session = ftrack_api.Session(auto_connect_event_hub=True)
            self.log.debug("Ftrack session created")
        except Exception:
            self.log.warning("Couldn't create Ftrack session")
            return

        try:
            entity = self._find_ftrack_task_entity(
                session, project_name, asset_name, task_name
            )
            self._ftrack_status_change(session, entity, project_name)
            self._start_timer(session, entity, ftrack_api)
        except Exception:
            self.log.warning(
                "Couldn't finish Ftrack procedure.", exc_info=True
            )
            return

        finally:
            session.close()

    def _find_ftrack_task_entity(
        self, session, project_name, asset_name, task_name
    ):
        project_entity = session.query(
            "Project where full_name is \"{}\"".format(project_name)
        ).first()
        if not project_entity:
            self.log.warning(
                "Couldn't find project \"{}\" in Ftrack.".format(project_name)
            )
            return

        potential_task_entities = session.query((
            "TypedContext where parent.name is \"{}\" and project_id is \"{}\""
        ).format(asset_name, project_entity["id"])).all()
        filtered_entities = []
        for _entity in potential_task_entities:
            if (
                _entity.entity_type.lower() == "task"
                and _entity["name"] == task_name
            ):
                filtered_entities.append(_entity)

        if not filtered_entities:
            self.log.warning((
                "Couldn't find task \"{}\" under parent \"{}\" in Ftrack."
            ).format(task_name, asset_name))
            return

        if len(filtered_entities) > 1:
            self.log.warning((
                "Found more than one task \"{}\""
                " under parent \"{}\" in Ftrack."
            ).format(task_name, asset_name))
            return

        return filtered_entities[0]

    def _ftrack_status_change(self, session, entity, project_name):
        presets = config.get_presets(project_name)["ftrack"]["ftrack_config"]
        statuses = presets.get("status_update")
        if not statuses:
            return

        actual_status = entity["status"]["name"].lower()
        already_tested = set()
        ent_path = "/".join(
            [ent["name"] for ent in entity["link"]]
        )
        while True:
            next_status_name = None
            for key, value in statuses.items():
                if key in already_tested:
                    continue
                if actual_status in value or "_any_" in value:
                    if key != "_ignore_":
                        next_status_name = key
                        already_tested.add(key)
                    break
                already_tested.add(key)

            if next_status_name is None:
                break

            try:
                query = "Status where name is \"{}\"".format(
                    next_status_name
                )
                status = session.query(query).one()

                entity["status"] = status
                session.commit()
                self.log.debug("Changing status to \"{}\" <{}>".format(
                    next_status_name, ent_path
                ))
                break

            except Exception:
                session.rollback()
                msg = (
                    "Status \"{}\" in presets wasn't found"
                    " on Ftrack entity type \"{}\""
                ).format(next_status_name, entity.entity_type)
                self.log.warning(msg)

    def _start_timer(self, session, entity, _ftrack_api):
        self.log.debug("Triggering timer start.")

        user_entity = session.query("User where username is \"{}\"".format(
            os.environ["FTRACK_API_USER"]
        )).first()
        if not user_entity:
            self.log.warning(
                "Couldn't find user with username \"{}\" in Ftrack".format(
                    os.environ["FTRACK_API_USER"]
                )
            )
            return

        source = {
            "user": {
                "id": user_entity["id"],
                "username": user_entity["username"]
            }
        }
        event_data = {
            "actionIdentifier": "start.timer",
            "selection": [{"entityId": entity["id"], "entityType": "task"}]
        }
        session.event_hub.publish(
            _ftrack_api.event.base.Event(
                topic="ftrack.action.launch",
                data=event_data,
                source=source
            ),
            on_error="ignore"
        )
        self.log.debug("Timer start triggered successfully.")


# Special naming case since `subprocess` is a built-in module.
def _subprocess(*args, **kwargs):
    """Convenience method for getting output errors for subprocess.

    Entered arguments and keyword arguments are passed to subprocess Popen.

    Args:
        *args: Variable length argument list passed to Popen.
        **kwargs: Arbitrary keyword arguments passed to Popen. A
            `logging.Logger` object may be passed under "logger" to use a
            different logger than this lib's.

    Returns:
        str: Full output of subprocess concatenated stdout and stderr.

    Raises:
        RuntimeError: Exception is raised if process finished with nonzero
            return code.
    """

    # Get environments from kwargs, or use current process environments
    # if they were not passed.
    env = kwargs.get("env") or os.environ
    # Make sure environment contains only strings
    filtered_env = {k: str(v) for k, v in env.items()}

    # Use lib's logger if none was passed with kwargs.
    logger = kwargs.pop("logger", log)

    # set overrides
    kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
    kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
    kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
    kwargs['env'] = filtered_env

    proc = subprocess.Popen(*args, **kwargs)

    full_output = ""
    _stdout, _stderr = proc.communicate()
    if _stdout:
        _stdout = _stdout.decode("utf-8")
        full_output += _stdout
        logger.debug(_stdout)

    if _stderr:
        _stderr = _stderr.decode("utf-8")
        # Add additional line break if output already contains stdout
        if full_output:
            full_output += "\n"
        full_output += _stderr
        logger.warning(_stderr)

    if proc.returncode != 0:
        exc_msg = "Executing arguments was not successful: \"{}\"".format(args)
        if _stdout:
            exc_msg += "\n\nOutput:\n{}".format(_stdout)

        if _stderr:
            exc_msg += "Error:\n{}".format(_stderr)

        raise RuntimeError(exc_msg)

    return full_output
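
A standalone sketch of what `_subprocess` boils down to — capture both streams, combine them, and raise on a nonzero return code. The echoed command is arbitrary and assumes a `python` executable on PATH:

import subprocess

def run_checked(args):
    """Return combined stdout/stderr, raising on a nonzero exit code."""
    proc = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, stderr = proc.communicate()
    output = (stdout or b"").decode("utf-8")
    if stderr:
        if output:
            output += "\n"
        output += stderr.decode("utf-8")
    if proc.returncode != 0:
        raise RuntimeError(
            "Executing arguments was not successful: \"{}\"\n{}".format(
                args, output
            )
        )
    return output

print(run_checked(["python", "-c", "print('hello')"]))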
870  pype/lib/avalon_context.py (new file)

@@ -0,0 +1,870 @@
import os
import json
import re
import logging
import collections

from avalon import io, pipeline
from ..api import config
import avalon.api

log = logging.getLogger("AvalonContext")


def is_latest(representation):
    """Return whether the representation is from latest version.

    Args:
        representation (dict): The representation document from the database.

    Returns:
        bool: Whether the representation is of latest version.

    """

    version = io.find_one({"_id": representation['parent']})
    if version["type"] == "master_version":
        return True

    # Get highest version under the parent
    highest_version = io.find_one({
        "type": "version",
        "parent": version["parent"]
    }, sort=[("name", -1)], projection={"name": True})

    if version['name'] == highest_version['name']:
        return True
    else:
        return False


def any_outdated():
    """Return whether the current scene has any outdated content"""

    checked = set()
    host = avalon.api.registered_host()
    for container in host.ls():
        representation = container['representation']
        if representation in checked:
            continue

        representation_doc = io.find_one(
            {
                "_id": io.ObjectId(representation),
                "type": "representation"
            },
            projection={"parent": True}
        )
        if representation_doc and not is_latest(representation_doc):
            return True
        elif not representation_doc:
            log.debug("Container '{objectName}' has an invalid "
                      "representation, it is missing in the "
                      "database".format(**container))

        checked.add(representation)
    return False


def get_asset(asset_name=None):
    """Return asset document from database by its name.

    Does not handle duplicated asset names!

    Args:
        asset_name (str)

    Returns:
        (MongoDB document)
    """
    if not asset_name:
        asset_name = avalon.api.Session["AVALON_ASSET"]

    asset_document = io.find_one({
        "name": asset_name,
        "type": "asset"
    })

    if not asset_document:
        raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))

    return asset_document


def get_hierarchy(asset_name=None):
    """Obtain asset hierarchy path string from mongo db.

    Args:
        asset_name (str)

    Returns:
        (string): asset hierarchy path

    """
    if not asset_name:
        asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"])

    asset_entity = io.find_one({
        "type": 'asset',
        "name": asset_name
    })

    not_set = "PARENTS_NOT_SET"
    entity_parents = asset_entity.get("data", {}).get("parents", not_set)

    # If entity already has parents then just return them joined
    if entity_parents != not_set:
        return "/".join(entity_parents)

    # Else query parents through visualParents and store result to entity
    hierarchy_items = []
    entity = asset_entity
    while True:
        parent_id = entity.get("data", {}).get("visualParent")
        if not parent_id:
            break
        entity = io.find_one({"_id": parent_id})
        hierarchy_items.append(entity["name"])

    # Add parents to entity data for next query
    entity_data = asset_entity.get("data", {})
    entity_data["parents"] = hierarchy_items
    io.update_many(
        {"_id": asset_entity["_id"]},
        {"$set": {"data": entity_data}}
    )

    return "/".join(hierarchy_items)


def get_linked_assets(asset_entity):
    """Return linked assets for `asset_entity` from DB.

    Args:
        asset_entity (dict): asset document from DB

    Returns:
        (list) of MongoDB documents
    """
    inputs = asset_entity["data"].get("inputs", [])
    inputs = [io.find_one({"_id": x}) for x in inputs]
    return inputs


def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
    """Retrieve latest version from `asset_name`, and `subset_name`.

    Do not use if you want to query more than 5 latest versions, as this
    method queries mongo 3 times per call. For those cases it is better to
    use a more efficient way, e.g. with help of aggregations.

    Args:
        asset_name (str): Name of asset.
        subset_name (str): Name of subset.
        dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection
            with Session.
        project_name (str, optional): Find latest version in specific project.

    Returns:
        None: If asset, subset or version were not found.
        dict: Last version document for the entered asset and subset.
    """

    if not dbcon:
        log.debug("Using `avalon.io` for query.")
        dbcon = io
        # Make sure is installed
        io.install()

    if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"):
        # `avalon.io` has only `_database` attribute
        # but `AvalonMongoDB` has `database`
        database = getattr(dbcon, "database", dbcon._database)
        collection = database[project_name]
    else:
        project_name = dbcon.Session.get("AVALON_PROJECT")
        collection = dbcon

    log.debug((
        "Getting latest version for Project: \"{}\" Asset: \"{}\""
        " and Subset: \"{}\""
    ).format(project_name, asset_name, subset_name))

    # Query asset document id by asset name
    asset_doc = collection.find_one(
        {"type": "asset", "name": asset_name},
        {"_id": True}
    )
    if not asset_doc:
        log.info(
            "Asset \"{}\" was not found in Database.".format(asset_name)
        )
        return None

    subset_doc = collection.find_one(
        {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]},
        {"_id": True}
    )
    if not subset_doc:
        log.info(
            "Subset \"{}\" was not found in Database.".format(subset_name)
        )
        return None

    version_doc = collection.find_one(
        {"type": "version", "parent": subset_doc["_id"]},
        sort=[("name", -1)],
    )
    if not version_doc:
        log.info(
            "Subset \"{}\" does not have any version yet.".format(subset_name)
        )
        return None
    return version_doc


class BuildWorkfile:
    """Wrapper for build workfile process.

    Load representations for current context by build presets. Build presets
    are host related, since each host has its own loaders.
    """

    log = logging.getLogger("BuildWorkfile")

    @staticmethod
    def map_subsets_by_family(subsets):
        subsets_by_family = collections.defaultdict(list)
        for subset in subsets:
            family = subset["data"].get("family")
            if not family:
                families = subset["data"].get("families")
                if not families:
                    continue
                family = families[0]

            subsets_by_family[family].append(subset)
        return subsets_by_family

    def process(self):
        """Main method of this wrapper.

        Building of workfile is triggered and it is possible to implement
        post processing of loaded containers if necessary.
        """
        containers = self.build_workfile()

        return containers

    def build_workfile(self):
        """Prepares and loads containers into workfile.

        Loads latest versions of current and linked assets to workfile by
        logic stored in Workfile profiles from presets. Profiles are set by
        host, filtered by current task name and used by families.

        Each family can specify representation names and loaders for
        representations; the first representation that loads successfully
        is returned as a container.

        At the end you'll get a list of loaded containers per each asset.

        loaded_containers [{
            "asset_entity": <AssetEntity1>,
            "containers": [<Container1>, <Container2>, ...]
        }, {
            "asset_entity": <AssetEntity2>,
            "containers": [<Container3>, ...]
        }, {
            ...
        }]
        """
        # Get current asset name and entity
        current_asset_name = io.Session["AVALON_ASSET"]
        current_asset_entity = io.find_one({
            "type": "asset",
            "name": current_asset_name
        })

        # Skip if asset was not found
        if not current_asset_entity:
            print("Asset entity with name `{}` was not found".format(
                current_asset_name
            ))
            return

        # Prepare available loaders
        loaders_by_name = {}
        for loader in avalon.api.discover(avalon.api.Loader):
            loader_name = loader.__name__
            if loader_name in loaders_by_name:
                raise KeyError(
                    "Duplicated loader name {0}!".format(loader_name)
                )
            loaders_by_name[loader_name] = loader

        # Skip if there are no loaders
        if not loaders_by_name:
            self.log.warning("There are no registered loaders.")
            return

        # Get current task name
        current_task_name = io.Session["AVALON_TASK"]

        # Load workfile presets for task
        self.build_presets = self.get_build_presets(current_task_name)

        # Skip if there are no presets for task
        if not self.build_presets:
            self.log.warning(
                "Current task `{}` does not have any loading preset.".format(
                    current_task_name
                )
            )
            return

        # Get presets for loading current asset
        current_context_profiles = self.build_presets.get("current_context")
        # Get presets for loading linked assets
        link_context_profiles = self.build_presets.get("linked_assets")
        # Skip if both are missing
        if not current_context_profiles and not link_context_profiles:
            self.log.warning(
                "Current task `{}` has empty loading preset.".format(
                    current_task_name
                )
            )
            return

        elif not current_context_profiles:
            self.log.warning((
                "Current task `{}` doesn't have any loading"
                " preset for its context."
            ).format(current_task_name))

        elif not link_context_profiles:
            self.log.warning((
                "Current task `{}` doesn't have any"
                " loading preset for its linked assets."
            ).format(current_task_name))

        # Prepare assets to process by workfile presets
        assets = []
        current_asset_id = None
        if current_context_profiles:
            # Add current asset entity if preset has current context set
            assets.append(current_asset_entity)
            current_asset_id = current_asset_entity["_id"]

        if link_context_profiles:
            # Find and append linked assets if preset has set linked mapping
            link_assets = get_linked_assets(current_asset_entity)
            if link_assets:
                assets.extend(link_assets)

        # Skip if there are no assets. This can happen if only linked mapping
        # is set and there are no links for this asset.
        if not assets:
            self.log.warning(
                "Asset does not have linked assets. Nothing to process."
            )
            return

        # Prepare entities from database for assets
        prepared_entities = self._collect_last_version_repres(assets)

        # Load containers by prepared entities and presets
        loaded_containers = []
        # - Current asset containers
        if current_asset_id and current_asset_id in prepared_entities:
            current_context_data = prepared_entities.pop(current_asset_id)
            loaded_data = self.load_containers_by_asset_data(
                current_context_data, current_context_profiles, loaders_by_name
            )
            if loaded_data:
                loaded_containers.append(loaded_data)

        # - Linked assets container
        for linked_asset_data in prepared_entities.values():
            loaded_data = self.load_containers_by_asset_data(
                linked_asset_data, link_context_profiles, loaders_by_name
            )
            if loaded_data:
                loaded_containers.append(loaded_data)

        # Return list of loaded containers
        return loaded_containers

    def get_build_presets(self, task_name):
        """Return presets to build workfile for task name.

        Presets are loaded for current project set in
        io.Session["AVALON_PROJECT"], filtered by registered host
        and entered task name.

        Args:
            task_name (str): Task name used for filtering build presets.

        Returns:
            (dict): preset per entered task name
        """
        host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
        presets = config.get_presets(io.Session["AVALON_PROJECT"])
        # Get presets for host
        build_presets = (
            presets["plugins"]
            .get(host_name, {})
            .get("workfile_build")
        )
        if not build_presets:
            return

        task_name_low = task_name.lower()
        per_task_preset = None
        for preset in build_presets:
            preset_tasks = preset.get("tasks") or []
            preset_tasks_low = [task.lower() for task in preset_tasks]
            if task_name_low in preset_tasks_low:
                per_task_preset = preset
                break

        return per_task_preset

    def _filter_build_profiles(self, build_profiles, loaders_by_name):
        """Filter build profiles by loaders and prepare process data.

        Valid profile must have "loaders", "families" and "repre_names" keys
        with valid values.
        - "loaders" expects list of strings representing possible loaders.
        - "families" expects list of strings for filtering
          by main subset family.
        - "repre_names" expects list of strings for filtering by
          representation name.

        Lowered "families" and "repre_names" are prepared for each profile
        with all required keys.

        Args:
            build_profiles (dict): Profiles for building workfile.
            loaders_by_name (dict): Available loaders per name.

        Returns:
            (list): Filtered and prepared profiles.
        """
        valid_profiles = []
        for profile in build_profiles:
            # Check loaders
            profile_loaders = profile.get("loaders")
            if not profile_loaders:
                self.log.warning((
                    "Build profile has missing loaders configuration: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Check if any loader is available
            loaders_match = False
            for loader_name in profile_loaders:
                if loader_name in loaders_by_name:
                    loaders_match = True
                    break

            if not loaders_match:
                self.log.warning((
                    "None of the loaders from Build profile are available: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Check families
            profile_families = profile.get("families")
            if not profile_families:
                self.log.warning((
                    "Build profile is missing families configuration: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Check representation names
            profile_repre_names = profile.get("repre_names")
            if not profile_repre_names:
                self.log.warning((
                    "Build profile is missing"
                    " representation names filtering: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Prepare lowered families and representation names
            profile["families_lowered"] = [
                fam.lower() for fam in profile_families
            ]
            profile["repre_names_lowered"] = [
                name.lower() for name in profile_repre_names
            ]

            valid_profiles.append(profile)

        return valid_profiles

    def _prepare_profile_for_subsets(self, subsets, profiles):
        """Select profile for each subset by its data.

        Profiles are filtered for each subset individually.
        Profile is filtered by subset's family, optionally by name regex and
        representation names set in profile.
        It is possible to not find matching profile for subset, in that case
        subset is skipped and it is possible that none of subsets have
        matching profile.

        Args:
            subsets (list): Subset documents.
            profiles (dict): Build profiles.

        Returns:
            (dict) Profile by subset's id.
        """
        # Prepare subsets
        subsets_by_family = self.map_subsets_by_family(subsets)

        profiles_per_subset_id = {}
        for family, subsets in subsets_by_family.items():
            family_low = family.lower()
            for profile in profiles:
                # Skip profile if it does not contain the family
                if family_low not in profile["families_lowered"]:
                    continue

                # Precompile name filters as regexes
                profile_regexes = profile.get("subset_name_filters")
                if profile_regexes:
                    _profile_regexes = []
                    for regex in profile_regexes:
                        _profile_regexes.append(re.compile(regex))
                    profile_regexes = _profile_regexes

                # TODO prepare regex compilation
                for subset in subsets:
                    # Verify regex filtering (optional)
                    if profile_regexes:
                        valid = False
                        for pattern in profile_regexes:
                            if re.match(pattern, subset["name"]):
                                valid = True
                                break

                        if not valid:
                            continue

                    profiles_per_subset_id[subset["_id"]] = profile

                # break profiles loop on finding the first matching profile
                break
        return profiles_per_subset_id

    def load_containers_by_asset_data(
        self, asset_entity_data, build_profiles, loaders_by_name
    ):
        """Load containers for entered asset entity by Build profiles.

        Args:
            asset_entity_data (dict): Prepared data with subsets, last version
                and representations for specific asset.
            build_profiles (dict): Build profiles.
            loaders_by_name (dict): Available loaders per name.

        Returns:
            (dict) Output contains asset document and loaded containers.
        """

        # Make sure all data are not empty
        if not asset_entity_data or not build_profiles or not loaders_by_name:
            return

        asset_entity = asset_entity_data["asset_entity"]

        valid_profiles = self._filter_build_profiles(
            build_profiles, loaders_by_name
        )
        if not valid_profiles:
            self.log.warning(
                "There are no valid Workfile profiles. Skipping process."
            )
            return

        self.log.debug("Valid Workfile profiles: {}".format(valid_profiles))

        subsets_by_id = {}
        version_by_subset_id = {}
        repres_by_version_id = {}
        for subset_id, in_data in asset_entity_data["subsets"].items():
            subset_entity = in_data["subset_entity"]
            subsets_by_id[subset_entity["_id"]] = subset_entity

            version_data = in_data["version"]
            version_entity = version_data["version_entity"]
            version_by_subset_id[subset_id] = version_entity
            repres_by_version_id[version_entity["_id"]] = (
                version_data["repres"]
            )

        if not subsets_by_id:
            self.log.warning("There are no subsets for asset {0}".format(
                asset_entity["name"]
            ))
            return

        profiles_per_subset_id = self._prepare_profile_for_subsets(
            subsets_by_id.values(), valid_profiles
        )
        if not profiles_per_subset_id:
            self.log.warning("There are no valid subsets.")
            return

        valid_repres_by_subset_id = collections.defaultdict(list)
        for subset_id, profile in profiles_per_subset_id.items():
            profile_repre_names = profile["repre_names_lowered"]

            version_entity = version_by_subset_id[subset_id]
            version_id = version_entity["_id"]
            repres = repres_by_version_id[version_id]
            for repre in repres:
                repre_name_low = repre["name"].lower()
                if repre_name_low in profile_repre_names:
                    valid_repres_by_subset_id[subset_id].append(repre)

        # DEBUG message
        msg = "Valid representations for Asset: `{}`".format(
            asset_entity["name"]
        )
        for subset_id, repres in valid_repres_by_subset_id.items():
            subset = subsets_by_id[subset_id]
            msg += "\n# Subset Name/ID: `{}`/{}".format(
                subset["name"], subset_id
            )
            for repre in repres:
                msg += "\n## Repre name: `{}`".format(repre["name"])

        self.log.debug(msg)

        containers = self._load_containers(
            valid_repres_by_subset_id, subsets_by_id,
            profiles_per_subset_id, loaders_by_name
        )

        return {
            "asset_entity": asset_entity,
            "containers": containers
        }

    def _load_containers(
        self, repres_by_subset_id, subsets_by_id,
        profiles_per_subset_id, loaders_by_name
    ):
        """Real load by collected data happens here.

        Loading of representations per subset happens here. Each subset
        loads at most one representation. Loading is tried in a specific
        order. Representations are tried by the names defined in
        configuration. If the subset has a representation matching a
        representation name, each loader is tried until one is successful.
        If none of them is successful then the next representation name is
        tried. The subset process loop ends when any representation is
        loaded or all matching representations were already tried.

        Args:
            repres_by_subset_id (dict): Available representations mapped
                by their parent (subset) id.
            subsets_by_id (dict): Subset documents mapped by their id.
            profiles_per_subset_id (dict): Build profiles mapped by subset id.
            loaders_by_name (dict): Available loaders per name.

        Returns:
            (list) Objects of loaded containers.
        """
        loaded_containers = []

        # Get subset id order from build presets.
        build_presets = self.build_presets.get("current_context", [])
        build_presets += self.build_presets.get("linked_assets", [])
        subset_ids_ordered = []
        for preset in build_presets:
            for preset_family in preset["families"]:
                for id, subset in subsets_by_id.items():
                    if preset_family not in subset["data"].get("families", []):
                        continue

                    subset_ids_ordered.append(id)

        # Order representations from subsets.
        print("repres_by_subset_id", repres_by_subset_id)
        representations_ordered = []
        representations = []
        for id in subset_ids_ordered:
            for subset_id, repres in repres_by_subset_id.items():
                if repres in representations:
                    continue

                if id == subset_id:
                    representations_ordered.append((subset_id, repres))
                    representations.append(repres)

        print("representations", representations)

        # Load ordered representations.
        for subset_id, repres in representations_ordered:
            subset_name = subsets_by_id[subset_id]["name"]

            profile = profiles_per_subset_id[subset_id]
            loaders_last_idx = len(profile["loaders"]) - 1
            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1

            repre_by_low_name = {
                repre["name"].lower(): repre for repre in repres
            }

            is_loaded = False
            for repre_name_idx, profile_repre_name in enumerate(
                profile["repre_names_lowered"]
            ):
                # Break iteration if representation was already loaded
                if is_loaded:
                    break

                repre = repre_by_low_name.get(profile_repre_name)
                if not repre:
                    continue

                for loader_idx, loader_name in enumerate(profile["loaders"]):
                    if is_loaded:
                        break

                    loader = loaders_by_name.get(loader_name)
                    if not loader:
                        continue
                    try:
                        container = avalon.api.load(
                            loader,
                            repre["_id"],
                            name=subset_name
                        )
                        loaded_containers.append(container)
                        is_loaded = True

                    except Exception as exc:
                        if isinstance(exc, pipeline.IncompatibleLoaderError):
                            self.log.info((
                                "Loader `{}` is not compatible with"
                                " representation `{}`"
                            ).format(loader_name, repre["name"]))

                        else:
                            self.log.error(
                                "Unexpected error happened during loading",
                                exc_info=True
                            )

                        msg = "Loading failed."
                        if loader_idx < loaders_last_idx:
                            msg += " Trying next loader."
                        elif repre_name_idx < repre_names_last_idx:
                            msg += " Trying next representation."
                        else:
                            msg += (
                                " Loading of subset `{}` was not successful."
                            ).format(subset_name)
                        self.log.info(msg)

        return loaded_containers

    def _collect_last_version_repres(self, asset_entities):
        """Collect subsets, versions and representations for asset_entities.

        Args:
            asset_entities (list): Asset entities for which we want to
                find data.

        Returns:
            (dict): collected entities

        Example output:
        ```
        {
            {Asset ID}: {
                "asset_entity": <AssetEntity>,
                "subsets": {
                    {Subset ID}: {
                        "subset_entity": <SubsetEntity>,
                        "version": {
                            "version_entity": <VersionEntity>,
                            "repres": [
                                <RepreEntity1>, <RepreEntity2>, ...
                            ]
                        }
                    },
                    ...
                }
            },
            ...
        }
        output[asset_id]["subsets"][subset_id]["version"]["repres"]
        ```
        """

        if not asset_entities:
            return {}

        asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities}

        subsets = list(io.find({
            "type": "subset",
            "parent": {"$in": asset_entity_by_ids.keys()}
        }))
        subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}

        sorted_versions = list(io.find({
            "type": "version",
            "parent": {"$in": subset_entity_by_ids.keys()}
        }).sort("name", -1))

        subset_id_with_latest_version = []
        last_versions_by_id = {}
        for version in sorted_versions:
            subset_id = version["parent"]
            if subset_id in subset_id_with_latest_version:
                continue
            subset_id_with_latest_version.append(subset_id)
            last_versions_by_id[version["_id"]] = version

        repres = io.find({
            "type": "representation",
            "parent": {"$in": last_versions_by_id.keys()}
        })

        output = {}
        for repre in repres:
            version_id = repre["parent"]
            version = last_versions_by_id[version_id]

            subset_id = version["parent"]
            subset = subset_entity_by_ids[subset_id]

            asset_id = subset["parent"]
            asset = asset_entity_by_ids[asset_id]

            if asset_id not in output:
                output[asset_id] = {
                    "asset_entity": asset,
                    "subsets": {}
                }

            if subset_id not in output[asset_id]["subsets"]:
                output[asset_id]["subsets"][subset_id] = {
                    "subset_entity": subset,
                    "version": {
                        "version_entity": version,
                        "repres": []
                    }
                }

            output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
                repre
            )

        return output
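
`BuildWorkfile.map_subsets_by_family` above groups subset documents by their primary family; a self-contained sketch of the same grouping on made-up documents:

import collections

subsets = [  # hypothetical subset documents
    {"name": "modelMain", "data": {"family": "model"}},
    {"name": "lookMain", "data": {"families": ["look", "texture"]}},
    {"name": "broken", "data": {}},  # skipped: no family information
]

subsets_by_family = collections.defaultdict(list)
for subset in subsets:
    family = subset["data"].get("family")
    if not family:
        families = subset["data"].get("families")
        if not families:
            continue
        family = families[0]
    subsets_by_family[family].append(subset)

print(sorted(subsets_by_family))  # -> ['look', 'model']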
26  pype/lib/deprecated.py (new file)

@@ -0,0 +1,26 @@
import os

from avalon import io


def get_avalon_database():
    """Mongo database used in avalon's io.

    * Function is not used in pype 3.0, where it was replaced with usage of
      AvalonMongoDB.
    """
    if io._database is None:
        set_io_database()
    return io._database


def set_io_database():
    """Set avalon's io context with environments.

    * Function is not used in pype 3.0, where it was replaced with usage of
      AvalonMongoDB.
    """
    required_keys = ["AVALON_PROJECT", "AVALON_ASSET", "AVALON_SILO"]
    for key in required_keys:
        os.environ[key] = os.environ.get(key, "")
    io.install()
46  pype/lib/ffmpeg_utils.py (new file)

@@ -0,0 +1,46 @@
import logging
import json
import subprocess

from . import get_ffmpeg_tool_path

log = logging.getLogger("FFmpeg utils")


def ffprobe_streams(path_to_file, logger=None):
    """Load streams from entered filepath via ffprobe.

    Args:
        path_to_file (str): absolute path
        logger (logging.Logger): injected logger; if empty, the module
            logger is used

    """
    if not logger:
        logger = log
    logger.info(
        "Getting information about input \"{}\".".format(path_to_file)
    )
    args = [
        "\"{}\"".format(get_ffmpeg_tool_path("ffprobe")),
        "-v quiet",
        "-print_format json",
        "-show_format",
        "-show_streams",
        "\"{}\"".format(path_to_file)
    ]
    command = " ".join(args)
    logger.debug("FFprobe command: \"{}\"".format(command))
    popen = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )

    popen_stdout, popen_stderr = popen.communicate()
    if popen_stdout:
        logger.debug("ffprobe stdout: {}".format(popen_stdout))

    if popen_stderr:
        logger.debug("ffprobe stderr: {}".format(popen_stderr))
    return json.loads(popen_stdout)["streams"]
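
A usage sketch for `ffprobe_streams`, assuming `ffprobe` is resolvable through `FFMPEG_PATH` (or plain `PATH`) and that the media path exists; the path below is made up:

from pype.lib import ffprobe_streams

streams = ffprobe_streams("/path/to/review.mov")  # hypothetical path
for stream in streams:
    # Each stream dict mirrors ffprobe's JSON output.
    print(stream["codec_type"], stream.get("codec_name"))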
71  pype/lib/hooks.py (new file)

@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
"""Package containing code for handling hooks."""
import os
import sys
import types
import logging
from abc import ABCMeta, abstractmethod

import six


log = logging.getLogger(__name__)


@six.add_metaclass(ABCMeta)
class PypeHook:
    """Abstract class from which all hooks should inherit."""

    def __init__(self):
        """Constructor."""
        pass

    @abstractmethod
    def execute(self, *args, **kwargs):
        """Abstract execute method."""
        pass


def execute_hook(hook, *args, **kwargs):
    """Execute hook with arguments.

    This will load the hook file, instantiate the class and call the
    :meth:`PypeHook.execute` method on it. Hook must be in a form::

        $PYPE_SETUP_PATH/repos/pype/path/to/hook.py/HookClass

    This will load `hook.py`, instantiate HookClass and then call
    `execute(*args, **kwargs)` on it.

    Args:
        hook (str): path to hook class.

    """
    class_name = hook.split("/")[-1]

    abspath = os.path.join(os.getenv('PYPE_SETUP_PATH'),
                           'repos', 'pype', *hook.split("/")[:-1])

    mod_name, mod_ext = os.path.splitext(os.path.basename(abspath))

    if not mod_ext == ".py":
        return False

    module = types.ModuleType(mod_name)
    module.__file__ = abspath

    try:
        with open(abspath) as f:
            six.exec_(f.read(), module.__dict__)

        sys.modules[abspath] = module

    except Exception as exp:
        log.exception("loading hook failed: {}".format(exp),
                      exc_info=True)
        return False

    obj = getattr(module, class_name)
    hook_obj = obj()
    ret_val = hook_obj.execute(*args, **kwargs)
    return ret_val
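
`execute_hook` loads a module from a file path and calls `execute` on a class inside it. A self-contained sketch of the same load-and-call pattern on a throwaway file (class name and file are made up):

import os
import sys
import types
import tempfile

HOOK_SOURCE = '''
class MyHook:
    def execute(self, *args, **kwargs):
        return "executed with {}".format(args)
'''

# Write a throwaway hook module to disk.
fd, hook_path = tempfile.mkstemp(suffix=".py")
with os.fdopen(fd, "w") as f:
    f.write(HOOK_SOURCE)

# Load it the same way execute_hook does: exec into a fresh module object.
mod_name = os.path.splitext(os.path.basename(hook_path))[0]
module = types.ModuleType(mod_name)
module.__file__ = hook_path
with open(hook_path) as f:
    exec(f.read(), module.__dict__)
sys.modules[hook_path] = module

hook = module.MyHook()
print(hook.execute("some-arg"))  # -> executed with ('some-arg',)
os.remove(hook_path)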
181  pype/lib/path_tools.py (new file)

@@ -0,0 +1,181 @@
import os
import re
import logging

log = logging.getLogger(__name__)


def get_paths_from_environ(env_key, return_first=False):
    """Return existing paths from specific environment variable.

    Args:
        env_key (str): Environment key where to look for paths.
        return_first (bool): Return only the first existing path on `True`,
            list of all existing paths on `False`.

    Returns:
        str or list: First existing path or list of all existing paths.

    Difference when none of the paths exists:
    - when `return_first` is set to `False` the function returns empty list.
    - when `return_first` is set to `True` the function returns `None`.
    """
    existing_paths = []
    paths = os.environ.get(env_key) or ""
    path_items = paths.split(os.pathsep)
    for path in path_items:
        # Skip empty string
        if not path:
            continue
        # Normalize path
        path = os.path.normpath(path)
        # Check if path exists
        if os.path.exists(path):
            # Return path if `return_first` is set to True
            if return_first:
                return path
            # Store path
            existing_paths.append(path)

    # Return None if none of paths exists
    if return_first:
        return None
    # Return all existing paths from environment variable
    return existing_paths


def get_ffmpeg_tool_path(tool="ffmpeg"):
    """Find path to ffmpeg tool in FFMPEG_PATH paths.

    Function looks for tool in paths set in FFMPEG_PATH environment. If tool
    exists then its full path is returned.

    Args:
        tool (string): tool name

    Returns:
        (str): tool name itself when tool path was not found. (FFmpeg path
            may be set in PATH environment variable)
    """
    dir_paths = get_paths_from_environ("FFMPEG_PATH")
    for dir_path in dir_paths:
        for file_name in os.listdir(dir_path):
            base, _ext = os.path.splitext(file_name)
            if base.lower() == tool.lower():
                return os.path.join(dir_path, tool)
    return tool


def _rreplace(s, a, b, n=1):
    """Replace a with b in string s from right side n times."""
    return b.join(s.rsplit(a, n))


def version_up(filepath):
    """Version up filepath to a new non-existing version.

    Parses for a version identifier like `_v001` or `.v001`.
    When no version is present, `_v001` is appended as suffix.

    Args:
        filepath (str): full path to file

    Returns:
        (str): filepath with increased version number

    """
    dirname = os.path.dirname(filepath)
    basename, ext = os.path.splitext(os.path.basename(filepath))

    regex = r"[._]v\d+"
    matches = re.findall(regex, str(basename), re.IGNORECASE)
    if not matches:
        log.info("Creating version...")
        new_label = "_v{version:03d}".format(version=1)
        new_basename = "{}{}".format(basename, new_label)
    else:
        label = matches[-1]
        version = re.search(r"\d+", label).group()
        padding = len(version)

        new_version = int(version) + 1
        new_version = '{version:0{padding}d}'.format(version=new_version,
                                                     padding=padding)
        new_label = label.replace(version, new_version, 1)
        new_basename = _rreplace(basename, label, new_label)

    if not new_basename.endswith(new_label):
        index = (new_basename.find(new_label))
        index += len(new_label)
        new_basename = new_basename[:index]

    new_filename = "{}{}".format(new_basename, ext)
    new_filename = os.path.join(dirname, new_filename)
    new_filename = os.path.normpath(new_filename)

    if new_filename == filepath:
        raise RuntimeError("Created path is the same as current file,"
                           " this is a bug")

    for file in os.listdir(dirname):
        if file.endswith(ext) and file.startswith(new_basename):
            log.info("Skipping existing version %s" % new_label)
            return version_up(new_filename)

    log.info("New version %s" % new_label)
    return new_filename


def get_version_from_path(file):
    """Find version number in file path string.

    Args:
        file (string): file path

    Returns:
        str: version number as string (e.g. '001')

    """
    pattern = re.compile(r"[\._]v([0-9]+)", re.IGNORECASE)
    try:
        return pattern.findall(file)[0]
    except IndexError:
        log.error(
            "templates:get_version_from_workfile:"
            "`{}` missing version string."
            "Example `v004`".format(file)
        )


def get_last_version_from_path(path_dir, filter):
    """Find last version of given directory content.

    Args:
        path_dir (string): directory path
        filter (list): list of strings used as file name filter

    Returns:
        string: file name with last version

    Example:
        last_version_file = get_last_version_from_path(
            "/project/shots/shot01/work", ["shot01", "compositing", "nk"])
    """
    assert os.path.isdir(path_dir), "`path_dir` argument needs to be directory"
    assert isinstance(filter, list) and (
        len(filter) != 0), "`filter` argument needs to be list and not empty"

    filtered_files = list()

    # form regex for filtering
    pattern = r".*".join(filter)

    for file in os.listdir(path_dir):
        if not re.findall(pattern, file):
            continue
        filtered_files.append(file)

    if filtered_files:
        filtered_files.sort()
        return filtered_files[-1]

    return None
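
A self-contained walk-through of the version-bump logic in `version_up`, on a made-up basename:

import re

basename = "shot010_compositing_v009"  # hypothetical workfile basename

# Find the last version token and its numeric part.
label = re.findall(r"[._]v\d+", basename, re.IGNORECASE)[-1]  # "_v009"
version = re.search(r"\d+", label).group()                    # "009"
padding = len(version)

# Increment while preserving zero-padding.
new_version = "{0:0{1}d}".format(int(version) + 1, padding)   # "010"
new_label = label.replace(version, new_version, 1)            # "_v010"

# Replace the token from the right, as _rreplace does.
new_basename = new_label.join(basename.rsplit(label, 1))
print(new_basename)  # -> shot010_compositing_v010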
80
pype/lib/plugin_tools.py
Normal file
80
pype/lib/plugin_tools.py
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Avalon/Pyblish plugin tools."""
|
||||
import os
|
||||
import inspect
|
||||
import logging
|
||||
|
||||
from ..api import config
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def filter_pyblish_plugins(plugins):
|
||||
"""Filter pyblish plugins by presets.
|
||||
|
||||
This servers as plugin filter / modifier for pyblish. It will load plugin
|
||||
definitions from presets and filter those needed to be excluded.
|
||||
|
||||
Args:
|
||||
plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base`
|
||||
`discover()` method.
|
||||
|
||||
"""
|
||||
from pyblish import api
|
||||
|
||||
host = api.current_host()
|
||||
|
||||
presets = config.get_presets().get('plugins', {})
|
||||
|
||||
# iterate over plugins
|
||||
for plugin in plugins[:]:
|
||||
# skip if there are no presets to process
|
||||
if not presets:
|
||||
continue
|
||||
|
||||
file = os.path.normpath(inspect.getsourcefile(plugin))
|
||||
file = os.path.normpath(file)
|
||||
|
||||
# host determined from path
|
||||
host_from_file = file.split(os.path.sep)[-3:-2][0]
|
||||
plugin_kind = file.split(os.path.sep)[-2:-1][0]
|
||||
|
||||
try:
|
||||
config_data = presets[host]["publish"][plugin.__name__]
|
||||
except KeyError:
|
||||
try:
|
||||
config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501
|
||||
except KeyError:
|
||||
continue
|
||||
|
||||
for option, value in config_data.items():
|
||||
if option == "enabled" and value is False:
|
||||
log.info('removing plugin {}'.format(plugin.__name__))
|
||||
plugins.remove(plugin)
|
||||
else:
|
||||
log.info('setting {}:{} on plugin {}'.format(
|
||||
option, value, plugin.__name__))
|
||||
|
||||
setattr(plugin, option, value)


def source_hash(filepath, *args):
    """Generate simple identifier for a source file.

    This is used to identify whether a source file has previously been
    processed into the pipeline, e.g. a texture.
    The hash is based on source filepath, modification time and file size.
    This is only used to identify whether a specific source file was already
    published before from the same location with the same modification date.
    We opt to do it this way as opposed to Avalanche C4 hash as this is much
    faster and predictable enough for all our production use cases.

    Args:
        filepath (str): The source file path.

    You can specify additional arguments in the function
    to allow for specific 'processing' values to be included.
    """
    # We replace dots with comma because . cannot be a key in a pymongo dict.
    file_name = os.path.basename(filepath)
    time = str(os.path.getmtime(filepath))
    size = str(os.path.getsize(filepath))
    return "|".join([file_name, time, size] + list(args)).replace(".", ",")

@ -1,4 +1,6 @@
import collections
import datetime

import ftrack_api
from pype.modules.ftrack import BaseEvent


@ -10,17 +12,24 @@ class PushFrameValuesToTaskEvent(BaseEvent):
    cust_attrs_query = (
        "select id, key, object_type_id, is_hierarchical, default"
        " from CustomAttributeConfiguration"
        " where key in ({}) and object_type_id in ({})"
        " where key in ({}) and"
        " (object_type_id in ({}) or is_hierarchical is true)"
    )

    cust_attr_query = (
        "select value, entity_id from ContextCustomAttributeValue "
        "where entity_id in ({}) and configuration_id in ({})"
    )

    interest_entity_types = {"Shot"}
    interest_attributes = {"frameStart", "frameEnd"}
    interest_attr_mapping = {
        "frameStart": "fstart",
        "frameEnd": "fend"
    }
    _cached_task_object_id = None
    _cached_interest_object_ids = None
    _cached_user_id = None
    _cached_changes = []
    _max_delta = 30

    # Configurable (lists)
    interest_entity_types = {"Shot"}
    interest_attributes = {"frameStart", "frameEnd"}

    @staticmethod
    def join_keys(keys):

@ -49,8 +58,18 @@ class PushFrameValuesToTaskEvent(BaseEvent):
        )
        return cls._cached_interest_object_ids

    def session_user_id(self, session):
        if self._cached_user_id is None:
            user = session.query(
                "User where username is \"{}\"".format(session.api_user)
            ).one()
            self._cached_user_id = user["id"]
        return self._cached_user_id

    def launch(self, session, event):
        interesting_data = self.extract_interesting_data(session, event)
        interesting_data, changed_keys_by_object_id = (
            self.extract_interesting_data(session, event)
        )
        if not interesting_data:
            return


@ -66,92 +85,165 @@ class PushFrameValuesToTaskEvent(BaseEvent):
            if entity_id not in entities_by_id:
                interesting_data.pop(entity_id)

        task_entities = self.get_task_entities(session, interesting_data)
        attrs_by_obj_id, hier_attrs = self.attrs_configurations(session)

        task_object_id = self.task_object_id(session)
        task_attrs = attrs_by_obj_id.get(task_object_id)
        # Skip keys that are not both in hierarchical and type specific
        for object_id, keys in changed_keys_by_object_id.items():
            object_id_attrs = attrs_by_obj_id.get(object_id)
            for key in keys:
                if key not in hier_attrs:
                    attrs_by_obj_id[object_id].pop(key)
                    continue

                if (
                    (not object_id_attrs or key not in object_id_attrs)
                    and (not task_attrs or key not in task_attrs)
                ):
                    hier_attrs.pop(key)

        # Clean up empty values
        for key, value in tuple(attrs_by_obj_id.items()):
            if not value:
                attrs_by_obj_id.pop(key)

        attrs_by_obj_id = self.attrs_configurations(session)
        if not attrs_by_obj_id:
            self.log.warning((
                "There is not created Custom Attributes {}"
                " for \"Task\" entity type."
            ).format(self.join_keys(self.interest_attributes)))
                "There is not created Custom Attributes {} "
                " for entity types: {}"
            ).format(
                self.join_keys(self.interest_attributes),
                self.join_keys(self.interest_entity_types)
            ))
            return

        task_entities_by_parent_id = collections.defaultdict(list)
        # Prepare task entities
        task_entities = []
        # If task entity does not contain changed attribute then skip
        if task_attrs:
            task_entities = self.get_task_entities(session, interesting_data)

        task_entities_by_id = {}
        parent_id_by_task_id = {}
        for task_entity in task_entities:
            task_entities_by_parent_id[task_entity["parent_id"]].append(
                task_entity
            )
            task_entities_by_id[task_entity["id"]] = task_entity
            parent_id_by_task_id[task_entity["id"]] = task_entity["parent_id"]

        missing_keys_by_object_name = collections.defaultdict(set)
        for parent_id, values in interesting_data.items():
            entities = task_entities_by_parent_id.get(parent_id) or []
            entities.append(entities_by_id[parent_id])
        changed_keys = set()
        for keys in changed_keys_by_object_id.values():
            changed_keys |= set(keys)

            for hier_key, value in values.items():
                changed_ids = []
                for entity in entities:
                    key = self.interest_attr_mapping[hier_key]
                    entity_attrs_mapping = (
                        attrs_by_obj_id.get(entity["object_type_id"])
        attr_id_to_key = {}
        for attr_confs in attrs_by_obj_id.values():
            for key in changed_keys:
                custom_attr_id = attr_confs.get(key)
                if custom_attr_id:
                    attr_id_to_key[custom_attr_id] = key

        for key in changed_keys:
            custom_attr_id = hier_attrs.get(key)
            if custom_attr_id:
                attr_id_to_key[custom_attr_id] = key

        entity_ids = (
            set(interesting_data.keys()) | set(task_entities_by_id.keys())
        )
        attr_ids = set(attr_id_to_key.keys())

        current_values_by_id = self.current_values(
            session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
        )

        for entity_id, current_values in current_values_by_id.items():
            parent_id = parent_id_by_task_id.get(entity_id)
            if not parent_id:
                parent_id = entity_id
            values = interesting_data[parent_id]

            for attr_id, old_value in current_values.items():
                attr_key = attr_id_to_key.get(attr_id)
                if not attr_key:
                    continue

                # Convert new value from string
                new_value = values.get(attr_key)
                if new_value is not None and old_value is not None:
                    try:
                        new_value = type(old_value)(new_value)
                    except Exception:
                        self.log.warning((
                            "Couldn't convert from {} to {}."
                            " Skipping update values."
                        ).format(type(new_value), type(old_value)))
                if new_value == old_value:
                    continue

                entity_key = collections.OrderedDict({
                    "configuration_id": attr_id,
                    "entity_id": entity_id
                })
                self._cached_changes.append({
                    "attr_key": attr_key,
                    "entity_id": entity_id,
                    "value": new_value,
                    "time": datetime.datetime.now()
                })
                if new_value is None:
                    op = ftrack_api.operation.DeleteEntityOperation(
                        "CustomAttributeValue",
                        entity_key
                    )
                else:
                    op = ftrack_api.operation.UpdateEntityOperation(
                        "ContextCustomAttributeValue",
                        entity_key,
                        "value",
                        ftrack_api.symbol.NOT_SET,
                        new_value
                    )
                    if not entity_attrs_mapping:
                        missing_keys_by_object_name[entity.entity_type].add(
                            key
                        )
                        continue

                    configuration_id = entity_attrs_mapping.get(key)
                    if not configuration_id:
                        missing_keys_by_object_name[entity.entity_type].add(
                            key
                        )
                        continue

                    changed_ids.append(entity["id"])
                    entity_key = collections.OrderedDict({
                        "configuration_id": configuration_id,
                        "entity_id": entity["id"]
                    })
                    if value is None:
                        op = ftrack_api.operation.DeleteEntityOperation(
                            "CustomAttributeValue",
                            entity_key
                        )
                    else:
                        op = ftrack_api.operation.UpdateEntityOperation(
                            "ContextCustomAttributeValue",
                            entity_key,
                            "value",
                            ftrack_api.symbol.NOT_SET,
                            value
                        )

                    session.recorded_operations.push(op)
                session.recorded_operations.push(op)
                self.log.info((
                    "Changing Custom Attribute \"{}\" to value"
                    " \"{}\" on entities: {}"
                ).format(key, value, self.join_keys(changed_ids)))
                try:
                    session.commit()
                except Exception:
                    session.rollback()
                    self.log.warning(
                        "Changing of values failed.",
                        exc_info=True
                    )
        if not missing_keys_by_object_name:
            return
                    " \"{}\" on entity: {}"
                ).format(attr_key, new_value, entity_id))
                try:
                    session.commit()
                except Exception:
                    session.rollback()
                    self.log.warning("Changing of values failed.", exc_info=True)

        msg_items = []
        for object_name, missing_keys in missing_keys_by_object_name.items():
            msg_items.append(
                "{}: ({})".format(object_name, self.join_keys(missing_keys))
    def current_values(
        self, session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
    ):
        current_values_by_id = {}
        if not attr_ids or not entity_ids:
            return current_values_by_id
        joined_conf_ids = self.join_keys(attr_ids)
        joined_entity_ids = self.join_keys(entity_ids)

        call_expr = [{
            "action": "query",
            "expression": self.cust_attr_query.format(
                joined_entity_ids, joined_conf_ids
            )
        }]
        if hasattr(session, "call"):
            [values] = session.call(call_expr)
        else:
            [values] = session._call(call_expr)

        self.log.warning((
            "Missing Custom Attribute configuration"
            " per specific object types: {}"
        ).format(", ".join(msg_items)))
        for item in values["data"]:
            entity_id = item["entity_id"]
            attr_id = item["configuration_id"]
            if entity_id in task_entities_by_id and attr_id in hier_attrs:
                continue

            if entity_id not in current_values_by_id:
                current_values_by_id[entity_id] = {}
            current_values_by_id[entity_id][attr_id] = item["value"]
        return current_values_by_id

    def extract_interesting_data(self, session, event):
        # Filter if event contains relevant data

@ -159,7 +251,18 @@ class PushFrameValuesToTaskEvent(BaseEvent):
        if not entities_info:
            return

        # for key, value in event["data"].items():
        #     self.log.info("{}: {}".format(key, value))
        session_user_id = self.session_user_id(session)
        user_data = event["data"].get("user")
        changed_by_session = False
        if user_data and user_data.get("userid") == session_user_id:
            changed_by_session = True

        current_time = datetime.datetime.now()

        interesting_data = {}
        changed_keys_by_object_id = {}
        for entity_info in entities_info:
            # Care only about tasks
            if entity_info.get("entityType") != "task":


@ -176,16 +279,47 @@ class PushFrameValuesToTaskEvent(BaseEvent):
                if key in changes:
                    entity_changes[key] = changes[key]["new"]

            entity_id = entity_info["entityId"]
            if changed_by_session:
                for key, new_value in tuple(entity_changes.items()):
                    for cached in tuple(self._cached_changes):
                        if (
                            cached["entity_id"] != entity_id
                            or cached["attr_key"] != key
                        ):
                            continue

                        cached_value = cached["value"]
                        try:
                            new_value = type(cached_value)(new_value)
                        except Exception:
                            pass

                        if cached_value == new_value:
                            self._cached_changes.remove(cached)
                            entity_changes.pop(key)
                            break

                        delta = (current_time - cached["time"]).seconds
                        if delta > self._max_delta:
                            self._cached_changes.remove(cached)

            if not entity_changes:
                continue

            # Do not care about "Task" entity_type
            task_object_id = self.task_object_id(session)
            if entity_info.get("objectTypeId") == task_object_id:
            object_id = entity_info.get("objectTypeId")
            if not object_id or object_id == task_object_id:
                continue

            interesting_data[entity_info["entityId"]] = entity_changes
        return interesting_data
            interesting_data[entity_id] = entity_changes
            if object_id not in changed_keys_by_object_id:
                changed_keys_by_object_id[object_id] = set()

            changed_keys_by_object_id[object_id] |= set(entity_changes.keys())

        return interesting_data, changed_keys_by_object_id

    def get_entities(self, session, interesting_data):
        entities = session.query(


@ -213,17 +347,21 @@ class PushFrameValuesToTaskEvent(BaseEvent):
        object_ids.append(self.task_object_id(session))

        attrs = session.query(self.cust_attrs_query.format(
            self.join_keys(self.interest_attr_mapping.values()),
            self.join_keys(self.interest_attributes),
            self.join_keys(object_ids)
        )).all()

        output = {}
        hiearchical = {}
        for attr in attrs:
            if attr["is_hierarchical"]:
                hiearchical[attr["key"]] = attr["id"]
                continue
            obj_id = attr["object_type_id"]
            if obj_id not in output:
                output[obj_id] = {}
            output[obj_id][attr["key"]] = attr["id"]
        return output
        return output, hiearchical


def register(session, plugins_presets):
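
The `_cached_changes` list above exists so the handler ignores the event ftrack sends back for its own writes. The same echo-suppression idea in isolation (simplified, outside ftrack):

import datetime

_cached_changes = []
_max_delta = 30  # seconds before a remembered write expires

def remember(entity_id, key, value):
    _cached_changes.append({
        "entity_id": entity_id, "attr_key": key, "value": value,
        "time": datetime.datetime.now(),
    })

def is_own_echo(entity_id, key, value):
    """Return True when an incoming change matches a recent own write."""
    now = datetime.datetime.now()
    for cached in tuple(_cached_changes):
        if (now - cached["time"]).seconds > _max_delta:
            _cached_changes.remove(cached)  # expired entry
            continue
        if (cached["entity_id"], cached["attr_key"]) == (entity_id, key) \
                and cached["value"] == value:
            _cached_changes.remove(cached)  # consume the matched write
            return True
    return False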

64
pype/modules/websocket_server/hosts/aftereffects.py
Normal file

@ -0,0 +1,64 @@
from pype.api import Logger
from wsrpc_aiohttp import WebSocketRoute
import functools

import avalon.aftereffects as aftereffects

log = Logger().get_logger("WebsocketServer")


class AfterEffects(WebSocketRoute):
    """
    One route, mimicking external application (like Harmony, etc).
    All functions could be called from client.
    'do_notify' function calls function on the client - mimicking
    notification after long running job on the server or similar
    """
    instance = None

    def init(self, **kwargs):
        # Python __init__ must return "self".
        # This method might return anything.
        log.debug("someone called AfterEffects route")
        self.instance = self
        return kwargs

    # server functions
    async def ping(self):
        log.debug("someone called AfterEffects route ping")

    # This method calls function on the client side
    # client functions

    async def read(self):
        log.debug("aftereffects.read: client calls server, server calls "
                  "aftereffects client")
        return await self.socket.call('aftereffects.read')

    # panel routes for tools
    async def creator_route(self):
        self._tool_route("creator")

    async def workfiles_route(self):
        self._tool_route("workfiles")

    async def loader_route(self):
        self._tool_route("loader")

    async def publish_route(self):
        self._tool_route("publish")

    async def sceneinventory_route(self):
        self._tool_route("sceneinventory")

    async def projectmanager_route(self):
        self._tool_route("projectmanager")

    def _tool_route(self, tool_name):
        """The address accessed when clicking on the buttons."""
        partial_method = functools.partial(aftereffects.show, tool_name)

        aftereffects.execute_in_main_thread(partial_method)

        # Required return statement.
        return "nothing"

284
pype/modules/websocket_server/stubs/aftereffects_server_stub.py
Normal file

@ -0,0 +1,284 @@
"""
Stub handling connection from server to client.
Used anywhere solution is calling client methods.
"""
from pype.modules.websocket_server import WebSocketServer
import json
from collections import namedtuple

import logging
log = logging.getLogger(__name__)


class AfterEffectsServerStub():
    """
    Stub for calling function on client (AfterEffects js) side.
    Expects that client is already connected (started when avalon menu
    is opened).
    'self.websocketserver.call' is used as async wrapper
    """

    def __init__(self):
        self.websocketserver = WebSocketServer.get_instance()
        self.client = self.websocketserver.get_client()

    def open(self, path):
        """
        Open file located at 'path' (local).
        Args:
            path(string): file path locally
        Returns: None
        """
        self.websocketserver.call(
            self.client.call('AfterEffects.open', path=path)
        )

    def read(self, layer, layers_meta=None):
        """
        Parses layer metadata from Label field of active document
        Args:
            layer: <namedtuple Layer("id": XX, "name": "YYY")>
            layers_meta: full list from Headline (for performance in loops)
        Returns:
        """
        if layers_meta is None:
            layers_meta = self.get_metadata()

        return layers_meta.get(str(layer.id))

    def get_metadata(self):
        """
        Get stored JSON with metadata from AE.Metadata.Label field
        Returns:
            (dict)
        """
        res = self.websocketserver.call(
            self.client.call('AfterEffects.get_metadata')
        )
        try:
            layers_data = json.loads(res)
        except json.decoder.JSONDecodeError:
            raise ValueError("Unparsable metadata {}".format(res))
        return layers_data or {}

    def imprint(self, layer, data, all_layers=None, layers_meta=None):
        """
        Save layer metadata to Label field of metadata of active document
        Args:
            layer (namedtuple): Layer("id": XXX, "name": 'YYY')
            data(string): json representation for single layer
            all_layers (list of namedtuples): for performance, could be
                injected for usage in loop, if not, single call will be
                triggered
            layers_meta(string): json representation from Headline
                (for performance - provide only if imprint is in
                loop - value should be same)
        Returns: None
        """
        if not layers_meta:
            layers_meta = self.get_metadata()

        # json.dumps writes integer values in a dictionary to string, so
        # anticipating it here.
        if str(layer.id) in layers_meta and layers_meta[str(layer.id)]:
            if data:
                layers_meta[str(layer.id)].update(data)
            else:
                layers_meta.pop(str(layer.id))
        else:
            layers_meta[str(layer.id)] = data
        # Ensure only valid ids are stored.
        if not all_layers:
            # loaders create FootageItem now
            all_layers = self.get_items(comps=True,
                                        folders=False,
                                        footages=True)
        item_ids = [int(item.id) for item in all_layers]
        cleaned_data = {}
        for id in layers_meta:
            if int(id) in item_ids:
                cleaned_data[id] = layers_meta[id]

        payload = json.dumps(cleaned_data, indent=4)

        self.websocketserver.call(
            self.client.call('AfterEffects.imprint', payload=payload)
        )

    def get_active_document_full_name(self):
        """
        Returns full name (path) of the active document via ws call
        Returns(string): file name
        """
        res = self.websocketserver.call(self.client.call(
            'AfterEffects.get_active_document_full_name'))

        return res

    def get_active_document_name(self):
        """
        Returns just a name of active document via ws call
        Returns(string): file name
        """
        res = self.websocketserver.call(self.client.call(
            'AfterEffects.get_active_document_name'))

        return res

    def get_items(self, comps, folders=False, footages=False):
        """
        Get all items from Project panel according to arguments.
        There are multiple different types:
            CompItem (could have multiple layers - source for Creator)
            FolderItem (collection type, currently not used)
            FootageItem (imported file - created by Loader)
        Args:
            comps (bool): return CompItems
            folders (bool): return FolderItems
            footages (bool): return FootageItems

        Returns:
            (list) of namedtuples
        """
        res = self.websocketserver.call(
            self.client.call('AfterEffects.get_items',
                             comps=comps,
                             folders=folders,
                             footages=footages)
        )
        return self._to_records(res)

    def get_selected_items(self, comps, folders=False, footages=False):
        """
        Same as get_items but using selected items only
        Args:
            comps (bool): return CompItems
            folders (bool): return FolderItems
            footages (bool): return FootageItems

        Returns:
            (list) of namedtuples

        """
        res = self.websocketserver.call(
            self.client.call('AfterEffects.get_selected_items',
                             comps=comps,
                             folders=folders,
                             footages=footages)
        )
        return self._to_records(res)

    def import_file(self, path, item_name, import_options=None):
        """
        Imports file as a FootageItem. Used in Loader
        Args:
            path (string): absolute path for asset file
            item_name (string): label for created FootageItem
            import_options (dict): different files (img vs psd) need different
                config

        """
        res = self.websocketserver.call(self.client.call(
            'AfterEffects.import_file',
            path=path,
            item_name=item_name,
            import_options=import_options)
        )
        records = self._to_records(res)
        if records:
            return records.pop()

        log.debug("Couldn't import {} file".format(path))

    def replace_item(self, item, path, item_name):
        """ Replace FootageItem with new file

        Args:
            item (dict):
            path (string): absolute path
            item_name (string): label on item in Project list

        """
        self.websocketserver.call(
            self.client.call('AfterEffects.replace_item',
                             item_id=item.id,
                             path=path, item_name=item_name))

    def delete_item(self, item):
        """ Deletes FootageItem
        Args:
            item (dict):

        """
        self.websocketserver.call(
            self.client.call('AfterEffects.delete_item',
                             item_id=item.id
                             ))

    def is_saved(self):
        # TODO
        return True

    def set_label_color(self, item_id, color_idx):
        """
        Used to highlight additional information in Project panel.
        Green color is loaded asset, blue is created asset
        Args:
            item_id (int):
            color_idx (int): 0-16 Label colors from AE Project view
        """
        self.websocketserver.call(
            self.client.call('AfterEffects.set_label_color',
                             item_id=item_id,
                             color_idx=color_idx
                             ))

    def save(self):
        """
        Saves active document
        Returns: None
        """
        self.websocketserver.call(
            self.client.call('AfterEffects.save'))

    def saveAs(self, project_path, as_copy):
        """
        Saves active project to aep (copy) or png or jpg
        Args:
            project_path(string): full local path
            as_copy: <boolean>
        Returns: None
        """
        self.websocketserver.call(
            self.client.call('AfterEffects.saveAs',
                             image_path=project_path,
                             as_copy=as_copy))

    def close(self):
        self.client.close()

    def _to_records(self, res):
        """
        Converts string json representation into list of named tuples for
        dot notation access to work.
        Args:
            res(string): json representation
        Returns: <list of named tuples>
        """
        if not res:
            return []

        try:
            layers_data = json.loads(res)
        except json.decoder.JSONDecodeError:
            raise ValueError("Received broken JSON {}".format(res))
        if not layers_data:
            return []

        ret = []
        # convert to namedtuple to use dot notation
        if isinstance(layers_data, dict):  # TODO refactor
            layers_data = [layers_data]
        for d in layers_data:
            ret.append(namedtuple('Layer', d.keys())(*d.values()))
        return ret
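
Usage sketch of the record conversion in `_to_records` (payload is illustrative):

import json
from collections import namedtuple

res = '[{"id": 12, "name": "comp01"}, {"id": 13, "name": "comp02"}]'
items = [
    namedtuple("Layer", d.keys())(*d.values())
    for d in json.loads(res)
]
assert items[0].name == "comp01"  # dot notation access works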

52
pype/plugins/aftereffects/create/create_render.py
Normal file

@ -0,0 +1,52 @@
from avalon import api
from avalon.vendor import Qt
from avalon import aftereffects

import logging

log = logging.getLogger(__name__)


class CreateRender(api.Creator):
    """Render folder for publish."""

    name = "renderDefault"
    label = "Render"
    family = "render"

    def process(self):
        # After Effects can have multiple compositions with the same name,
        # which does not work with Avalon.
        txt = "Instance with name \"{}\" already exists.".format(self.name)
        stub = aftereffects.stub()  # only after After Effects is up
        for layer in stub.get_items(comps=True,
                                    folders=False,
                                    footages=False):
            if self.name.lower() == layer.name.lower():
                msg = Qt.QtWidgets.QMessageBox()
                msg.setIcon(Qt.QtWidgets.QMessageBox.Warning)
                msg.setText(txt)
                msg.exec_()
                return False
        log.debug("options:: {}".format(self.options))
        if (self.options or {}).get("useSelection"):
            log.debug("useSelection")
            items = stub.get_selected_items(comps=True,
                                            folders=False,
                                            footages=False)
        else:
            items = stub.get_items(comps=True,
                                   folders=False,
                                   footages=False)
        log.debug("items:: {}".format(items))
        if not items:
            raise ValueError("Nothing to create. Select composition "
                             "if 'useSelection' or create at least "
                             "one composition.")

        for item in items:
            stub.imprint(item, self.data)
            stub.set_label_color(item.id, 14)  # Cyan options 0 - 16

105
pype/plugins/aftereffects/load/load_file.py
Normal file

@ -0,0 +1,105 @@
from avalon import api, aftereffects
from pype.plugins import lib
import re

stub = aftereffects.stub()


class FileLoader(api.Loader):
    """Load images

    Stores the imported asset in a container named after the asset.
    """
    label = "Load file"

    families = ["image",
                "plate",
                "render",
                "prerender",
                "review",
                "audio"]
    representations = ["*"]

    def load(self, context, name=None, namespace=None, data=None):
        comp_name = lib.get_unique_layer_name(stub.get_items(comps=True),
                                              context["asset"]["name"],
                                              name)

        import_options = {}

        file = self.fname

        repr_cont = context["representation"]["context"]
        if "#" not in file:
            frame = repr_cont.get("frame")
            if frame:
                padding = len(frame)
                file = file.replace(frame, "#" * padding)
                import_options['sequence'] = True

        if not file:
            repr_id = context["representation"]["_id"]
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

        file = file.replace("\\", "/")
        if '.psd' in file:
            import_options['ImportAsType'] = 'ImportAsType.COMP'

        comp = stub.import_file(self.fname, comp_name, import_options)

        if not comp:
            self.log.warning(
                "Representation id `{}` is failing to load".format(file))
            self.log.warning("Check host app for alert error.")
            return

        self[:] = [comp]
        namespace = namespace or comp_name

        return aftereffects.containerise(
            name,
            namespace,
            comp,
            context,
            self.__class__.__name__
        )

    def update(self, container, representation):
        """ Switch asset or change version """
        layer = container.pop("layer")

        context = representation.get("context", {})

        namespace_from_container = re.sub(r'_\d{3}$', '',
                                          container["namespace"])
        layer_name = "{}_{}".format(context["asset"], context["subset"])
        # switching assets
        if namespace_from_container != layer_name:
            layer_name = lib.get_unique_layer_name(stub.get_items(comps=True),
                                                   context["asset"],
                                                   context["subset"])
        else:  # switching version - keep same name
            layer_name = container["namespace"]
        path = api.get_representation_path(representation)
        # with aftereffects.maintained_selection():  # TODO
        stub.replace_item(layer, path, layer_name)
        stub.imprint(
            layer, {"representation": str(representation["_id"]),
                    "name": context["subset"],
                    "namespace": layer_name}
        )

    def remove(self, container):
        """
        Removes element from scene: deletes layer + removes from Headline
        Args:
            container (dict): container to be removed - used to get layer_id
        """
        layer = container.pop("layer")
        stub.imprint(layer, {})
        # `delete_item` expects the whole item record, not only its id
        stub.delete_item(layer)

    def switch(self, container, representation):
        self.update(container, representation)
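
The sequence handling in `load()` above swaps the concrete frame number for `#` padding before import. The substitution in isolation (path is hypothetical):

file = "/renders/shot01/beauty.1001.exr"
frame = "1001"  # taken from the representation context

if "#" not in file and frame:
    padding = len(frame)
    file = file.replace(frame, "#" * padding)

assert file == "/renders/shot01/beauty.####.exr"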

@ -1,7 +1,9 @@
import pyblish.api
import os
import collections

import pyblish.api
from avalon import io

import pype.api as pype
from pprint import pformat


@ -12,12 +14,13 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):

    def process(self, context):
        self.log.info('Collecting Audio Data')
        asset_entity = context.data["assetEntity"]
        asset_doc = context.data["assetEntity"]

        # get all available representations
        subsets = pype.get_subsets(asset_entity["name"],
                                   representations=["audio", "wav"]
                                   )
        subsets = self.get_subsets(
            asset_doc,
            representations=["audio", "wav"]
        )
        self.log.info(f"subsets is: {pformat(subsets)}")

        if not subsets.get("audioMain"):


@ -39,3 +42,85 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
                'audio_file: {}, has been added to context'.format(audio_file))
        else:
            self.log.warning("Couldn't find any audio file on Ftrack.")

    def get_subsets(self, asset_doc, representations):
        """
        Query subsets with filter on name.

        The method will return all found subsets and its defined version
        and subsets. Version could be specified with number. Representation
        can be filtered.

        Arguments:
            asset_doc (dict): Asset (shot) mongo document
            representations (list): list for all representations

        Returns:
            dict: subsets with version and representations in keys
        """

        # Query all subsets for asset
        subset_docs = io.find({
            "type": "subset",
            "parent": asset_doc["_id"]
        })
        # Collect all subset ids
        subset_ids = [
            subset_doc["_id"]
            for subset_doc in subset_docs
        ]

        # Check if we found anything
        assert subset_ids, (
            "No subsets found. Check correct filter. "
            "Try this for start `r'.*'`: asset: `{}`"
        ).format(asset_doc["name"])

        # Last version aggregation
        pipeline = [
            # Find all versions of those subsets
            {"$match": {
                "type": "version",
                "parent": {"$in": subset_ids}
            }},
            # Sorting versions all together
            {"$sort": {"name": 1}},
            # Group them by "parent", but only take the last
            {"$group": {
                "_id": "$parent",
                "_version_id": {"$last": "$_id"},
                "name": {"$last": "$name"}
            }}
        ]
        last_versions_by_subset_id = dict()
        for doc in io.aggregate(pipeline):
            doc["parent"] = doc["_id"]
            doc["_id"] = doc.pop("_version_id")
            last_versions_by_subset_id[doc["parent"]] = doc

        version_docs_by_id = {}
        for version_doc in last_versions_by_subset_id.values():
            version_docs_by_id[version_doc["_id"]] = version_doc

        repre_docs = io.find({
            "type": "representation",
            "parent": {"$in": list(version_docs_by_id.keys())},
            "name": {"$in": representations}
        })
        repre_docs_by_version_id = collections.defaultdict(list)
        for repre_doc in repre_docs:
            version_id = repre_doc["parent"]
            repre_docs_by_version_id[version_id].append(repre_doc)

        output_dict = {}
        for version_id, repre_docs in repre_docs_by_version_id.items():
            version_doc = version_docs_by_id[version_id]
            subset_id = version_doc["parent"]
            subset_doc = last_versions_by_subset_id[subset_id]
            # Store queried docs by subset name
            output_dict[subset_doc["name"]] = {
                "representations": repre_docs,
                "version": version_doc
            }

        return output_dict
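
The aggregation pipeline above leans on `$sort` by version name followed by `$group` with `$last`, so the last document per subset is the highest version. The same selection in plain Python, for intuition:

versions = [
    {"_id": "v1", "parent": "subsetA", "name": 1},
    {"_id": "v3", "parent": "subsetA", "name": 3},
    {"_id": "v2", "parent": "subsetB", "name": 2},
]
last_by_subset = {}
for doc in sorted(versions, key=lambda d: d["name"]):
    last_by_subset[doc["parent"]] = doc  # later (higher) name wins

assert last_by_subset["subsetA"]["_id"] == "v3"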

@ -19,12 +19,16 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
        if "unreal" in pyblish.api.registered_hosts():
            return

        assert context.data.get('currentFile'), "Cannot get current file"
        filename = os.path.basename(context.data.get('currentFile'))

        if '<shell>' in filename:
            return

        rootVersion = int(pype.get_version_from_path(filename))
        version = pype.get_version_from_path(filename)
        assert version, "Cannot determine version"

        rootVersion = int(version)
        context.data['version'] = rootVersion
        self.log.info("{}".format(type(rootVersion)))
        self.log.info('Scene Version: %s' % context.data.get('version'))
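
`pype.get_version_from_path` is assumed here to extract a version token from the file name; a minimal regex sketch of such behaviour (not the actual pype implementation):

import re

def get_version_from_path(path):
    """Return the last 'v###' number found in the file name, or None."""
    matches = re.findall(r"v(\d+)", path)
    return matches[-1] if matches else None

assert get_version_from_path("sh010_comp_v017.nk") == "017"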

@ -2,6 +2,7 @@ import os
import re
import json
import copy
import tempfile

import pype.api
import pyblish


@ -26,7 +27,7 @@ class ExtractBurnin(pype.api.Extractor):
        "hiero",
        "premiere",
        "standalonepublisher",
        "harmony"
        "harmony",
        "fusion"
    ]
    optional = True


@ -227,12 +228,30 @@ class ExtractBurnin(pype.api.Extractor):
        # Dump data to string
        dumped_script_data = json.dumps(script_data)

        # Store dumped json to temporary file
        temporary_json_file = tempfile.NamedTemporaryFile(
            mode="w", suffix=".json", delete=False
        )
        temporary_json_file.write(dumped_script_data)
        temporary_json_file.close()
        temporary_json_filepath = temporary_json_file.name.replace(
            "\\", "/"
        )

        # Prepare subprocess arguments
        args = [executable, scriptpath, dumped_script_data]
        self.log.debug("Executing: {}".format(args))
        args = [
            "\"{}\"".format(executable),
            "\"{}\"".format(scriptpath),
            "\"{}\"".format(temporary_json_filepath)
        ]
        subprcs_cmd = " ".join(args)
        self.log.debug("Executing: {}".format(subprcs_cmd))

        # Run burnin script
        pype.api.subprocess(args, shell=True, logger=self.log)
        pype.api.subprocess(subprcs_cmd, shell=True, logger=self.log)

        # Remove the temporary json
        os.remove(temporary_json_filepath)

        for filepath in temp_data["full_input_paths"]:
            filepath = filepath.replace("\\", "/")


@ -974,7 +993,7 @@ class ExtractBurnin(pype.api.Extractor):

        args = [executable, scriptpath, json_data]
        self.log.debug("Executing: {}".format(args))
        output = pype.api.subprocess(args, shell=True)
        output = pype.api.subprocess(args, shell=True, logger=self.log)
        self.log.debug("Output: {}".format(output))

        repre_update = {
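
The burnin change above stops passing raw JSON on the command line: the payload is written to a temporary file and every path is quoted before the `shell=True` call. The pattern in isolation (executable and script paths are hypothetical):

import json
import os
import subprocess
import tempfile

script_data = {"burnin": "top-left", "frame_start": 1001}

tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False)
tmp.write(json.dumps(script_data))
tmp.close()

cmd = " ".join([
    '"{}"'.format("/path/to/python"),     # hypothetical executable
    '"{}"'.format("/path/to/burnin.py"),  # hypothetical script
    '"{}"'.format(tmp.name.replace("\\", "/")),
])
subprocess.check_call(cmd, shell=True)  # quoting survives spaces in paths
os.remove(tmp.name)                     # clean up the temporary json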

@ -72,7 +72,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
        ffmpeg_args = self.ffmpeg_args or {}

        jpeg_items = []
        jpeg_items.append(ffmpeg_path)
        jpeg_items.append("\"{}\"".format(ffmpeg_path))
        # override file if already exists
        jpeg_items.append("-y")
        # use same input args like with mov


@ -466,7 +466,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
            audio_filters.append(arg)

        all_args = []
        all_args.append(self.ffmpeg_path)
        all_args.append("\"{}\"".format(self.ffmpeg_path))
        all_args.extend(input_args)
        if video_filters:
            all_args.append("-filter:v {}".format(",".join(video_filters)))

@ -650,7 +650,9 @@ class ExtractReview(pyblish.api.InstancePlugin):

        # NOTE Skipped using instance's resolution
        full_input_path_single_file = temp_data["full_input_path_single_file"]
        input_data = pype.lib.ffprobe_streams(full_input_path_single_file)[0]
        input_data = pype.lib.ffprobe_streams(
            full_input_path_single_file, self.log
        )[0]
        input_width = int(input_data["width"])
        input_height = int(input_data["height"])


@ -1543,7 +1545,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
            os.mkdir(stg_dir)

        mov_args = [
            ffmpeg_path,
            "\"{}\"".format(ffmpeg_path),
            " ".join(input_args),
            " ".join(output_args)
        ]


@ -26,7 +26,7 @@ class ExtractReviewSlate(pype.api.Extractor):
        slate_path = inst_data.get("slateFrame")
        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

        slate_stream = pype.lib.ffprobe_streams(slate_path)[0]
        slate_stream = pype.lib.ffprobe_streams(slate_path, self.log)[0]
        slate_width = slate_stream["width"]
        slate_height = slate_stream["height"]


@ -178,7 +178,7 @@ class ExtractReviewSlate(pype.api.Extractor):
        _remove_at_end.append(slate_v_path)

        slate_args = [
            ffmpeg_path,
            "\"{}\"".format(ffmpeg_path),
            " ".join(input_args),
            " ".join(output_args)
        ]


@ -299,7 +299,7 @@ class ExtractReviewSlate(pype.api.Extractor):

        try:
            # Get information about input file via ffprobe tool
            streams = pype.lib.ffprobe_streams(full_input_path)
            streams = pype.lib.ffprobe_streams(full_input_path, self.log)
        except Exception:
            self.log.warning(
                "Could not get codec data from input.",


@ -615,12 +615,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

        # copy file with speedcopy and check if sizes of files are symmetrical
        while True:
            import shutil
            try:
                if not shutil._samefile(src, dst):
                    copyfile(src, dst)
            except shutil.SameFileError:
                self.log.critical("files are the same {} to {}".format(src,
                                                                       dst))
            else:
                self.log.critical(
                    "files are the same {} to {}".format(src, dst)
                )
                os.remove(dst)
                try:
                    shutil.copyfile(src, dst)
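
The integration change above guards against `shutil.SameFileError` instead of pre-checking with the private `shutil._samefile`. The minimal form of that guard:

import shutil

def safe_copy(src, dst):
    """Copy src to dst, tolerating src and dst being the same file."""
    try:
        shutil.copyfile(src, dst)
    except shutil.SameFileError:
        # Source and destination already point at the same file;
        # nothing to do.
        pass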

@ -151,6 +151,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
    deadline_pool_secondary = ""
    deadline_group = ""
    deadline_chunk_size = 1
    deadline_priority = None

    # regex for finding frame number in string
    R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')


@ -902,7 +903,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        render_job["Props"]["User"] = context.data.get(
            "deadlineUser", getpass.getuser())
        # Priority is now not handled at all
        render_job["Props"]["Pri"] = instance.data.get("priority")

        if self.deadline_priority:
            render_job["Props"]["Pri"] = self.deadline_priority
        else:
            render_job["Props"]["Pri"] = instance.data.get("priority")

        render_job["Props"]["Env"] = {
            "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),


@ -1024,6 +1029,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        version = pype.api.get_latest_version(asset, subset)
        if version:
            version = int(version["name"]) + 1
        else:
            version = 1

        template_data["subset"] = subset
        template_data["family"] = "render"


@ -1031,8 +1038,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

        anatomy_filled = anatomy.format(template_data)

        if "folder" in anatomy.templates["publish"]:
            publish_folder = anatomy_filled["publish"]["folder"]
        if "folder" in anatomy.templates["render"]:
            publish_folder = anatomy_filled["render"]["folder"]
        else:
            # solve deprecated situation when `folder` key is not underneath
            # `publish` anatomy


@ -1042,7 +1049,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                " key underneath `publish` (in global or for project `{}`)."
            ).format(project_name))

            file_path = anatomy_filled["publish"]["path"]
            file_path = anatomy_filled["render"]["path"]
            # Directory
            publish_folder = os.path.dirname(file_path)

@ -29,6 +29,6 @@ class ValidateFFmpegInstalled(pyblish.api.ContextPlugin):
    def process(self, context):
        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
        self.log.info("ffmpeg path: `{}`".format(ffmpeg_path))
        if self.is_tool(ffmpeg_path) is False:
        if self.is_tool("{}".format(ffmpeg_path)) is False:
            self.log.error("ffmpeg not found in PATH")
            raise RuntimeError('ffmpeg not installed.')

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Create render node."""
from avalon import harmony


@ -10,17 +12,15 @@ class CreateRender(harmony.Creator):
    node_type = "WRITE"

    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(CreateRender, self).__init__(*args, **kwargs)

    def setup_node(self, node):
        sig = harmony.signature()
        func = """function %s(args)
        {
            node.setTextAttr(args[0], "DRAWING_TYPE", 1, "PNG4");
            node.setTextAttr(args[0], "DRAWING_NAME", 1, args[1]);
            node.setTextAttr(args[0], "MOVIE_PATH", 1, args[1]);
        }
        %s
        """ % (sig, sig)
        """Set render node."""
        self_name = self.__class__.__name__
        path = "{0}/{0}".format(node.split("/")[-1])
        harmony.send({"function": func, "args": [node, path]})
        harmony.send(
            {
                "function": f"PypeHarmony.Creators.{self_name}.create",
                "args": [node, path]
            })
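
After this change the creator no longer ships an inline script; it invokes a function already registered on the Harmony side under the `PypeHarmony` namespace. The call shape, with illustrative arguments:

from avalon import harmony  # available inside the Harmony host session

harmony.send({
    # The function must already exist in the client-side PypeHarmony bundle.
    "function": "PypeHarmony.Creators.CreateRender.create",
    "args": ["Top/renderDefault", "renderDefault/renderDefault"],
})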

@ -1,277 +1,81 @@
# -*- coding: utf-8 -*-
"""Loader for image sequences."""
import os
import uuid
from pathlib import Path

import clique

from avalon import api, harmony
import pype.lib

copy_files = """function copyFile(srcFilename, dstFilename)
{
    var srcFile = new PermanentFile(srcFilename);
    var dstFile = new PermanentFile(dstFilename);
    srcFile.copy(dstFile);
}
"""

import_files = """var PNGTransparencyMode = 1; //Premultiplied with Black
var TGATransparencyMode = 0; //Premultiplied with Black
var SGITransparencyMode = 0; //Premultiplied with Black
var LayeredPSDTransparencyMode = 1; //Straight
var FlatPSDTransparencyMode = 2; //Premultiplied with White

function getUniqueColumnName( column_prefix )
{
    var suffix = 0;
    // finds if unique name for a column
    var column_name = column_prefix;
    while(suffix < 2000)
    {
        if(!column.type(column_name))
            break;

        suffix = suffix + 1;
        column_name = column_prefix + "_" + suffix;
    }
    return column_name;
}

function import_files(args)
{
    var root = args[0];
    var files = args[1];
    var name = args[2];
    var start_frame = args[3];

    var vectorFormat = null;
    var extension = null;
    var filename = files[0];

    var pos = filename.lastIndexOf(".");
    if( pos < 0 )
        return null;

    extension = filename.substr(pos+1).toLowerCase();

    if(extension == "jpeg")
        extension = "jpg";
    if(extension == "tvg")
    {
        vectorFormat = "TVG"
        extension = "SCAN"; // element.add() will use this.
    }

    var elemId = element.add(
        name,
        "BW",
        scene.numberOfUnitsZ(),
        extension.toUpperCase(),
        vectorFormat
    );
    if (elemId == -1)
    {
        // hum, unknown file type most likely -- let's skip it.
        return null; // no read to add.
    }

    var uniqueColumnName = getUniqueColumnName(name);
    column.add(uniqueColumnName, "DRAWING");
    column.setElementIdOfDrawing(uniqueColumnName, elemId);

    var read = node.add(root, name, "READ", 0, 0, 0);
    var transparencyAttr = node.getAttr(
        read, frame.current(), "READ_TRANSPARENCY"
    );
    var opacityAttr = node.getAttr(read, frame.current(), "OPACITY");
    transparencyAttr.setValue(true);
    opacityAttr.setValue(true);

    var alignmentAttr = node.getAttr(read, frame.current(), "ALIGNMENT_RULE");
    alignmentAttr.setValue("ASIS");

    var transparencyModeAttr = node.getAttr(
        read, frame.current(), "applyMatteToColor"
    );
    if (extension == "png")
        transparencyModeAttr.setValue(PNGTransparencyMode);
    if (extension == "tga")
        transparencyModeAttr.setValue(TGATransparencyMode);
    if (extension == "sgi")
        transparencyModeAttr.setValue(SGITransparencyMode);
    if (extension == "psd")
        transparencyModeAttr.setValue(FlatPSDTransparencyMode);
    if (extension == "jpg")
        transparencyModeAttr.setValue(LayeredPSDTransparencyMode);

    node.linkAttr(read, "DRAWING.ELEMENT", uniqueColumnName);

    if (files.length == 1)
    {
        // Create a drawing; 'true' indicates that the file exists.
        Drawing.create(elemId, 1, true);
        // Get the actual path, in tmp folder.
        var drawingFilePath = Drawing.filename(elemId, "1");
        copyFile(files[0], drawingFilePath);
        // Expose the image for the entire frame range.
        for( var i =0; i <= frame.numberOf() - 1; ++i)
        {
            timing = start_frame + i
            column.setEntry(uniqueColumnName, 1, timing, "1");
        }
    } else {
        // Create a drawing for each file.
        for( var i =0; i <= files.length - 1; ++i)
        {
            timing = start_frame + i
            // Create a drawing; 'true' indicates that the file exists.
            Drawing.create(elemId, timing, true);
            // Get the actual path, in tmp folder.
            var drawingFilePath = Drawing.filename(elemId, timing.toString());
            copyFile( files[i], drawingFilePath );

            column.setEntry(uniqueColumnName, 1, timing, timing.toString());
        }
    }

    var green_color = new ColorRGBA(0, 255, 0, 255);
    node.setColor(read, green_color);

    return read;
}
import_files
"""

replace_files = """var PNGTransparencyMode = 1; //Premultiplied with Black
var TGATransparencyMode = 0; //Premultiplied with Black
var SGITransparencyMode = 0; //Premultiplied with Black
var LayeredPSDTransparencyMode = 1; //Straight
var FlatPSDTransparencyMode = 2; //Premultiplied with White

function replace_files(args)
{
    var files = args[0];
    MessageLog.trace(files);
    MessageLog.trace(files.length);
    var _node = args[1];
    var start_frame = args[2];

    var _column = node.linkedColumn(_node, "DRAWING.ELEMENT");
    var elemId = column.getElementIdOfDrawing(_column);

    // Delete existing drawings.
    var timings = column.getDrawingTimings(_column);
    for( var i =0; i <= timings.length - 1; ++i)
    {
        column.deleteDrawingAt(_column, parseInt(timings[i]));
    }


    var filename = files[0];
    var pos = filename.lastIndexOf(".");
    if( pos < 0 )
        return null;
    var extension = filename.substr(pos+1).toLowerCase();

    if(extension == "jpeg")
        extension = "jpg";

    var transparencyModeAttr = node.getAttr(
        _node, frame.current(), "applyMatteToColor"
    );
    if (extension == "png")
        transparencyModeAttr.setValue(PNGTransparencyMode);
    if (extension == "tga")
        transparencyModeAttr.setValue(TGATransparencyMode);
    if (extension == "sgi")
        transparencyModeAttr.setValue(SGITransparencyMode);
    if (extension == "psd")
        transparencyModeAttr.setValue(FlatPSDTransparencyMode);
    if (extension == "jpg")
        transparencyModeAttr.setValue(LayeredPSDTransparencyMode);

    if (files.length == 1)
    {
        // Create a drawing; 'true' indicates that the file exists.
        Drawing.create(elemId, 1, true);
        // Get the actual path, in tmp folder.
        var drawingFilePath = Drawing.filename(elemId, "1");
        copyFile(files[0], drawingFilePath);
        MessageLog.trace(files[0]);
        MessageLog.trace(drawingFilePath);
        // Expose the image for the entire frame range.
        for( var i =0; i <= frame.numberOf() - 1; ++i)
        {
            timing = start_frame + i
            column.setEntry(_column, 1, timing, "1");
        }
    } else {
        // Create a drawing for each file.
        for( var i =0; i <= files.length - 1; ++i)
        {
            timing = start_frame + i
            // Create a drawing; 'true' indicates that the file exists.
            Drawing.create(elemId, timing, true);
            // Get the actual path, in tmp folder.
            var drawingFilePath = Drawing.filename(elemId, timing.toString());
            copyFile( files[i], drawingFilePath );

            column.setEntry(_column, 1, timing, timing.toString());
        }
    }

    var green_color = new ColorRGBA(0, 255, 0, 255);
    node.setColor(_node, green_color);
}
replace_files
"""


class ImageSequenceLoader(api.Loader):
    """Load images
    """Load image sequences.

    Stores the imported asset in a container named after the asset.
    """

    families = ["shot", "render", "image", "plate", "reference"]
    representations = ["jpeg", "png", "jpg"]

    def load(self, context, name=None, namespace=None, data=None):
        """Plugin entry point.

        Args:
            context (:class:`pyblish.api.Context`): Context.
            name (str, optional): Container name.
            namespace (str, optional): Container namespace.
            data (dict, optional): Additional data passed into loader.

        """
        fname = Path(self.fname)
        self_name = self.__class__.__name__
        collections, remainder = clique.assemble(
            os.listdir(os.path.dirname(self.fname))
            os.listdir(fname.parent.as_posix())
        )
        files = []
        if collections:
            for f in list(collections[0]):
                files.append(
                    os.path.join(
                        os.path.dirname(self.fname), f
                    ).replace("\\", "/")
                )
                files.append(fname.parent.joinpath(f).as_posix())
        else:
            files.append(
                os.path.join(
                    os.path.dirname(self.fname), remainder[0]
                ).replace("\\", "/")
            )
            files.append(fname.parent.joinpath(remainder[0]).as_posix())

        name = context["subset"]["name"]
        name += "_{}".format(uuid.uuid4())
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]

        group_id = str(uuid.uuid4())
        read_node = harmony.send(
            {
                "function": copy_files + import_files,
                "args": ["Top", files, name, 1]
                "function": f"PypeHarmony.Loaders.{self_name}.importFiles",  # noqa: E501
                "args": [
                    files,
                    asset,
                    subset,
                    1,
                    group_id
                ]
            }
        )["result"]

        return harmony.containerise(
            name,
            f"{asset}_{subset}",
            namespace,
            read_node,
            context,
            self.__class__.__name__,
            self_name,
            nodes=[read_node]
        )

    def update(self, container, representation):
        """Update loaded containers.

        Args:
            container (dict): Container data.
            representation (dict): Representation data.

        """
        self_name = self.__class__.__name__
        node = harmony.find_node_by_name(container["name"], "READ")

        path = api.get_representation_path(representation)


@ -295,50 +99,42 @@ class ImageSequenceLoader(api.Loader):

        harmony.send(
            {
                "function": copy_files + replace_files,
                "function": f"PypeHarmony.Loaders.{self_name}.replaceFiles",
                "args": [files, node, 1]
            }
        )

        # Colour node.
        sig = harmony.signature("copyFile")
        func = """function %s(args){
            for( var i =0; i <= args[0].length - 1; ++i)
            {
                var red_color = new ColorRGBA(255, 0, 0, 255);
                var green_color = new ColorRGBA(0, 255, 0, 255);
                if (args[1] == "red"){
                    node.setColor(args[0], red_color);
                }
                if (args[1] == "green"){
                    node.setColor(args[0], green_color);
                }
            }
        }
        %s
        """ % (sig, sig)
        if pype.lib.is_latest(representation):
            harmony.send({"function": func, "args": [node, "green"]})
            harmony.send(
                {
                    "function": "PypeHarmony.setColor",
                    "args": [node, [0, 255, 0, 255]]
                })
        else:
            harmony.send({"function": func, "args": [node, "red"]})
            harmony.send(
                {
                    "function": "PypeHarmony.setColor",
                    "args": [node, [255, 0, 0, 255]]
                })

        harmony.imprint(
            node, {"representation": str(representation["_id"])}
        )

    def remove(self, container):
        node = harmony.find_node_by_name(container["name"], "READ")
        """Remove loaded container.

        Args:
            container (dict): Container data.

        func = """function deleteNode(_node)
        {
            node.deleteNode(_node, true, true);
        }
        deleteNode
        """
        node = harmony.find_node_by_name(container["name"], "READ")
        harmony.send(
            {"function": func, "args": [node]}
            {"function": "PypeHarmony.deleteNode", "args": [node]}
        )
        harmony.imprint(node, {}, remove=True)

    def switch(self, container, representation):
        """Switch loaded representations."""
        self.update(container, representation)
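
`clique.assemble` above groups a directory listing into frame sequences; anything that does not form a sequence lands in `remainder`. A small sketch:

import clique

files = ["beauty.1001.exr", "beauty.1002.exr", "notes.txt"]
collections, remainder = clique.assemble(files)

# collections[0] holds the exr sequence, remainder the lone file.
print(list(collections[0]))  # ['beauty.1001.exr', 'beauty.1002.exr']
print(remainder)             # ['notes.txt']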

@ -2,13 +2,12 @@ import os
import shutil

from avalon import api, harmony
from avalon.vendor import Qt


class ImportPaletteLoader(api.Loader):
    """Import palettes."""

    families = ["harmony.palette"]
    families = ["palette"]
    representations = ["plt"]
    label = "Import Palette"


@ -41,14 +40,14 @@ class ImportPaletteLoader(api.Loader):

        harmony.save_scene()

        # Don't allow instances with the same name.
        message_box = Qt.QtWidgets.QMessageBox()
        message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
        msg = "Updated {}.".format(subset_name)
        msg += " You need to reload the scene to see the changes."
        message_box.setText(msg)
        message_box.exec_()

        harmony.send(
            {
                "function": "PypeHarmony.message",
                "args": msg
            })
        return name

    def remove(self, container):

143
pype/plugins/harmony/load/load_template.py
Normal file

@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-
"""Load template."""
import tempfile
import zipfile
import os
import shutil
import uuid

from avalon import api, harmony
import pype.lib


class TemplateLoader(api.Loader):
    """Load Harmony template as container.

    .. todo::

        This must be implemented properly.

    """

    families = ["template", "workfile"]
    representations = ["*"]
    label = "Load Template"
    icon = "gift"

    def load(self, context, name=None, namespace=None, data=None):
        """Plugin entry point.

        Args:
            context (:class:`pyblish.api.Context`): Context.
            name (str, optional): Container name.
            namespace (str, optional): Container namespace.
            data (dict, optional): Additional data passed into loader.

        """
        # Load template.
        self_name = self.__class__.__name__
        temp_dir = tempfile.mkdtemp()
        zip_file = api.get_representation_path(context["representation"])
        template_path = os.path.join(temp_dir, "temp.tpl")
        with zipfile.ZipFile(zip_file, "r") as zip_ref:
            zip_ref.extractall(template_path)

        group_id = "{}".format(uuid.uuid4())

        container_group = harmony.send(
            {
                "function": f"PypeHarmony.Loaders.{self_name}.loadContainer",
                "args": [template_path,
                         context["asset"]["name"],
                         context["subset"]["name"],
                         group_id]
            }
        )["result"]

        # Cleanup the temp directory
        shutil.rmtree(temp_dir)

        # We must validate the group_node
        return harmony.containerise(
            name,
            namespace,
            container_group,
            context,
            self_name
        )

    def update(self, container, representation):
        """Update loaded containers.

        Args:
            container (dict): Container data.
            representation (dict): Representation data.

        """
        node_name = container["name"]
        node = harmony.find_node_by_name(node_name, "GROUP")
        self_name = self.__class__.__name__

        update_and_replace = False
        if pype.lib.is_latest(representation):
            self._set_green(node)
        else:
            self._set_red(node)

        update_and_replace = harmony.send(
            {
                "function": f"PypeHarmony.Loaders.{self_name}."
                            "askForColumnsUpdate",
                "args": []
            }
        )["result"]

        if update_and_replace:
            # FIXME: This won't work, need to implement it.
            harmony.send(
                {
                    "function": f"PypeHarmony.Loaders.{self_name}."
                                "replaceNode",
                    "args": []
                }
            )
        else:
            self.load(
                container["context"], container["name"],
                None, container["data"])

        harmony.imprint(
            node, {"representation": str(representation["_id"])}
        )

    def remove(self, container):
        """Remove container.

        Args:
            container (dict): container definition.

        """
        node = harmony.find_node_by_name(container["name"], "GROUP")
        harmony.send(
            {"function": "PypeHarmony.deleteNode", "args": [node]}
        )

    def switch(self, container, representation):
        """Switch representation containers."""
        self.update(container, representation)

    def _set_green(self, node):
        """Set node color to green `rgba(0, 255, 0, 255)`."""
        harmony.send(
            {
                "function": "PypeHarmony.setColor",
                "args": [node, [0, 255, 0, 255]]
            })

    def _set_red(self, node):
        """Set node color to red `rgba(255, 0, 0, 255)`."""
        harmony.send(
{
|
||||
"function": "PypeHarmony.setColor",
|
||||
"args": [node, [255, 0, 0, 255]]
|
||||
})
|
||||
|
|
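One thing worth noting in `TemplateLoader.load` above is that `shutil.rmtree(temp_dir)` only runs when extraction and the container call both succeed; an exception leaves the temp directory behind. A sketch of the same extract-then-load flow with exception-safe cleanup, using only the standard library (the context-manager helper is hypothetical, not part of the plugin):

    import contextlib
    import os
    import shutil
    import tempfile
    import zipfile

    @contextlib.contextmanager
    def extracted_template(zip_file):
        """Yield a path to the extracted .tpl and always clean up after use."""
        temp_dir = tempfile.mkdtemp()
        try:
            template_path = os.path.join(temp_dir, "temp.tpl")
            with zipfile.ZipFile(zip_file, "r") as zip_ref:
                zip_ref.extractall(template_path)
            yield template_path
        finally:
            # Runs on success and on error alike.
            shutil.rmtree(temp_dir)

Usage would be `with extracted_template(zip_file) as template_path: ...`, so the temp directory disappears even if `harmony.send` raises.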
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+"""Collect information about current file."""
 import os

 import pyblish.api

@@ -5,24 +7,16 @@ from avalon import harmony


 class CollectCurrentFile(pyblish.api.ContextPlugin):
-    """Inject the current working file into context"""
+    """Inject the current working file into context."""

     order = pyblish.api.CollectorOrder - 0.5
     label = "Current File"
     hosts = ["harmony"]

     def process(self, context):
-        """Inject the current working file"""
-        sig = harmony.signature()
-        func = """function %s()
-        {
-            return (
-                scene.currentProjectPath() + "/" +
-                scene.currentVersionName() + ".xstage"
-            );
-        }
-        %s
-        """ % (sig, sig)
+        """Inject the current working file."""
+        self_name = self.__class__.__name__

-        current_file = harmony.send({"function": func})["result"]
+        current_file = harmony.send(
+            {"function": f"PypeHarmony.Publish.{self_name}.collect"})["result"]
         context.data["currentFile"] = os.path.normpath(current_file)
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+"""Collect instances in Harmony."""
 import json

 import pyblish.api

@@ -8,7 +10,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
     """Gather instances by nodes metadata.

     This collector takes into account assets that are associated with
-    a composite node and marked with a unique identifier;
+    a composite node and marked with a unique identifier.

     Identifier:
         id (str): "pyblish.avalon.instance"

@@ -19,10 +21,19 @@ class CollectInstances(pyblish.api.ContextPlugin):
     hosts = ["harmony"]
     families_mapping = {
         "render": ["imagesequence", "review", "ftrack"],
-        "harmony.template": []
+        "harmony.template": [],
+        "palette": ["palette", "ftrack"]
     }

+    pair_media = True
+
     def process(self, context):
+        """Plugin entry point.
+
+        Args:
+            context (:class:`pyblish.api.Context`): Context data.
+
+        """
         nodes = harmony.send(
             {"function": "node.subNodes", "args": ["Top"]}
         )["result"]

@@ -46,6 +57,11 @@ class CollectInstances(pyblish.api.ContextPlugin):
         )["result"]
         instance.data["families"] = self.families_mapping[data["family"]]

+        # If set in plugin, pair the scene Version in ftrack with
+        # thumbnails and review media.
+        if (self.pair_media and instance.data["family"] == "scene"):
+            context.data["scene_instance"] = instance
+
         # Produce diagnostic message for any graphical
         # user interface interested in visualising it.
         self.log.info(
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+"""Collect palettes from Harmony."""
 import os
 import json


@@ -13,23 +15,12 @@ class CollectPalettes(pyblish.api.ContextPlugin):
     hosts = ["harmony"]

     def process(self, context):
-        sig = harmony.signature()
-        func = """function %s()
-        {
-            var palette_list = PaletteObjectManager.getScenePaletteList();
-
-            var palettes = {};
-            for(var i=0; i < palette_list.numPalettes; ++i)
-            {
-                var palette = palette_list.getPaletteByIndex(i);
-                palettes[palette.getName()] = palette.id;
-            }
-
-            return palettes;
-        }
-        %s
-        """ % (sig, sig)
-        palettes = harmony.send({"function": func})["result"]
+        """Collector entry point."""
+        self_name = self.__class__.__name__
+        palettes = harmony.send(
+            {
+                "function": f"PypeHarmony.Publish.{self_name}.getPalettes",
+            })["result"]

         for name, id in palettes.items():
             instance = context.create_instance(name)

@@ -37,7 +28,7 @@ class CollectPalettes(pyblish.api.ContextPlugin):
                 "id": id,
                 "family": "harmony.palette",
                 "asset": os.environ["AVALON_ASSET"],
-                "subset": "palette" + name
+                "subset": "{}{}".format("palette", name)
             })
             self.log.info(
                 "Created instance:\n" + json.dumps(
@@ -14,26 +14,11 @@ class CollectScene(pyblish.api.ContextPlugin):
     hosts = ["harmony"]

     def process(self, context):
-        sig = harmony.signature()
-        func = """function %s()
-        {
-            return [
-                about.getApplicationPath(),
-                scene.currentProjectPath(),
-                scene.currentScene(),
-                scene.getFrameRate(),
-                scene.getStartFrame(),
-                scene.getStopFrame(),
-                sound.getSoundtrackAll().path(),
-                scene.defaultResolutionX(),
-                scene.defaultResolutionY()
-            ]
-        }
-        %s
-        """ % (sig, sig)
+        """Plugin entry point."""
         result = harmony.send(
-            {"function": func, "args": []}
+            {
+                "function": "PypeHarmony.getSceneSettings",
+                "args": []}
         )["result"]

         context.data["applicationPath"] = result[0]
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+"""Collect current workfile from Harmony."""
 import pyblish.api
 import os


@@ -10,10 +12,12 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
     hosts = ["harmony"]

     def process(self, context):
+        """Plugin entry point."""
         family = "workfile"
         task = os.getenv("AVALON_TASK", None)
-        subset = family + task.capitalize()
+        sanitized_task_name = task[0].upper() + task[1:]
         basename = os.path.basename(context.data["currentFile"])
+        subset = "{}{}".format(family, sanitized_task_name)

         # Create instance
         instance = context.create_instance(subset)
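The switch from `task.capitalize()` to `task[0].upper() + task[1:]` matters because `str.capitalize` lowercases everything after the first character, which mangles camel-cased task names. A quick illustration:

    task = "myTask"

    print("workfile" + task.capitalize())           # workfileMytask (old behaviour)
    print("workfile" + task[0].upper() + task[1:])  # workfileMyTask (new behaviour)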
@@ -1,4 +1,9 @@
+# -*- coding: utf-8 -*-
+"""Extract palette from Harmony."""
 import os
+import csv
+
+from PIL import Image, ImageDraw, ImageFont

 from avalon import harmony
 import pype.api

@@ -13,18 +18,53 @@ class ExtractPalette(pype.api.Extractor):
     families = ["harmony.palette"]

     def process(self, instance):
-        sig = harmony.signature()
-        func = """function %s(args)
-        {
-            var palette_list = PaletteObjectManager.getScenePaletteList();
-            var palette = palette_list.getPaletteById(args[0]);
-            return (palette.getPath() + "/" + palette.getName() + ".plt");
-        }
-        %s
-        """ % (sig, sig)
-        palette_file = harmony.send(
-            {"function": func, "args": [instance.data["id"]]}
-        )["result"]
+        """Plugin entry point."""
+        self_name = self.__class__.__name__
+        result = harmony.send(
+            {
+                "function": f"PypeHarmony.Publish.{self_name}.getPalette",
+                "args": instance.data["id"]
+            })["result"]
+
+        if not isinstance(result, list):
+            self.log.error(f"Invalid reply: {result}")
+            raise AssertionError("Invalid reply from server.")
+        palette_name = result[0]
+        palette_file = result[1]
+        self.log.info(f"Got palette named {palette_name} "
+                      f"and file {palette_file}.")
+
+        tmp_thumb_path = os.path.join(os.path.dirname(palette_file),
+                                      os.path.basename(palette_file)
+                                      .split(".plt")[0] + "_swatches.png"
+                                      )
+        self.log.info(f"Temporary thumbnail path {tmp_thumb_path}")
+
+        palette_version = str(instance.data.get("version")).zfill(3)
+
+        self.log.info(f"Palette version {palette_version}")
+
+        if not instance.data.get("representations"):
+            instance.data["representations"] = []
+
+        try:
+            thumbnail_path = self.create_palette_thumbnail(palette_name,
+                                                           palette_version,
+                                                           palette_file,
+                                                           tmp_thumb_path)
+        except ValueError:
+            self.log.error("Unsupported palette type for thumbnail.")
+        else:
+            thumbnail = {
+                "name": "thumbnail",
+                "ext": "png",
+                "files": os.path.basename(thumbnail_path),
+                "stagingDir": os.path.dirname(thumbnail_path),
+                "tags": ["thumbnail"]
+            }
+
+            instance.data["representations"].append(thumbnail)

         representation = {
             "name": "plt",

@@ -32,4 +72,130 @@ class ExtractPalette(pype.api.Extractor):
             "files": os.path.basename(palette_file),
             "stagingDir": os.path.dirname(palette_file)
         }
-        instance.data["representations"] = [representation]
+        instance.data["representations"].append(representation)
+
+    def create_palette_thumbnail(self,
+                                 palette_name,
+                                 palette_version,
+                                 palette_path,
+                                 dst_path):
+        """Create thumbnail for palette file.
+
+        Args:
+            palette_name (str): Name of palette.
+            palette_version (str): Version of palette.
+            palette_path (str): Path to palette file.
+            dst_path (str): Thumbnail path.
+
+        Returns:
+            str: Thumbnail path.
+
+        """
+        colors = {}
+
+        with open(palette_path, newline='') as plt:
+            plt_parser = csv.reader(plt, delimiter=" ")
+            for i, line in enumerate(plt_parser):
+                if i == 0:
+                    continue
+                while ("" in line):
+                    line.remove("")
+                # self.log.debug(line)
+                if line[0] not in ["Solid"]:
+                    raise ValueError("Unsupported palette type.")
+                color_name = line[1].strip('"')
+                colors[color_name] = {"type": line[0],
+                                      "uuid": line[2],
+                                      "rgba": (int(line[3]),
+                                               int(line[4]),
+                                               int(line[5]),
+                                               int(line[6])),
+                                      }
+            plt.close()
+
+        img_pad_top = 80
+        label_pad_name = 30
+        label_pad_rgb = 580
+        swatch_pad_left = 300
+        swatch_pad_top = 10
+        swatch_w = 120
+        swatch_h = 50
+
+        image_w = 800
+        image_h = (img_pad_top +
+                   (len(colors.keys()) *
+                    swatch_h) +
+                   (swatch_pad_top *
+                    len(colors.keys()))
+                   )
+
+        img = Image.new("RGBA", (image_w, image_h), "white")
+
+        # For bg of colors with alpha, create checkerboard image
+        checkers = Image.new("RGB", (swatch_w, swatch_h))
+        pixels = checkers.load()
+
+        # Make pixels white where (row+col) is odd
+        for i in range(swatch_w):
+            for j in range(swatch_h):
+                if (i + j) % 2:
+                    pixels[i, j] = (255, 255, 255)
+
+        draw = ImageDraw.Draw(img)
+        # TODO: This needs to be a font included with Pype because
+        # arial is not available on other platforms than Windows.
+        title_font = ImageFont.truetype("arial.ttf", 28)
+        label_font = ImageFont.truetype("arial.ttf", 20)
+
+        draw.text((label_pad_name, 20),
+                  "{} (v{})".format(palette_name, palette_version),
+                  "black",
+                  font=title_font)
+
+        for i, name in enumerate(colors):
+            rgba = colors[name]["rgba"]
+            # @TODO: Fix this so alpha colors are displayed with checkerboard
+            # if not rgba[3] == "255":
+            #     img.paste(checkers,
+            #               (swatch_pad_left,
+            #                img_pad_top + swatch_pad_top + (i * swatch_h))
+            #               )
+            #
+            #     half_y = (img_pad_top + swatch_pad_top + (i * swatch_h))/2
+            #
+            #     draw.rectangle((
+            #         swatch_pad_left,  # upper LX
+            #         img_pad_top + swatch_pad_top + (i * swatch_h),  # upper LY
+            #         swatch_pad_left + (swatch_w * 2),  # lower RX
+            #         half_y),  # lower RY
+            #         fill=rgba[:-1], outline=(0, 0, 0), width=2)
+            #     draw.rectangle((
+            #         swatch_pad_left,  # upper LX
+            #         half_y,  # upper LY
+            #         swatch_pad_left + (swatch_w * 2),  # lower RX
+            #         img_pad_top + swatch_h + (i * swatch_h)),  # lower RY
+            #         fill=rgba, outline=(0, 0, 0), width=2)
+            # else:
+
+            draw.rectangle((
+                swatch_pad_left,  # upper left x
+                img_pad_top + swatch_pad_top + (i * swatch_h),  # upper left y
+                swatch_pad_left + (swatch_w * 2),  # lower right x
+                img_pad_top + swatch_h + (i * swatch_h)),  # lower right y
+                fill=rgba, outline=(0, 0, 0), width=2)
+
+            draw.text((label_pad_name, img_pad_top + (i * swatch_h) + swatch_pad_top + (swatch_h / 4)),  # noqa: E501
+                      name,
+                      "black",
+                      font=label_font)
+
+            draw.text((label_pad_rgb, img_pad_top + (i * swatch_h) + swatch_pad_top + (swatch_h / 4)),  # noqa: E501
+                      str(rgba),
+                      "black",
+                      font=label_font)
+
+        draw = ImageDraw.Draw(img)
+
+        img.save(dst_path)
+        return dst_path
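The TODO about the font is real: `ImageFont.truetype("arial.ttf", 28)` raises `OSError` on hosts where Arial is not installed, which is any stock Linux or macOS render node. A hedged sketch of a fallback that keeps the extractor working cross-platform; the bundled-font path is an assumption, only Pillow's documented API is used:

    from PIL import ImageFont

    def load_label_font(size, preferred="arial.ttf", bundled=None):
        """Try the preferred face, then an optionally bundled TTF, then
        Pillow's built-in bitmap font (which ignores `size`)."""
        for candidate in (preferred, bundled):
            if not candidate:
                continue
            try:
                return ImageFont.truetype(candidate, size)
            except OSError:
                # Font file not found on this host; try the next option.
                continue
        return ImageFont.load_default()

With a Liberation Sans file shipped in resources (see the `get_liberation_font_path` helper added later in this commit), `bundled` could point at that path.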
@@ -90,7 +90,7 @@ class ExtractRender(pyblish.api.InstancePlugin):
         thumbnail_path = os.path.join(path, "thumbnail.png")
         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
         args = [
-            ffmpeg_path, "-y",
+            "{}".format(ffmpeg_path), "-y",
             "-i", os.path.join(path, list(collections[0])[0]),
             "-vf", "scale=300:-1",
             "-vframes", "1",
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+"""Extract template."""
 import os
 import shutil


@@ -14,6 +16,7 @@ class ExtractTemplate(pype.api.Extractor):
     families = ["harmony.template"]

     def process(self, instance):
+        """Plugin entry point."""
         staging_dir = self.staging_dir(instance)
         filepath = os.path.join(staging_dir, f"{instance.name}.tpl")


@@ -61,60 +64,49 @@ class ExtractTemplate(pype.api.Extractor):
             "files": f"{instance.name}.zip",
             "stagingDir": staging_dir
         }
-        instance.data["representations"] = [representation]
+        self.log.info(instance.data.get("representations"))
+        if instance.data.get("representations"):
+            instance.data["representations"].extend([representation])
+        else:
+            instance.data["representations"] = [representation]
+
+        instance.data["version_name"] = "{}_{}".format(
+            instance.data["subset"], os.environ["AVALON_TASK"])

-    def get_backdrops(self, node):
-        sig = harmony.signature()
-        func = """function %s(probe_node)
-        {
-            var backdrops = Backdrop.backdrops("Top");
-            var valid_backdrops = [];
-            for(var i=0; i<backdrops.length; i++)
-            {
-                var position = backdrops[i].position;
-
-                var x_valid = false;
-                var node_x = node.coordX(probe_node);
-                if (position.x < node_x && node_x < (position.x + position.w)){
-                    x_valid = true
-                };
-
-                var y_valid = false;
-                var node_y = node.coordY(probe_node);
-                if (position.y < node_y && node_y < (position.y + position.h)){
-                    y_valid = true
-                };
-
-                if (x_valid && y_valid){
-                    valid_backdrops.push(backdrops[i])
-                };
-            }
-            return valid_backdrops;
-        }
-        %s
-        """ % (sig, sig)
-        return harmony.send(
-            {"function": func, "args": [node]}
-        )["result"]
+    def get_backdrops(self, node: str) -> list:
+        """Get backdrops for the node.
+
+        Args:
+            node (str): Node path.
+
+        Returns:
+            list: list of Backdrops.
+
+        """
+        self_name = self.__class__.__name__
+        return harmony.send({
+            "function": f"PypeHarmony.Publish.{self_name}.getBackdropsByNode",
+            "args": node})["result"]

-    def get_dependencies(self, node, dependencies):
-        sig = harmony.signature()
-        func = """function %s(args)
-        {
-            var target_node = args[0];
-            var numInput = node.numberOfInputPorts(target_node);
-            var dependencies = [];
-            for (var i = 0 ; i < numInput; i++)
-            {
-                dependencies.push(node.srcNode(target_node, i));
-            }
-            return dependencies;
-        }
-        %s
-        """ % (sig, sig)
+    def get_dependencies(
+            self, node: str, dependencies: list = None) -> list:
+        """Get node dependencies.
+
+        This will return recursive dependency list of given node.
+
+        Args:
+            node (str): Path to the node.
+            dependencies (list, optional): existing dependency list.
+
+        Returns:
+            list: List of dependent nodes.
+
+        """
         current_dependencies = harmony.send(
-            {"function": func, "args": [node]}
+            {
+                "function": "PypeHarmony.getDependencies",
+                "args": node}
         )["result"]

         for dependency in current_dependencies:
@@ -130,8 +130,8 @@ class ExtractReviewCutUp(pype.api.Extractor):

         # check if audio stream is in input video file
         ffprob_cmd = (
-            "{ffprobe_path} -i \"{full_input_path}\" -show_streams "
-            "-select_streams a -loglevel error"
+            "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
+            " -select_streams a -loglevel error"
         ).format(**locals())

         self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))

@@ -171,7 +171,8 @@ class ExtractReviewCutUp(pype.api.Extractor):
         # try to get video native resolution data
         try:
             resolution_output = pype.api.subprocess((
-                "{ffprobe_path} -i \"{full_input_path}\" -v error "
+                "\"{ffprobe_path}\" -i \"{full_input_path}\""
+                " -v error "
                 "-select_streams v:0 -show_entries "
                 "stream=width,height -of csv=s=x:p=0"
             ).format(**locals()))

@@ -274,7 +275,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
         output_args.append("-y \"{}\"".format(full_output_path))

         mov_args = [
-            ffmpeg_path,
+            "\"{}\"".format(ffmpeg_path),
             " ".join(input_args),
             " ".join(output_args)
         ]
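The recurring fix in these hunks, wrapping `ffmpeg_path` and `ffprobe_path` in escaped quotes, guards against tool install paths that contain spaces once the command is flattened into a single shell string. Passing an argument list and skipping the shell sidesteps the quoting problem entirely; a small sketch with placeholder paths:

    import subprocess

    ffmpeg_path = r"C:\Program Files\ffmpeg\bin\ffmpeg.exe"  # space in path

    # Shell string: every path must be quoted by hand or the command breaks.
    cmd = '"{}" -y -i "in.mov" -vframes 1 thumb.png'.format(ffmpeg_path)
    subprocess.check_output(cmd, shell=True)

    # Argument list: no shell involved, so no manual quoting is needed.
    subprocess.check_output(
        [ffmpeg_path, "-y", "-i", "in.mov", "-vframes", "1", "thumb.png"])

The quoting approach is kept in the diff presumably because the surrounding helpers join the arguments into one string before execution.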
@@ -1,61 +0,0 @@
import os
import acre

from avalon import api, lib
import pype.api as pype
from pype.aport import lib as aportlib

log = pype.Logger().get_logger(__name__, "aport")


class Aport(api.Action):

    name = "aport"
    label = "Aport - Avalon's Server"
    icon = "retweet"
    order = 996

    def is_compatible(self, session):
        """Return whether the action is compatible with the session"""
        if "AVALON_TASK" in session:
            return True
        return False

    def process(self, session, **kwargs):
        """Implement the behavior for when the action is triggered

        Args:
            session (dict): environment dictionary

        Returns:
            Popen instance of newly spawned process

        """
        with pype.modified_environ(**session):
            # Get executable by name
            print(self.name)
            app = lib.get_application(self.name)
            executable = lib.which(app["executable"])

            # Run as server
            arguments = []

            tools_env = acre.get_tools([self.name])
            env = acre.compute(tools_env)
            env = acre.merge(env, current_env=dict(os.environ))

            if not env.get('AVALON_WORKDIR', None):
                os.environ["AVALON_WORKDIR"] = aportlib.get_workdir_template()

            env.update(dict(os.environ))

            try:
                lib.launch(
                    executable=executable,
                    args=arguments,
                    environment=env
                )
            except Exception as e:
                log.error(e)
            return
@@ -1,83 +0,0 @@
import os
import acre

from avalon import api, lib, io
import pype.api as pype


class PremierePro(api.Action):

    name = "premiere_2019"
    label = "Premiere Pro"
    icon = "premiere_icon"
    order = 996

    def is_compatible(self, session):
        """Return whether the action is compatible with the session"""
        if "AVALON_TASK" in session:
            return True
        return False

    def process(self, session, **kwargs):
        """Implement the behavior for when the action is triggered

        Args:
            session (dict): environment dictionary

        Returns:
            Popen instance of newly spawned process

        """
        with pype.modified_environ(**session):
            # Get executable by name
            app = lib.get_application(self.name)
            executable = lib.which(app["executable"])

            # Run as server
            arguments = []

            tools_env = acre.get_tools([self.name])
            env = acre.compute(tools_env)
            env = acre.merge(env, current_env=dict(os.environ))

            if not env.get('AVALON_WORKDIR', None):
                project_name = env.get("AVALON_PROJECT")
                anatomy = pype.Anatomy(project_name)
                os.environ['AVALON_PROJECT'] = project_name
                io.Session['AVALON_PROJECT'] = project_name

                task_name = os.environ.get(
                    "AVALON_TASK", io.Session["AVALON_TASK"]
                )
                asset_name = os.environ.get(
                    "AVALON_ASSET", io.Session["AVALON_ASSET"]
                )
                application = lib.get_application(
                    os.environ["AVALON_APP_NAME"]
                )

                project_doc = io.find_one({"type": "project"})
                data = {
                    "task": task_name,
                    "asset": asset_name,
                    "project": {
                        "name": project_doc["name"],
                        "code": project_doc["data"].get("code", '')
                    },
                    "hierarchy": pype.get_hierarchy(),
                    "app": application["application_dir"]
                }
                anatomy_filled = anatomy.format(data)
                workdir = anatomy_filled["work"]["folder"]

                os.environ["AVALON_WORKDIR"] = workdir

            env.update(dict(os.environ))

            lib.launch(
                executable=executable,
                args=arguments,
                environment=env
            )
            return
26
pype/plugins/lib.py
Normal file

@@ -0,0 +1,26 @@
import re


def get_unique_layer_name(layers, asset_name, subset_name):
    """Get all layer names and, if 'name' is present in them, increase the
    suffix by 1 (eg. creates a unique layer name - for Loader).

    Args:
        layers (list): of namedtuples, expects 'name' field present
        asset_name (string): in format asset_subset (Hero)
        subset_name (string): (LOD)

    Returns:
        (string): name_00X (without version)
    """
    name = "{}_{}".format(asset_name, subset_name)
    names = {}
    for layer in layers:
        layer_name = re.sub(r'_\d{3}$', '', layer.name)
        if layer_name in names.keys():
            names[layer_name] = names[layer_name] + 1
        else:
            names[layer_name] = 1
    occurrences = names.get(name, 0)

    return "{}_{:0>3d}".format(name, occurrences + 1)
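Given layers named e.g. `Hero_LOD_001` and `Hero_LOD_002`, the helper strips the three-digit suffix, counts occurrences, and hands back the next free name. A small usage sketch; the namedtuple stands in for whatever layer objects the host application returns:

    from collections import namedtuple

    Layer = namedtuple("Layer", ["name"])
    layers = [Layer("Hero_LOD_001"), Layer("Hero_LOD_002"), Layer("Tree_001")]

    print(get_unique_layer_name(layers, "Hero", "LOD"))     # Hero_LOD_003
    print(get_unique_layer_name(layers, "Villain", "LOD"))  # Villain_LOD_001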
@@ -20,7 +20,8 @@ class CollectFtrackFamilies(pyblish.api.InstancePlugin):
         "model",
         "animation",
         "look",
-        "rig"
+        "rig",
+        "camera"
     ]

     def process(self, instance):
@@ -18,7 +18,12 @@ class CollectRemoveMarked(pyblish.api.ContextPlugin):

     def process(self, context):

+        self.log.debug(context)
         # make ftrack publishable
+        instances_to_remove = []
         for instance in context:
             if instance.data.get('remove'):
-                context.remove(instance)
+                instances_to_remove.append(instance)
+
+        for instance in instances_to_remove:
+            context.remove(instance)
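The two-pass rewrite above fixes a classic bug: removing items from a sequence while iterating over it shifts the indices, so the element right after each removal gets skipped. A self-contained illustration of the failure mode and the collect-first fix:

    items = [1, 2, 2, 3]
    for x in items:
        if x == 2:
            items.remove(x)
    print(items)  # [1, 2, 3] -- the second 2 survives because it was skipped

    # Safe: collect the victims first, remove them afterwards.
    items = [1, 2, 2, 3]
    to_remove = [x for x in items if x == 2]
    for x in to_remove:
        items.remove(x)
    print(items)  # [1, 3]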
@@ -254,6 +254,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
         if self.sync_workfile_version:
             data["version"] = context.data["version"]

+            for instance in context:
+                if instance.data['family'] == "workfile":
+                    instance.data["version"] = context.data["version"]
+
         # Apply each user defined attribute as data
         for attr in cmds.listAttr(layer, userDefined=True) or list():
             try:
@@ -43,33 +43,38 @@ class CollectReview(pyblish.api.InstancePlugin):
         i = 0
         for inst in instance.context:
-            self.log.debug('processing {}'.format(inst))
-            self.log.debug('processing2 {}'.format(instance.context[i]))
+            self.log.debug('filtering {}'.format(inst))
             data = instance.context[i].data

-            if inst.name == reviewable_subset[0]:
-                if data.get('families'):
-                    data['families'].append('review')
-                else:
-                    data['families'] = ['review']
-                self.log.debug('adding review family to {}'.format(reviewable_subset))
-                data['review_camera'] = camera
-                # data["publish"] = False
-                data['frameStartFtrack'] = instance.data["frameStartHandle"]
-                data['frameEndFtrack'] = instance.data["frameEndHandle"]
-                data['frameStartHandle'] = instance.data["frameStartHandle"]
-                data['frameEndHandle'] = instance.data["frameEndHandle"]
-                data["frameStart"] = instance.data["frameStart"]
-                data["frameEnd"] = instance.data["frameEnd"]
-                data['handles'] = instance.data.get('handles', None)
-                data['step'] = instance.data['step']
-                data['fps'] = instance.data['fps']
-                data["isolate"] = instance.data["isolate"]
-                cmds.setAttr(str(instance) + '.active', 1)
-                self.log.debug('data {}'.format(instance.context[i].data))
-                instance.context[i].data.update(data)
-                instance.data['remove'] = True
-            i += 1
+            if inst.name != reviewable_subset[0]:
+                self.log.debug('subset name does not match {}'.format(
+                    reviewable_subset[0]))
+                i += 1
+                continue
+
+            if data.get('families'):
+                data['families'].append('review')
+            else:
+                data['families'] = ['review']
+            self.log.debug('adding review family to {}'.format(
+                reviewable_subset))
+            data['review_camera'] = camera
+            # data["publish"] = False
+            data['frameStartFtrack'] = instance.data["frameStartHandle"]
+            data['frameEndFtrack'] = instance.data["frameEndHandle"]
+            data['frameStartHandle'] = instance.data["frameStartHandle"]
+            data['frameEndHandle'] = instance.data["frameEndHandle"]
+            data["frameStart"] = instance.data["frameStart"]
+            data["frameEnd"] = instance.data["frameEnd"]
+            data['handles'] = instance.data.get('handles', None)
+            data['step'] = instance.data['step']
+            data['fps'] = instance.data['fps']
+            data["isolate"] = instance.data["isolate"]
+            cmds.setAttr(str(instance) + '.active', 1)
+            self.log.debug('data {}'.format(instance.context[i].data))
+            instance.context[i].data.update(data)
+            instance.data['remove'] = True
+            self.log.debug('instance data {}'.format(instance.data))
         else:
             if self.legacy:
                 instance.data['subset'] = task + 'Review'

@@ -82,8 +87,10 @@ class CollectReview(pyblish.api.InstancePlugin):
         instance.data['subset'] = subset

         instance.data['review_camera'] = camera
-        instance.data['frameStartFtrack'] = instance.data["frameStartHandle"]
-        instance.data['frameEndFtrack'] = instance.data["frameEndHandle"]
+        instance.data['frameStartFtrack'] = \
+            instance.data["frameStartHandle"]
+        instance.data['frameEndFtrack'] = \
+            instance.data["frameEndHandle"]

         # make ftrack publishable
         instance.data["families"] = ['ftrack']
@@ -6,7 +6,6 @@ from maya import cmds
 import pyblish.api

 from pype.hosts.maya import lib
-from pype.lib import pairwise


 SETTINGS = {"renderDensity",

@@ -78,7 +77,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
         connections = cmds.ls(connections, long=True)  # Ensure long names

         inputs = []
-        for dest, src in pairwise(connections):
+        for dest, src in lib.pairwise(connections):
             source_node, source_attr = src.split(".", 1)
             dest_node, dest_attr = dest.split(".", 1)


@@ -119,7 +118,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
         texture_filenames = []
         if image_search_paths:

             # TODO: Somehow this uses OS environment path separator, `:` vs `;`
             # Later on check whether this is pipeline OS cross-compatible.
             image_search_paths = [p for p in

@@ -127,7 +126,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin):

         # find all ${TOKEN} tokens and replace them with $TOKEN env. variable
         image_search_paths = self._replace_tokens(image_search_paths)

         # List all related textures
         texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
         self.log.info("Found %i texture(s)" % len(texture_filenames))
@@ -26,7 +26,15 @@ class ExtractCameraAlembic(pype.api.Extractor):
         # get settings
         framerange = [instance.data.get("frameStart", 1),
                       instance.data.get("frameEnd", 1)]
-        handles = instance.data.get("handles", 0)
+        handle_start = instance.data.get("handleStart", 0)
+        handle_end = instance.data.get("handleEnd", 0)
+
+        # TODO: deprecated attribute "handles"
+
+        if handle_start is None:
+            handle_start = instance.data.get("handles", 0)
+            handle_end = instance.data.get("handles", 0)
+
         step = instance.data.get("step", 1.0)
         bake_to_worldspace = instance.data("bakeToWorldSpace", True)


@@ -55,8 +63,10 @@ class ExtractCameraAlembic(pype.api.Extractor):

         job_str = ' -selection -dataFormat "ogawa" '
         job_str += ' -attrPrefix cb'
-        job_str += ' -frameRange {0} {1} '.format(framerange[0] - handles,
-                                                  framerange[1] + handles)
+        job_str += ' -frameRange {0} {1} '.format(framerange[0]
+                                                  - handle_start,
+                                                  framerange[1]
+                                                  + handle_end)
         job_str += ' -step {0} '.format(step)

         if bake_to_worldspace:
@@ -1,12 +1,12 @@
 # -*- coding: utf-8 -*-
 """Extract camera as Maya Scene."""
 import os
+import itertools

 from maya import cmds

 import avalon.maya
 import pype.api
-from pype.lib import grouper
 from pype.hosts.maya import lib


@@ -36,6 +36,17 @@ def massage_ma_file(path):
     f.close()


+def grouper(iterable, n, fillvalue=None):
+    """Collect data into fixed-length chunks or blocks.
+
+    Examples:
+        grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
+
+    """
+    args = [iter(iterable)] * n
+    return itertools.zip_longest(fillvalue=fillvalue, *args)
+
+
 def unlock(plug):
     """Unlocks attribute and disconnects inputs for a plug.


@@ -107,7 +118,18 @@ class ExtractCameraMayaScene(pype.api.Extractor):

         framerange = [instance.data.get("frameStart", 1),
                       instance.data.get("frameEnd", 1)]
-        handles = instance.data.get("handles", 0)
+        handle_start = instance.data.get("handleStart", 0)
+        handle_end = instance.data.get("handleEnd", 0)
+
+        # TODO: deprecated attribute "handles"
+
+        if handle_start is None:
+            handle_start = instance.data.get("handles", 0)
+            handle_end = instance.data.get("handles", 0)
+
+        range_with_handles = [framerange[0] - handle_start,
+                              framerange[1] + handle_end]

         step = instance.data.get("step", 1.0)
         bake_to_worldspace = instance.data("bakeToWorldSpace", True)


@@ -121,9 +143,6 @@ class ExtractCameraMayaScene(pype.api.Extractor):
         cameras = cmds.ls(members, leaf=True, shapes=True, long=True,
                           dag=True, type="camera")

-        range_with_handles = [framerange[0] - handles,
-                              framerange[1] + handles]
-
         # validate required settings
         assert len(cameras) == 1, "Single camera must be found in extraction"
         assert isinstance(step, float), "Step must be a float value"
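The re-added `grouper` chunks a flat sequence into fixed-size tuples, which is how attribute/value pairs get walked elsewhere in this extractor. Note the body uses Python 3's `itertools.zip_longest`; the `izip_longest` name existed only in Python 2 and would raise `AttributeError` in this f-string-era codebase. For reference:

    import itertools

    def grouper(iterable, n, fillvalue=None):
        # Reuse the same iterator n times so consecutive items land in one tuple.
        args = [iter(iterable)] * n
        return itertools.zip_longest(*args, fillvalue=fillvalue)

    print(list(grouper("ABCDEFG", 3, "x")))
    # [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]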
@@ -41,7 +41,7 @@ def preserve_trim(node):
                  "{}".format(script_start))


-def loader_shift(node, frame, relative=True):
+def loader_shift(node, frame, relative=False):
     """Shift global in time by i preserving duration

     This moves the loader by i frames preserving global duration. When relative

@@ -61,11 +61,12 @@ def loader_shift(node, frame, relative=True):
     script_start = nuke.root()["first_frame"].value()

     if relative:
         node['frame_mode'].setValue("start at")
         node['frame'].setValue(str(script_start))
+    else:
+        node['frame_mode'].setValue("start at")
+        node['frame'].setValue(str(frame))

     return int(script_start)


 class LoadSequence(api.Loader):
     """Load image sequence into Nuke"""

@@ -73,10 +74,10 @@ class LoadSequence(api.Loader):
     families = ["render2d", "source", "plate", "render", "prerender", "review"]
     representations = ["exr", "dpx", "jpg", "jpeg", "png"]

-    label = "Load sequence"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
+    label = "Load Image Sequence"
+    order = -20
+    icon = "file-video-o"
+    color = "white"

     def load(self, context, name, namespace, data):
         from avalon.nuke import (
@@ -11,7 +11,7 @@ class ImageLoader(api.Loader):
     Stores the imported asset in a container named after the asset.
     """

-    families = ["image"]
+    families = ["image", "render"]
     representations = ["*"]

     def load(self, context, name=None, namespace=None, data=None):
@@ -34,8 +34,6 @@ class ExtractImage(pype.api.Extractor):
             # limit unnecessary calls to client
             if layer.visible and layer.id not in extract_ids:
                 stub.set_visible(layer.id, False)
-            if not layer.visible and layer.id in extract_ids:
-                stub.set_visible(layer.id, True)

         save_options = []
         if "png" in self.formats:

@@ -38,8 +38,6 @@ class ExtractReview(pype.api.Extractor):
             # limit unnecessary calls to client
             if layer.visible and layer.id not in extract_ids:
                 stub.set_visible(layer.id, False)
-            if not layer.visible and layer.id in extract_ids:
-                stub.set_visible(layer.id, True)

         stub.saveAs(output_image_path, 'jpg', True)


@@ -56,7 +54,7 @@ class ExtractReview(pype.api.Extractor):
         # Generate thumbnail.
         thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
         args = [
-            ffmpeg_path, "-y",
+            "{}".format(ffmpeg_path), "-y",
             "-i", output_image_path,
             "-vf", "scale=300:-1",
             "-vframes", "1",
@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
"""Collect Harmony scenes in Standalone Publisher."""
import copy
import glob
import os
from pprint import pformat

import pyblish.api


class CollectHarmonyScenes(pyblish.api.InstancePlugin):
    """Collect Harmony xstage files."""

    order = pyblish.api.CollectorOrder + 0.498
    label = "Collect Harmony Scene"
    hosts = ["standalonepublisher"]
    families = ["harmony.scene"]

    # presets
    ignored_instance_data_keys = ("name", "label", "stagingDir", "version")

    def process(self, instance):
        """Plugin entry point."""
        context = instance.context
        asset_data = instance.context.data["assetEntity"]
        asset_name = instance.data["asset"]
        subset_name = instance.data.get("subset", "sceneMain")
        anatomy_data = instance.context.data["anatomyData"]
        repres = instance.data["representations"]
        staging_dir = repres[0]["stagingDir"]
        files = repres[0]["files"]

        if not files.endswith(".zip"):
            # A harmony project folder / .xstage was dropped
            instance_name = f"{asset_name}_{subset_name}"
            task = instance.data.get("task", "harmonyIngest")

            # create new instance
            new_instance = context.create_instance(instance_name)

            # add original instance data except name key
            for key, value in instance.data.items():
                # Make sure value is copy since value may be object which
                # can be shared across all new created objects
                if key not in self.ignored_instance_data_keys:
                    new_instance.data[key] = copy.deepcopy(value)

            self.log.info("Copied data: {}".format(new_instance.data))

            # fix anatomy data
            anatomy_data_new = copy.deepcopy(anatomy_data)
            # updating hierarchy data
            anatomy_data_new.update({
                "asset": asset_data["name"],
                "task": task,
                "subset": subset_name
            })

            new_instance.data["label"] = f"{instance_name}"
            new_instance.data["subset"] = subset_name
            new_instance.data["extension"] = ".zip"
            new_instance.data["anatomyData"] = anatomy_data_new
            new_instance.data["publish"] = True

            # When a project folder was dropped vs. just an xstage file, find
            # the latest file xstage version and update the instance
            if not files.endswith(".xstage"):
                source_dir = os.path.join(
                    staging_dir, files
                ).replace("\\", "/")

                latest_file = max(glob.iglob(source_dir + "/*.xstage"),
                                  key=os.path.getctime).replace("\\", "/")

                new_instance.data["representations"][0]["stagingDir"] = (
                    source_dir
                )
                new_instance.data["representations"][0]["files"] = (
                    os.path.basename(latest_file)
                )
            self.log.info(f"Created new instance: {instance_name}")
            self.log.debug(f"_ inst_data: {pformat(new_instance.data)}")

            # set original instance for removal
            self.log.info("Context data: {}".format(context.data))
            instance.data["remove"] = True
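Picking the scene via `max(glob.iglob(...), key=os.path.getctime)` keys on creation/metadata-change time, whose meaning differs between Windows and POSIX; modification time is usually the safer notion of "last saved". A hedged alternative sketch of the same lookup, with an explicit empty-directory case (the helper name is illustrative):

    import glob
    import os

    def latest_xstage(source_dir):
        """Return the most recently modified .xstage in a directory,
        or None when there is none; getmtime is cross-platform 'last saved'."""
        candidates = glob.glob(os.path.join(source_dir, "*.xstage"))
        if not candidates:
            return None
        return max(candidates, key=os.path.getmtime).replace("\\", "/")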
@@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-
"""Collect zips as Harmony scene files."""
import copy
from pprint import pformat

import pyblish.api


class CollectHarmonyZips(pyblish.api.InstancePlugin):
    """Collect Harmony zipped projects."""

    order = pyblish.api.CollectorOrder + 0.497
    label = "Collect Harmony Zipped Projects"
    hosts = ["standalonepublisher"]
    families = ["harmony.scene"]
    extensions = ["zip"]

    # presets
    ignored_instance_data_keys = ("name", "label", "stagingDir", "version")

    def process(self, instance):
        """Plugin entry point."""
        context = instance.context
        asset_data = instance.context.data["assetEntity"]
        asset_name = instance.data["asset"]
        subset_name = instance.data.get("subset", "sceneMain")
        anatomy_data = instance.context.data["anatomyData"]
        repres = instance.data["representations"]
        files = repres[0]["files"]

        if files.endswith(".zip"):
            # A zip file was dropped
            instance_name = f"{asset_name}_{subset_name}"
            task = instance.data.get("task", "harmonyIngest")

            # create new instance
            new_instance = context.create_instance(instance_name)

            # add original instance data except name key
            for key, value in instance.data.items():
                # Make sure value is copy since value may be object which
                # can be shared across all new created objects
                if key not in self.ignored_instance_data_keys:
                    new_instance.data[key] = copy.deepcopy(value)

            self.log.info("Copied data: {}".format(new_instance.data))

            # fix anatomy data
            anatomy_data_new = copy.deepcopy(anatomy_data)
            # updating hierarchy data
            anatomy_data_new.update({
                "asset": asset_data["name"],
                "task": task,
                "subset": subset_name
            })

            new_instance.data["label"] = f"{instance_name}"
            new_instance.data["subset"] = subset_name
            new_instance.data["extension"] = ".zip"
            new_instance.data["anatomyData"] = anatomy_data_new
            new_instance.data["publish"] = True

            self.log.info(f"Created new instance: {instance_name}")
            self.log.debug(f"_ inst_data: {pformat(new_instance.data)}")

            # set original instance for removal
            self.log.info("Context data: {}".format(context.data))
            instance.data["remove"] = True
@@ -18,7 +18,7 @@ class CollectInstanceData(pyblish.api.InstancePlugin):

     label = "Collect instance data"
     order = pyblish.api.CollectorOrder + 0.49
-    families = ["render", "plate"]
+    families = ["render", "plate", "review"]
     hosts = ["standalonepublisher"]

     def process(self, instance):
@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
"""Collect instances that are marked for removal and remove them."""
import pyblish.api


class CollectRemoveMarked(pyblish.api.ContextPlugin):
    """Clean up instances marked for removal.

    Note:
        This is a workaround for race conditions and removing of instances
        used to generate other instances.
    """

    order = pyblish.api.CollectorOrder + 0.499
    label = 'Remove Marked Instances'

    def process(self, context):
        """Plugin entry point."""
        for instance in context:
            if instance.data.get('remove'):
                context.remove(instance)
404
pype/plugins/standalonepublisher/publish/extract_harmony_zip.py
Normal file

@@ -0,0 +1,404 @@
# -*- coding: utf-8 -*-
"""Extract Harmony scene from zip file."""
import glob
import os
import shutil
import six
import sys
import tempfile
import zipfile

import pyblish.api
from avalon import api, io
import pype.api


class ExtractHarmonyZip(pype.api.Extractor):
    """Extract Harmony zip."""

    # Pyblish settings
    label = "Extract Harmony zip"
    order = pyblish.api.ExtractorOrder + 0.02
    hosts = ["standalonepublisher"]
    families = ["scene"]

    # Properties
    session = None
    task_types = None
    task_statuses = None
    assetversion_statuses = None

    # Presets
    create_workfile = True
    default_task = "harmonyIngest"
    default_task_type = "Ingest"
    default_task_status = "Ingested"
    assetversion_status = "Ingested"

    def process(self, instance):
        """Plugin entry point."""
        context = instance.context
        self.session = context.data["ftrackSession"]
        asset_doc = context.data["assetEntity"]
        # asset_name = instance.data["asset"]
        subset_name = instance.data["subset"]
        instance_name = instance.data["name"]
        family = instance.data["family"]
        task = context.data["anatomyData"]["task"] or self.default_task
        project_entity = instance.context.data["projectEntity"]
        ftrack_id = asset_doc["data"]["ftrackId"]
        repres = instance.data["representations"]
        submitted_staging_dir = repres[0]["stagingDir"]
        submitted_files = repres[0]["files"]

        # Get all the ftrack entities needed

        # Asset Entity
        query = 'AssetBuild where id is "{}"'.format(ftrack_id)
        asset_entity = self.session.query(query).first()

        # Project Entity
        query = 'Project where full_name is "{}"'.format(
            project_entity["name"]
        )
        project_entity = self.session.query(query).one()

        # Get Task types and Statuses for creation if needed
        self.task_types = self._get_all_task_types(project_entity)
        self.task_statuses = self._get_all_task_statuses(project_entity)

        # Get Statuses of AssetVersions
        self.assetversion_statuses = self._get_all_assetversion_statuses(
            project_entity
        )

        # Setup the status that we want for the AssetVersion
        if self.assetversion_status:
            instance.data["assetversion_status"] = self.assetversion_status

        # Create the default_task if it does not exist
        if task == self.default_task:
            existing_tasks = []
            entity_children = asset_entity.get('children', [])
            for child in entity_children:
                if child.entity_type.lower() == 'task':
                    existing_tasks.append(child['name'].lower())

            if task.lower() in existing_tasks:
                print("Task {} already exists".format(task))
            else:
                self._create_task(
                    name=task,
                    task_type=self.default_task_type,
                    task_status=self.default_task_status,
                    parent=asset_entity,
                )

        # Find latest version
        latest_version = self._find_last_version(subset_name, asset_doc)
        version_number = 1
        if latest_version is not None:
            version_number += latest_version

        self.log.info(
            "Next version of instance \"{}\" will be {}".format(
                instance_name, version_number
            )
        )

        # update instance info
        instance.data["task"] = task
        instance.data["version_name"] = "{}_{}".format(subset_name, task)
        instance.data["family"] = family
        instance.data["subset"] = subset_name
        instance.data["version"] = version_number
        instance.data["latestVersion"] = latest_version
        instance.data["anatomyData"].update({
            "subset": subset_name,
            "family": family,
            "version": version_number
        })

        # Copy `families` and check if `family` is not in current families
        families = instance.data.get("families") or list()
        if families:
            families = list(set(families))

        instance.data["families"] = families

        # Prepare staging dir for new instance and zip + sanitize scene name
        staging_dir = tempfile.mkdtemp(prefix="pyblish_tmp_")

        # Handle if the representation is a .zip and not an .xstage
        pre_staged = False
        if submitted_files.endswith(".zip"):
            submitted_zip_file = os.path.join(submitted_staging_dir,
                                              submitted_files
                                              ).replace("\\", "/")

            pre_staged = self.sanitize_prezipped_project(instance,
                                                         submitted_zip_file,
                                                         staging_dir)

        # Get the file to work with
        source_dir = str(repres[0]["stagingDir"])
        source_file = str(repres[0]["files"])

        staging_scene_dir = os.path.join(staging_dir, "scene")
        staging_scene = os.path.join(staging_scene_dir, source_file)

        # If the file is an .xstage / directory, we must stage it
        if not pre_staged:
            shutil.copytree(source_dir, staging_scene_dir)

        # Rename this latest file as 'scene.xstage'
        # This is determined in the collector from the latest scene in a
        # submitted directory / directory the submitted .xstage is in.
        # In the case of a zip file being submitted, this is determined within
        # the self.sanitize_project() method in this extractor.
        os.rename(staging_scene,
                  os.path.join(staging_scene_dir, "scene.xstage")
                  )

        # Required to set the current directory where the zip will end up
        os.chdir(staging_dir)

        # Create the zip file
        zip_filepath = shutil.make_archive(os.path.basename(source_dir),
                                           "zip",
                                           staging_scene_dir
                                           )

        zip_filename = os.path.basename(zip_filepath)

        self.log.info("Zip file: {}".format(zip_filepath))

        # Setup representation
        new_repre = {
            "name": "zip",
            "ext": "zip",
            "files": zip_filename,
            "stagingDir": staging_dir
        }

        self.log.debug(
            "Creating new representation: {}".format(new_repre)
        )
        instance.data["representations"] = [new_repre]

        self.log.debug("Completed prep of zipped Harmony scene: {}"
                       .format(zip_filepath)
                       )

        # If this extractor is setup to also extract a workfile...
        if self.create_workfile:
            workfile_path = self.extract_workfile(instance,
                                                  staging_scene
                                                  )

            self.log.debug("Extracted Workfile to: {}".format(workfile_path))
def extract_workfile(self, instance, staging_scene):
|
||||
"""Extract a valid workfile for this corresponding publish.
|
||||
|
||||
Args:
|
||||
instance (:class:`pyblish.api.Instance`): Instance data.
|
||||
staging_scene (str): path of staging scene.
|
||||
|
||||
Returns:
|
||||
str: Path to workdir.
|
||||
|
||||
"""
|
||||
# Since the staging scene was renamed to "scene.xstage" for publish
|
||||
# rename the staging scene in the temp stagingdir
|
||||
staging_scene = os.path.join(os.path.dirname(staging_scene),
|
||||
"scene.xstage")
|
||||
|
||||
# Setup the data needed to form a valid work path filename
|
||||
anatomy = pype.api.Anatomy()
|
||||
project_entity = instance.context.data["projectEntity"]
|
||||
|
||||
data = {
|
||||
"root": api.registered_root(),
|
||||
"project": {
|
||||
"name": project_entity["name"],
|
||||
"code": project_entity["data"].get("code", '')
|
||||
},
|
||||
"asset": instance.data["asset"],
|
||||
"hierarchy": pype.api.get_hierarchy(instance.data["asset"]),
|
||||
"family": instance.data["family"],
|
||||
"task": instance.data.get("task"),
|
||||
"subset": instance.data["subset"],
|
||||
"version": 1,
|
||||
"ext": "zip",
|
||||
}
|
||||
|
||||
# Get a valid work filename first with version 1
|
||||
file_template = anatomy.templates["work"]["file"]
|
||||
anatomy_filled = anatomy.format(data)
|
||||
work_path = anatomy_filled["work"]["path"]
|
||||
|
||||
# Get the final work filename with the proper version
|
||||
data["version"] = api.last_workfile_with_version(
|
||||
os.path.dirname(work_path), file_template, data, [".zip"]
|
||||
)[1]
|
||||
|
||||
work_path = anatomy_filled["work"]["path"]
|
||||
base_name = os.path.splitext(os.path.basename(work_path))[0]
|
||||
|
||||
staging_work_path = os.path.join(os.path.dirname(staging_scene),
|
||||
base_name + ".xstage"
|
||||
)
|
||||
|
||||
# Rename this latest file after the workfile path filename
|
||||
os.rename(staging_scene, staging_work_path)
|
||||
|
||||
# Required to set the current directory where the zip will end up
|
||||
os.chdir(os.path.dirname(os.path.dirname(staging_scene)))
|
||||
|
||||
# Create the zip file
|
||||
zip_filepath = shutil.make_archive(base_name,
|
||||
"zip",
|
||||
os.path.dirname(staging_scene)
|
||||
)
|
||||
self.log.info(staging_scene)
|
||||
self.log.info(work_path)
|
||||
self.log.info(staging_work_path)
|
||||
self.log.info(os.path.dirname(os.path.dirname(staging_scene)))
|
||||
self.log.info(base_name)
|
||||
self.log.info(zip_filepath)
|
||||
|
||||
# Create the work path on disk if it does not exist
|
||||
os.makedirs(os.path.dirname(work_path), exist_ok=True)
|
||||
shutil.copy(zip_filepath, work_path)
|
||||
|
||||
return work_path
|
||||
|
||||
def sanitize_prezipped_project(
|
||||
self, instance, zip_filepath, staging_dir):
|
||||
"""Fix when a zip contains a folder.
|
||||
|
||||
Handle zip file root contains folder instead of the project.
|
||||
|
||||
Args:
|
||||
instance (:class:`pyblish.api.Instance`): Instance data.
|
||||
zip_filepath (str): Path to zip.
|
||||
staging_dir (str): Path to staging directory.
|
||||
|
||||
"""
|
||||
zip = zipfile.ZipFile(zip_filepath)
|
||||
zip_contents = zipfile.ZipFile.namelist(zip)
|
||||
|
||||
# Determine if any xstage file is in root of zip
|
||||
project_in_root = [pth for pth in zip_contents
|
||||
if "/" not in pth and pth.endswith(".xstage")]
|
||||
|
||||
staging_scene_dir = os.path.join(staging_dir, "scene")
|
||||
|
||||
# The project is nested, so we must extract and move it
|
||||
if not project_in_root:
|
||||
|
||||
staging_tmp_dir = os.path.join(staging_dir, "tmp")
|
||||
|
||||
with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
|
||||
zip_ref.extractall(staging_tmp_dir)
|
||||
|
||||
nested_project_folder = os.path.join(staging_tmp_dir,
|
||||
zip_contents[0]
|
||||
)
|
||||
|
||||
shutil.copytree(nested_project_folder, staging_scene_dir)
|
||||
|
||||
else:
|
||||
# The project is not nested, so we just extract to scene folder
|
||||
with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
|
||||
zip_ref.extractall(staging_scene_dir)
|
||||
|
||||
latest_file = max(glob.iglob(staging_scene_dir + "/*.xstage"),
|
||||
key=os.path.getctime).replace("\\", "/")
|
||||
|
||||
instance.data["representations"][0]["stagingDir"] = staging_scene_dir
|
||||
instance.data["representations"][0]["files"] = os.path.basename(
|
||||
latest_file)
|
||||
|
||||
# We have staged the scene already so return True
|
||||
return True
|
||||
|
||||
    def _find_last_version(self, subset_name, asset_doc):
        """Find the last version of a subset."""
        subset_doc = io.find_one({
            "type": "subset",
            "name": subset_name,
            "parent": asset_doc["_id"]
        })

        if subset_doc is None:
            self.log.debug("Subset entity does not exist yet.")
        else:
            version_doc = io.find_one(
                {
                    "type": "version",
                    "parent": subset_doc["_id"]
                },
                sort=[("name", -1)]
            )
            if version_doc:
                return int(version_doc["name"])
        return None

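Note: the lookup relies on one descending-sort find_one rather than fetching the whole version history. The same pattern in plain pymongo terms (connection string, database, and field names are assumptions matching the avalon schema, not taken from this diff):

    from pymongo import MongoClient, DESCENDING

    client = MongoClient("mongodb://localhost:27017")  # hypothetical connection
    collection = client["avalon"]["myproject"]         # hypothetical project collection

    subset = collection.find_one(
        {"type": "subset", "name": "workfileCompositing"}
    )
    if subset:
        # Sort versions descending by their integer "name" and take the
        # first: one round-trip, no client-side max() over every version.
        last_version = collection.find_one(
            {"type": "version", "parent": subset["_id"]},
            sort=[("name", DESCENDING)]
        )
        print(last_version and int(last_version["name"]))
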
    def _get_all_task_types(self, project):
        """Get all task types."""
        tasks = {}
        proj_template = project['project_schema']
        temp_task_types = proj_template['_task_type_schema']['types']

        for task_type in temp_task_types:
            if task_type['name'] not in tasks:
                tasks[task_type['name']] = task_type

        return tasks

    def _get_all_task_statuses(self, project):
        """Get all statuses of tasks."""
        statuses = {}
        proj_template = project['project_schema']
        temp_task_statuses = proj_template.get_statuses("Task")

        for status in temp_task_statuses:
            if status['name'] not in statuses:
                statuses[status['name']] = status

        return statuses

    def _get_all_assetversion_statuses(self, project):
        """Get statuses of all asset versions."""
        statuses = {}
        proj_template = project['project_schema']
        temp_task_statuses = proj_template.get_statuses("AssetVersion")

        for status in temp_task_statuses:
            if status['name'] not in statuses:
                statuses[status['name']] = status

        return statuses

    def _create_task(self, name, task_type, parent, task_status):
        """Create a task."""
        task_data = {
            'name': name,
            'parent': parent,
        }
        self.log.info(task_type)
        task_data['type'] = self.task_types[task_type]
        task_data['status'] = self.task_statuses[task_status]
        self.log.info(task_data)
        task = self.session.create('Task', task_data)
        try:
            self.session.commit()
        except Exception:
            tp, value, tb = sys.exc_info()
            self.session.rollback()
            six.reraise(tp, value, tb)

        return task

@@ -47,7 +47,7 @@ class ExtractShotData(pype.api.Extractor):
             start += 0.5
 
         args = [
-            ffmpeg_path,
+            "\"{}\"".format(ffmpeg_path),
            "-ss", str(start / fps),
            "-i", f"\"{video_file_path}\"",
            "-t", str(dur / fps)
@@ -56,7 +56,9 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
 
         elif is_jpeg:
             # use the first frame as thumbnail if it is a sequence of jpegs
-            full_thumbnail_path = file
+            full_thumbnail_path = os.path.join(
+                thumbnail_repre["stagingDir"], file
+            )
             self.log.info(
                 "Using file for thumbnail: {}".format(full_thumbnail_path)
             )
@@ -75,7 +77,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
         ffmpeg_args = self.ffmpeg_args or {}
 
         jpeg_items = []
-        jpeg_items.append(ffmpeg_path)
+        jpeg_items.append("\"{}\"".format(ffmpeg_path))
         # override the file if it already exists
         jpeg_items.append("-y")
         # add input filters from presets
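Note: the three hunks above apply the same fix. Once a command is assembled by joining strings (rather than passed as an argv list), an ffmpeg/ffprobe path containing spaces, e.g. under C:\Program Files, breaks at the first whitespace. A minimal sketch of the difference, with a hypothetical path:

    import subprocess

    ffmpeg_path = r"C:\Program Files\ffmpeg\bin\ffmpeg.exe"  # hypothetical install

    # Joined into one shell string, the unquoted path splits at the space:
    bad = " ".join([ffmpeg_path, "-version"])
    good = " ".join(['"{}"'.format(ffmpeg_path), "-version"])

    # subprocess.call(bad, shell=True)   # -> tries to run "C:\Program"
    # subprocess.call(good, shell=True)  # -> runs the quoted executable
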
@@ -10,12 +10,28 @@ def get_resource(*args):
     """
     return os.path.normpath(
         os.path.join(
-            os.path.dirname(__file__),
+            os.path.dirname(os.path.abspath(__file__)),
             *args
         )
     )
 
 
+def get_liberation_font_path(bold=False, italic=False):
+    font_name = "LiberationSans"
+    suffix = ""
+    if bold:
+        suffix += "Bold"
+    if italic:
+        suffix += "Italic"
+
+    if not suffix:
+        suffix = "Regular"
+
+    filename = "{}-{}.ttf".format(font_name, suffix)
+    font_path = get_resource("fonts", font_name, filename)
+    return font_path
+
+
 def pype_icon_filepath(debug=None):
     if debug is None:
         debug = bool(os.getenv("PYPE_DEV"))
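Note: a quick usage sketch for the new helper. Output paths depend on the install location, and whether a BoldItalic face ships alongside the three fonts added below is not shown in this diff:

    from pype.api import resources

    print(resources.get_liberation_font_path())
    # .../pype/resources/fonts/LiberationSans/LiberationSans-Regular.ttf
    print(resources.get_liberation_font_path(bold=True))
    # .../LiberationSans-Bold.ttf
    print(resources.get_liberation_font_path(bold=True, italic=True))
    # .../LiberationSans-BoldItalic.ttf (assuming that face exists on disk)
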
BIN pype/resources/app_icons/aftereffects.png Normal file
Binary file not shown.
BIN pype/resources/fonts/LiberationSans/LiberationSans-Bold.ttf Normal file
Binary file not shown.
BIN pype/resources/fonts/LiberationSans/LiberationSans-Italic.ttf Normal file
Binary file not shown.
BIN pype/resources/fonts/LiberationSans/LiberationSans-Regular.ttf Normal file
Binary file not shown.
77 pype/resources/fonts/LiberationSans/License.txt Normal file
@@ -0,0 +1,77 @@
LICENSE AGREEMENT AND LIMITED PRODUCT WARRANTY LIBERATION FONT SOFTWARE

This agreement governs the use of the Software and any updates to the
Software, regardless of the delivery mechanism. Subject to the following
terms, Red Hat, Inc. ("Red Hat") grants to the user ("Client") a license to
this collective work pursuant to the GNU General Public License v.2 with the
exceptions set forth below and such other terms as are set forth in this End
User License Agreement.

1. The Software and License Exception. LIBERATION font software (the
"Software") consists of TrueType-OpenType formatted font software for
rendering LIBERATION typefaces in sans serif, serif, and monospaced character
styles. You are licensed to use, modify, copy, and distribute the Software
pursuant to the GNU General Public License v.2 with the following exceptions:

1) As a special exception, if you create a document which uses this font, and
embed this font or unaltered portions of this font into the document, this
font does not by itself cause the resulting document to be covered by the GNU
General Public License. This exception does not however invalidate any other
reasons why the document might be covered by the GNU General Public License.
If you modify this font, you may extend this exception to your version of the
font, but you are not obligated to do so. If you do not wish to do so, delete
this exception statement from your version.

2) As a further exception, any distribution of the object code of the Software
in a physical product must provide you the right to access and modify the
source code for the Software and to reinstall that modified version of the
Software in object code form on the same physical product on which you
received it.

2. Intellectual Property Rights. The Software and each of its components,
including the source code, documentation, appearance, structure and
organization are owned by Red Hat and others and are protected under copyright
and other laws. Title to the Software and any component, or to any copy,
modification, or merged portion shall remain with the aforementioned, subject
to the applicable license. The "LIBERATION" trademark is a trademark of Red
Hat, Inc. in the U.S. and other countries. This agreement does not permit
Client to distribute modified versions of the Software using Red Hat's
trademarks. If Client makes a redistribution of a modified version of the
Software, then Client must modify the files names to remove any reference to
the Red Hat trademarks and must not use the Red Hat trademarks in any way to
reference or promote the modified Software.

3. Limited Warranty. To the maximum extent permitted under applicable law, the
Software is provided and licensed "as is" without warranty of any kind,
expressed or implied, including the implied warranties of merchantability,
non-infringement or fitness for a particular purpose. Red Hat does not warrant
that the functions contained in the Software will meet Client's requirements
or that the operation of the Software will be entirely error free or appear
precisely as described in the accompanying documentation.

4. Limitation of Remedies and Liability. To the maximum extent permitted by
applicable law, Red Hat or any Red Hat authorized dealer will not be liable to
Client for any incidental or consequential damages, including lost profits or
lost savings arising out of the use or inability to use the Software, even if
Red Hat or such dealer has been advised of the possibility of such damages.

5. Export Control. As required by U.S. law, Client represents and warrants
that it: (a) understands that the Software is subject to export controls under
the U.S. Commerce Department's Export Administration Regulations ("EAR"); (b)
is not located in a prohibited destination country under the EAR or U.S.
sanctions regulations (currently Cuba, Iran, Iraq, Libya, North Korea, Sudan
and Syria); (c) will not export, re-export, or transfer the Software to any
prohibited destination, entity, or individual without the necessary export
license(s) or authorizations(s) from the U.S. Government; (d) will not use or
transfer the Software for use in any sensitive nuclear, chemical or biological
weapons, or missile technology end-uses unless authorized by the U.S.
Government by regulation or specific license; (e) understands and agrees that
if it is in the United States and exports or transfers the Software to
eligible end users, it will, as required by EAR Section 740.17(e), submit
semi-annual reports to the Commerce Department's Bureau of Industry & Security
(BIS), which include the name and address (including country) of each
transferee; and (f) understands that countries other than the United States
may restrict the import, use, or export of encryption products and that it
shall be solely responsible for compliance with any such import, use, or
export restrictions.

6. General. If any provision of this agreement is held to be unenforceable,
that shall not affect the enforceability of the remaining provisions. This
agreement shall be governed by the laws of the State of North Carolina and of
the United States, without regard to any conflict of laws provisions, except
that the United Nations Convention on the International Sale of Goods shall
not apply.

Copyright © 2007 Red Hat, Inc. All rights reserved. LIBERATION is a trademark
of Red Hat, Inc.
@@ -191,7 +191,7 @@ def switch(asset_name, filepath=None, new=True):
     representations = []
     for container in containers:
         try:
-            representation = pype.switch_item(container,
+            representation = fusion_lib.switch_item(container,
                                               asset_name=asset_name)
             representations.append(representation)
         except Exception as e:
@@ -2,9 +2,10 @@ import os
 import sys
 import re
 import subprocess
+import platform
 import json
 import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
-from pype.api import config
+from pype.api import config, resources
 import pype.lib
 
@@ -13,16 +14,16 @@ ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
 
 
 FFMPEG = (
-    '{} -i "%(input)s" %(filters)s %(args)s%(output)s'
+    '"{}" -i "%(input)s" %(filters)s %(args)s%(output)s'
 ).format(ffmpeg_path)
 
 FFPROBE = (
-    '{} -v quiet -print_format json -show_format -show_streams "%(source)s"'
+    '"{}" -v quiet -print_format json -show_format -show_streams "%(source)s"'
 ).format(ffprobe_path)
 
 DRAWTEXT = (
-    "drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
-    "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
+    "drawtext=fontfile='%(font)s':text=\\'%(text)s\\':"
+    "x=%(x)s:y=%(y)s:fontcolor=%(color)s@%(opacity).1f:fontsize=%(size)d"
 )
 TIMECODE = (
     "drawtext=timecode=\\'%(timecode)s\\':text=\\'%(text)s\\'"
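Note: the DRAWTEXT reorder puts fontfile first so the font is fixed before the text options. Filling the template shows the resulting filter string; a minimal sketch with hypothetical values:

    DRAWTEXT = (
        "drawtext=fontfile='%(font)s':text=\\'%(text)s\\':"
        "x=%(x)s:y=%(y)s:fontcolor=%(color)s@%(opacity).1f:fontsize=%(size)d"
    )

    filter_str = DRAWTEXT % {
        "font": "/fonts/LiberationSans-Regular.ttf",  # hypothetical font path
        "text": "shot_010",
        "x": "10", "y": "10",
        "color": "white", "opacity": 1.0, "size": 24,
    }
    print(filter_str)
    # drawtext=fontfile='/fonts/LiberationSans-Regular.ttf':text=\'shot_010\':x=10:y=10:fontcolor=white@1.0:fontsize=24
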
@@ -212,9 +213,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if frame_start is None:
             replacement_final = replacement_size = str(MISSING_KEY_VALUE)
         else:
-            replacement_final = "\\'{}\\'".format(
-                r'%%{eif\:n+%d\:d}' % frame_start
-            )
+            replacement_final = "%{eif:n+" + str(frame_start) + ":d}"
             replacement_size = str(frame_end)
 
         final_text = final_text.replace(
@@ -236,13 +235,32 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         }
         timecode_text = options.get("timecode") or ""
         text_for_size += timecode_text
 
         data.update(options)
 
+        os_system = platform.system().lower()
+        data_font = data.get("font")
+        if not data_font:
+            data_font = (
+                resources.get_liberation_font_path().replace("\\", "/")
+            )
+        elif isinstance(data_font, dict):
+            data_font = data_font[os_system]
+
+        if data_font:
+            data["font"] = data_font
+            options["font"] = data_font
+            if ffmpeg_burnins._is_windows():
+                data["font"] = (
+                    data_font
+                    .replace(os.sep, r'\\' + os.sep)
+                    .replace(':', r'\:')
+                )
+
         data.update(
             ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
         )
-        if 'font' in data and ffmpeg_burnins._is_windows():
-            data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
-            data['font'] = data['font'].replace(':', r'\:')
 
         self.filters['drawtext'].append(draw % data)
 
         if options.get('bg_color') is not None:
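Note: the double escaping in the Windows branch exists because the font path passes through two parsers: the ffmpeg filtergraph splits options on ':' and the drawtext value consumes single backslashes. A sketch of what the transformation produces, with a hypothetical path and the separator hardcoded so it runs on any platform:

    font = "C:\\Windows\\Fonts\\arial.ttf"  # hypothetical Windows font path

    # Each single backslash becomes three, and the drive colon is protected
    # from the filtergraph parser; this mirrors the replace() chain above.
    escaped = font.replace("\\", r"\\" + "\\").replace(":", r"\:")
    print(escaped)  # C\:\\\Windows\\\Fonts\\\arial.ttf
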
@@ -308,11 +326,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
 
         _stdout, _stderr = proc.communicate()
         if _stdout:
-            print(_stdout.decode("utf-8"))
+            for line in _stdout.split(b"\r\n"):
+                print(line.decode("utf-8"))
 
         # This will probably never happen as ffmpeg uses stdout
         if _stderr:
-            print(_stderr.decode("utf-8"))
+            for line in _stderr.split(b"\r\n"):
+                print(line.decode("utf-8"))
 
         if proc.returncode != 0:
             raise RuntimeError(
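Note: splitting on b"\r\n" mirrors ffmpeg's console output on Windows; bytes.splitlines() would be the platform-neutral variant. A tiny comparison:

    _stdout = b"frame=  100\r\nframe=  200\r\n"

    # Explicit CRLF split (as in the hunk above) leaves a trailing empty chunk:
    print([c.decode("utf-8") for c in _stdout.split(b"\r\n")])
    # ['frame=  100', 'frame=  200', '']

    # splitlines() handles \n and \r\n and drops the trailing empty entry:
    print([c.decode("utf-8") for c in _stdout.splitlines()])
    # ['frame=  100', 'frame=  200']
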
@@ -474,7 +494,7 @@ def burnins_from_data(
     # Replace with missing key value if frame_start_tc is not set
     if frame_start_tc is None and has_timecode:
         has_timecode = False
-        log.warning(
+        print(
             "`frame_start` and `frame_start_tc`"
             " are not set in entered data."
         )
@@ -483,7 +503,7 @@ def burnins_from_data(
         has_source_timecode = SOURCE_TIMECODE_KEY in value
         if source_timecode is None and has_source_timecode:
             has_source_timecode = False
-            log.warning("Source does not have set timecode value.")
+            print("Source does not have set timecode value.")
             value = value.replace(SOURCE_TIMECODE_KEY, MISSING_KEY_VALUE)
 
     key_pattern = re.compile(r"(\{.*?[^{0]*\})")
@@ -558,7 +578,10 @@ def burnins_from_data(
 
 if __name__ == "__main__":
     print("* Burnin script started")
-    in_data = json.loads(sys.argv[-1])
+    in_data_json_path = sys.argv[-1]
+    with open(in_data_json_path, "r") as file_stream:
+        in_data = json.load(file_stream)
 
     burnins_from_data(
         in_data["input"],
         in_data["output"],
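Note: reading the payload from a JSON file instead of argv sidesteps both OS command-length limits and shell quote-escaping of the JSON itself. The caller side would now look roughly like this (script name and payload are hypothetical):

    import json
    import subprocess
    import sys
    import tempfile

    payload = {"input": "in.mov", "output": "out.mov", "burnin_data": {}}

    # Write the data to a temp file and hand over only its path.
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
        json.dump(payload, tmp)
        json_path = tmp.name

    # The burnin script picks the path from sys.argv[-1] and json.load()s it.
    subprocess.call([sys.executable, "otio_burnin.py", json_path])
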
4 pype/tests/README.md Normal file

@@ -0,0 +1,4 @@
Tests for Pype
--------------
Trigger with:
`pype test --pype`
39 pype/tests/test_lib_restructuralization.py Normal file

@@ -0,0 +1,39 @@
# Test for backward compatibility of the restructure of lib.py into the lib
# library. Contains simple imports that should still work.


def test_backward_compatibility(printer):
    printer("Test if imports still work")
    try:
        from pype.lib import filter_pyblish_plugins
        from pype.lib import execute_hook
        from pype.lib import PypeHook

        from pype.lib import get_latest_version
        from pype.lib import ApplicationLaunchFailed
        from pype.lib import launch_application
        from pype.lib import ApplicationAction
        from pype.lib import get_avalon_database
        from pype.lib import set_io_database

        from pype.lib import get_ffmpeg_tool_path
        from pype.lib import get_last_version_from_path
        from pype.lib import get_paths_from_environ
        from pype.lib import get_version_from_path
        from pype.lib import version_up

        from pype.lib import is_latest
        from pype.lib import any_outdated
        from pype.lib import get_asset
        from pype.lib import get_hierarchy
        from pype.lib import get_linked_assets
        from pype.lib import ffprobe_streams

        from pype.hosts.fusion.lib import switch_item

        from pype.lib import source_hash
        from pype.lib import _subprocess

    except ImportError:
        raise
@@ -222,10 +222,6 @@ QToolButton {
     background: #444;
 }
 
-#Header #ArtistTab {
-    background-image: url("img/tab-home.png");
-}
-
 #Header #TerminalTab {
     background-image: url("img/tab-terminal.png");
 }
@@ -9,6 +9,7 @@ import os
 import sys
 import traceback
 import inspect
+import logging
 
 from Qt import QtCore
@@ -29,6 +30,7 @@ class IterationBreak(Exception):
 
 
 class Controller(QtCore.QObject):
+    log = logging.getLogger("PyblishController")
     # Emitted when the GUI is about to start processing;
     # e.g. resetting, validating or publishing.
     about_to_process = QtCore.Signal(object, object)
@@ -72,6 +74,8 @@ class Controller(QtCore.QObject):
         self.instance_toggled.connect(self._on_instance_toggled)
 
     def reset_variables(self):
+        self.log.debug("Resetting pyblish context variables")
+
         # Data internal to the GUI itself
         self.is_running = False
         self.stopped = False
@@ -113,6 +117,7 @@ class Controller(QtCore.QObject):
             "nextOrder": None,
             "ordersWithError": set()
         }
+        self.log.debug("Reset of pyblish context variables done")
 
     def presets_by_hosts(self):
         # Get global filters as base
@@ -138,6 +143,8 @@ class Controller(QtCore.QObject):
         return result
 
     def reset_context(self):
+        self.log.debug("Resetting pyblish context object")
+
         self.context = pyblish.api.Context()
 
         self.context._publish_states = InstanceStates.ContextType
@@ -159,6 +166,8 @@ class Controller(QtCore.QObject):
 
         self.context.families = ("__context__",)
 
+        self.log.debug("Reset of pyblish context object done")
+
     def reset(self):
         """Discover plug-ins and run collection."""
 
@@ -202,6 +211,7 @@ class Controller(QtCore.QObject):
         self.was_finished.emit()
 
     def stop(self):
+        self.log.debug("Stopping")
         self.stopped = True
 
     def act(self, plugin, action):
@@ -346,27 +356,30 @@ class Controller(QtCore.QObject):
         This process doesn't stop on one
         """
         def on_next():
+            self.log.debug("Looking for next pair to process")
             try:
                 self.current_pair = next(self.pair_generator)
                 if isinstance(self.current_pair, IterationBreak):
                     raise self.current_pair
 
             except IterationBreak:
+                self.log.debug("Iteration break was raised")
                 self.is_running = False
                 self.was_stopped.emit()
                 return
 
             except StopIteration:
+                self.log.debug("Iteration stop was raised")
                 self.is_running = False
                 # All pairs were processed successfully!
                 return util.defer(500, on_finished)
 
-            except Exception:
-                # This is a bug
-                exc_type, exc_msg, exc_tb = sys.exc_info()
-                traceback.print_exception(exc_type, exc_msg, exc_tb)
-                self.is_running = False
-                self.was_stopped.emit()
+            except Exception as exc:
+                self.log.warning(
+                    "Unexpected exception during `on_next` happened",
+                    exc_info=True
+                )
+                exc_msg = str(exc)
+                return util.defer(
+                    500, lambda: on_unexpected_error(error=exc_msg)
+                )
@@ -376,19 +389,23 @@ class Controller(QtCore.QObject):
 
         def on_process():
             try:
+                self.log.debug(
+                    "Processing pair: {}".format(str(self.current_pair))
+                )
                 result = self._process(*self.current_pair)
                 if result["error"] is not None:
+                    self.log.debug("Error happened")
                     self.errored = True
 
+                self.log.debug("Pair processed")
                 self.was_processed.emit(result)
 
-            except Exception:
-                # TODO this should be handled much differently
-                # TODO emit crash signal to show message box with traceback
-                exc_type, exc_msg, exc_tb = sys.exc_info()
-                traceback.print_exception(exc_type, exc_msg, exc_tb)
-                self.is_running = False
-                self.was_stopped.emit()
+            except Exception as exc:
+                self.log.warning(
+                    "Unexpected exception during `on_process` happened",
+                    exc_info=True
+                )
+                exc_msg = str(exc)
+                return util.defer(
+                    500, lambda: on_unexpected_error(error=exc_msg)
+                )
@@ -396,6 +413,10 @@ class Controller(QtCore.QObject):
             util.defer(10, on_next)
 
+        def on_unexpected_error(error):
+            # TODO this should be handled much differently
+            # TODO emit crash signal to show message box with traceback?
+            self.is_running = False
+            self.was_stopped.emit()
+            util.u_print(u"An unexpected error occurred:\n %s" % error)
+            return util.defer(500, on_finished)
 
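Note: both handlers now funnel unexpected exceptions into one deferred callback instead of stopping silently. The same pattern in bare Qt terms, assuming util.defer is a thin wrapper over a single-shot timer (an assumption; its implementation is not part of this diff):

    from Qt import QtCore

    def defer(delay, func):
        # Assumed equivalent of util.defer: run func once on the event
        # loop after `delay` milliseconds, keeping the GUI responsive.
        QtCore.QTimer.singleShot(delay, func)

    def on_unexpected_error(error):
        print("An unexpected error occurred:\n %s" % error)

    # A failing step schedules the error handler instead of raising through Qt:
    defer(500, lambda: on_unexpected_error(error="something broke"))
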
@@ -446,9 +467,9 @@ class Controller(QtCore.QObject):
         try:
             callback(instance, old_value, new_value)
         except Exception:
-            print(
+            self.log.warning(
                 "Callback for `instanceToggled` crashed. {}".format(
                     os.path.abspath(inspect.getfile(callback))
-                )
+                ),
+                exc_info=True
             )
-            traceback.print_exception(*sys.exc_info())
@@ -522,168 +522,6 @@ class PluginDelegate(QtWidgets.QStyledItemDelegate):
         return QtCore.QSize(option.rect.width(), 20)
 
 
-class ArtistDelegate(QtWidgets.QStyledItemDelegate):
-    """Delegate used on Artist page"""
-
-    def paint(self, painter, option, index):
-        """Paint checkbox and text
-
-        _______________________________________________
-        |       | label               | duration |arrow|
-        |toggle |_____________________|          | to  |
-        |       | families            |          |persp|
-        |_______|_____________________|__________|_____|
-
-        """
-
-        # Layout
-        spacing = 10
-
-        body_rect = QtCore.QRectF(option.rect).adjusted(2, 2, -8, -2)
-        content_rect = body_rect.adjusted(5, 5, -5, -5)
-
-        perspective_rect = QtCore.QRectF(body_rect)
-        perspective_rect.setWidth(35)
-        perspective_rect.setHeight(35)
-        perspective_rect.translate(
-            content_rect.width() - (perspective_rect.width() / 2) + 10,
-            (content_rect.height() / 2) - (perspective_rect.height() / 2)
-        )
-
-        toggle_rect = QtCore.QRectF(body_rect)
-        toggle_rect.setWidth(7)
-        toggle_rect.adjust(1, 1, 0, -1)
-
-        icon_rect = QtCore.QRectF(content_rect)
-        icon_rect.translate(toggle_rect.width() + spacing, 3)
-        icon_rect.setWidth(35)
-        icon_rect.setHeight(35)
-
-        duration_rect = QtCore.QRectF(content_rect)
-        duration_rect.translate(content_rect.width() - 50, 0)
-
-        # Colors
-        check_color = colors["idle"]
-
-        publish_states = index.data(Roles.PublishFlagsRole)
-        if publish_states is None:
-            return
-
-        if publish_states & InstanceStates.InProgress:
-            check_color = colors["active"]
-
-        elif publish_states & InstanceStates.HasError:
-            check_color = colors["error"]
-
-        elif publish_states & InstanceStates.HasWarning:
-            check_color = colors["warning"]
-
-        elif publish_states & InstanceStates.HasFinished:
-            check_color = colors["ok"]
-
-        elif not index.data(Roles.IsEnabledRole):
-            check_color = colors["inactive"]
-
-        perspective_icon = icons["angle-right"]
-
-        if not index.data(QtCore.Qt.CheckStateRole):
-            font_color = colors["inactive"]
-        else:
-            font_color = colors["idle"]
-
-        if (
-            option.state
-            & (
-                QtWidgets.QStyle.State_MouseOver
-                or QtWidgets.QStyle.State_Selected
-            )
-        ):
-            perspective_color = colors["idle"]
-        else:
-            perspective_color = colors["inactive"]
-
-        # Maintain a reference to the state, so we can restore it once done
-        painter.save()
-
-        # Draw background
-        painter.fillRect(body_rect, colors["hover"])
-
-        # Draw icon
-        icon = index.data(QtCore.Qt.DecorationRole)
-
-        painter.setFont(fonts["largeAwesome"])
-        painter.setPen(QtGui.QPen(font_color))
-        painter.drawText(icon_rect, icon)
-
-        # Draw label
-        painter.setFont(fonts["h3"])
-        label_rect = QtCore.QRectF(content_rect)
-        label_x_offset = icon_rect.width() + spacing
-        label_rect.translate(
-            label_x_offset,
-            0
-        )
-        metrics = painter.fontMetrics()
-        label_rect.setHeight(metrics.lineSpacing())
-        label_rect.setWidth(
-            content_rect.width()
-            - label_x_offset
-            - perspective_rect.width()
-        )
-        # Elide label
-        label = index.data(QtCore.Qt.DisplayRole)
-        label = metrics.elidedText(
-            label, QtCore.Qt.ElideRight, label_rect.width()
-        )
-        painter.drawText(label_rect, label)
-
-        # Draw families
-        painter.setFont(fonts["h5"])
-        painter.setPen(QtGui.QPen(colors["inactive"]))
-
-        families = ", ".join(index.data(Roles.FamiliesRole))
-        families = painter.fontMetrics().elidedText(
-            families, QtCore.Qt.ElideRight, label_rect.width()
-        )
-
-        families_rect = QtCore.QRectF(label_rect)
-        families_rect.translate(0, label_rect.height() + spacing)
-
-        painter.drawText(families_rect, families)
-
-        painter.setFont(fonts["largeAwesome"])
-        painter.setPen(QtGui.QPen(perspective_color))
-        painter.drawText(perspective_rect, perspective_icon)
-
-        # Draw checkbox
-        pen = QtGui.QPen(check_color, 1)
-        painter.setPen(pen)
-
-        if index.data(Roles.IsOptionalRole):
-            painter.drawRect(toggle_rect)
-
-            if index.data(QtCore.Qt.CheckStateRole):
-                painter.fillRect(toggle_rect, check_color)
-
-        elif (
-            index.data(QtCore.Qt.CheckStateRole)
-        ):
-            painter.fillRect(toggle_rect, check_color)
-
-        if option.state & QtWidgets.QStyle.State_MouseOver:
-            painter.fillRect(body_rect, colors["hover"])
-
-        if option.state & QtWidgets.QStyle.State_Selected:
-            painter.fillRect(body_rect, colors["selected"])
-
-        painter.setPen(colors["outline"])
-        painter.drawRect(body_rect)
-
-        # Ok, we're done, tidy up.
-        painter.restore()
-
-    def sizeHint(self, option, index):
-        return QtCore.QSize(option.rect.width(), 80)
-
-
 class TerminalItem(QtWidgets.QStyledItemDelegate):
     """Delegate used exclusively for the Terminal"""
 
Binary file not shown.
@@ -842,164 +842,6 @@ class InstanceModel(QtGui.QStandardItemModel):
         )
 
 
-class ArtistProxy(QtCore.QAbstractProxyModel):
-    def __init__(self, *args, **kwargs):
-        self.mapping_from = []
-        self.mapping_to = []
-        super(ArtistProxy, self).__init__(*args, **kwargs)
-
-    def on_rows_inserted(self, parent_index, from_row, to_row):
-        if not parent_index.isValid():
-            return
-
-        parent_row = parent_index.row()
-        if parent_row >= len(self.mapping_from):
-            self.mapping_from.append(list())
-
-        new_from = None
-        new_to = None
-        for row_num in range(from_row, to_row + 1):
-            new_row = len(self.mapping_to)
-            new_to = new_row
-            if new_from is None:
-                new_from = new_row
-
-            self.mapping_from[parent_row].insert(row_num, new_row)
-            self.mapping_to.insert(new_row, [parent_row, row_num])
-
-        self.rowsInserted.emit(self.parent(), new_from, new_to + 1)
-
-    def _remove_rows(self, parent_row, from_row, to_row):
-        increment_num = self.mapping_from[parent_row][from_row]
-
-        to_end_index = len(self.mapping_from[parent_row]) - 1
-        for _idx in range(0, parent_row):
-            to_end_index += len(self.mapping_from[_idx])
-
-        removed_rows = 0
-        _emit_last = None
-        for row_num in reversed(range(from_row, to_row + 1)):
-            row = self.mapping_from[parent_row].pop(row_num)
-            _emit_last = row
-            removed_rows += 1
-
-        _emit_first = int(increment_num)
-        mapping_from_len = len(self.mapping_from)
-        mapping_from_parent_len = len(self.mapping_from[parent_row])
-        if parent_row < mapping_from_len:
-            for idx in range(from_row, mapping_from_parent_len):
-                self.mapping_from[parent_row][idx] = increment_num
-                increment_num += 1
-
-        if parent_row < mapping_from_len - 1:
-            for idx_i in range(parent_row + 1, mapping_from_len):
-                sub_values = self.mapping_from[idx_i]
-                if not sub_values:
-                    continue
-
-                for idx_j in range(0, len(sub_values)):
-                    self.mapping_from[idx_i][idx_j] = increment_num
-                    increment_num += 1
-
-        for idx in range(removed_rows):
-            self.mapping_to.pop(to_end_index - idx)
-
-        return (_emit_first, _emit_last)
-
-    def on_rows_removed(self, parent_index, from_row, to_row):
-        if parent_index.isValid():
-            parent_row = parent_index.row()
-            _emit_first, _emit_last = self._remove_rows(
-                parent_row, from_row, to_row
-            )
-            self.rowsRemoved.emit(self.parent(), _emit_first, _emit_last)
-
-        else:
-            removed_rows = False
-            emit_first = None
-            emit_last = None
-            for row_num in reversed(range(from_row, to_row + 1)):
-                remaining_rows = self.mapping_from[row_num]
-                if remaining_rows:
-                    removed_rows = True
-                    _emit_first, _emit_last = self._remove_rows(
-                        row_num, 0, len(remaining_rows) - 1
-                    )
-                    if emit_first is None:
-                        emit_first = _emit_first
-                    emit_last = _emit_last
-
-                self.mapping_from.pop(row_num)
-
-            diff = to_row - from_row + 1
-            mapping_to_len = len(self.mapping_to)
-            if from_row < mapping_to_len:
-                for idx in range(from_row, mapping_to_len):
-                    self.mapping_to[idx][0] -= diff
-
-            if removed_rows:
-                self.rowsRemoved.emit(self.parent(), emit_first, emit_last)
-
-    def on_reset(self):
-        self.modelReset.emit()
-        self.mapping_from = []
-        self.mapping_to = []
-
-    def setSourceModel(self, source_model):
-        super(ArtistProxy, self).setSourceModel(source_model)
-        source_model.rowsInserted.connect(self.on_rows_inserted)
-        source_model.rowsRemoved.connect(self.on_rows_removed)
-        source_model.modelReset.connect(self.on_reset)
-        source_model.dataChanged.connect(self.on_data_changed)
-
-    def on_data_changed(self, from_index, to_index, roles=None):
-        proxy_from_index = self.mapFromSource(from_index)
-        if from_index == to_index:
-            proxy_to_index = proxy_from_index
-        else:
-            proxy_to_index = self.mapFromSource(to_index)
-
-        args = [proxy_from_index, proxy_to_index]
-        if Qt.__binding__ not in ("PyQt4", "PySide"):
-            args.append(roles or [])
-        self.dataChanged.emit(*args)
-
-    def columnCount(self, parent=QtCore.QModelIndex()):
-        # This is not right for a global proxy, but in this case it is enough
-        return self.sourceModel().columnCount()
-
-    def rowCount(self, parent=QtCore.QModelIndex()):
-        if parent.isValid():
-            return 0
-        return len(self.mapping_to)
-
-    def mapFromSource(self, index):
-        if not index.isValid():
-            return QtCore.QModelIndex()
-
-        parent_index = index.parent()
-        if not parent_index.isValid():
-            return QtCore.QModelIndex()
-
-        parent_idx = self.mapping_from[parent_index.row()]
-        my_row = parent_idx[index.row()]
-        return self.index(my_row, index.column())
-
-    def mapToSource(self, index):
-        if not index.isValid() or index.row() > len(self.mapping_to):
-            return self.sourceModel().index(index.row(), index.column())
-
-        parent_row, item_row = self.mapping_to[index.row()]
-        parent_index = self.sourceModel().index(parent_row, 0)
-        return self.sourceModel().index(item_row, 0, parent_index)
-
-    def index(self, row, column, parent=QtCore.QModelIndex()):
-        return self.createIndex(row, column, QtCore.QModelIndex())
-
-    def parent(self, index=None):
-        return QtCore.QModelIndex()
-
-
 class TerminalDetailItem(QtGui.QStandardItem):
     key_label_record_map = (
         ("instance", "Instance"),
@@ -8,7 +8,7 @@ UseLabel = True
 
 # Customize which tab to start on. Possible choices are: "artist", "overview"
 # and "terminal".
-InitialTab = "artist"
+InitialTab = "overview"
 
 # Customize the window size.
 WindowSize = (430, 600)
@@ -11,61 +11,6 @@ def _import_widgets():
     from . import widgets
 
 
-class ArtistView(QtWidgets.QListView):
-    # An item is requesting to be toggled, with optional forced-state
-    toggled = QtCore.Signal(QtCore.QModelIndex, object)
-    show_perspective = QtCore.Signal(QtCore.QModelIndex)
-
-    def __init__(self, parent=None):
-        super(ArtistView, self).__init__(parent)
-
-        self.horizontalScrollBar().hide()
-        self.viewport().setAttribute(QtCore.Qt.WA_Hover, True)
-        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
-        self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
-        self.setResizeMode(QtWidgets.QListView.Adjust)
-        self.setVerticalScrollMode(QtWidgets.QListView.ScrollPerPixel)
-
-    def event(self, event):
-        if not event.type() == QtCore.QEvent.KeyPress:
-            return super(ArtistView, self).event(event)
-
-        elif event.key() == QtCore.Qt.Key_Space:
-            for index in self.selectionModel().selectedIndexes():
-                self.toggled.emit(index, None)
-
-            return True
-
-        elif event.key() == QtCore.Qt.Key_Backspace:
-            for index in self.selectionModel().selectedIndexes():
-                self.toggled.emit(index, False)
-
-            return True
-
-        elif event.key() == QtCore.Qt.Key_Return:
-            for index in self.selectionModel().selectedIndexes():
-                self.toggled.emit(index, True)
-
-            return True
-
-        return super(ArtistView, self).event(event)
-
-    def focusOutEvent(self, event):
-        self.selectionModel().clear()
-
-    def mouseReleaseEvent(self, event):
-        if event.button() == QtCore.Qt.LeftButton:
-            indexes = self.selectionModel().selectedIndexes()
-            if len(indexes) <= 1 and event.pos().x() < 20:
-                for index in indexes:
-                    self.toggled.emit(index, None)
-            if len(indexes) == 1 and event.pos().x() > self.width() - 40:
-                for index in indexes:
-                    self.show_perspective.emit(index)
-
-        return super(ArtistView, self).mouseReleaseEvent(event)
-
-
 class OverviewView(QtWidgets.QTreeView):
     # An item is requesting to be toggled, with optional forced-state
     toggled = QtCore.Signal(QtCore.QModelIndex, object)
@@ -97,7 +97,6 @@ class Window(QtWidgets.QDialog):
         header_widget = QtWidgets.QWidget(parent=main_widget)
 
         header_tab_widget = QtWidgets.QWidget(header_widget)
-        header_tab_artist = QtWidgets.QRadioButton(header_tab_widget)
         header_tab_overview = QtWidgets.QRadioButton(header_tab_widget)
         header_tab_terminal = QtWidgets.QRadioButton(header_tab_widget)
         header_spacer = QtWidgets.QWidget(header_tab_widget)
@@ -125,7 +124,6 @@ class Window(QtWidgets.QDialog):
         layout_tab = QtWidgets.QHBoxLayout(header_tab_widget)
         layout_tab.setContentsMargins(0, 0, 0, 0)
         layout_tab.setSpacing(0)
-        layout_tab.addWidget(header_tab_artist, 0)
         layout_tab.addWidget(header_tab_overview, 0)
         layout_tab.addWidget(header_tab_terminal, 0)
         layout_tab.addWidget(button_suspend_logs_widget, 0)
@@ -141,37 +139,18 @@ class Window(QtWidgets.QDialog):
 
         header_widget.setLayout(layout)
 
-        # Artist Page
-        instance_model = model.InstanceModel(controller)
-
-        artist_page = QtWidgets.QWidget()
-
-        artist_view = view.ArtistView()
-        artist_view.show_perspective.connect(self.toggle_perspective_widget)
-        artist_proxy = model.ArtistProxy()
-        artist_proxy.setSourceModel(instance_model)
-        artist_view.setModel(artist_proxy)
-
-        artist_delegate = delegate.ArtistDelegate()
-        artist_view.setItemDelegate(artist_delegate)
-
-        layout = QtWidgets.QVBoxLayout(artist_page)
-        layout.addWidget(artist_view)
-        layout.setContentsMargins(5, 5, 5, 5)
-        layout.setSpacing(0)
-
-        artist_page.setLayout(layout)
-
         # Overview Page
         # TODO add parent
         overview_page = QtWidgets.QWidget()
 
+        instance_model = model.InstanceModel(controller)
         overview_instance_view = view.InstanceView(
             animated=settings.Animated, parent=overview_page
         )
         overview_instance_delegate = delegate.InstanceDelegate(
             parent=overview_instance_view
         )
 
         overview_instance_view.setItemDelegate(overview_instance_delegate)
         overview_instance_view.setModel(instance_model)
 
@@ -223,7 +202,6 @@ class Window(QtWidgets.QDialog):
         body_widget = QtWidgets.QWidget(main_widget)
         layout = QtWidgets.QHBoxLayout(body_widget)
         layout.setContentsMargins(5, 5, 5, 1)
-        layout.addWidget(artist_page)
         layout.addWidget(overview_page)
         layout.addWidget(terminal_page)
 
@@ -361,12 +339,10 @@ class Window(QtWidgets.QDialog):
             "Footer": footer_widget,
 
             # Pages
-            "Artist": artist_page,
             "Overview": overview_page,
             "Terminal": terminal_page,
 
             # Tabs
-            "ArtistTab": header_tab_artist,
             "OverviewTab": header_tab_overview,
             "TerminalTab": header_tab_terminal,
 
@@ -399,7 +375,6 @@ class Window(QtWidgets.QDialog):
             pages_widget,
             header_widget,
             body_widget,
-            artist_page,
             comment_box,
             overview_page,
             terminal_page,
@@ -415,9 +390,6 @@ class Window(QtWidgets.QDialog):
             _widget.setAttribute(QtCore.Qt.WA_StyledBackground)
 
         # Signals
-        header_tab_artist.toggled.connect(
-            lambda: self.on_tab_changed("artist")
-        )
         header_tab_overview.toggled.connect(
             lambda: self.on_tab_changed("overview")
         )
@@ -450,7 +422,6 @@ class Window(QtWidgets.QDialog):
             QtCore.Qt.DirectConnection
         )
 
-        artist_view.toggled.connect(self.on_instance_toggle)
         overview_instance_view.toggled.connect(self.on_instance_toggle)
         overview_plugin_view.toggled.connect(self.on_plugin_toggle)
 
@@ -491,9 +462,6 @@ class Window(QtWidgets.QDialog):
         self.plugin_proxy = plugin_proxy
         self.instance_model = instance_model
 
-        self.artist_proxy = artist_proxy
-        self.artist_view = artist_view
-
         self.presets_button = presets_button
 
         self.animation_info_msg = animation_info_msg
@@ -510,17 +478,15 @@ class Window(QtWidgets.QDialog):
         self.perspective_widget = perspective_widget
 
         self.tabs = {
-            "artist": header_tab_artist,
             "overview": header_tab_overview,
             "terminal": header_tab_terminal
         }
         self.pages = (
-            ("artist", artist_page),
             ("overview", overview_page),
             ("terminal", terminal_page)
         )
 
-        current_page = settings.InitialTab or "artist"
+        current_page = settings.InitialTab or "overview"
         self.comment_main_widget.setVisible(
             not current_page == "terminal"
         )
@@ -1122,11 +1088,6 @@ class Window(QtWidgets.QDialog):
         for instance_id in existing_ids:
             self.instance_model.remove(instance_id)
 
-        if result.get("error"):
-            # Toggle from artist to overview tab on error
-            if self.tabs["artist"].isChecked():
-                self.tabs["overview"].toggle()
-
         result["records"] = self.terminal_model.prepare_records(
             result,
             self._suspend_logs
@@ -1274,7 +1235,6 @@ class Window(QtWidgets.QDialog):
         self.terminal_proxy.deleteLater()
         self.plugin_proxy.deleteLater()
 
-        self.artist_view.setModel(None)
         self.overview_instance_view.setModel(None)
         self.overview_plugin_view.setModel(None)
         self.terminal_view.setModel(None)
@@ -266,7 +266,7 @@ class DropDataFrame(QtWidgets.QFrame):
     def load_data_with_probe(self, filepath):
         ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
         args = [
-            ffprobe_path,
+            "\"{}\"".format(ffprobe_path),
             '-v', 'quiet',
             '-print_format json',
             '-show_format',
Some files were not shown because too many files have changed in this diff.