Merge remote-tracking branch 'origin/develop' into 3.0/refactoring

This commit is contained in:
Ondřej Samohel 2020-11-18 15:19:17 +01:00
commit 7ad9e1bd5b
No known key found for this signature in database
GPG key ID: 8A29C663C672C2B7
233 changed files with 408501 additions and 2838 deletions

3
.gitattributes vendored

@ -2,4 +2,5 @@
*.sh text eol=lf
*.command eol=lf
*.bat text eol=crlf
pype text eol=lf
*.js eol=lf
*.c eol=lf


@ -38,13 +38,9 @@ from .action import (
from .lib import (
version_up,
get_asset,
get_project,
get_hierarchy,
get_subsets,
get_version_from_path,
get_last_version_from_path,
modified_environ,
add_tool_to_environment,
source_hash,
get_latest_version
)
@ -84,14 +80,10 @@ __all__ = [
# get contextual data
"version_up",
"get_project",
"get_hierarchy",
"get_asset",
"get_subsets",
"get_version_from_path",
"get_last_version_from_path",
"modified_environ",
"add_tool_to_environment",
"source_hash",
"subprocess",


@ -1,15 +1,13 @@
import os
import shutil
from pype.lib import PypeHook
from pype.api import (
Anatomy,
Logger
)
import platform
import pype.lib
from pype.api import Anatomy, Logger
import getpass
import avalon.api
class TvpaintPrelaunchHook(PypeHook):
class TvpaintPrelaunchHook(pype.lib.PypeHook):
"""
Workfile preparation hook
"""
@ -23,10 +21,22 @@ class TvpaintPrelaunchHook(PypeHook):
self.signature = "( {} )".format(self.__class__.__name__)
def install_pywin(self):
if platform.system().lower() != "windows":
return
try:
from win32com.shell import shell
except Exception:
output = pype.lib._subprocess(["pip", "install", "pywin32==227"])
self.log.info(output)
def execute(self, *args, env: dict = None) -> bool:
if not env:
env = os.environ
self.install_pywin()
# get context variables
project_name = env["AVALON_PROJECT"]
asset_name = env["AVALON_ASSET"]


@ -0,0 +1,74 @@
import os
import sys
from avalon import api, io
from avalon.vendor import Qt
from pype import lib
import pyblish.api
def check_inventory():
if not lib.any_outdated():
return
host = api.registered_host()
outdated_containers = []
for container in host.ls():
representation = container['representation']
representation_doc = io.find_one(
{
"_id": io.ObjectId(representation),
"type": "representation"
},
projection={"parent": True}
)
if representation_doc and not lib.is_latest(representation_doc):
outdated_containers.append(container)
# Warn about outdated containers.
print("Starting new QApplication..")
app = Qt.QtWidgets.QApplication(sys.argv)
message_box = Qt.QtWidgets.QMessageBox()
message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
msg = "There are outdated containers in the scene."
message_box.setText(msg)
message_box.exec_()
# Garbage collect QApplication.
del app
def application_launch():
check_inventory()
def install():
print("Installing Pype config...")
plugins_directory = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
"plugins",
"aftereffects"
)
pyblish.api.register_plugin_path(
os.path.join(plugins_directory, "publish")
)
api.register_plugin_path(
api.Loader, os.path.join(plugins_directory, "load")
)
api.register_plugin_path(
api.Creator, os.path.join(plugins_directory, "create")
)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
api.on("application.launched", application_launch)
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value


@ -2,7 +2,7 @@ import sys
from avalon.vendor.Qt import QtGui
import avalon.fusion
from avalon import io
self = sys.modules[__name__]
self._project = None
@ -59,3 +59,84 @@ def get_additional_data(container):
return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
tile_color["G"],
tile_color["B"])}
def switch_item(container,
asset_name=None,
subset_name=None,
representation_name=None):
"""Switch container asset, subset or representation of a container by name.
It'll always switch to the latest version - of course a different
approach could be implemented.
Args:
container (dict): data of the item to switch with
asset_name (str): name of the asset
subset_name (str): name of the subset
representation_name (str): name of the representation
Returns:
dict
"""
if all(not x for x in [asset_name, subset_name, representation_name]):
raise ValueError("Must have at least one change provided to switch.")
# Collect any of current asset, subset and representation if not provided
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = io.ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
if subset_name is None:
subset_name = subset["name"]
if representation_name is None:
representation_name = representation["name"]
# Find the new one
asset = io.find_one({
"name": asset_name,
"type": "asset"
})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({
"name": subset_name,
"type": "subset",
"parent": asset["_id"]
})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[('name', -1)]
)
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({
"name": representation_name,
"type": "representation",
"parent": version["_id"]}
)
assert representation, ("Could not find representation in the database "
"with the name '%s'" % representation_name)
avalon.api.switch(container, representation)
return representation


@ -234,7 +234,7 @@ def switch(asset_name, filepath=None, new=True):
representations = []
for container in containers:
try:
representation = pype.switch_item(
representation = fusion_lib.switch_item(
container,
asset_name=asset_name)
representations.append(representation)


@ -1,57 +1,45 @@
# -*- coding: utf-8 -*-
"""Pype Harmony Host implementation."""
import os
import sys
from pathlib import Path
import avalon.tools.sceneinventory
import pyblish.api
from avalon import api, io, harmony
from avalon.vendor import Qt
import avalon.tools.sceneinventory
import pyblish.api
from pype import lib
from pype.api import config
def set_scene_settings(settings):
"""Set correct scene settings in Harmony.
signature = harmony.signature("set_scene_settings")
func = """function %s(args)
{
if (args[0]["fps"])
{
scene.setFrameRate(args[0]["fps"]);
}
if (args[0]["frameStart"] && args[0]["frameEnd"])
{
var duration = args[0]["frameEnd"] - args[0]["frameStart"] + 1
Args:
settings (dict): Scene settings.
if (frame.numberOf() < duration)
{
frame.insert(
duration, duration - frame.numberOf()
);
}
Returns:
dict: Dictionary of settings to set.
scene.setStartFrame(1);
scene.setStopFrame(duration);
}
if (args[0]["resolutionWidth"] && args[0]["resolutionHeight"])
{
scene.setDefaultResolution(
args[0]["resolutionWidth"], args[0]["resolutionHeight"], 41.112
)
}
}
%s
""" % (signature, signature)
harmony.send({"function": func, "args": [settings]})
"""
harmony.send(
{"function": "PypeHarmony.setSceneSettings", "args": settings})
def get_asset_settings():
"""Get settings on current asset from database.
Returns:
dict: Scene data.
"""
asset_data = lib.get_asset()["data"]
fps = asset_data.get("fps")
frame_start = asset_data.get("frameStart")
frame_end = asset_data.get("frameEnd")
resolution_width = asset_data.get("resolutionWidth")
resolution_height = asset_data.get("resolutionHeight")
entity_type = asset_data.get("entityType")
scene_data = {
"fps": fps,
@ -64,17 +52,25 @@ def get_asset_settings():
try:
skip_resolution_check = \
config.get_presets()["harmony"]["general"]["skip_resolution_check"]
skip_timelines_check = \
config.get_presets()["harmony"]["general"]["skip_timelines_check"]
except KeyError:
skip_resolution_check = []
skip_timelines_check = []
if os.getenv('AVALON_TASK') in skip_resolution_check:
scene_data.pop("resolutionWidth")
scene_data.pop("resolutionHeight")
if entity_type in skip_timelines_check:
scene_data.pop('frameStart', None)
scene_data.pop('frameEnd', None)
return scene_data
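For orientation, a hedged sketch of the dictionary this function returns (the keys follow the lookups and pops above; the values are illustrative only):

```python
# Illustrative only - real values come from the asset document in the database.
scene_data = {
    "fps": 25,
    "frameStart": 1001,
    "frameEnd": 1100,
    "resolutionWidth": 1920,
    "resolutionHeight": 1080,
}
```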
def ensure_scene_settings():
"""Validate if Harmony scene has valid settings."""
settings = get_asset_settings()
invalid_settings = []
@ -87,26 +83,22 @@ def ensure_scene_settings():
# Warn about missing attributes.
if invalid_settings:
print("Starting new QApplication..")
app = Qt.QtWidgets.QApplication.instance()
if not app:
app = Qt.QtWidgets.QApplication(sys.argv)
message_box = Qt.QtWidgets.QMessageBox()
message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
msg = "Missing attributes:"
for item in invalid_settings:
msg += f"\n{item}"
message_box.setText(msg)
message_box.exec_()
# Garbage collect QApplication.
del app
harmony.send(
{"function": "PypeHarmony.message", "args": msg})
set_scene_settings(valid_settings)
def check_inventory():
"""Check is scene contains outdated containers.
If it does it will colorize outdated nodes and display warning message
in Harmony.
"""
if not lib.any_outdated():
return
@ -125,89 +117,51 @@ def check_inventory():
outdated_containers.append(container)
# Colour nodes.
sig = harmony.signature("set_color")
func = """function %s(args){
for( var i =0; i <= args[0].length - 1; ++i)
{
var red_color = new ColorRGBA(255, 0, 0, 255);
node.setColor(args[0][i], red_color);
}
}
%s
""" % (sig, sig)
outdated_nodes = []
for container in outdated_containers:
if container["loader"] == "ImageSequenceLoader":
outdated_nodes.append(
harmony.find_node_by_name(container["name"], "READ")
)
harmony.send({"function": func, "args": [outdated_nodes]})
harmony.send({"function": "PypeHarmony.setColor", "args": outdated_nodes})
# Warn about outdated containers.
print("Starting new QApplication..")
app = Qt.QtWidgets.QApplication(sys.argv)
message_box = Qt.QtWidgets.QMessageBox()
message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
msg = "There are outdated containers in the scene."
message_box.setText(msg)
message_box.exec_()
# Garbage collect QApplication.
del app
harmony.send({"function": "PypeHarmony.message", "args": msg})
def application_launch():
"""Event that is executed after Harmony is launched."""
# FIXME: This is breaking server <-> client communication.
# It is now moved so it is manually called.
# ensure_scene_settings()
# check_inventory()
pass
pype_harmony_path = Path(__file__).parent / "js" / "PypeHarmony.js"
pype_harmony_js = pype_harmony_path.read_text()
# go through js/creators, loaders and publish folders and load all scripts
script = ""
for item in ["creators", "loaders", "publish"]:
dir_to_scan = Path(__file__).parent / "js" / item
for child in dir_to_scan.iterdir():
script += child.read_text()
# send scripts to Harmony
harmony.send({"script": pype_harmony_js})
harmony.send({"script": script})
def export_template(backdrops, nodes, filepath):
"""Export Template to file.
sig = harmony.signature("set_color")
func = """function %s(args)
{
Args:
backdrops (list): List of backdrops to export.
nodes (list): List of nodes to export.
filepath (str): Path where to save Template.
var temp_node = node.add("Top", "temp_note", "NOTE", 0, 0, 0);
var template_group = node.createGroup(temp_node, "temp_group");
node.deleteNode( template_group + "/temp_note" );
selection.clearSelection();
for (var f = 0; f < args[1].length; f++)
{
selection.addNodeToSelection(args[1][f]);
}
Action.perform("copy()", "Node View");
selection.clearSelection();
selection.addNodeToSelection(template_group);
Action.perform("onActionEnterGroup()", "Node View");
Action.perform("paste()", "Node View");
// Recreate backdrops in group.
for (var i = 0 ; i < args[0].length; i++)
{
MessageLog.trace(args[0][i]);
Backdrop.addBackdrop(template_group, args[0][i]);
};
Action.perform( "selectAll()", "Node View" );
copyPaste.createTemplateFromSelection(args[2], args[3]);
// Unfocus the group in Node view, delete all nodes and backdrops
// created during the process.
Action.perform("onActionUpToParent()", "Node View");
node.deleteNode(template_group, true, true);
}
%s
""" % (sig, sig)
"""
harmony.send({
"function": func,
"function": "PypeHarmony.exportTemplate",
"args": [
backdrops,
nodes,
@ -218,6 +172,7 @@ def export_template(backdrops, nodes, filepath):
def install():
"""Install Pype as host config."""
print("Installing Pype config ...")
plugins_directory = os.path.join(
@ -246,17 +201,12 @@ def install():
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node enabling on instance toggles."""
sig = harmony.signature("enable_node")
func = """function %s(args)
{
node.setEnable(args[0], args[1])
}
%s
""" % (sig, sig)
try:
harmony.send(
{"function": func, "args": [instance[0], new_value]}
{
"function": "PypeHarmony.toggleInstance",
"args": [instance[0], new_value]
}
)
except IndexError:
print(f"Instance '{instance}' is missing node")


@ -0,0 +1,117 @@
{
"env": {
"browser": true
},
"extends": "eslint:recommended",
"parserOptions": {
"ecmaVersion": 3
},
"rules": {
"indent": [
"error",
4
],
"linebreak-style": [
"error",
"unix"
],
"quotes": [
"error",
"single"
],
"semi": [
"error",
"always"
]
},
"globals": {
"$": "readonly",
"Action": "readonly",
"Backdrop": "readonly",
"Button": "readonly",
"Cel": "readonly",
"Cel3d": "readonly",
"CheckBox": "readonly",
"ColorRGBA": "readonly",
"ComboBox": "readonly",
"DateEdit": "readonly",
"DateEditEnum": "readonly",
"Dialog": "readonly",
"Dir": "readonly",
"DirSpec": "readonly",
"Drawing": "readonly",
"DrawingToolParams": "readonly",
"DrawingTools": "readonly",
"EnvelopeCreator": "readonly",
"ExportVideoDlg": "readonly",
"File": "readonly",
"FileAccess": "readonly",
"FileDialog": "readonly",
"GroupBox": "readonly",
"ImportDrawingDlg": "readonly",
"Input": "readonly",
"KeyModifiers": "readonly",
"Label": "readonly",
"LayoutExports": "readonly",
"LayoutExportsParams": "readonly",
"LineEdit": "readonly",
"Matrix4x4": "readonly",
"MessageBox": "readonly",
"MessageLog": "readonly",
"Model3d": "readonly",
"MovieImport": "readonly",
"NumberEdit": "readonly",
"PaletteManager": "readonly",
"PaletteObjectManager": "readonly",
"PermanentFile": "readonly",
"Point2d": "readonly",
"Point3d": "readonly",
"Process": "readonly",
"Process2": "readonly",
"Quaternion": "readonly",
"QuicktimeExporter": "readonly",
"RadioButton": "readonly",
"RemoteCmd": "readonly",
"Scene": "readonly",
"Settings": "readonly",
"Slider": "readonly",
"SpinBox": "readonly",
"SubnodeData": "readonly",
"System": "readonly",
"TemporaryFile": "readonly",
"TextEdit": "readonly",
"TimeEdit": "readonly",
"Timeline": "readonly",
"ToolProperties": "readonly",
"UiLoader": "readonly",
"Vector2d": "readonly",
"Vector3d": "readonly",
"WebCCExporter": "readonly",
"Workspaces": "readonly",
"__scriptManager__": "readonly",
"__temporaryFileContext__": "readonly",
"about": "readonly",
"column": "readonly",
"compositionOrder": "readonly",
"copyPaste": "readonly",
"deformation": "readonly",
"drawingExport": "readonly",
"element": "readonly",
"exporter": "readonly",
"fileMapper": "readonly",
"frame": "readonly",
"func": "readonly",
"library": "readonly",
"node": "readonly",
"preferences": "readonly",
"render": "readonly",
"scene": "readonly",
"selection": "readonly",
"sound": "readonly",
"specialFolders": "readonly",
"translator": "readonly",
"view": "readonly",
"waypoint": "readonly",
"xsheet": "readonly"
}
}


@ -0,0 +1,197 @@
// ***************************************************************************
// * Pype Harmony Host *
// ***************************************************************************
/**
* @namespace
* @classdesc PypeHarmony encapsulate all Pype related functions.
* @property {Object} Loaders Namespace for Loaders JS code.
* @property {Object} Creators Namespace for Creators JS code.
* @property {Object} Publish Namespace for Publish plugins JS code.
*/
var PypeHarmony = {
Loaders: {},
Creators: {},
Publish: {}
};
/**
* Show message in Harmony.
* @function
* @param {string} message Argument containing message.
*/
PypeHarmony.message = function(message) {
MessageBox.information(message);
};
/**
* Set scene settings based on shot/asset settings.
* @function
* @param {obj} settings Scene settings.
*/
PypeHarmony.setSceneSettings = function(settings) {
if (settings.fps) {
scene.setFrameRate(settings.fps);
}
if (settings.frameStart && settings.frameEnd) {
var duration = settings.frameEnd - settings.frameStart + 1;
if (frame.numberOf() > duration) {
frame.remove(duration, frame.numberOf() - duration);
}
if (frame.numberOf() < duration) {
frame.insert(duration, duration - frame.numberOf());
}
scene.setStartFrame(1);
scene.setStopFrame(duration);
}
if (settings.resolutionWidth && settings.resolutionHeight) {
scene.setDefaultResolution(
settings.resolutionWidth, settings.resolutionHeight, 41.112
);
}
};
/**
* Get scene settings.
* @function
* @return {array} Scene settings.
*/
PypeHarmony.getSceneSettings = function() {
return [
about.getApplicationPath(),
scene.currentProjectPath(),
scene.currentScene(),
scene.getFrameRate(),
scene.getStartFrame(),
scene.getStopFrame(),
sound.getSoundtrackAll().path(),
scene.defaultResolutionX(),
scene.defaultResolutionY()
];
};
/**
* Set color of nodes.
* @function
* @param {array} nodes List of nodes.
* @param {array} rgba array of RGBA components of color.
*/
PypeHarmony.setColor = function(nodes, rgba) {
for (var i =0; i <= nodes.length - 1; ++i) {
var color = PypeHarmony.color(rgba);
node.setColor(nodes[i], color);
}
};
/**
* Extract Template into file.
* @function
* @param {array} args Arguments for template extraction.
*
* @example
* // arguments are in this order:
* var args = [backdrops, nodes, templateFilename, templateDir];
*
*/
PypeHarmony.exportTemplate = function(args) {
var tempNode = node.add('Top', 'temp_note', 'NOTE', 0, 0, 0);
var templateGroup = node.createGroup(tempNode, 'temp_group');
node.deleteNode( templateGroup + '/temp_note' );
selection.clearSelection();
for (var f = 0; f < args[1].length; f++) {
selection.addNodeToSelection(args[1][f]);
}
Action.perform('copy()', 'Node View');
selection.clearSelection();
selection.addNodeToSelection(templateGroup);
Action.perform('onActionEnterGroup()', 'Node View');
Action.perform('paste()', 'Node View');
// Recreate backdrops in group.
for (var i = 0; i < args[0].length; i++) {
MessageLog.trace(args[0][i]);
Backdrop.addBackdrop(templateGroup, args[0][i]);
}
Action.perform('selectAll()', 'Node View' );
copyPaste.createTemplateFromSelection(args[2], args[3]);
// Unfocus the group in Node view, delete all nodes and backdrops
// created during the process.
Action.perform('onActionUpToParent()', 'Node View');
node.deleteNode(templateGroup, true, true);
};
/**
* Toggle instance in Harmony.
* @function
* @param {array} args Instance name and value.
*/
PypeHarmony.toggleInstance = function(args) {
node.setEnable(args[0], args[1]);
};
/**
* Delete node in Harmony.
* @function
* @param {string} _node Node name.
*/
PypeHarmony.deleteNode = function(_node) {
node.deleteNode(_node, true, true);
};
/**
* Copy file.
* @function
* @param {string} src Source file name.
* @param {string} dst Destination file name.
*/
PypeHarmony.copyFile = function(src, dst) {
var srcFile = new PermanentFile(src);
var dstFile = new PermanentFile(dst);
srcFile.copy(dstFile);
};
/**
* create RGBA color from array.
* @function
* @param {array} rgba array of rgba values.
* @return {ColorRGBA} ColorRGBA Harmony class.
*/
PypeHarmony.color = function(rgba) {
return new ColorRGBA(rgba[0], rgba[1], rgba[2], rgba[3]);
};
/**
* Get all dependencies for given node.
* @function
* @param {string} _node Node path.
* @return {array} List of dependent nodes.
*/
PypeHarmony.getDependencies = function(_node) {
// Parameter renamed so it does not shadow Harmony's global `node` API object.
var target_node = _node;
var numInput = node.numberOfInputPorts(target_node);
var dependencies = [];
for (var i = 0 ; i < numInput; i++) {
dependencies.push(node.srcNode(target_node, i));
}
return dependencies;
};
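For context, a minimal sketch of how the Python host above calls into this namespace via `harmony.send` (the message text is an illustrative assumption):

```python
# Hedged sketch: the avalon.harmony bridge serializes this call and the
# Harmony client executes PypeHarmony.message with the given argument.
from avalon import harmony

harmony.send({
    "function": "PypeHarmony.message",
    "args": "Scene settings were applied.",
})
```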


@ -0,0 +1,15 @@
## Pype - ToonBoom Harmony integration
### Development
#### Setting up ESLint as linter for JavaScript code
You need [node.js](https://nodejs.org/en/) installed. All you need to do then
is to run:
```sh
npm install
```
in **js** directory. This will install eslint and all requirements locally.
In [Atom](https://atom.io/) it is enough to install [linter-eslint](https://atom.io/packages/linter-eslint) and set the global *npm* prefix in its settings.


@ -0,0 +1,33 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// * CreateRender *
// ***************************************************************************
// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
include(PYPE_HARMONY_JS + '/pype_harmony.js');
}
/**
* @namespace
* @classdesc Code creating render containers in Harmony.
*/
var CreateRender = function() {};
/**
* Create render instance.
* @function
* @param {array} args Arguments for instance.
*/
CreateRender.prototype.create = function(args) {
node.setTextAttr(args[0], 'DRAWING_TYPE', 1, 'PNG4');
node.setTextAttr(args[0], 'DRAWING_NAME', 1, args[1]);
node.setTextAttr(args[0], 'MOVIE_PATH', 1, args[1]);
};
// add self to Pype Loaders
PypeHarmony.Creators.CreateRender = new CreateRender();


@ -0,0 +1,281 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// * ImageSequenceLoader *
// ***************************************************************************
// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
include(PYPE_HARMONY_JS + '/pype_harmony.js');
}
/**
* @namespace
* @classdesc Image Sequence loader JS code.
*/
var ImageSequenceLoader = function() {
this.PNGTransparencyMode = 0; // Premultiplied with Black
this.TGATransparencyMode = 0; // Premultiplied with Black
this.SGITransparencyMode = 0; // Premultiplied with Black
this.LayeredPSDTransparencyMode = 1; // Straight
this.FlatPSDTransparencyMode = 2; // Premultiplied with White
};
/**
* Get unique column name.
* @function
* @param {string} columnPrefix Column name.
* @return {string} Unique column name.
*/
ImageSequenceLoader.prototype.getUniqueColumnName = function(columnPrefix) {
var suffix = 0;
// find a unique name for the column
var columnName = columnPrefix;
while (suffix < 2000) {
if (!column.type(columnName)) {
break;
}
suffix = suffix + 1;
columnName = columnPrefix + '_' + suffix;
}
return columnName;
};
/**
* Import file sequences into Harmony.
* @function
* @param {object} args Arguments for import, see Example.
* @return {string} Read node name
*
* @example
* // Arguments are in the following order:
* var args = [
* files, // Files in file sequences.
* asset, // Asset name.
* subset, // Subset name.
* startFrame, // Sequence starting frame.
* groupId // Unique group ID (uuid4).
* ];
*/
ImageSequenceLoader.prototype.importFiles = function(args) {
var doc = $.scn;
var files = args[0];
var asset = args[1];
var subset = args[2];
var startFrame = args[3];
var groupId = args[4];
var vectorFormat = null;
var extension = null;
var filename = files[0];
var pos = filename.lastIndexOf('.');
if (pos < 0) {
return null;
}
// Get the current group
var nodeViewWidget = $.app.getWidgetByName('Node View');
if (!nodeViewWidget) {
$.alert('You must have a Node View open!', 'No Node View!', 'OK!');
return;
}
nodeViewWidget.setFocus();
var nodeView = view.currentView();
var currentGroup = null;
if (!nodeView) {
currentGroup = doc.root;
} else {
currentGroup = doc.$node(view.group(nodeView));
}
// Get a unique iterative name for the container read node
var num = 0;
var name = '';
do {
name = asset + '_' + (num++) + '_' + subset;
} while (currentGroup.getNodeByName(name) != null);
extension = filename.substr(pos+1).toLowerCase();
if (extension == 'jpeg') {
extension = 'jpg';
}
if (extension == 'tvg') {
vectorFormat = 'TVG';
extension ='SCAN'; // element.add() will use this.
}
var elemId = element.add(
name,
'BW',
scene.numberOfUnitsZ(),
extension.toUpperCase(),
vectorFormat
);
if (elemId == -1) {
// hum, unknown file type most likely -- let's skip it.
return null; // no read to add.
}
var uniqueColumnName = this.getUniqueColumnName(name);
column.add(uniqueColumnName, 'DRAWING');
column.setElementIdOfDrawing(uniqueColumnName, elemId);
var read = node.add(currentGroup, name, 'READ', 0, 0, 0);
var transparencyAttr = node.getAttr(
read, frame.current(), 'READ_TRANSPARENCY'
);
var opacityAttr = node.getAttr(read, frame.current(), 'OPACITY');
transparencyAttr.setValue(true);
opacityAttr.setValue(true);
var alignmentAttr = node.getAttr(read, frame.current(), 'ALIGNMENT_RULE');
alignmentAttr.setValue('ASIS');
var transparencyModeAttr = node.getAttr(
read, frame.current(), 'applyMatteToColor'
);
if (extension === 'png') {
transparencyModeAttr.setValue(this.PNGTransparencyMode);
}
if (extension === 'tga') {
transparencyModeAttr.setValue(this.TGATransparencyMode);
}
if (extension === 'sgi') {
transparencyModeAttr.setValue(this.SGITransparencyMode);
}
if (extension === 'psd') {
transparencyModeAttr.setValue(this.FlatPSDTransparencyMode);
}
if (extension === 'jpg') {
transparencyModeAttr.setValue(this.LayeredPSDTransparencyMode);
}
var drawingFilePath;
var timing;
node.linkAttr(read, 'DRAWING.ELEMENT', uniqueColumnName);
if (files.length === 1) {
// Create a drawing; 'true' indicates that the file exists.
Drawing.create(elemId, 1, true);
// Get the actual path, in tmp folder.
drawingFilePath = Drawing.filename(elemId, '1');
PypeHarmony.copyFile(files[0], drawingFilePath);
// Expose the image for the entire frame range.
for (var i =0; i <= frame.numberOf() - 1; ++i) {
timing = startFrame + i;
column.setEntry(uniqueColumnName, 1, timing, '1');
}
} else {
// Create a drawing for each file.
for (var j =0; j <= files.length - 1; ++j) {
timing = startFrame + j;
// Create a drawing; 'true' indicates that the file exists.
Drawing.create(elemId, timing, true);
// Get the actual path, in tmp folder.
drawingFilePath = Drawing.filename(elemId, timing.toString());
PypeHarmony.copyFile(files[j], drawingFilePath);
column.setEntry(uniqueColumnName, 1, timing, timing.toString());
}
}
var greenColor = new ColorRGBA(0, 255, 0, 255);
node.setColor(read, greenColor);
// Add uuid to attribute of the container read node
node.createDynamicAttr(read, 'STRING', 'uuid', 'uuid', false);
node.setTextAttr(read, 'uuid', 1.0, groupId);
return read;
};
/**
* Replace files sequences in Harmony.
* @function
* @param {object} args Arguments for import, see Example.
* @return {string} Read node name
*
* @example
* // Arguments are in the following order:
* var args = [
* files, // Files in file sequences
* name, // Node name
* startFrame // Sequence starting frame
* ];
*/
ImageSequenceLoader.prototype.replaceFiles = function(args) {
var files = args[0];
MessageLog.trace(files);
MessageLog.trace(files.length);
var _node = args[1];
var startFrame = args[2];
var _column = node.linkedColumn(_node, 'DRAWING.ELEMENT');
var elemId = column.getElementIdOfDrawing(_column);
// Delete existing drawings.
var timings = column.getDrawingTimings(_column);
for ( var i =0; i <= timings.length - 1; ++i) {
column.deleteDrawingAt(_column, parseInt(timings[i]));
}
var filename = files[0];
var pos = filename.lastIndexOf('.');
if (pos < 0) {
return null;
}
var extension = filename.substr(pos+1).toLowerCase();
if (extension === 'jpeg') {
extension = 'jpg';
}
var transparencyModeAttr = node.getAttr(
_node, frame.current(), 'applyMatteToColor'
);
if (extension === 'png') {
transparencyModeAttr.setValue(this.PNGTransparencyMode);
}
if (extension === 'tga') {
transparencyModeAttr.setValue(this.TGATransparencyMode);
}
if (extension === 'sgi') {
transparencyModeAttr.setValue(this.SGITransparencyMode);
}
if (extension == 'psd') {
transparencyModeAttr.setValue(this.FlatPSDTransparencyMode);
}
if (extension === 'jpg') {
transparencyModeAttr.setValue(this.LayeredPSDTransparencyMode);
}
var drawingFilePath;
var timing;
if (files.length == 1) {
// Create a drawing; 'true' indicates that the file exists.
Drawing.create(elemId, 1, true);
// Get the actual path, in tmp folder.
drawingFilePath = Drawing.filename(elemId, '1');
PypeHarmony.copyFile(files[0], drawingFilePath);
MessageLog.trace(files[0]);
MessageLog.trace(drawingFilePath);
// Expose the image for the entire frame range.
for (var k =0; k <= frame.numberOf() - 1; ++k) {
timing = startFrame + k;
column.setEntry(_column, 1, timing, '1');
}
} else {
// Create a drawing for each file.
for (var l =0; l <= files.length - 1; ++l) {
timing = startFrame + l;
// Create a drawing; 'true' indicates that the file exists.
Drawing.create(elemId, timing, true);
// Get the actual path, in tmp folder.
drawingFilePath = Drawing.filename(elemId, timing.toString());
PypeHarmony.copyFile( files[l], drawingFilePath );
column.setEntry(_column, 1, timing, timing.toString());
}
}
var greenColor = new ColorRGBA(0, 255, 0, 255);
node.setColor(_node, greenColor);
};
// add self to Pype Loaders
PypeHarmony.Loaders.ImageSequenceLoader = new ImageSequenceLoader();


@ -0,0 +1,177 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// * TemplateLoader *
// ***************************************************************************
// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
include(PYPE_HARMONY_JS + '/pype_harmony.js');
}
/**
* @namespace
* @classdesc Template loader JS code.
*/
var TemplateLoader = function() {};
/**
* Load template as container.
* @function
* @param {array} args Arguments, see example.
* @return {string} Name of container.
*
* @example
* // arguments are in following order:
* var args = [
* templatePath, // Path to tpl file.
* assetName, // Asset name.
* subsetName, // Subset name.
* groupId // unique ID (uuid4)
* ];
*/
TemplateLoader.prototype.loadContainer = function(args) {
var doc = $.scn;
var templatePath = args[0];
var assetName = args[1];
var subset = args[2];
var groupId = args[3];
// Get the current group
var nodeViewWidget = $.app.getWidgetByName('Node View');
if (!nodeViewWidget) {
$.alert('You must have a Node View open!', 'No Node View!', 'OK!');
return;
}
nodeViewWidget.setFocus();
var currentGroup;
var nodeView = view.currentView();
if (!nodeView) {
currentGroup = doc.root;
} else {
currentGroup = doc.$node(view.group(nodeView));
}
// Get a unique iterative name for the container group
var num = 0;
var containerGroupName = '';
do {
containerGroupName = assetName + '_' + (num++) + '_' + subset;
} while (currentGroup.getNodeByName(containerGroupName) != null);
// import the template
var tplNodes = currentGroup.importTemplate(templatePath);
MessageLog.trace(tplNodes);
// Create the container group
var groupNode = currentGroup.addGroup(
containerGroupName, false, false, tplNodes);
// Add uuid to attribute of the container group
node.createDynamicAttr(groupNode, 'STRING', 'uuid', 'uuid', false);
node.setTextAttr(groupNode, 'uuid', 1.0, groupId);
return String(groupNode);
};
/**
* Replace existing node container.
* @function
* @param {string} dstNodePath Harmony path to destination Node.
* @param {string} srcNodePath Harmony path to source Node.
* @param {string} renameSrc ...
* @param {boolean} cloneSrc ...
* @return {boolean} Success
* @todo This is work in progress.
*/
TemplateLoader.prototype.replaceNode = function(
dstNodePath, srcNodePath, renameSrc, cloneSrc) {
var doc = $.scn;
var srcNode = doc.$node(srcNodePath);
var dstNode = doc.$node(dstNodePath);
// var dstNodeName = dstNode.name;
var replacementNode = srcNode;
// var dstGroup = dstNode.group;
$.beginUndo();
if (cloneSrc) {
replacementNode = doc.$node(
$.nodeTools.copy_paste_node(
srcNodePath, dstNode.name + '_CLONE', dstNode.group.path));
} else {
if (replacementNode.group.path != srcNode.group.path) {
replacementNode.moveToGroup(dstNode);
}
}
var inLinks = dstNode.getInLinks();
var link, inNode, inPort, outPort, outNode, success;
for (var l in inLinks) {
if (Object.prototype.hasOwnProperty.call(inLinks, l)) {
link = inLinks[l];
inPort = Number(link.inPort);
outPort = Number(link.outPort);
outNode = link.outNode;
success = replacementNode.linkInNode(outNode, inPort, outPort);
if (success) {
$.log('Successfully connected ' + outNode + ' : ' +
outPort + ' -> ' + replacementNode + ' : ' + inPort);
} else {
$.alert('Failed to connect ' + outNode + ' : ' +
outPort + ' -> ' + replacementNode + ' : ' + inPort);
}
}
}
var outLinks = dstNode.getOutLinks();
for (l in outLinks) {
if (Object.prototype.hasOwnProperty.call(outLinks, l)) {
link = outLinks[l];
inPort = Number(link.inPort);
outPort = Number(link.outPort);
inNode = link.inNode;
// first we must disconnect the port from the node being
// replaced to this link's inNode port
inNode.unlinkInPort(inPort);
success = replacementNode.linkOutNode(inNode, outPort, inPort);
if (success) {
$.log('Successfully connected ' + inNode + ' : ' +
inPort + ' <- ' + replacementNode + ' : ' + outPort);
} else {
if (inNode.type == 'MultiLayerWrite') {
$.log('Attempting standard api to connect the nodes...');
success = node.link(
replacementNode, outPort, inNode,
inPort, node.numberOfInputPorts(inNode) + 1);
if (success) {
$.log('Successfully connected ' + inNode + ' : ' +
inPort + ' <- ' + replacementNode + ' : ' + outPort);
}
}
}
if (!success) {
$.alert('Failed to connect ' + inNode + ' : ' +
inPort + ' <- ' + replacementNode + ' : ' + outPort);
return false;
}
}
}
};
TemplateLoader.prototype.askForColumnsUpdate = function() {
// Ask user if they want to also update columns and
// linked attributes here
return ($.confirm(
'Would you like to update in place and reconnect all \n' +
'ins/outs, attributes, and columns?',
'Update & Replace?\n' +
'If you choose No, the version will only be loaded.',
'Yes',
'No'));
};
// add self to Pype Loaders
PypeHarmony.Loaders.TemplateLoader = new TemplateLoader();


@ -0,0 +1,19 @@
{
"name": "pype-harmony",
"version": "1.0.0",
"description": "Avalon Harmony Host integration",
"keywords": [
"Pype",
"Avalon",
"Harmony",
"pipeline"
],
"license": "MIT",
"main": "PypeHarmony.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"devDependencies": {
"eslint": "^7.11.0"
}
}


@ -0,0 +1,28 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// * CollectCurrentFile *
// ***************************************************************************
// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
include(PYPE_HARMONY_JS + '/pype_harmony.js');
}
/**
* @namespace
* @classdesc Collect Current file
*/
var CollectCurrentFile = function() {};
CollectCurrentFile.prototype.collect = function() {
return (
scene.currentProjectPath() + '/' +
scene.currentVersionName() + '.xstage'
);
};
// add self to Pype Loaders
PypeHarmony.Publish.CollectCurrentFile = new CollectCurrentFile();


@ -0,0 +1,33 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// * CollectPalettes *
// ***************************************************************************
// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
include(PYPE_HARMONY_JS + '/pype_harmony.js');
}
/**
* @namespace
* @classdesc Palette collector JS code.
*/
var CollectPalettes = function() {};
CollectPalettes.prototype.getPalettes = function() {
var palette_list = PaletteObjectManager.getScenePaletteList();
var palettes = {};
for(var i=0; i < palette_list.numPalettes; ++i) {
var palette = palette_list.getPaletteByIndex(i);
palettes[palette.getName()] = palette.id;
}
return palettes;
};
// add self to Pype Loaders
PypeHarmony.Publish.CollectPalettes = new CollectPalettes();


@ -0,0 +1,38 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// * ExtractPalette *
// ***************************************************************************
// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
include(PYPE_HARMONY_JS + '/pype_harmony.js');
}
/**
* @namespace
* @classdesc Code for extracting palettes.
*/
var ExtractPalette = function() {};
/**
* Get palette from Harmony.
* @function
* @param {string} paletteId ID of palette to get.
* @return {array} [paletteName, palettePath]
*/
ExtractPalette.prototype.getPalette = function(paletteId) {
var palette_list = PaletteObjectManager.getScenePaletteList();
var palette = palette_list.getPaletteById(paletteId);
var palette_name = palette.getName();
return [
palette_name,
(palette.getPath() + '/' + palette.getName() + '.plt')
];
};
// add self to Pype Loaders
PypeHarmony.Publish.ExtractPalette = new ExtractPalette();


@ -0,0 +1,54 @@
/* global PypeHarmony:writable, include */
// ***************************************************************************
// * ExtractTemplate *
// ***************************************************************************
// check if PypeHarmony is defined and if not, load it.
if (typeof PypeHarmony === 'undefined') {
var PYPE_HARMONY_JS = System.getenv('PYPE_HARMONY_JS');
include(PYPE_HARMONY_JS + '/pype_harmony.js');
}
/**
* @namespace
* @classdesc Code for extracting templates.
*/
var ExtractTemplate = function() {};
/**
* Get backdrops for given node.
* @function
* @param {string} probeNode Node path to probe for backdrops.
* @return {array} list of backdrops.
*/
ExtractTemplate.prototype.getBackdropsByNode = function(probeNode) {
var backdrops = Backdrop.backdrops('Top');
var valid_backdrops = [];
for(var i=0; i<backdrops.length; i++)
{
var position = backdrops[i].position;
var x_valid = false;
var node_x = node.coordX(probeNode);
if (position.x < node_x && node_x < (position.x + position.w)){
x_valid = true;
}
var y_valid = false;
var node_y = node.coordY(probeNode);
if (position.y < node_y && node_y < (position.y + position.h)){
y_valid = true;
}
if (x_valid && y_valid){
valid_backdrops.push(backdrops[i]);
}
}
return valid_backdrops;
};
// add self to Pype Loaders
PypeHarmony.Publish.ExtractTemplate = new ExtractTemplate();


@ -5,6 +5,7 @@ import sys
import avalon.api as avalon
import hiero
import pyblish.api
import avalon.io
from avalon.vendor.Qt import (QtWidgets, QtGui)
import pype.api as pype
@ -60,7 +61,8 @@ def sync_avalon_data_to_workfile():
project.setProjectRoot(active_project_root)
# get project data from avalon db
project_data = pype.get_project()["data"]
project_doc = avalon.io.find_one({"type": "project"})
project_data = project_doc["data"]
log.debug("project_data: {}".format(project_data))
@ -92,7 +94,6 @@ def launch_workfiles_app(event):
set_workfiles()
def reload_config():
"""Attempt to reload pipeline at run-time.


@ -564,6 +564,7 @@ class ExpectedFilesVray(AExpectedFiles):
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
layer_data["defaultExt"] = default_ext
layer_data["padding"] = cmds.getAttr("vraySettings.fileNamePadding")
return layer_data
def get_files(self):
@ -614,11 +615,14 @@ class ExpectedFilesVray(AExpectedFiles):
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
# filter all namespace prefixed AOVs - they are pulled in from
# references and are not rendered.
vr_aovs = [
n
for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"]
)
if len(n.split(":")) == 1
]
for aov in vr_aovs:


@ -8,6 +8,7 @@ import math
import bson
import json
import logging
import itertools
import contextlib
from collections import OrderedDict, defaultdict
from math import ceil
@ -122,6 +123,12 @@ def float_round(num, places=0, direction=ceil):
return direction(num * (10**places)) / float(10**places)
def pairwise(iterable):
"""s -> (s0,s1), (s2,s3), (s4, s5), ..."""
a = iter(iterable)
return zip(a, a)  # itertools.izip was removed in Python 3
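A short illustration of the helper above: `empty_sets` further down stores Maya connections as a flat list of alternating destination/source plugs, and `pairwise` regroups them (the plug names here are hypothetical):

```python
# Illustrative only - plug names are made up for the example.
connections = [
    "setA.dagSetMembers[0]", "pCube1.instObjGroups[0]",
    "setA.dagSetMembers[1]", "pSphere1.instObjGroups[0]",
]
for dest, src in pairwise(connections):
    print(src, "->", dest)
```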
def unique(name):
assert isinstance(name, string_types), "`name` must be string"
@ -419,12 +426,12 @@ def empty_sets(sets, force=False):
plugs=True,
connections=True) or []
original_connections.extend(connections)
for dest, src in lib.pairwise(connections):
for dest, src in pairwise(connections):
cmds.disconnectAttr(src, dest)
yield
finally:
for dest, src in lib.pairwise(original_connections):
for dest, src in pairwise(original_connections):
cmds.connectAttr(src, dest)
# Restore original members
@ -1857,8 +1864,8 @@ def set_context_settings():
"""
# Todo (Wijnand): apply renderer and resolution of project
project_data = lib.get_project()["data"]
project_doc = io.find_one({"type": "project"})
project_data = project_doc["data"]
asset_data = lib.get_asset()["data"]
# Set project fps


@ -195,7 +195,7 @@ def format_anatomy(data):
if not version:
file = script_name()
data["version"] = pype.get_version_from_path(file)
project_document = pype.get_project()
project_document = io.find_one({"type": "project"})
data.update({
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
@ -978,24 +978,30 @@ class WorkfileSettings(object):
self.set_colorspace()
def set_favorites(self):
anatomy = get_anatomy()
work_template = anatomy.templates["work"]["path"]
projects_root = anatomy.root_value_for_template(work_template)
work_dir = os.getenv("AVALON_WORKDIR")
asset = os.getenv("AVALON_ASSET")
project = os.getenv("AVALON_PROJECT")
hierarchy = os.getenv("AVALON_HIERARCHY")
favorite_items = OrderedDict()
# project
favorite_items.update({"Project dir": os.path.join(
projects_root, project).replace("\\", "/")})
# shot
favorite_items.update({"Shot dir": os.path.join(
projects_root, project,
hierarchy, asset).replace("\\", "/")})
# get project's root and split to parts
projects_root = os.path.normpath(work_dir.split(
project)[0])
# add project name
project_dir = os.path.join(projects_root, project) + "/"
# add to favorites
favorite_items.update({"Project dir": project_dir.replace("\\", "/")})
# asset
asset_root = os.path.normpath(work_dir.split(
asset)[0])
# add asset name
asset_dir = os.path.join(asset_root, asset) + "/"
# add to favorites
favorite_items.update({"Shot dir": asset_dir.replace("\\", "/")})
# workdir
favorite_items.update({"Work dir": work_dir})
favorite_items.update({"Work dir": work_dir.replace("\\", "/")})
set_context_favorites(favorite_items)
@ -1388,8 +1394,18 @@ class ExporterReviewMov(ExporterReview):
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(self.path)
write_node["file_type"].setValue(self.ext)
# Knobs `meta_codec` and `mov64_codec` are not available on centos.
# TODO change this to use conditions, if possible.
try:
write_node["meta_codec"].setValue("ap4h")
except Exception:
self.log.info("`meta_codec` knob was not found")
try:
write_node["mov64_codec"].setValue("ap4h")
except Exception:
self.log.info("`mov64_codec` knob was not found")
write_node["mov64_write_timecode"].setValue(1)
write_node["raw"].setValue(1)
# connect


@ -1 +1,31 @@
kwargs = None
import os
import logging
from avalon.tvpaint.communication_server import register_localization_file
import avalon.api
import pyblish.api
from pype import PLUGINS_DIR
log = logging.getLogger("pype.hosts.tvpaint")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "tvpaint", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "tvpaint", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "tvpaint", "create")
def install():
log.info("Pype - Installing TVPaint integration")
current_dir = os.path.dirname(os.path.abspath(__file__))
localization_file = os.path.join(current_dir, "avalon.loc")
register_localization_file(localization_file)
pyblish.api.register_plugin_path(PUBLISH_PATH)
avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
def uninstall():
log.info("Pype - Uninstalling TVPaint integration")
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)


@ -0,0 +1,37 @@
#-------------------------------------------------
#------------ AVALON PLUGIN LOC FILE -------------
#-------------------------------------------------
#Language : English
#Version : 1.0
#Date : 27/10/2020
#-------------------------------------------------
#------------ COMMON -----------------------------
#-------------------------------------------------
$100 "Pype Tools"
$10010 "Workfiles"
$10020 "Load"
$10030 "Create"
$10040 "Scene inventory"
$10050 "Publish"
$10060 "Library"
#------------ Help -------------------------------
$20010 "Open workfiles tool"
$20020 "Open loader tool"
$20030 "Open creator tool"
$20040 "Open scene inventory tool"
$20050 "Open publisher"
$20060 "Open library loader tool"
#------------ Errors -----------------------------
$30001 "Can't Open Requester !"
#-------------------------------------------------
#------------ END --------------------------------
#-------------------------------------------------

File diff suppressed because it is too large


@ -1,5 +1,35 @@
# -*- coding: utf-8 -*-
"""Pype module API."""
from .ffmpeg_utils import ffprobe_streams
from .path_tools import (
version_up,
get_version_from_path,
get_last_version_from_path,
get_paths_from_environ,
get_ffmpeg_tool_path
)
from .plugin_tools import filter_pyblish_plugins, source_hash
from .applications import (
ApplicationLaunchFailed,
launch_application,
ApplicationAction,
_subprocess
)
from .hooks import PypeHook, execute_hook
from .avalon_context import (
is_latest,
any_outdated,
get_asset,
get_hierarchy,
get_linked_assets,
get_latest_version,
BuildWorkfile
)
from .deprecated import (
get_avalon_database,
set_io_database
)
from .terminal import Terminal
from .anatomy import Anatomy
from .config import (
@ -22,23 +52,57 @@ from .user_settings import IniSettingRegistry
from .user_settings import JSONSettingRegistry
from .user_settings import PypeSettingsRegistry
"""Pype lib module."""
terminal = Terminal
__all__ = [
terminal,
Anatomy,
get_datetime_data,
load_json,
collect_json_from_path,
get_presets,
get_init_presets,
update_dict,
execute,
PypeLogger,
decompose_url,
compose_url,
get_default_components,
IniSettingRegistry,
JSONSettingRegistry,
PypeSettingsRegistry
"get_avalon_database",
"set_io_database",
"is_latest",
"any_outdated",
"get_asset",
"get_hierarchy",
"get_linked_assets",
"get_latest_version",
"BuildWorkfile",
"PypeHook",
"execute_hook",
"ApplicationLaunchFailed",
"launch_application",
"ApplicationAction",
"filter_pyblish_plugins",
"version_up",
"get_version_from_path",
"get_last_version_from_path",
"get_paths_from_environ",
"get_ffmpeg_tool_path",
"ffprobe_streams",
"source_hash",
"_subprocess",
"terminal",
"Anatomy",
"get_datetime_data",
"load_json",
"collect_json_from_path",
"get_presets",
"get_init_presets",
"update_dict",
"execute",
"PypeLogger",
"decompose_url",
"compose_url",
"get_default_components",
"IniSettingRegistry",
"JSONSettingRegistry",
"PypeSettingsRegistry"
]
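For orientation, a minimal sketch of how the flattened public API reads after this restructuring (only a sample of the names re-exported above):

```python
# Illustrative imports from the reorganized pype.lib package.
from pype.lib import (
    get_asset,            # re-exported from .avalon_context
    get_latest_version,   # re-exported from .avalon_context
    version_up,           # re-exported from .path_tools
    launch_application,   # re-exported from .applications
)
```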

457
pype/lib/applications.py Normal file

@ -0,0 +1,457 @@
import os
import sys
import getpass
import copy
import platform
import logging
import subprocess
import acre
import avalon.lib
from ..api import Anatomy, Logger, config
from .hooks import execute_hook
from .deprecated import get_avalon_database
log = logging.getLogger(__name__)
class ApplicationLaunchFailed(Exception):
pass
def launch_application(project_name, asset_name, task_name, app_name):
"""Launch host application with filling required environments.
TODO(iLLiCiT): This should be split into more parts.
"""
# `get_avalon_database` is in Pype 3 replaced with using `AvalonMongoDB`
database = get_avalon_database()
project_document = database[project_name].find_one({"type": "project"})
asset_document = database[project_name].find_one({
"type": "asset",
"name": asset_name
})
asset_doc_parents = asset_document["data"].get("parents")
hierarchy = "/".join(asset_doc_parents)
app_def = avalon.lib.get_application(app_name)
app_label = app_def.get("ftrack_label", app_def.get("label", app_name))
host_name = app_def["application_dir"]
# Workfile data collection may be special function?
data = {
"project": {
"name": project_document["name"],
"code": project_document["data"].get("code")
},
"task": task_name,
"asset": asset_name,
"app": host_name,
"hierarchy": hierarchy
}
try:
anatomy = Anatomy(project_name)
anatomy_filled = anatomy.format(data)
workdir = os.path.normpath(anatomy_filled["work"]["folder"])
except Exception as exc:
raise ApplicationLaunchFailed(
"Error in anatomy.format: {}".format(str(exc))
)
try:
os.makedirs(workdir)
except FileExistsError:
pass
last_workfile_path = None
extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(host_name)
if extensions:
# Find last workfile
file_template = anatomy.templates["work"]["file"]
data.update({
"version": 1,
"user": os.environ.get("PYPE_USERNAME") or getpass.getuser(),
"ext": extensions[0]
})
last_workfile_path = avalon.api.last_workfile(
workdir, file_template, data, extensions, True
)
# set environments for Avalon
prep_env = copy.deepcopy(os.environ)
prep_env.update({
"AVALON_PROJECT": project_name,
"AVALON_ASSET": asset_name,
"AVALON_TASK": task_name,
"AVALON_APP": host_name,
"AVALON_APP_NAME": app_name,
"AVALON_HIERARCHY": hierarchy,
"AVALON_WORKDIR": workdir
})
start_last_workfile = avalon.api.should_start_last_workfile(
project_name, host_name, task_name
)
# Store boolean as "0"(False) or "1"(True)
prep_env["AVALON_OPEN_LAST_WORKFILE"] = (
str(int(bool(start_last_workfile)))
)
if (
start_last_workfile
and last_workfile_path
and os.path.exists(last_workfile_path)
):
prep_env["AVALON_LAST_WORKFILE"] = last_workfile_path
prep_env.update(anatomy.roots_obj.root_environments())
# collect all the 'environment' attributes from parents
tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]]
tools_env = asset_document["data"].get("tools_env") or []
tools_attr.extend(tools_env)
tools_env = acre.get_tools(tools_attr)
env = acre.compute(tools_env)
env = acre.merge(env, current_env=dict(prep_env))
# Get path to execute
st_temp_path = os.environ["PYPE_CONFIG"]
os_plat = platform.system().lower()
# Path to folder with launchers
path = os.path.join(st_temp_path, "launchers", os_plat)
# Full path to executable launcher
execfile = None
launch_hook = app_def.get("launch_hook")
if launch_hook:
log.info("launching hook: {}".format(launch_hook))
ret_val = execute_hook(launch_hook, env=env)
if not ret_val:
raise ApplicationLaunchFailed(
"Hook didn't finish successfully {}".format(app_label)
)
if sys.platform == "win32":
for ext in os.environ["PATHEXT"].split(os.pathsep):
fpath = os.path.join(path.strip('"'), app_def["executable"] + ext)
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
execfile = fpath
break
# Run SW if was found executable
if execfile is None:
raise ApplicationLaunchFailed(
"We didn't find launcher for {}".format(app_label)
)
popen = avalon.lib.launch(
executable=execfile, args=[], environment=env
)
elif (
sys.platform.startswith("linux")
or sys.platform.startswith("darwin")
):
execfile = os.path.join(path.strip('"'), app_def["executable"])
# Run SW if was found executable
if execfile is None:
raise ApplicationLaunchFailed(
"We didn't find launcher for {}".format(app_label)
)
if not os.path.isfile(execfile):
raise ApplicationLaunchFailed(
"Launcher doesn't exist - {}".format(execfile)
)
try:
fp = open(execfile)
except PermissionError as perm_exc:
raise ApplicationLaunchFailed(
"Access denied on launcher {} - {}".format(execfile, perm_exc)
)
fp.close()
# check executable permission
if not os.access(execfile, os.X_OK):
raise ApplicationLaunchFailed(
"No executable permission - {}".format(execfile)
)
popen = avalon.lib.launch( # noqa: F841
"/usr/bin/env", args=["bash", execfile], environment=env
)
return popen
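A minimal usage sketch; the project, asset, task and application names are illustrative assumptions and the call requires a configured Pype/Avalon environment:

```python
# Hedged example - all names below are placeholders, not real project data.
popen = launch_application(
    project_name="my_project",
    asset_name="sh010",
    task_name="animation",
    app_name="maya_2020",
)
```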
class ApplicationAction(avalon.api.Action):
"""Default application launcher
This is a convenience application Action that when "config" refers to a
parsed application `.toml` this can launch the application.
"""
_log = None
config = None
group = None
variant = None
required_session_keys = (
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK"
)
@property
def log(self):
if self._log is None:
self._log = Logger().get_logger(self.__class__.__name__)
return self._log
def is_compatible(self, session):
for key in self.required_session_keys:
if key not in session:
return False
return True
def process(self, session, **kwargs):
"""Process the full Application action"""
project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session["AVALON_TASK"]
launch_application(
project_name, asset_name, task_name, self.name
)
self._ftrack_after_launch_procedure(
project_name, asset_name, task_name
)
def _ftrack_after_launch_procedure(
self, project_name, asset_name, task_name
):
# TODO move to launch hook
required_keys = ("FTRACK_SERVER", "FTRACK_API_USER", "FTRACK_API_KEY")
for key in required_keys:
if not os.environ.get(key):
self.log.debug((
"Missing required environment \"{}\""
" for Ftrack after launch procedure."
).format(key))
return
try:
import ftrack_api
session = ftrack_api.Session(auto_connect_event_hub=True)
self.log.debug("Ftrack session created")
except Exception:
self.log.warning("Couldn't create Ftrack session")
return
try:
entity = self._find_ftrack_task_entity(
session, project_name, asset_name, task_name
)
self._ftrack_status_change(session, entity, project_name)
self._start_timer(session, entity, ftrack_api)
except Exception:
self.log.warning(
"Couldn't finish Ftrack procedure.", exc_info=True
)
return
finally:
session.close()
def _find_ftrack_task_entity(
self, session, project_name, asset_name, task_name
):
project_entity = session.query(
"Project where full_name is \"{}\"".format(project_name)
).first()
if not project_entity:
self.log.warning(
"Couldn't find project \"{}\" in Ftrack.".format(project_name)
)
return
potential_task_entities = session.query((
"TypedContext where parent.name is \"{}\" and project_id is \"{}\""
).format(asset_name, project_entity["id"])).all()
filtered_entities = []
for _entity in potential_task_entities:
if (
_entity.entity_type.lower() == "task"
and _entity["name"] == task_name
):
filtered_entities.append(_entity)
if not filtered_entities:
self.log.warning((
"Couldn't find task \"{}\" under parent \"{}\" in Ftrack."
).format(task_name, asset_name))
return
if len(filtered_entities) > 1:
self.log.warning((
"Found more than one task \"{}\""
" under parent \"{}\" in Ftrack."
).format(task_name, asset_name))
return
return filtered_entities[0]
def _ftrack_status_change(self, session, entity, project_name):
presets = config.get_presets(project_name)["ftrack"]["ftrack_config"]
statuses = presets.get("status_update")
if not statuses:
return
actual_status = entity["status"]["name"].lower()
already_tested = set()
ent_path = "/".join(
[ent["name"] for ent in entity["link"]]
)
while True:
next_status_name = None
for key, value in statuses.items():
if key in already_tested:
continue
if actual_status in value or "_any_" in value:
if key != "_ignore_":
next_status_name = key
already_tested.add(key)
break
already_tested.add(key)
if next_status_name is None:
break
try:
query = "Status where name is \"{}\"".format(
next_status_name
)
status = session.query(query).one()
entity["status"] = status
session.commit()
self.log.debug("Changing status to \"{}\" <{}>".format(
next_status_name, ent_path
))
break
except Exception:
session.rollback()
msg = (
"Status \"{}\" in presets wasn't found"
" on Ftrack entity type \"{}\""
).format(next_status_name, entity.entity_type)
self.log.warning(msg)
def _start_timer(self, session, entity, _ftrack_api):
self.log.debug("Triggering timer start.")
user_entity = session.query("User where username is \"{}\"".format(
os.environ["FTRACK_API_USER"]
)).first()
if not user_entity:
self.log.warning(
"Couldn't find user with username \"{}\" in Ftrack".format(
os.environ["FTRACK_API_USER"]
)
)
return
source = {
"user": {
"id": user_entity["id"],
"username": user_entity["username"]
}
}
event_data = {
"actionIdentifier": "start.timer",
"selection": [{"entityId": entity["id"], "entityType": "task"}]
}
session.event_hub.publish(
_ftrack_api.event.base.Event(
topic="ftrack.action.launch",
data=event_data,
source=source
),
on_error="ignore"
)
self.log.debug("Timer start triggered successfully.")
# Special naming case for subprocess since it shadows the built-in subprocess module.
def _subprocess(*args, **kwargs):
"""Convenience method for getting output errors for subprocess.
Entered arguments and keyword arguments are passed to subprocess Popen.
Args:
*args: Variable length argument list passed to Popen.
**kwargs : Arbitrary keyword arguments passed to Popen. A
`logging.Logger` object can be passed under "logger" to use
a different logger than lib's default.
Returns:
str: Full output of subprocess concatenated stdout and stderr.
Raises:
RuntimeError: Exception is raised if process finished with nonzero
return code.
"""
# Get environments from kwargs or use the current process environments
# if none were passed.
env = kwargs.get("env") or os.environ
# Make sure environment contains only strings
filtered_env = {k: str(v) for k, v in env.items()}
# Use lib's logger if was not passed with kwargs.
logger = kwargs.pop("logger", log)
# set overrides
kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
kwargs['env'] = filtered_env
proc = subprocess.Popen(*args, **kwargs)
full_output = ""
_stdout, _stderr = proc.communicate()
if _stdout:
_stdout = _stdout.decode("utf-8")
full_output += _stdout
logger.debug(_stdout)
if _stderr:
_stderr = _stderr.decode("utf-8")
# Add additional line break if output already contains stdout
if full_output:
full_output += "\n"
full_output += _stderr
logger.warning(_stderr)
if proc.returncode != 0:
exc_msg = "Executing arguments was not successful: \"{}\"".format(args)
if _stdout:
exc_msg += "\n\nOutput:\n{}".format(_stdout)
if _stderr:
exc_msg += "Error:\n{}".format(_stderr)
raise RuntimeError(exc_msg)
return full_output
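# Minimal usage sketch (illustrative only): any argument list accepted by
# subprocess.Popen can be passed; the command below is just an example.
if __name__ == "__main__":
    try:
        print(_subprocess(["python", "--version"]))
    except RuntimeError as error:
        # Raised when the process finishes with a nonzero return code.
        print("Command failed: {}".format(error))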

870
pype/lib/avalon_context.py Normal file
View file

@ -0,0 +1,870 @@
import os
import json
import re
import logging
import collections
from avalon import io, pipeline
from ..api import config
import avalon.api
log = logging.getLogger("AvalonContext")
def is_latest(representation):
"""Return whether the representation is from latest version
Args:
representation (dict): The representation document from the database.
Returns:
bool: Whether the representation is of latest version.
"""
version = io.find_one({"_id": representation['parent']})
if version["type"] == "master_version":
return True
# Get highest version under the parent
highest_version = io.find_one({
"type": "version",
"parent": version["parent"]
}, sort=[("name", -1)], projection={"name": True})
if version['name'] == highest_version['name']:
return True
else:
return False
def any_outdated():
"""Return whether the current scene has any outdated content"""
checked = set()
host = avalon.api.registered_host()
for container in host.ls():
representation = container['representation']
if representation in checked:
continue
representation_doc = io.find_one(
{
"_id": io.ObjectId(representation),
"type": "representation"
},
projection={"parent": True}
)
if representation_doc and not is_latest(representation_doc):
return True
elif not representation_doc:
log.debug("Container '{objectName}' has an invalid "
"representation, it is missing in the "
"database".format(**container))
checked.add(representation)
return False
def get_asset(asset_name=None):
""" Returning asset document from database by its name.
Does not handle duplicate asset names!
Args:
asset_name (str)
Returns:
(MongoDB document)
"""
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
asset_document = io.find_one({
"name": asset_name,
"type": "asset"
})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
return asset_document
def get_hierarchy(asset_name=None):
"""
Obtain asset hierarchy path string from mongo db
Args:
asset_name (str)
Returns:
(string): asset hierarchy path
"""
if not asset_name:
asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"])
asset_entity = io.find_one({
"type": 'asset',
"name": asset_name
})
not_set = "PARENTS_NOT_SET"
entity_parents = asset_entity.get("data", {}).get("parents", not_set)
# If the entity already has parents then just return them joined
if entity_parents != not_set:
return "/".join(entity_parents)
# Else query parents through visualParents and store result to entity
hierarchy_items = []
entity = asset_entity
while True:
parent_id = entity.get("data", {}).get("visualParent")
if not parent_id:
break
entity = io.find_one({"_id": parent_id})
hierarchy_items.append(entity["name"])
# Add parents to entity data for next query
entity_data = asset_entity.get("data", {})
entity_data["parents"] = hierarchy_items
io.update_many(
{"_id": asset_entity["_id"]},
{"$set": {"data": entity_data}}
)
return "/".join(hierarchy_items)
def get_linked_assets(asset_entity):
"""Return linked assets for `asset_entity` from DB
Args:
asset_entity (dict): asset document from DB
Returns:
(list) of MongoDB documents
"""
inputs = asset_entity["data"].get("inputs", [])
inputs = [io.find_one({"_id": x}) for x in inputs]
return inputs
def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
"""Retrieve latest version from `asset_name`, and `subset_name`.
Do not use if you want to query more than 5 latest versions as this method
queries mongo 3 times per call. For those cases it is better to use
a more efficient way, e.g. with help of aggregations.
Args:
asset_name (str): Name of asset.
subset_name (str): Name of subset.
dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection
with Session.
project_name (str, optional): Find latest version in specific project.
Returns:
None: If asset, subset or version were not found.
dict: Last version document for the entered asset and subset.
"""
if not dbcon:
log.debug("Using `avalon.io` for query.")
dbcon = io
# Make sure is installed
io.install()
if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"):
# `avalon.io` has only `_database` attribute
# but `AvalonMongoDB` has `database`
database = getattr(dbcon, "database", dbcon._database)
collection = database[project_name]
else:
project_name = dbcon.Session.get("AVALON_PROJECT")
collection = dbcon
log.debug((
"Getting latest version for Project: \"{}\" Asset: \"{}\""
" and Subset: \"{}\""
).format(project_name, asset_name, subset_name))
# Query asset document id by asset name
asset_doc = collection.find_one(
{"type": "asset", "name": asset_name},
{"_id": True}
)
if not asset_doc:
log.info(
"Asset \"{}\" was not found in Database.".format(asset_name)
)
return None
subset_doc = collection.find_one(
{"type": "subset", "name": subset_name, "parent": asset_doc["_id"]},
{"_id": True}
)
if not subset_doc:
log.info(
"Subset \"{}\" was not found in Database.".format(subset_name)
)
return None
version_doc = collection.find_one(
{"type": "version", "parent": subset_doc["_id"]},
sort=[("name", -1)],
)
if not version_doc:
log.info(
"Subset \"{}\" does not have any version yet.".format(subset_name)
)
return None
return version_doc
class BuildWorkfile:
"""Wrapper for build workfile process.
Load representations for current context by build presets. Build presets
are host related, since each host has its own loaders.
"""
log = logging.getLogger("BuildWorkfile")
@staticmethod
def map_subsets_by_family(subsets):
subsets_by_family = collections.defaultdict(list)
for subset in subsets:
family = subset["data"].get("family")
if not family:
families = subset["data"].get("families")
if not families:
continue
family = families[0]
subsets_by_family[family].append(subset)
return subsets_by_family
def process(self):
"""Main method of this wrapper.
Building of the workfile is triggered and it is possible to implement
post processing of loaded containers if necessary.
"""
containers = self.build_workfile()
return containers
def build_workfile(self):
"""Prepares and load containers into workfile.
Loads latest versions of current and linked assets to workfile by logic
stored in Workfile profiles from presets. Profiles are set by host,
filtered by current task name and used by families.
Each family can specify representation names and loaders for
representations, and the first available and successfully loaded
representation is returned as a container.
At the end you'll get a list of loaded containers per asset.
loaded_containers [{
"asset_entity": <AssetEntity1>,
"containers": [<Container1>, <Container2>, ...]
}, {
"asset_entity": <AssetEntity2>,
"containers": [<Container3>, ...]
}, {
...
}]
"""
# Get current asset name and entity
current_asset_name = io.Session["AVALON_ASSET"]
current_asset_entity = io.find_one({
"type": "asset",
"name": current_asset_name
})
# Skip if asset was not found
if not current_asset_entity:
print("Asset entity with name `{}` was not found".format(
current_asset_name
))
return
# Prepare available loaders
loaders_by_name = {}
for loader in avalon.api.discover(avalon.api.Loader):
loader_name = loader.__name__
if loader_name in loaders_by_name:
raise KeyError(
"Duplicated loader name {0}!".format(loader_name)
)
loaders_by_name[loader_name] = loader
# Skip if there are no loaders
if not loaders_by_name:
self.log.warning("There are no registered loaders.")
return
# Get current task name
current_task_name = io.Session["AVALON_TASK"]
# Load workfile presets for task
self.build_presets = self.get_build_presets(current_task_name)
# Skip if there are no presets for the task
if not self.build_presets:
self.log.warning(
"Current task `{}` does not have any loading preset.".format(
current_task_name
)
)
return
# Get presets for loading current asset
current_context_profiles = self.build_presets.get("current_context")
# Get presets for loading linked assets
link_context_profiles = self.build_presets.get("linked_assets")
# Skip if both are missing
if not current_context_profiles and not link_context_profiles:
self.log.warning(
"Current task `{}` has empty loading preset.".format(
current_task_name
)
)
return
elif not current_context_profiles:
self.log.warning((
"Current task `{}` doesn't have any loading"
" preset for it's context."
).format(current_task_name))
elif not link_context_profiles:
self.log.warning((
"Current task `{}` doesn't have any"
"loading preset for it's linked assets."
).format(current_task_name))
# Prepare assets to process by workfile presets
assets = []
current_asset_id = None
if current_context_profiles:
# Add current asset entity if preset has current context set
assets.append(current_asset_entity)
current_asset_id = current_asset_entity["_id"]
if link_context_profiles:
# Find and append linked assets if preset has set linked mapping
link_assets = get_linked_assets(current_asset_entity)
if link_assets:
assets.extend(link_assets)
# Skip if there are no assets. This can happen if only linked mapping
# is set and there are no links for this asset.
if not assets:
self.log.warning(
"Asset does not have linked assets. Nothing to process."
)
return
# Prepare entities from database for assets
prepared_entities = self._collect_last_version_repres(assets)
# Load containers by prepared entities and presets
loaded_containers = []
# - Current asset containers
if current_asset_id and current_asset_id in prepared_entities:
current_context_data = prepared_entities.pop(current_asset_id)
loaded_data = self.load_containers_by_asset_data(
current_context_data, current_context_profiles, loaders_by_name
)
if loaded_data:
loaded_containers.append(loaded_data)
# - Linked assets container
for linked_asset_data in prepared_entities.values():
loaded_data = self.load_containers_by_asset_data(
linked_asset_data, link_context_profiles, loaders_by_name
)
if loaded_data:
loaded_containers.append(loaded_data)
# Return list of loaded containers
return loaded_containers
def get_build_presets(self, task_name):
""" Returns presets to build workfile for task name.
Presets are loaded for current project set in
io.Session["AVALON_PROJECT"], filtered by registered host
and entered task name.
Args:
task_name (str): Task name used for filtering build presets.
Returns:
(dict): preset per entered task name
"""
host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
presets = config.get_presets(io.Session["AVALON_PROJECT"])
# Get presets for host
build_presets = (
presets["plugins"]
.get(host_name, {})
.get("workfile_build")
)
if not build_presets:
return
task_name_low = task_name.lower()
per_task_preset = None
for preset in build_presets:
preset_tasks = preset.get("tasks") or []
preset_tasks_low = [task.lower() for task in preset_tasks]
if task_name_low in preset_tasks_low:
per_task_preset = preset
break
return per_task_preset
def _filter_build_profiles(self, build_profiles, loaders_by_name):
""" Filter build profiles by loaders and prepare process data.
Valid profile must have "loaders", "families" and "repre_names" keys
with valid values.
- "loaders" expects list of strings representing possible loaders.
- "families" expects list of strings for filtering
by main subset family.
- "repre_names" expects list of strings for filtering by
representation name.
Lowered "families" and "repre_names" are prepared for each profile with
all required keys.
Args:
build_profiles (dict): Profiles for building workfile.
loaders_by_name (dict): Available loaders per name.
Returns:
(list): Filtered and prepared profiles.
"""
valid_profiles = []
for profile in build_profiles:
# Check loaders
profile_loaders = profile.get("loaders")
if not profile_loaders:
self.log.warning((
"Build profile has missing loaders configuration: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Check if any loader is available
loaders_match = False
for loader_name in profile_loaders:
if loader_name in loaders_by_name:
loaders_match = True
break
if not loaders_match:
self.log.warning((
"All loaders from Build profile are not available: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Check families
profile_families = profile.get("families")
if not profile_families:
self.log.warning((
"Build profile is missing families configuration: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Check representation names
profile_repre_names = profile.get("repre_names")
if not profile_repre_names:
self.log.warning((
"Build profile is missing"
" representation names filtering: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Prepare lowered families and representation names
profile["families_lowered"] = [
fam.lower() for fam in profile_families
]
profile["repre_names_lowered"] = [
name.lower() for name in profile_repre_names
]
valid_profiles.append(profile)
return valid_profiles
def _prepare_profile_for_subsets(self, subsets, profiles):
"""Select profile for each subset byt it's data.
Profiles are filtered for each subset individually.
Profile is filtered by subset's family, optionally by name regex and
representation names set in profile.
It is possible that no matching profile is found for a subset; in that
case the subset is skipped, and it is possible that none of the subsets
have a matching profile.
Args:
subsets (list): Subset documents.
profiles (dict): Build profiles.
Returns:
(dict) Profile by subset's id.
"""
# Prepare subsets
subsets_by_family = self.map_subsets_by_family(subsets)
profiles_per_subset_id = {}
for family, subsets in subsets_by_family.items():
family_low = family.lower()
for profile in profiles:
# Skip profile if does not contain family
if family_low not in profile["families_lowered"]:
continue
# Precompile name filters as regexes
profile_regexes = profile.get("subset_name_filters")
if profile_regexes:
_profile_regexes = []
for regex in profile_regexes:
_profile_regexes.append(re.compile(regex))
profile_regexes = _profile_regexes
# TODO prepare regex compilation
for subset in subsets:
# Verify regex filtering (optional)
if profile_regexes:
valid = False
for pattern in profile_regexes:
if re.match(pattern, subset["name"]):
valid = True
break
if not valid:
continue
profiles_per_subset_id[subset["_id"]] = profile
# break profiles loop on finding the first matching profile
break
return profiles_per_subset_id
def load_containers_by_asset_data(
self, asset_entity_data, build_profiles, loaders_by_name
):
"""Load containers for entered asset entity by Build profiles.
Args:
asset_entity_data (dict): Prepared data with subsets, last version
and representations for specific asset.
build_profiles (dict): Build profiles.
loaders_by_name (dict): Available loaders per name.
Returns:
(dict) Output contains asset document and loaded containers.
"""
# Make sure all data are not empty
if not asset_entity_data or not build_profiles or not loaders_by_name:
return
asset_entity = asset_entity_data["asset_entity"]
valid_profiles = self._filter_build_profiles(
build_profiles, loaders_by_name
)
if not valid_profiles:
self.log.warning(
"There are not valid Workfile profiles. Skipping process."
)
return
self.log.debug("Valid Workfile profiles: {}".format(valid_profiles))
subsets_by_id = {}
version_by_subset_id = {}
repres_by_version_id = {}
for subset_id, in_data in asset_entity_data["subsets"].items():
subset_entity = in_data["subset_entity"]
subsets_by_id[subset_entity["_id"]] = subset_entity
version_data = in_data["version"]
version_entity = version_data["version_entity"]
version_by_subset_id[subset_id] = version_entity
repres_by_version_id[version_entity["_id"]] = (
version_data["repres"]
)
if not subsets_by_id:
self.log.warning("There are not subsets for asset {0}".format(
asset_entity["name"]
))
return
profiles_per_subset_id = self._prepare_profile_for_subsets(
subsets_by_id.values(), valid_profiles
)
if not profiles_per_subset_id:
self.log.warning("There are not valid subsets.")
return
valid_repres_by_subset_id = collections.defaultdict(list)
for subset_id, profile in profiles_per_subset_id.items():
profile_repre_names = profile["repre_names_lowered"]
version_entity = version_by_subset_id[subset_id]
version_id = version_entity["_id"]
repres = repres_by_version_id[version_id]
for repre in repres:
repre_name_low = repre["name"].lower()
if repre_name_low in profile_repre_names:
valid_repres_by_subset_id[subset_id].append(repre)
# DEBUG message
msg = "Valid representations for Asset: `{}`".format(
asset_entity["name"]
)
for subset_id, repres in valid_repres_by_subset_id.items():
subset = subsets_by_id[subset_id]
msg += "\n# Subset Name/ID: `{}`/{}".format(
subset["name"], subset_id
)
for repre in repres:
msg += "\n## Repre name: `{}`".format(repre["name"])
self.log.debug(msg)
containers = self._load_containers(
valid_repres_by_subset_id, subsets_by_id,
profiles_per_subset_id, loaders_by_name
)
return {
"asset_entity": asset_entity,
"containers": containers
}
def _load_containers(
self, repres_by_subset_id, subsets_by_id,
profiles_per_subset_id, loaders_by_name
):
"""Real load by collected data happens here.
Loading of representations per subset happens here. Each subset can
load one representation. Loading is tried in a specific order.
Representations are tried by the names defined in configuration.
If a subset has a representation matching the representation name, each
loader is tried until one is successful. If none of them succeeds,
the next representation name is tried.
The subset processing loop ends when any representation is loaded or
all matching representations were already tried.
Args:
repres_by_subset_id (dict): Available representations mapped
by their parent (subset) id.
subsets_by_id (dict): Subset documents mapped by their id.
profiles_per_subset_id (dict): Build profiles mapped by subset id.
loaders_by_name (dict): Available loaders per name.
Returns:
(list) Objects of loaded containers.
"""
loaded_containers = []
# Get subset id order from build presets.
build_presets = self.build_presets.get("current_context", [])
build_presets += self.build_presets.get("linked_assets", [])
subset_ids_ordered = []
for preset in build_presets:
for preset_family in preset["families"]:
for id, subset in subsets_by_id.items():
if preset_family not in subset["data"].get("families", []):
continue
subset_ids_ordered.append(id)
# Order representations from subsets.
print("repres_by_subset_id", repres_by_subset_id)
representations_ordered = []
representations = []
for id in subset_ids_ordered:
for subset_id, repres in repres_by_subset_id.items():
if repres in representations:
continue
if id == subset_id:
representations_ordered.append((subset_id, repres))
representations.append(repres)
print("representations", representations)
# Load ordered reprensentations.
for subset_id, repres in representations_ordered:
subset_name = subsets_by_id[subset_id]["name"]
profile = profiles_per_subset_id[subset_id]
loaders_last_idx = len(profile["loaders"]) - 1
repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
repre_by_low_name = {
repre["name"].lower(): repre for repre in repres
}
is_loaded = False
for repre_name_idx, profile_repre_name in enumerate(
profile["repre_names_lowered"]
):
# Break iteration if representation was already loaded
if is_loaded:
break
repre = repre_by_low_name.get(profile_repre_name)
if not repre:
continue
for loader_idx, loader_name in enumerate(profile["loaders"]):
if is_loaded:
break
loader = loaders_by_name.get(loader_name)
if not loader:
continue
try:
container = avalon.api.load(
loader,
repre["_id"],
name=subset_name
)
loaded_containers.append(container)
is_loaded = True
except Exception as exc:
if exc == pipeline.IncompatibleLoaderError:
self.log.info((
"Loader `{}` is not compatible with"
" representation `{}`"
).format(loader_name, repre["name"]))
else:
self.log.error(
"Unexpected error happened during loading",
exc_info=True
)
msg = "Loading failed."
if loader_idx < loaders_last_idx:
msg += " Trying next loader."
elif repre_name_idx < repre_names_last_idx:
msg += (
" Loading of subset `{}` was not successful."
).format(subset_name)
else:
msg += " Trying next representation."
self.log.info(msg)
return loaded_containers
def _collect_last_version_repres(self, asset_entities):
"""Collect subsets, versions and representations for asset_entities.
Args:
asset_entities (list): Asset entities for which we want to find data
Returns:
(dict): collected entities
Example output:
```
{
{Asset ID}: {
"asset_entity": <AssetEntity>,
"subsets": {
{Subset ID}: {
"subset_entity": <SubsetEntity>,
"version": {
"version_entity": <VersionEntity>,
"repres": [
<RepreEntity1>, <RepreEntity2>, ...
]
}
},
...
}
},
...
}
output[asset_id]["subsets"][subset_id]["version"]["repres"]
```
"""
if not asset_entities:
return {}
asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities}
subsets = list(io.find({
"type": "subset",
"parent": {"$in": asset_entity_by_ids.keys()}
}))
subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
sorted_versions = list(io.find({
"type": "version",
"parent": {"$in": subset_entity_by_ids.keys()}
}).sort("name", -1))
subset_id_with_latest_version = []
last_versions_by_id = {}
for version in sorted_versions:
subset_id = version["parent"]
if subset_id in subset_id_with_latest_version:
continue
subset_id_with_latest_version.append(subset_id)
last_versions_by_id[version["_id"]] = version
repres = io.find({
"type": "representation",
"parent": {"$in": last_versions_by_id.keys()}
})
output = {}
for repre in repres:
version_id = repre["parent"]
version = last_versions_by_id[version_id]
subset_id = version["parent"]
subset = subset_entity_by_ids[subset_id]
asset_id = subset["parent"]
asset = asset_entity_by_ids[asset_id]
if asset_id not in output:
output[asset_id] = {
"asset_entity": asset,
"subsets": {}
}
if subset_id not in output[asset_id]["subsets"]:
output[asset_id]["subsets"][subset_id] = {
"subset_entity": subset,
"version": {
"version_entity": version,
"repres": []
}
}
output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
repre
)
return output
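# Minimal usage sketch (asset and subset names below are placeholders; an
# installed avalon session with AVALON_* context is assumed):
if __name__ == "__main__":
    io.install()
    print(get_hierarchy(get_asset("myAsset")["name"]))
    print(get_latest_version("myAsset", "modelMain"))
    # Build the workfile for the current context from presets.
    print(BuildWorkfile().process())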

26
pype/lib/deprecated.py Normal file
View file

@ -0,0 +1,26 @@
import os
from avalon import io
def get_avalon_database():
"""Mongo database used in avalon's io.
* Function is not used in pype 3.0 where it was replaced by usage of
AvalonMongoDB.
"""
if io._database is None:
set_io_database()
return io._database
def set_io_database():
"""Set avalon's io context with environemnts.
* Function is not used in pype 3.0 where it was replaced by usage of
AvalonMongoDB.
"""
required_keys = ["AVALON_PROJECT", "AVALON_ASSET", "AVALON_SILO"]
for key in required_keys:
os.environ[key] = os.environ.get(key, "")
io.install()

46
pype/lib/ffmpeg_utils.py Normal file
View file

@ -0,0 +1,46 @@
import logging
import json
import subprocess
from . import get_ffmpeg_tool_path
log = logging.getLogger("FFmpeg utils")
def ffprobe_streams(path_to_file, logger=None):
"""Load streams from entered filepath via ffprobe.
Args:
path_to_file (str): absolute path
logger (logging.getLogger): injected logger, if empty new is created
"""
if not logger:
logger = log
logger.info(
"Getting information about input \"{}\".".format(path_to_file)
)
args = [
"\"{}\"".format(get_ffmpeg_tool_path("ffprobe")),
"-v quiet",
"-print_format json",
"-show_format",
"-show_streams",
"\"{}\"".format(path_to_file)
]
command = " ".join(args)
logger.debug("FFprobe command: \"{}\"".format(command))
popen = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
popen_stdout, popen_stderr = popen.communicate()
if popen_stdout:
logger.debug("ffprobe stdout: {}".format(popen_stdout))
if popen_stderr:
logger.debug("ffprobe stderr: {}".format(popen_stderr))
return json.loads(popen_stdout)["streams"]
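# Minimal usage sketch (the media path is a placeholder; FFMPEG_PATH has to
# point to a folder that contains ffprobe):
if __name__ == "__main__":
    for stream in ffprobe_streams("/path/to/review.mov"):
        print(stream.get("codec_type"), stream.get("codec_name"))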

71
pype/lib/hooks.py Normal file
View file

@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
"""Package containing code for handling hooks."""
import os
import sys
import types
import logging
from abc import ABCMeta, abstractmethod
import six
log = logging.getLogger(__name__)
@six.add_metaclass(ABCMeta)
class PypeHook:
"""Abstract class from all hooks should inherit."""
def __init__(self):
"""Constructor."""
pass
@abstractmethod
def execute(self, *args, **kwargs):
"""Abstract execute method."""
pass
def execute_hook(hook, *args, **kwargs):
"""Execute hook with arguments.
This will load hook file, instantiate class and call
:meth:`PypeHook.execute` method on it. Hook must be in a form::
$PYPE_SETUP_PATH/repos/pype/path/to/hook.py/HookClass
This will load `hook.py`, instantiate HookClass and then call
`execute(*args, **kwargs)` on it.
Args:
hook (str): path to hook class.
"""
class_name = hook.split("/")[-1]
abspath = os.path.join(os.getenv('PYPE_SETUP_PATH'),
'repos', 'pype', *hook.split("/")[:-1])
mod_name, mod_ext = os.path.splitext(os.path.basename(abspath))
if not mod_ext == ".py":
return False
module = types.ModuleType(mod_name)
module.__file__ = abspath
try:
with open(abspath) as f:
six.exec_(f.read(), module.__dict__)
sys.modules[abspath] = module
except Exception as exp:
log.exception("loading hook failed: {}".format(exp),
exc_info=True)
return False
obj = getattr(module, class_name)
hook_obj = obj()
ret_val = hook_obj.execute(*args, **kwargs)
return ret_val
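# Minimal usage sketch: the hook path below is a placeholder following the
# documented "<path>/hook.py/HookClass" form; PYPE_SETUP_PATH has to be set.
if __name__ == "__main__":
    result = execute_hook("pype/hooks/global/example_hook.py/ExampleHook")
    print("Hook returned: {}".format(result))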

181
pype/lib/path_tools.py Normal file
View file

@ -0,0 +1,181 @@
import os
import re
import logging
log = logging.getLogger(__name__)
def get_paths_from_environ(env_key, return_first=False):
"""Return existing paths from specific envirnment variable.
Args:
env_key (str): Environment key in which to look for paths.
return_first (bool): Return only the first found path on `True`,
list of all existing paths on `False`.
Returns:
(str or list): First existing path or list of all existing paths.
Difference when none of the paths exists:
- when `return_first` is set to `False` the function returns an empty list.
- when `return_first` is set to `True` the function returns `None`.
"""
existing_paths = []
paths = os.environ.get(env_key) or ""
path_items = paths.split(os.pathsep)
for path in path_items:
# Skip empty string
if not path:
continue
# Normalize path
path = os.path.normpath(path)
# Check if path exists
if os.path.exists(path):
# Return path if `return_first` is set to True
if return_first:
return path
# Store path
existing_paths.append(path)
# Return None if none of paths exists
if return_first:
return None
# Return all existing paths from environment variable
return existing_paths
def get_ffmpeg_tool_path(tool="ffmpeg"):
"""Find path to ffmpeg tool in FFMPEG_PATH paths.
Function looks for the tool in paths set in the FFMPEG_PATH environment
variable. If the tool exists then its full path is returned.
Args:
tool (string): tool name
Returns:
(str): full path to the tool, or the tool name itself when the tool
path was not found (FFmpeg may then still be available via the PATH
environment variable)
"""
dir_paths = get_paths_from_environ("FFMPEG_PATH")
for dir_path in dir_paths:
for file_name in os.listdir(dir_path):
base, _ext = os.path.splitext(file_name)
if base.lower() == tool.lower():
return os.path.join(dir_path, tool)
return tool
def _rreplace(s, a, b, n=1):
"""Replace a with b in string s from right side n times."""
return b.join(s.rsplit(a, n))
def version_up(filepath):
"""Version up filepath to a new non-existing version.
Parses for a version identifier like `_v001` or `.v001`.
When no version is present, `_v001` is appended as a suffix.
Args:
filepath (str): full url
Returns:
(str): filepath with increased version number
"""
dirname = os.path.dirname(filepath)
basename, ext = os.path.splitext(os.path.basename(filepath))
regex = r"[._]v\d+"
matches = re.findall(regex, str(basename), re.IGNORECASE)
if not matches:
log.info("Creating version...")
new_label = "_v{version:03d}".format(version=1)
new_basename = "{}{}".format(basename, new_label)
else:
label = matches[-1]
version = re.search(r"\d+", label).group()
padding = len(version)
new_version = int(version) + 1
new_version = '{version:0{padding}d}'.format(version=new_version,
padding=padding)
new_label = label.replace(version, new_version, 1)
new_basename = _rreplace(basename, label, new_label)
if not new_basename.endswith(new_label):
index = (new_basename.find(new_label))
index += len(new_label)
new_basename = new_basename[:index]
new_filename = "{}{}".format(new_basename, ext)
new_filename = os.path.join(dirname, new_filename)
new_filename = os.path.normpath(new_filename)
if new_filename == filepath:
raise RuntimeError("Created path is the same as current file,"
"this is a bug")
for file in os.listdir(dirname):
if file.endswith(ext) and file.startswith(new_basename):
log.info("Skipping existing version %s" % new_label)
return version_up(new_filename)
log.info("New version %s" % new_label)
return new_filename
def get_version_from_path(file):
"""Find version number in file path string.
Args:
file (string): file path
Returns:
str: version number as a string (e.g. '001')
"""
pattern = re.compile(r"[\._]v([0-9]+)", re.IGNORECASE)
try:
return pattern.findall(file)[0]
except IndexError:
log.error(
"templates:get_version_from_workfile:"
"`{}` missing version string."
"Example `v004`".format(file)
)
def get_last_version_from_path(path_dir, filter):
"""Find last version of given directory content.
Args:
path_dir (string): directory path
filter (list): list of strings used as file name filter
Returns:
string: file name with last version
Example:
last_version_file = get_last_version_from_path(
"/project/shots/shot01/work", ["shot01", "compositing", "nk"])
"""
assert os.path.isdir(path_dir), "`path_dir` argument needs to be directory"
assert isinstance(filter, list) and (
len(filter) != 0), "`filter` argument needs to be list and not empty"
filtered_files = list()
# form regex for filtering
pattern = r".*".join(filter)
for file in os.listdir(path_dir):
if not re.findall(pattern, file):
continue
filtered_files.append(file)
if filtered_files:
filtered_files.sort()
return filtered_files[-1]
return None
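# Minimal usage sketch with placeholder paths. Note that version_up lists the
# parent directory to skip existing versions, so that directory has to exist.
if __name__ == "__main__":
    print(get_ffmpeg_tool_path("ffprobe"))
    print(get_version_from_path("shot01_compositing_v012.nk"))  # -> "012"
    print(version_up("/projects/shot01/work/shot01_compositing_v012.nk"))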

80
pype/lib/plugin_tools.py Normal file
View file

@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
"""Avalon/Pyblish plugin tools."""
import os
import inspect
import logging
from ..api import config
log = logging.getLogger(__name__)
def filter_pyblish_plugins(plugins):
"""Filter pyblish plugins by presets.
This serves as a plugin filter / modifier for pyblish. It will load plugin
definitions from presets and filter those needed to be excluded.
Args:
plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base`
`discover()` method.
"""
from pyblish import api
host = api.current_host()
presets = config.get_presets().get('plugins', {})
# iterate over plugins
for plugin in plugins[:]:
# skip if there are no presets to process
if not presets:
continue
file = os.path.normpath(inspect.getsourcefile(plugin))
file = os.path.normpath(file)
# host determined from path
host_from_file = file.split(os.path.sep)[-3:-2][0]
plugin_kind = file.split(os.path.sep)[-2:-1][0]
try:
config_data = presets[host]["publish"][plugin.__name__]
except KeyError:
try:
config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501
except KeyError:
continue
for option, value in config_data.items():
if option == "enabled" and value is False:
log.info('removing plugin {}'.format(plugin.__name__))
plugins.remove(plugin)
else:
log.info('setting {}:{} on plugin {}'.format(
option, value, plugin.__name__))
setattr(plugin, option, value)
def source_hash(filepath, *args):
"""Generate simple identifier for a source file.
This is used to identify whether a source file has previously been
processed into the pipeline, e.g. a texture.
The hash is based on source filepath, modification time and file size.
This is only used to identify whether a specific source file was already
published before from the same location with the same modification date.
We opt to do it this way as opposed to the Avalanche C4 hash as this is much
faster and predictable enough for all our production use cases.
Args:
filepath (str): The source file path.
You can specify additional arguments in the function
to allow for specific 'processing' values to be included.
"""
# We replace dots with comma because . cannot be a key in a pymongo dict.
file_name = os.path.basename(filepath)
time = str(os.path.getmtime(filepath))
size = str(os.path.getsize(filepath))
return "|".join([file_name, time, size] + list(args)).replace(".", ",")

View file

@ -0,0 +1,77 @@
Synchronization server
---------------------
This server is scheduled at the start of Pype; it periodically checks the
avalon DB for 'representation' records which have in their files.sites a
record with name 'gdrive' (or any other site name from 'gdrive.json')
without the field 'created_dt'.
This denotes that this representation should be synced to GDrive.
Records like these are created by IntegrateNew process based on configuration.
Leave 'config.json.remote_site' empty for not synchronizing at all.
One provider could have multiple sites. (GDrive implementation is 'a provider',
target folder on it is 'a site')
Quick HOWTOs:
-------------
I want to start syncing my newly published files:
------------------------------------------------
Get credentials for the service account, share the target folder on GDrive with it
Set path to the stored credentials file in gdrive.json
Set name of site and root folder in gdrive.json
Update config.json/remote_site to the name of the site you set in the previous step
Start Pype and publish
My published file is not syncing:
--------------------------------
Check that the representation record contains for all 'files.sites' a skeleton
in the format: {name: "MY_CONFIGURED_REMOTE_SITE"}
Check that the record doesn't already have 'created_dt' filled. That would
denote that the file was synced but someone might have removed it on the
remote site.
If the record contains the field "error", check that the "tries" field doesn't
contain the same value as the threshold in config.json.retry_cnt. If it does,
fix the problem mentioned in the 'error' field and delete the 'tries' field.
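A possible mongo shell sketch for clearing these markers so the file is retried
(project and site names are placeholders, adjust to your configuration):
db.getCollection('MY_PROJECT').update({type:"representation"},
{$unset:{"files.$[].sites.MY_CONFIGURED_REMOTE_SITE.tries" : "",
"files.$[].sites.MY_CONFIGURED_REMOTE_SITE.error" : ""}}, false, true)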
I want to sync my already published files:
-----------------------------------------
Configure your Pype for syncing (see first section of Howtos).
Manually add skeleton {name: "MY_CONFIGURED_REMOTE_SITE"} to all
representation.files.sites:
db.getCollection('MY_PROJECT').update({type:"representation"},
{$set:{"files.$[].sites.MY_CONFIGURED_REMOTE_SITE" : {}}}, true, true)
Needed configuration:
--------------------
pype-config/presets/config.json:
"local_id": "local_0", -- identifier of user pype
"retry_cnt": 3, -- how many times try to synch file in case of error
"loop_delay": 60, -- how many seconds between sync loops
"active_site": "studio", -- which site user current, 'studio' by default,
could by same as 'local_id' if user is working
from home without connection to studio
infrastructure
"remote_site": "gdrive" -- key for site to synchronize to. Must match to site
configured in 'gdrive.json'.
Used in IntegrateNew to prepare skeleton for
syncing in the representation record.
Leave empty if no syncing is wanted.
This is a general configuration, 'local_id', 'active_site' and 'remote_site'
will be set and changed by some GUI in the future.
pype-config/presets/gdrive.json:
"gdrive": { - site name, must be unique
"credentials_url": "/my_secret_folder/credentials.json",
-- path to credentials for service account
"root": { -- "root": "/My Drive" in simple scenario, config here for
-- multiroot projects
"root_one": "/My Drive/work_folder",
"root_tow": "/My Drive/publish_folder"
}
}

View file

@ -0,0 +1,5 @@
from .sync_server import SyncServer
def tray_init(tray_widget, main_widget):
return SyncServer()

View file

@ -0,0 +1,65 @@
from abc import ABCMeta, abstractmethod
class AbstractProvider(metaclass=ABCMeta):
@abstractmethod
def is_active(self):
"""
Returns True if provider is activated, e.g. has working credentials.
Returns:
(boolean)
"""
@abstractmethod
def upload_file(self, source_path, target_path, overwrite=True):
"""
Copy file from 'source_path' to 'target_path' on provider.
Use 'overwrite' boolean to rewrite existing file on provider
Args:
source_path (string): absolute path on local system
target_path (string): absolute path on provider (GDrive etc.)
overwrite (boolean): True to overwrite an existing file
Returns:
(string) file_id of created file, raises exception
"""
pass
@abstractmethod
def download_file(self, source_path, local_path):
"""
Download file from provider into local system
Args:
source_path (string): absolute path on provider
local_path (string): absolute path on local
Returns:
None
"""
pass
@abstractmethod
def delete_file(self, path):
"""
Deletes file from 'path'. Expects path to specific file.
Args:
path (string): absolute path to particular file
Returns:
None
"""
pass
@abstractmethod
def list_folder(self, folder_path):
"""
List all files and subfolders of particular path non-recursively.
Args:
folder_path (string): absolute path on provider
Returns:
(list)
"""
pass
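# A minimal sketch of a concrete provider (illustrative only, not an existing
# Pype provider): a local-disk "site" that simply copies files around.
import os
import shutil


class LocalDriveHandler(AbstractProvider):
    def is_active(self):
        return True

    def upload_file(self, source_path, target_path, overwrite=True):
        if os.path.exists(target_path) and not overwrite:
            raise FileExistsError("File already exists, use 'overwrite'")
        shutil.copy(source_path, target_path)
        return target_path

    def download_file(self, source_path, local_path):
        shutil.copy(source_path, local_path)

    def delete_file(self, path):
        os.remove(path)

    def list_folder(self, folder_path):
        return os.listdir(folder_path)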

View file

@ -0,0 +1,629 @@
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
import google.oauth2.service_account as service_account
from googleapiclient import errors
from .abstract_provider import AbstractProvider
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from pype.api import Logger
from pype.lib import timeit
from pype.api import config
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly',
'https://www.googleapis.com/auth/drive.file',
'https://www.googleapis.com/auth/drive.readonly'] # for write|delete
log = Logger().get_logger("SyncServer")
class GDriveHandler(AbstractProvider):
"""
Implementation of Google Drive API.
As the GD API doesn't have a real folder structure, a 'tree' in-memory
structure is built in the constructor to map folder paths to folder ids,
which are used in the API. Building of this tree might be expensive and
slow and should be run only when necessary. Currently it is set to
lazy creation, created only after the first call when necessary.
Configuration for provider is in pype-config/presets/gdrive.json
Example of config:
"gdrive": { - site name
"credentials_url": "/my_secret_folder/credentials.json",
"root": { - could be "root": "/My Drive" for single root
"root_one": "/My Drive",
"root_two": "/My Drive/different_folder"
}
}
"""
FOLDER_STR = 'application/vnd.google-apps.folder'
MY_DRIVE_STR = 'My Drive' # name of root folder of regular Google drive
def __init__(self, site_name, tree=None):
self.presets = None
self.active = False
self.site_name = site_name
self.presets = self.get_presets().get(site_name, None)
if not self.presets:
log.info("Sync Server: There are no presets for {}.".
format(site_name))
return
if not os.path.exists(self.presets["credentials_url"]):
log.info("Sync Server: No credentials for Gdrive provider! ")
return
self.service = self._get_gd_service()
self.root = self._prepare_root_info()
self._tree = tree
self.active = True
def _get_gd_service(self):
"""
Authorize client with 'credentials.json', uses service account.
Service account needs to have the target folder shared with it.
Produces service that communicates with GDrive API.
Returns:
service object used for GDrive API calls
"""
creds = service_account.Credentials.from_service_account_file(
self.presets["credentials_url"],
scopes=SCOPES)
service = build('drive', 'v3',
credentials=creds, cache_discovery=False)
return service
def _prepare_root_info(self):
"""
Prepare info about roots and their folder ids from 'presets'.
Configuration might be for single or multiroot projects.
Regular My Drive and Shared drives are implemented, their root
folder ids need to be queried in slightly different way.
Returns:
(dict) of dicts where root folders are keys
"""
roots = {}
for path in self.get_roots_config().values():
if self.MY_DRIVE_STR in path:
roots[self.MY_DRIVE_STR] = self.service.files()\
.get(fileId='root').execute()
else:
shared_drives = []
page_token = None
while True:
response = self.service.drives().list(
pageSize=100,
pageToken=page_token).execute()
shared_drives.extend(response.get('drives', []))
page_token = response.get('nextPageToken', None)
if page_token is None:
break
folders = path.split('/')
if len(folders) < 2:
raise ValueError("Wrong root folder definition {}".
format(path))
for shared_drive in shared_drives:
if folders[1] in shared_drive["name"]:
roots[shared_drive["name"]] = {
"name": shared_drive["name"],
"id": shared_drive["id"]}
if self.MY_DRIVE_STR not in roots: # add My Drive always
roots[self.MY_DRIVE_STR] = self.service.files() \
.get(fileId='root').execute()
return roots
@timeit
def _build_tree(self, folders):
"""
Create in-memory structure resolving paths to folder id as
recursive querying might be slower.
Initialized at the time of class initialization.
Maybe it should be persisted.
Tree is structure of path to id:
'/ROOT': {'id': '1234567'}
'/ROOT/PROJECT_FOLDER': {'id':'222222'}
'/ROOT/PROJECT_FOLDER/Assets': {'id': '3434545'}
Args:
folders (list): list of dictionaries with folder metadata
Returns:
(dictionary) path as a key, folder id as a value
"""
log.debug("build_tree len {}".format(len(folders)))
root_ids = []
default_root_id = None
tree = {}
ending_by = {}
for root_name, root in self.root.items(): # might be multiple roots
if root["id"] not in root_ids:
tree["/" + root_name] = {"id": root["id"]}
ending_by[root["id"]] = "/" + root_name
root_ids.append(root["id"])
if self.MY_DRIVE_STR == root_name:
default_root_id = root["id"]
no_parents_yet = {}
while folders:
folder = folders.pop(0)
parents = folder.get("parents", [])
# weird cases, shared folders, etc, parent under root
if not parents:
parent = default_root_id
else:
parent = parents[0]
if folder["id"] in root_ids: # do not process root
continue
if parent in ending_by:
path_key = ending_by[parent] + "/" + folder["name"]
ending_by[folder["id"]] = path_key
tree[path_key] = {"id": folder["id"]}
else:
no_parents_yet.setdefault(parent, []).append((folder["id"],
folder["name"]))
loop_cnt = 0
# break if looped more than X times - safety against infinite loop
while no_parents_yet and loop_cnt < 20:
keys = list(no_parents_yet.keys())
for parent in keys:
if parent in ending_by.keys():
subfolders = no_parents_yet.pop(parent)
for folder_id, folder_name in subfolders:
path_key = ending_by[parent] + "/" + folder_name
ending_by[folder_id] = path_key
tree[path_key] = {"id": folder_id}
loop_cnt += 1
if len(no_parents_yet) > 0:
log.debug("Some folders path are not resolved {}".
format(no_parents_yet))
log.debug("Remove deleted folders from trash.")
return tree
def is_active(self):
"""
Returns True if provider is activated, e.g. has working credentials.
Returns:
(boolean)
"""
return self.active
def get_tree(self):
"""
Building of the folder tree could be potentially expensive, so the
constructor provides an argument that can inject a previously created
tree.
Tree structure must be handled in thread safe fashion!
Returns:
(dictionary) - url to id mapping
"""
if not self._tree:
self._tree = self._build_tree(self.list_folders())
return self._tree
def get_roots_config(self):
"""
Returns roots value from presets. It handles multi-root
configurations. Config may be a simple key value, or a dictionary.
Examples:
"root": "/My Drive"
OR
"root": {"root_ONE": "value", "root_TWO":"value}
Returns:
(dict) - {"root": {"root": "/My Drive"}}
OR
{"root": {"root_ONE": "value", "root_TWO":"value}}
Format is important for usage of python's format(**roots) approach
"""
roots = self.presets["root"]
if isinstance(roots, str):
roots = {"root": roots}
return roots
def create_folder(self, path):
"""
Create all nonexistent folders and subfolders in 'path'.
Updates self._tree structure with new paths
Args:
path (string): absolute path, starts with GDrive root,
without filename
Returns:
(string) folder id of lowest subfolder from 'path'
"""
folder_id = self.folder_path_exists(path)
if folder_id:
return folder_id
parts = path.split('/')
folders_to_create = []
while parts:
folders_to_create.append(parts.pop())
path = '/'.join(parts)
path = path.strip()
folder_id = self.folder_path_exists(path) # lowest common path
if folder_id:
while folders_to_create:
new_folder_name = folders_to_create.pop()
folder_metadata = {
'name': new_folder_name,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [folder_id]
}
folder = self.service.files().create(
body=folder_metadata,
supportsAllDrives=True,
fields='id').execute()
folder_id = folder["id"]
new_path_key = path + '/' + new_folder_name
self.get_tree()[new_path_key] = {"id": folder_id}
path = new_path_key
return folder_id
def upload_file(self, source_path, path, overwrite=False):
"""
Uploads single file from 'source_path' to destination 'path'.
It creates all folders on the path if they do not exist.
Args:
source_path (string):
path (string): absolute path with or without name of the file
overwrite (boolean): replace existing file
Returns:
(string) file_id of created/modified file ,
throws FileExistsError, FileNotFoundError exceptions
"""
if not os.path.isfile(source_path):
raise FileNotFoundError("Source file {} doesn't exist."
.format(source_path))
root, ext = os.path.splitext(path)
if ext:
# full path
target_name = os.path.basename(path)
path = os.path.dirname(path)
else:
target_name = os.path.basename(source_path)
file = self.file_path_exists(path + "/" + target_name)
if file and not overwrite:
raise FileExistsError("File already exists, "
"use 'overwrite' argument")
folder_id = self.folder_path_exists(path)
if not folder_id:
raise NotADirectoryError("Folder {} doesn't exists".format(path))
file_metadata = {
'name': target_name
}
media = MediaFileUpload(source_path,
mimetype='application/octet-stream',
resumable=True)
try:
if not file:
# update doesn't like parent
file_metadata['parents'] = [folder_id]
file = self.service.files().create(body=file_metadata,
supportsAllDrives=True,
media_body=media,
fields='id').execute()
else:
file = self.service.files().update(fileId=file["id"],
body=file_metadata,
supportsAllDrives=True,
media_body=media,
fields='id').execute()
except errors.HttpError as ex:
if ex.resp['status'] == '404':
return False
if ex.resp['status'] == '403':
# real permission issue
if 'has not granted' in ex._get_reason().strip():
raise PermissionError(ex._get_reason().strip())
log.warning("Forbidden received, hit quota. "
"Injecting 60s delay.")
import time
time.sleep(60)
return False
raise
return file["id"]
def download_file(self, source_path, local_path, overwrite=False):
"""
Downloads single file from 'source_path' (remote) to 'local_path'.
It creates all folders on the local_path if they do not exist.
By default existing file on 'local_path' will trigger an exception
Args:
source_path (string): absolute path on provider
local_path (string): absolute path with or without name of the file
overwrite (boolean): replace existing file
Returns:
(string) file_id of created/modified file ,
throws FileExistsError, FileNotFoundError exceptions
"""
remote_file = self.file_path_exists(source_path)
if not remote_file:
raise FileNotFoundError("Source file {} doesn't exist."
.format(source_path))
root, ext = os.path.splitext(local_path)
if ext:
# full path with file name
target_name = os.path.basename(local_path)
local_path = os.path.dirname(local_path)
else: # just folder, get file name from source
target_name = os.path.basename(source_path)
file = os.path.isfile(local_path + "/" + target_name)
if file and not overwrite:
raise FileExistsError("File already exists, "
"use 'overwrite' argument")
request = self.service.files().get_media(fileId=remote_file["id"],
supportsAllDrives=True)
with open(local_path + "/" + target_name, "wb") as fh:
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
return target_name
def delete_folder(self, path, force=False):
"""
Deletes folder on GDrive. Checks if the folder contains any files or
subfolders. In that case it raises an error, which can be overridden by
the 'force' argument.
In that case it deletes the folder on 'path' and all its children.
Args:
path (string): absolute path on GDrive
force (boolean): delete even if children in folder
Returns:
None
"""
folder_id = self.folder_path_exists(path)
if not folder_id:
raise ValueError("Not valid folder path {}".format(path))
fields = 'nextPageToken, files(id, name, parents)'
q = self._handle_q("'{}' in parents ".format(folder_id))
response = self.service.files().list(
q=q,
corpora="allDrives",
includeItemsFromAllDrives=True,
supportsAllDrives=True,
pageSize='1',
fields=fields).execute()
children = response.get('files', [])
if children and not force:
raise ValueError("Folder {} is not empty, use 'force'".
format(path))
self.service.files().delete(fileId=folder_id,
supportsAllDrives=True).execute()
def delete_file(self, path):
"""
Deletes file from 'path'. Expects path to specific file.
Args:
path: absolute path to particular file
Returns:
None
"""
file = self.file_path_exists(path)
if not file:
raise ValueError("File {} doesn't exist")
self.service.files().delete(fileId=file["id"],
supportsAllDrives=True).execute()
def _get_folder_metadata(self, path):
"""
Get info about folder with 'path'
Args:
path (string):
Returns:
(dictionary) with metadata or raises ValueError
"""
try:
return self.get_tree()[path]
except Exception:
raise ValueError("Uknown folder id {}".format(id))
def list_folder(self, folder_path):
"""
List all files and subfolders of particular path non-recursively.
Args:
folder_path (string): absolute path on provider
Returns:
(list)
"""
pass
@timeit
def list_folders(self):
""" Lists all folders in GDrive.
Used to build in-memory structure of path to folder ids model.
Returns:
(list) of dictionaries('id', 'name', [parents])
"""
folders = []
page_token = None
fields = 'nextPageToken, files(id, name, parents)'
while True:
q = self._handle_q("mimeType='application/vnd.google-apps.folder'")
response = self.service.files().list(
q=q,
pageSize=1000,
corpora="allDrives",
includeItemsFromAllDrives=True,
supportsAllDrives=True,
fields=fields,
pageToken=page_token).execute()
folders.extend(response.get('files', []))
page_token = response.get('nextPageToken', None)
if page_token is None:
break
return folders
def list_files(self):
""" Lists all files in GDrive
Runs a loop through possibly multiple pages. The result could be large;
if that becomes a problem, change it to a generator.
Returns:
(list) of dictionaries('id', 'name', [parents])
"""
files = []
page_token = None
fields = 'nextPageToken, files(id, name, parents)'
while True:
q = self._handle_q("")
response = self.service.files().list(
q=q,
corpora="allDrives",
includeItemsFromAllDrives=True,
supportsAllDrives=True,
fields=fields,
pageToken=page_token).execute()
files.extend(response.get('files', []))
page_token = response.get('nextPageToken', None)
if page_token is None:
break
return files
def folder_path_exists(self, file_path):
"""
Checks if path from 'file_path' exists. If so, return its
folder id.
Args:
file_path (string): gdrive path with / as a separator
Returns:
(string) folder id or False
"""
if not file_path:
return False
root, ext = os.path.splitext(file_path)
if not ext:
file_path += '/'
dir_path = os.path.dirname(file_path)
path = self.get_tree().get(dir_path, None)
if path:
return path["id"]
return False
def file_path_exists(self, file_path):
"""
Checks if 'file_path' exists on GDrive
Args:
file_path (string): separated by '/', from root, with file name
Returns:
(dictionary|boolean) file metadata | False if not found
"""
folder_id = self.folder_path_exists(file_path)
if folder_id:
return self.file_exists(os.path.basename(file_path), folder_id)
return False
def file_exists(self, file_name, folder_id):
"""
Checks if 'file_name' exists in 'folder_id'
Args:
file_name (string):
folder_id (int): google drive folder id
Returns:
(dictionary|boolean) file metadata, False if not found
"""
q = self._handle_q("name = '{}' and '{}' in parents"
.format(file_name, folder_id))
response = self.service.files().list(
q=q,
corpora="allDrives",
includeItemsFromAllDrives=True,
supportsAllDrives=True,
fields='nextPageToken, files(id, name, parents, '
'mimeType, modifiedTime,size,md5Checksum)').execute()
if len(response.get('files')) > 1:
raise ValueError("Too many files returned for {} in {}"
.format(file_name, folder_id))
file = response.get('files', [])
if not file:
return False
return file[0]
@classmethod
def get_presets(cls):
"""
Get presets for this provider
Returns:
(dictionary) of configured sites
"""
provider_presets = None
try:
provider_presets = config.get_presets()["sync_server"]["gdrive"]
except KeyError:
log.info(("Sync Server: There are no presets for Gdrive " +
"provider.").
format(str(provider_presets)))
return
return provider_presets
def _handle_q(self, q, trashed=False):
""" API list call contain trashed and hidden files/folder by default.
Usually we dont want those, must be included in query explicitly.
Args:
q (string): query portion
trashed (boolean): False|True
Returns:
(string) - modified query
"""
parts = [q]
if not trashed:
parts.append(" trashed = false ")
return " and ".join(parts)
if __name__ == '__main__':
gd = GDriveHandler('gdrive')
print(gd.root)
print(gd.get_tree())
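    # Further illustrative calls (paths are placeholders, kept commented out
    # so the module self-test stays read-only):
    # gd.create_folder("/My Drive/Test/Assets")
    # gd.upload_file("C:/local/file.ma", "/My Drive/Test/Assets/file.ma",
    #                overwrite=True)
    # gd.download_file("/My Drive/Test/Assets/file.ma", "C:/local/downloads")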

View file

@ -0,0 +1,84 @@
from enum import Enum
from .gdrive import GDriveHandler
class Providers(Enum):
LOCAL = 'studio'
GDRIVE = 'gdrive'
class ProviderFactory:
"""
Factory class as a creator of multiple cloud destinations.
Each new implementation needs to be registered and added to Providers
enum.
"""
def __init__(self):
self.providers = {} # {'PROVIDER_LABEL: {cls, int},..}
def register_provider(self, provider, creator, batch_limit):
"""
Provide all necessary information for one specific remote provider
Args:
provider (string): name of provider
creator (class): class implementing AbstractProvider
batch_limit (int): number of files that could be processed in
one loop (based on provider API quota)
Returns:
None. Modifies self.providers.
"""
self.providers[provider] = (creator, batch_limit)
def get_provider(self, provider, site_name, tree=None):
"""
Returns new instance of provider client for specific site.
One provider could have multiple sites.
'tree' is used for injecting already created memory structure,
without it constructor of provider would need to calculate it
from scratch, which could be expensive.
Args:
provider (string): 'gdrive','S3'
site_name (string): descriptor of site, different service accounts
must have different site name
tree (dictionary): - folder paths to folder id structure
Returns:
(implementation of AbstractProvider)
"""
creator_info = self._get_creator_info(provider)
site = creator_info[0](site_name, tree) # call init
return site
def get_provider_batch_limit(self, provider):
"""
Each provider has some limit of files that could be processed in
one batch (loop step). It is not a 'file' limit per se, but a
calculation based on API queries for the provider.
(For example 'gdrive' has 1000 queries per 100 sec; one file could
mean multiple queries (one for each level of the path + a check if the
file exists).)
Args:
provider (string): 'gdrive','S3'
Returns:
"""
info = self._get_creator_info(provider)
return info[1]
def _get_creator_info(self, provider):
"""
Collect all necessary info for provider. Currently only creator
class and batch limit
Args:
provider (string): 'gdrive' etc
Returns:
"""
creator_info = self.providers.get(provider)
if not creator_info:
raise ValueError(
"Provider {} not registered yet".format(provider))
return creator_info
factory = ProviderFactory()
factory.register_provider('gdrive', GDriveHandler, 7)
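# --- Illustrative sketch only: registering a hypothetical provider and asking
# --- the factory for a per-site handler. 'DummyHandler' and the site name are
# --- assumptions for the example, not real implementations.
class DummyHandler:
    def __init__(self, site_name, tree=None):
        self.site_name = site_name
        self.tree = tree

sketch_factory = ProviderFactory()
sketch_factory.register_provider('dummy', DummyHandler, 5)

handler = sketch_factory.get_provider('dummy', 'studio_dummy')  # new instance per site
limit = sketch_factory.get_provider_batch_limit('dummy')        # -> 5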

View file

@ -0,0 +1,828 @@
from pype.api import config, Logger
from pype.lib import timeit
import threading
import asyncio
import concurrent.futures
from concurrent.futures._base import CancelledError
from enum import Enum
from datetime import datetime
from .providers import lib
import os
from avalon import io
from avalon.api import AvalonMongoDB
log = Logger().get_logger("SyncServer")
class SyncStatus(Enum):
DO_NOTHING = 0
DO_UPLOAD = 1
DO_DOWNLOAD = 2
class SyncServer():
"""
Synchronization server that is syncing published files from local to
any of implemented providers (like GDrive, S3 etc.)
Runs in the background and checks all representations, looks for files
that are marked to be in different location than 'studio' (temporary),
checks if 'created_dt' field is present denoting successful sync
with provider destination.
Sites structure is created during publish and by default it will
always contain 1 record with "name" == self.presets["active_site"] and
filled "created_dt" AND 1 or multiple records for all defined
remote sites, where "created_dt" is not present.
This highlights that file should be uploaded to
remote destination
''' - example of synced file test_Cylinder_lookMain_v010.ma to GDrive
"files" : [
{
"path" : "{root}/Test/Assets/Cylinder/publish/look/lookMain/v010/
test_Cylinder_lookMain_v010.ma",
"_id" : ObjectId("5eeb25e411e06a16209ab78f"),
"hash" : "test_Cylinder_lookMain_v010,ma|1592468963,24|4822",
"size" : NumberLong(4822),
"sites" : [
{
"name": "john_local_XD4345",
"created_dt" : ISODate("2020-05-22T08:05:44.000Z")
},
{
"id" : ObjectId("5eeb25e411e06a16209ab78f"),
"name": "gdrive",
"created_dt" : ISODate("2020-05-55T08:54:35.833Z")
}
]
}
],
'''
Each Tray app has assigned its own self.presets["local_id"]
used in sites as a name.
Tray is searching only for records where name matches its
self.presets["active_site"] + self.presets["remote_site"].
"active_site" could be storage in studio ('studio'), or specific
"local_id" when user is working disconnected from home.
If the local record has its "created_dt" filled, it is a source and
process will try to upload the file to all defined remote sites.
Remote files "id" is real id that could be used in appropriate API.
Local files have "id" too, for conformity, contains just file name.
It is expected that multiple providers will be implemented in separate
classes and registered in 'providers.py'.
"""
# limit querying DB to look for X number of representations that should
# be sync, we try to run more loops with less records
# actual number of files synced could be lower as providers can have
# different limits imposed by its API
# set 0 to no limit
REPRESENTATION_LIMIT = 100
def __init__(self):
self.qaction = None
self.failed_icon = None
self._is_running = False
self.presets = None
self.lock = threading.Lock()
self.connection = AvalonMongoDB()
try:
self.presets = config.get_presets()["sync_server"]["config"]
except KeyError:
log.debug(("There are not set presets for SyncServer."
" No credentials provided, no syncing possible").
format(str(self.presets)))
self.sync_server_thread = SyncServerThread(self)
self.active_site = self.presets["active_site"]
self.remote_site = self.presets["remote_site"]
# try to activate providers, need to have valid credentials
self.active_sites = []
for provider in lib.factory.providers.keys():
for site in lib.factory.providers[provider][0].get_presets().\
keys():
handler = lib.factory.get_provider(provider, site)
if handler.is_active():
self.active_sites.append((provider, site))
@property
def active_site(self):
"""
Returns active 'local' site (could be personal location on user
laptop or general 'studio' mounted disk.
Its 'mine' part of synchronization.
Returns:
(string)
"""
return self._active_site
@active_site.setter
def active_site(self, value):
"""
Sets 'mine' part of the synchronization process. Only a single site
is expected to be active at a time. The active site can change with
location (a user working in the studio has 'active_site' = 'studio';
when working from home the same user switches 'active_site' to
'john_doe_local_001').
Args:
value (string): label for site, needs to match representation's
'files.site'.keys()
Returns:
(string)
"""
self._active_site = value
@property
def remote_site(self):
"""
Remote side of synchronization, where "to synchronize to".
Currently expected only single remote destination ('gdrive'..),
but prepared for multiple.
Denotes 'theirs' side of synchronization.
Returns:
(list) of strings (['gdrive'])
"""
return [self._remote_site]
@remote_site.setter
def remote_site(self, value):
self._remote_site = value
def get_collections(self):
"""
Returns:
(list) of strings with collection names in avalon DB
"""
return self.connection.database.collection_names(False)
@timeit
def get_sync_representations(self, collection, active_site, remote_site):
"""
Get representations that should be synced. These are recognised by
the presence of a record in 'files.sites' whose 'name' is a provider
(GDrive, S3) and which is either empty or missing the 'created_dt'
field. (Don't put null into 'created_dt'!)
Querying of 'to-be-synced' files is offloaded to MongoDB for
better performance. The goal is to fetch as few representations as
possible.
Args:
collection (string): name of collection (in most cases matches
project name)
active_site (string): identifier of the current active site ('local_0'
when working from home, 'studio' when working in the studio - the
default)
remote_site (string): identifier of the remote site to sync to
Returns:
(list) of dictionaries
"""
log.debug("Check representations for : {}".format(collection))
self.connection.Session["AVALON_PROJECT"] = collection
# retry_cnt - number of attempts to sync specific file before giving up
retries_arr = self._get_retries_arr()
active_providers_str = ",".join(remote_site)
query = {
"type": "representation",
"$or": [
{"$and": [
{
"files.sites": {
"$elemMatch": {
"name": active_site,
"created_dt": {"$exists": True}
}
}}, {
"files.sites": {
"$elemMatch": {
"name": {"$in": [active_providers_str]},
"created_dt": {"$exists": False},
"tries": {"$in": retries_arr}
}
}
}]},
{"$and": [
{
"files.sites": {
"$elemMatch": {
"name": active_site,
"created_dt": {"$exists": False},
"tries": {"$in": retries_arr}
}
}}, {
"files.sites": {
"$elemMatch": {
"name": {"$in": [active_providers_str]},
"created_dt": {"$exists": True}
}
}
}
]}
]
}
log.debug("get_sync_representations.query: {}".format(query))
representations = self.connection.find(query)
return representations
def check_status(self, file, provider_name):
"""
Check synchronization status for single 'file' of single
'representation' by single 'provider'.
(Eg. check if 'scene.ma' of lookdev.v10 should be synced to GDrive.)
The comparison is always made against the local record, ie. the site
with 'name' == self.presets["active_site"].
Args:
file (dictionary): of file from representation in Mongo
provider_name (string): - gdrive etc.
Returns:
(SyncStatus) - one of the SyncStatus enum values
"""
sites = file.get("sites") or []
# if isinstance(sites, list): # temporary, old format of 'sites'
# return SyncStatus.DO_NOTHING
_, provider_rec = self._get_provider_rec(sites, provider_name) or {}
if provider_rec: # sync remote target
created_dt = provider_rec.get("created_dt")
if not created_dt:
tries = self._get_tries_count_from_rec(provider_rec)
# file will be skipped if unsuccessfully tried over threshold
# error metadata needs to be purged manually in DB to reset
if tries < self.presets["retry_cnt"]:
return SyncStatus.DO_UPLOAD
else:
_, local_rec = self._get_provider_rec(
sites,
self.presets["active_site"]) or {}
if not local_rec or not local_rec.get("created_dt"):
tries = self._get_tries_count_from_rec(local_rec)
# file will be skipped if unsuccessfully tried over
# threshold times, error metadata needs to be purged
# manually in DB to reset
if tries < self.presets["retry_cnt"]:
return SyncStatus.DO_DOWNLOAD
return SyncStatus.DO_NOTHING
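# --- Illustrative sketch only: the shape of a single 'file' record inspected
# --- by check_status() and the outcome expected for it. Site names, the date
# --- and the path are hypothetical.
file_record = {
    "path": "{root}/Test/Assets/Cylinder/publish/look/lookMain/v010/scene.ma",
    "sites": [
        {"name": "studio", "created_dt": "2020-05-22T08:05:44Z"},  # local copy exists
        {"name": "gdrive"}                                         # no 'created_dt' -> not synced yet
    ]
}
# With presets["active_site"] == "studio" and the retry threshold not reached:
#   check_status(file_record, "gdrive") -> SyncStatus.DO_UPLOAD
# Once the 'gdrive' record also carries 'created_dt', the same call returns
#   SyncStatus.DO_NOTHING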
async def upload(self, file, representation, provider_name, site_name,
tree=None):
"""
Upload single 'file' of a 'representation' to 'provider'.
Source url is taken from 'file' portion, where {root} placeholder
is replaced by 'representation.Context.root'
Provider could be one of those implemented in the 'providers' package.
Updates MongoDB, fills in id of file from provider (ie. file_id
from GDrive), 'created_dt' - time of upload
Args:
file (dictionary): of file from representation in Mongo
representation (dictionary): of representation
provider_name (string): gdrive, gdc etc.
site_name (string): site on provider, single provider(gdrive) could
have multiple sites (different accounts, credentials)
tree (dictionary): injected memory structure for performance
"""
# create ids sequentially, upload file in parallel later
with self.lock:
# this part modifies structure on 'remote_site', only single
# thread can do that at a time, upload/download to prepared
# structure should be run in parallel
handler = lib.factory.get_provider(provider_name, site_name, tree)
remote_file = self._get_remote_file_path(file,
handler.get_roots_config()
)
local_root = representation.get("context", {}).get("root")
local_file = self._get_local_file_path(file, local_root)
target_folder = os.path.dirname(remote_file)
folder_id = handler.create_folder(target_folder)
if not folder_id:
err = "Folder {} wasn't created. Check permissions.".\
format(target_folder)
raise NotADirectoryError(err)
loop = asyncio.get_running_loop()
file_id = await loop.run_in_executor(None,
handler.upload_file,
local_file,
remote_file,
True)
return file_id
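# --- Minimal standalone sketch of the pattern used above: the blocking
# --- provider call is pushed to the loop's default executor so many transfers
# --- can be awaited concurrently. 'blocking_transfer' is a stand-in for a
# --- real handler method such as 'upload_file'.
import asyncio

def blocking_transfer(source_path, target_path):
    return "new_file_id"  # placeholder for the provider's returned id

async def transfer_one(source_path, target_path):
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, blocking_transfer,
                                      source_path, target_path)

# asyncio.run(transfer_one("local/scene.ma", "remote/scene.ma"))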
async def download(self, file, representation, provider_name,
site_name, tree=None):
"""
Downloads file to local folder denoted in representation.Context.
Args:
file (dictionary) : info about processed file
representation (dictionary): repr that 'file' belongs to
provider_name (string): 'gdrive' etc
site_name (string): site on provider, single provider(gdrive) could
have multiple sites (different accounts, credentials)
tree (dictionary): injected memory structure for performance
Returns:
(string) - 'name' of local file
"""
with self.lock:
handler = lib.factory.get_provider(provider_name, site_name, tree)
remote_file = self._get_remote_file_path(file,
handler.get_roots_config()
)
local_root = representation.get("context", {}).get("root")
local_file = self._get_local_file_path(file, local_root)
local_folder = os.path.dirname(local_file)
os.makedirs(local_folder, exist_ok=True)
loop = asyncio.get_running_loop()
file_id = await loop.run_in_executor(None,
handler.download_file,
remote_file,
local_file,
False)
return file_id
def update_db(self, new_file_id, file, representation, provider_name,
error=None):
"""
Update 'provider' portion of records in DB with success (file_id)
or error (exception)
Args:
new_file_id (string):
file (dictionary): info about processed file (pulled from DB)
representation (dictionary): parent repr of file (from DB)
provider_name (string): label ('gdrive', 'S3')
error (string): exception message
Returns:
None
"""
representation_id = representation.get("_id")
file_id = file.get("_id")
query = {
"_id": representation_id,
"files._id": file_id
}
file_index, _ = self._get_file_info(representation.get('files', []),
file_id)
site_index, _ = self._get_provider_rec(file.get('sites', []),
provider_name)
update = {}
if new_file_id:
update["$set"] = self._get_success_dict(file_index, site_index,
new_file_id)
# reset previous errors if any
update["$unset"] = self._get_error_dict(file_index, site_index,
"", "")
else:
tries = self._get_tries_count(file, provider_name)
tries += 1
update["$set"] = self._get_error_dict(file_index, site_index,
error, tries)
self.connection.update_one(
query,
update
)
status = 'failed'
error_str = 'with error {}'.format(error)
if new_file_id:
status = 'succeeded with id {}'.format(new_file_id)
error_str = ''
source_file = file.get("path", "")
log.debug("File {source_file} process {status} {error_str}".
format(status=status,
source_file=source_file,
error_str=error_str))
def tray_start(self):
"""
Triggered when Tray is started. Checks if configuration presets
are available and if there is any provider ('gdrive', 'S3') that
is activated (eg. has valid credentials).
Returns:
None
"""
if self.presets and self.active_sites:
self.sync_server_thread.start()
else:
log.debug("No presets or active providers. " +
"Synchronization not possible.")
def tray_exit(self):
self.stop()
def thread_stopped(self):
self._is_running = False
@property
def is_running(self):
return self.sync_server_thread.is_running
def stop(self):
if not self.is_running:
return
try:
log.debug("Stopping sync server server")
self.sync_server_thread.is_running = False
self.sync_server_thread.stop()
except Exception:
log.warning(
"Error has happened during Killing sync server",
exc_info=True
)
def _get_file_info(self, files, _id):
"""
Return the record from a list of file records whose '_id' matches '_id'.
Could possibly be refactored together with '_get_provider_rec'.
Args:
files (list): of dictionaries with info about published files
_id (string): _id of specific file
Returns:
(int, dictionary): index from list and record with metadata
about site (if/when created, errors..)
OR (-1, None) if not present
"""
for index, rec in enumerate(files):
if rec.get("_id") == _id:
return index, rec
return -1, None
def _get_provider_rec(self, sites, provider):
"""
Return the record from a list of site records whose 'name' matches 'provider'.
Args:
sites (list): of dictionaries
provider (string): 'local_XXX', 'gdrive'
Returns:
(int, dictionary): index from list and record with metadata
about site (if/when created, errors..)
OR (-1, None) if not present
"""
for index, rec in enumerate(sites):
if rec.get("name") == provider:
return index, rec
return -1, None
def reset_provider_for_file(self, collection, representation_id,
file_id, site_name):
"""
Reset information about synchronization for particular 'file_id'
and provider.
Useful for testing or forcing file to be reuploaded.
Args:
collection (string): name of project (eg. collection) in DB
representation_id(string): _id of representation
file_id (string): file _id in representation
site_name (string): 'gdrive', 'S3' etc
Returns:
None
"""
# TODO - implement reset for ALL files or ALL sites
query = {
"_id": io.ObjectId(representation_id)
}
self.connection.Session["AVALON_PROJECT"] = collection
representation = list(self.connection.find(query))
if not representation:
raise ValueError("Representation {} not found in {}".
format(representation_id, collection))
files = representation[0].get('files', [])
file_index, _ = self._get_file_info(files,
file_id)
site_index, _ = self._get_provider_rec(files[file_index].
get('sites', []),
site_name)
if file_index >= 0 and site_index >= 0:
elem = {"name": site_name}
update = {
"$set": {"files.{}.sites.{}".format(file_index, site_index):
elem
}
}
self.connection.update_one(
query,
update
)
def get_loop_delay(self):
"""
Return count of seconds before next synchronization loop starts
after finish of previous loop.
Returns:
(int): in seconds
"""
return self.presets["loop_delay"]
def _get_success_dict(self, file_index, site_index, new_file_id):
"""
Provide success metadata ("id", "created_dt") to be stored in Db.
Used in $set: "DICT" part of query.
Sites are array inside of array(file), so real indexes for both
file and site are needed for upgrade in DB.
Args:
file_index: (int) - index of modified file
site_index: (int) - index of modified site of modified file
new_file_id: id of created file
Returns:
(dictionary)
"""
val = {"files.{}.sites.{}.id".format(file_index, site_index):
new_file_id,
"files.{}.sites.{}.created_dt".format(file_index, site_index):
datetime.utcnow()}
return val
def _get_error_dict(self, file_index, site_index, error="", tries=""):
"""
Provide error metadata to be stored in Db.
Used for set (error and tries provided) or unset mode.
Args:
file_index: (int) - index of modified file
site_index: (int) - index of modified site of modified file
error: (string) - message
tries: how many times failed
Returns:
(dictionary)
"""
val = {"files.{}.sites.{}.last_failed_dt".
format(file_index, site_index): datetime.utcnow(),
"files.{}.sites.{}.error".format(file_index, site_index): error,
"files.{}.sites.{}.tries".format(file_index, site_index): tries
}
return val
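# --- Illustrative sketch only: the flat, dotted-key update documents that
# --- update_db() builds from the two helpers above for file index 0 and site
# --- index 1. The id and error message are hypothetical.
from datetime import datetime

success_update = {
    "$set": {
        "files.0.sites.1.id": "new_file_id",
        "files.0.sites.1.created_dt": datetime.utcnow(),
    }
    # previous error metadata is cleared with "$unset" on the same dotted keys
}
failure_update = {
    "$set": {
        "files.0.sites.1.last_failed_dt": datetime.utcnow(),
        "files.0.sites.1.error": "403: userRateLimitExceeded",
        "files.0.sites.1.tries": 3,
    }
}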
def _get_tries_count_from_rec(self, rec):
"""
Get number of failed attempts to sync from site record
Args:
rec (dictionary): info about specific site record
Returns:
(int) - number of failed attempts
"""
if not rec:
return 0
return rec.get("tries", 0)
def _get_tries_count(self, file, provider):
"""
Get number of failed attempts to sync
Args:
file (dictionary): info about specific file
provider (string): name of site ('gdrive' or specific user site)
Returns:
(int) - number of failed attempts
"""
_, rec = self._get_provider_rec(file.get("sites", []), provider)
return rec.get("tries", 0)
def _get_local_file_path(self, file, local_root):
"""
Auxiliary function for replacing rootless path with real path
Args:
file (dictionary): file info, get 'path' to file with {root}
local_root (string): value of {root} for local projects
Returns:
(string) - absolute path on local system
"""
if not local_root:
raise ValueError("Unknown local root for file {}")
path = file.get("path", "")
return path.format(**{"root": local_root})
def _get_remote_file_path(self, file, root_config):
"""
Auxiliary function for replacing rootless path with real path
Args:
file (dictionary): file info, get 'path' to file with {root}
root_config (dict): value of {root} for remote location
Returns:
(string) - absolute path on remote location
"""
path = file.get("path", "")
if not root_config.get("root"):
root_config = {"root": root_config}
path = path.format(**root_config)
return path
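# --- Illustrative sketch only: turning the rootless 'path' stored in the DB
# --- into real locations. The root values below are hypothetical.
rootless = "{root}/Test/Assets/Cylinder/publish/look/lookMain/v010/scene.ma"

local_file = rootless.format(**{"root": "C:/projects"})
# -> "C:/projects/Test/Assets/Cylinder/publish/look/lookMain/v010/scene.ma"

remote_root_config = {"root": "/studio/projects"}  # may also arrive un-nested
remote_file = rootless.format(**remote_root_config)
# -> "/studio/projects/Test/Assets/Cylinder/publish/look/lookMain/v010/scene.ma"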
def _get_retries_arr(self):
"""
Returns an array with allowed values of the 'tries' field. If a
representation contains one of these values, a sync was attempted but
failed. We try up to 'self.presets["retry_cnt"]' times before giving
up and skipping the representation.
Returns:
(list)
"""
arr = [i for i in range(self.presets["retry_cnt"])]
arr.append(None)
return arr
class SyncServerThread(threading.Thread):
"""
Separate thread running synchronization server with asyncio loop.
Stopped when tray is closed.
"""
def __init__(self, module):
super(SyncServerThread, self).__init__()
self.module = module
self.loop = None
self.is_running = False
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
def run(self):
self.is_running = True
try:
log.info("Starting Sync Server")
self.loop = asyncio.new_event_loop() # create new loop for thread
asyncio.set_event_loop(self.loop)
self.loop.set_default_executor(self.executor)
asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
asyncio.ensure_future(self.sync_loop(), loop=self.loop)
self.loop.run_forever()
except Exception:
log.warning(
"Sync Server service has failed", exc_info=True
)
finally:
self.loop.close() # optional
async def sync_loop(self):
"""
Runs permanently, each time:
- gets list of collections in DB
- gets list of active remote providers (has configuration,
credentials)
- for each collection it looks for representations that should
be synced
- synchronize found collections
- update representations - fills error messages for exceptions
- waits X seconds and repeat
Returns:
"""
try:
while self.is_running:
import time
start_time = None
for collection in self.module.get_collections():
start_time = time.time()
sync_repres = self.module.get_sync_representations(
collection,
self.module.active_site,
self.module.remote_site
)
local = self.module.active_site
task_files_to_process = []
files_processed_info = []
# process only unique file paths in one batch
# multiple representation could have same file path
# (textures),
# upload process can find already uploaded file and
# reuse same id
processed_file_path = set()
for active_site in self.module.active_sites:
provider, site = active_site
handler = lib.factory.get_provider(provider, site)
limit = lib.factory.get_provider_batch_limit(provider)
# the first call that builds the folder tree structure in memory
# could be expensive, so request the tree only if needed,
# eg. on DO_UPLOAD or DO_DOWNLOAD
for sync in sync_repres:
if limit <= 0:
continue
files = sync.get("files") or []
if files:
for file in files:
# skip already processed files
file_path = file.get('path', '')
if file_path in processed_file_path:
continue
status = self.module.check_status(file,
provider)
if status == SyncStatus.DO_UPLOAD:
tree = handler.get_tree()
limit -= 1
task = asyncio.create_task(
self.module.upload(file,
sync,
provider,
site,
tree))
task_files_to_process.append(task)
# store info for exception handling
files_processed_info.append((file,
sync,
site))
processed_file_path.add(file_path)
if status == SyncStatus.DO_DOWNLOAD:
tree = handler.get_tree()
limit -= 1
task = asyncio.create_task(
self.module.download(file,
sync,
provider,
site,
tree))
task_files_to_process.append(task)
files_processed_info.append((file,
sync,
local))
processed_file_path.add(file_path)
log.debug("Sync tasks count {}".
format(len(task_files_to_process)))
files_created = await asyncio.gather(
*task_files_to_process,
return_exceptions=True)
for file_id, info in zip(files_created,
files_processed_info):
file, representation, site = info
error = None
if isinstance(file_id, BaseException):
error = str(file_id)
file_id = None
self.module.update_db(file_id,
file,
representation,
site,
error)
duration = time.time() - start_time
log.debug("One loop took {:.2f}s".format(duration))
await asyncio.sleep(self.module.get_loop_delay())
except ConnectionResetError:
log.warning("ConnectionResetError in sync loop, trying next loop",
exc_info=True)
except CancelledError:
# just stopping server
pass
except Exception:
self.stop()
log.warning("Unhandled exception in sync loop, stopping server",
exc_info=True)
def stop(self):
"""Sets is_running flag to false, 'check_shutdown' shuts server down"""
self.is_running = False
async def check_shutdown(self):
""" Future that is running and checks if server should be running
periodically.
"""
while self.is_running:
await asyncio.sleep(0.5)
tasks = [task for task in asyncio.all_tasks() if
task is not asyncio.current_task()]
list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
results = await asyncio.gather(*tasks, return_exceptions=True)
log.debug(f'Finished awaiting cancelled tasks, results: {results}...')
await self.loop.shutdown_asyncgens()
# to really make sure everything else has time to stop
self.executor.shutdown(wait=True)
await asyncio.sleep(0.07)
self.loop.stop()

View file

@ -0,0 +1,64 @@
from pype.api import Logger
from wsrpc_aiohttp import WebSocketRoute
import functools
import avalon.aftereffects as aftereffects
log = Logger().get_logger("WebsocketServer")
class AfterEffects(WebSocketRoute):
"""
One route, mimicking external application (like Harmony, etc).
All functions could be called from client.
Server-side functions can in turn call functions on the client,
mimicking a notification after a long running job on the server
or similar.
instance = None
def init(self, **kwargs):
# Python __init__ must be return "self".
# This method might return anything.
log.debug("someone called AfterEffects route")
self.instance = self
return kwargs
# server functions
async def ping(self):
log.debug("someone called AfterEffects route ping")
# This method calls function on the client side
# client functions
async def read(self):
log.debug("aftereffects.read client calls server server calls "
"aftereffects client")
return await self.socket.call('aftereffects.read')
# panel routes for tools
async def creator_route(self):
self._tool_route("creator")
async def workfiles_route(self):
self._tool_route("workfiles")
async def loader_route(self):
self._tool_route("loader")
async def publish_route(self):
self._tool_route("publish")
async def sceneinventory_route(self):
self._tool_route("sceneinventory")
async def projectmanager_route(self):
self._tool_route("projectmanager")
def _tool_route(self, tool_name):
"""The address accessed when clicking on the buttons."""
partial_method = functools.partial(aftereffects.show, tool_name)
aftereffects.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"

View file

@ -0,0 +1,284 @@
from pype.modules.websocket_server import WebSocketServer
"""
Stub handling connection from server to client.
Used anywhere solution is calling client methods.
"""
import json
from collections import namedtuple
import logging
log = logging.getLogger(__name__)
class AfterEffectsServerStub():
"""
Stub for calling functions on the client (After Effects JS) side.
Expects that client is already connected (started when avalon menu
is opened).
'self.websocketserver.call' is used as async wrapper
"""
def __init__(self):
self.websocketserver = WebSocketServer.get_instance()
self.client = self.websocketserver.get_client()
def open(self, path):
"""
Open file located at 'path' (local).
Args:
path(string): file path locally
Returns: None
"""
self.websocketserver.call(self.client.call
('AfterEffects.open', path=path)
)
def read(self, layer, layers_meta=None):
"""
Parses layer metadata from Label field of active document
Args:
layer: <namedTuple Layer("id":XX, "name":"YYY")
layers_meta: full list from Headline (for performance in loops)
Returns:
"""
if layers_meta is None:
layers_meta = self.get_metadata()
return layers_meta.get(str(layer.id))
def get_metadata(self):
"""
Get stored JSON with metadata from AE.Metadata.Label field
Returns:
(dict)
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_metadata')
)
try:
layers_data = json.loads(res)
except json.decoder.JSONDecodeError:
raise ValueError("Unparsable metadata {}".format(res))
return layers_data or {}
def imprint(self, layer, data, all_layers=None, layers_meta=None):
"""
Save layer metadata to Label field of metadata of active document
Args:
layer (namedtuple): Layer("id": XXX, "name":'YYY')
data(string): json representation for single layer
all_layers (list of namedtuples): for performance, could be
injected for usage in loop, if not, single call will be
triggered
layers_meta(string): json representation from Headline
(for performance - provide only if imprint is in
loop - value should be same)
Returns: None
"""
if not layers_meta:
layers_meta = self.get_metadata()
# JSON serialization turns integer dictionary keys into strings, so
# anticipate that here.
if str(layer.id) in layers_meta and layers_meta[str(layer.id)]:
if data:
layers_meta[str(layer.id)].update(data)
else:
layers_meta.pop(str(layer.id))
else:
layers_meta[str(layer.id)] = data
# Ensure only valid ids are stored.
if not all_layers:
# loaders create FootageItem now
all_layers = self.get_items(comps=True,
folders=False,
footages=True)
item_ids = [int(item.id) for item in all_layers]
cleaned_data = {}
for id in layers_meta:
if int(id) in item_ids:
cleaned_data[id] = layers_meta[id]
payload = json.dumps(cleaned_data, indent=4)
self.websocketserver.call(self.client.call
('AfterEffects.imprint', payload=payload)
)
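# --- Illustrative sketch only: imprinting publish metadata on one composition
# --- item, assuming the After Effects extension is connected. Items are the
# --- namedtuples produced by _to_records() below; the id and values are
# --- hypothetical.
from collections import namedtuple

Item = namedtuple("Item", ["id", "name"])
item = Item(id=42, name="renderMain")

stub = AfterEffectsServerStub()
stub.imprint(item, {
    "representation": "5eeb25e411e06a16209ab78f",
    "name": "renderMain",
    "namespace": "Town_renderMain"
})
# get_metadata() would now contain that data under the key "42".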
def get_active_document_full_name(self):
"""
Returns the full file name (including path) of the active document
via ws call.
Returns(string): full file name
"""
res = self.websocketserver.call(self.client.call(
'AfterEffects.get_active_document_full_name'))
return res
def get_active_document_name(self):
"""
Returns just a name of active document via ws call
Returns(string): file name
"""
res = self.websocketserver.call(self.client.call(
'AfterEffects.get_active_document_name'))
return res
def get_items(self, comps, folders=False, footages=False):
"""
Get all items from Project panel according to arguments.
There are multiple different types:
CompItem (could have multiple layers - source for Creator)
FolderItem (collection type, currently not used)
FootageItem (imported file - created by Loader)
Args:
comps (bool): return CompItems
folders (bool): return FolderItem
footages (bool): return FootageItem
Returns:
(list) of namedtuples
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_items',
comps=comps,
folders=folders,
footages=footages)
)
return self._to_records(res)
def get_selected_items(self, comps, folders=False, footages=False):
"""
Same as get_items but using selected items only
Args:
comps (bool): return CompItems
folders (bool): return FolderItem
footages (bool): return FootageItem
Returns:
(list) of namedtuples
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_selected_items',
comps=comps,
folders=folders,
footages=footages)
)
return self._to_records(res)
def import_file(self, path, item_name, import_options=None):
"""
Imports file as a FootageItem. Used in Loader
Args:
path (string): absolute path for asset file
item_name (string): label for created FootageItem
import_options (dict): different files (img vs psd) need different
config
"""
res = self.websocketserver.call(self.client.call(
'AfterEffects.import_file',
path=path,
item_name=item_name,
import_options=import_options)
)
records = self._to_records(res)
if records:
return records.pop()
log.debug("Couldn't import {} file".format(path))
def replace_item(self, item, path, item_name):
""" Replace FootageItem with new file
Args:
item (dict):
path (string):absolute path
item_name (string): label on item in Project list
"""
self.websocketserver.call(self.client.call
('AfterEffects.replace_item',
item_id=item.id,
path=path, item_name=item_name))
def delete_item(self, item):
""" Deletes FootageItem with new file
Args:
item (dict):
"""
self.websocketserver.call(self.client.call
('AfterEffects.delete_item',
item_id=item.id
))
def is_saved(self):
# TODO
return True
def set_label_color(self, item_id, color_idx):
"""
Used for highlight additional information in Project panel.
Green color is loaded asset, blue is created asset
Args:
item_id (int):
color_idx (int): 0-16 Label colors from AE Project view
"""
self.websocketserver.call(self.client.call
('AfterEffects.set_label_color',
item_id=item_id,
color_idx=color_idx
))
def save(self):
"""
Saves active document
Returns: None
"""
self.websocketserver.call(self.client.call
('AfterEffects.save'))
def saveAs(self, project_path, as_copy):
"""
Saves active project to 'project_path' (.aep), optionally as a copy.
Args:
project_path(string): full local path
as_copy: <boolean>
Returns: None
"""
self.websocketserver.call(self.client.call
('AfterEffects.saveAs',
image_path=project_path,
as_copy=as_copy))
def close(self):
self.client.close()
def _to_records(self, res):
"""
Converts a JSON string representation into a list of namedtuples so
dot-notation access works.
Args:
res (string): JSON representation
Returns: <list of namedtuples>
"""
if not res:
return []
try:
layers_data = json.loads(res)
except json.decoder.JSONDecodeError:
raise ValueError("Received broken JSON {}".format(res))
if not layers_data:
return []
ret = []
# convert to namedtuple to use dot notation
if isinstance(layers_data, dict):  # TODO refactor
layers_data = [layers_data]
for d in layers_data:
ret.append(namedtuple('Layer', d.keys())(*d.values()))
return ret

View file

@ -0,0 +1,52 @@
from avalon import api
from avalon.vendor import Qt
from avalon import aftereffects
import logging
log = logging.getLogger(__name__)
class CreateRender(api.Creator):
"""Render folder for publish."""
name = "renderDefault"
label = "Render"
family = "render"
def process(self):
# After Effects can have multiple compositions with the same name,
# which does not work with Avalon.
txt = "Instance with name \"{}\" already exists.".format(self.name)
stub = aftereffects.stub() # only after After Effects is up
for layer in stub.get_items(comps=True,
folders=False,
footages=False):
if self.name.lower() == layer.name.lower():
msg = Qt.QtWidgets.QMessageBox()
msg.setIcon(Qt.QtWidgets.QMessageBox.Warning)
msg.setText(txt)
msg.exec_()
return False
log.debug("options:: {}".format(self.options))
print("options:: {}".format(self.options))
if (self.options or {}).get("useSelection"):
log.debug("useSelection")
print("useSelection")
items = stub.get_selected_items(comps=True,
folders=False,
footages=False)
else:
items = stub.get_items(comps=True,
folders=False,
footages=False)
log.debug("items:: {}".format(items))
print("items:: {}".format(items))
if not items:
raise ValueError("Nothing to create. Select composition " +
"if 'useSelection' or create at least " +
"one composition.")
for item in items:
stub.imprint(item, self.data)
stub.set_label_color(item.id, 14) # Cyan options 0 - 16

View file

@ -0,0 +1,105 @@
from avalon import api, aftereffects
from pype.plugins import lib
import re
stub = aftereffects.stub()
class FileLoader(api.Loader):
"""Load images
Stores the imported asset in a container named after the asset.
"""
label = "Load file"
families = ["image",
"plate",
"render",
"prerender",
"review",
"audio"]
representations = ["*"]
def load(self, context, name=None, namespace=None, data=None):
comp_name = lib.get_unique_layer_name(stub.get_items(comps=True),
context["asset"]["name"],
name)
import_options = {}
file = self.fname
repr_cont = context["representation"]["context"]
if "#" not in file:
frame = repr_cont.get("frame")
if frame:
padding = len(frame)
file = file.replace(frame, "#" * padding)
import_options['sequence'] = True
if not file:
repr_id = context["representation"]["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
if '.psd' in file:
import_options['ImportAsType'] = 'ImportAsType.COMP'
comp = stub.import_file(self.fname, comp_name, import_options)
if not comp:
self.log.warning(
"Representation id `{}` is failing to load".format(file))
self.log.warning("Check host app for alert error.")
return
self[:] = [comp]
namespace = namespace or comp_name
return aftereffects.containerise(
name,
namespace,
comp,
context,
self.__class__.__name__
)
def update(self, container, representation):
""" Switch asset or change version """
layer = container.pop("layer")
context = representation.get("context", {})
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
layer_name = "{}_{}".format(context["asset"], context["subset"])
# switching assets
if namespace_from_container != layer_name:
layer_name = lib.get_unique_layer_name(stub.get_items(comps=True),
context["asset"],
context["subset"])
else: # switching version - keep same name
layer_name = container["namespace"]
path = api.get_representation_path(representation)
# with aftereffects.maintained_selection(): # TODO
stub.replace_item(layer, path, layer_name)
stub.imprint(
layer, {"representation": str(representation["_id"]),
"name": context["subset"],
"namespace": layer_name}
)
def remove(self, container):
"""
Removes element from scene: deletes layer + removes from Headline
Args:
container (dict): container to be removed - used to get layer_id
"""
layer = container.pop("layer")
stub.imprint(layer, {})
stub.delete_item(layer.id)
def switch(self, container, representation):
self.update(container, representation)
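# --- Illustrative sketch only: how a frame-numbered path is converted to the
# --- padded pattern before import so After Effects loads it as a sequence.
# --- The path and frame value are hypothetical.
file = "C:/publish/render/v001/shot010_beauty.1001.exr"
frame = "1001"
padding = len(frame)
file = file.replace(frame, "#" * padding)
# -> "C:/publish/render/v001/shot010_beauty.####.exr"
# and import_options['sequence'] = True is set alongside it.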

View file

@ -1,7 +1,9 @@
import pyblish.api
import os
import collections
import pyblish.api
from avalon import io
import pype.api as pype
from pprint import pformat
@ -12,10 +14,11 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
def process(self, context):
self.log.info('Collecting Audio Data')
asset_entity = context.data["assetEntity"]
asset_doc = context.data["assetEntity"]
# get all available representations
subsets = pype.get_subsets(asset_entity["name"],
subsets = self.get_subsets(
asset_doc,
representations=["audio", "wav"]
)
self.log.info(f"subsets is: {pformat(subsets)}")
@ -39,3 +42,85 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
'audio_file: {}, has been added to context'.format(audio_file))
else:
self.log.warning("Couldn't find any audio file on Ftrack.")
def get_subsets(self, asset_doc, representations):
"""
Query subsets for the given asset.
The method returns all found subsets together with their latest
version and representations. Representations can be filtered by name.
Arguments:
asset_doc (dict): Asset (shot) mongo document
representations (list): list for all representations
Returns:
dict: subsets with their version and representations
"""
# Query all subsets for asset
subset_docs = io.find({
"type": "subset",
"parent": asset_doc["_id"]
})
# Collect all subset ids
subset_ids = [
subset_doc["_id"]
for subset_doc in subset_docs
]
# Check if we found anything
assert subset_ids, (
"No subsets found. Check correct filter. "
"Try this for start `r'.*'`: asset: `{}`"
).format(asset_doc["name"])
# Last version aggregation
pipeline = [
# Find all versions of those subsets
{"$match": {
"type": "version",
"parent": {"$in": subset_ids}
}},
# Sorting versions all together
{"$sort": {"name": 1}},
# Group them by "parent", but only take the last
{"$group": {
"_id": "$parent",
"_version_id": {"$last": "$_id"},
"name": {"$last": "$name"}
}}
]
last_versions_by_subset_id = dict()
for doc in io.aggregate(pipeline):
doc["parent"] = doc["_id"]
doc["_id"] = doc.pop("_version_id")
last_versions_by_subset_id[doc["parent"]] = doc
version_docs_by_id = {}
for version_doc in last_versions_by_subset_id.values():
version_docs_by_id[version_doc["_id"]] = version_doc
repre_docs = io.find({
"type": "representation",
"parent": {"$in": list(version_docs_by_id.keys())},
"name": {"$in": representations}
})
repre_docs_by_version_id = collections.defaultdict(list)
for repre_doc in repre_docs:
version_id = repre_doc["parent"]
repre_docs_by_version_id[version_id].append(repre_doc)
output_dict = {}
for version_id, repre_docs in repre_docs_by_version_id.items():
version_doc = version_docs_by_id[version_id]
subset_id = version_doc["parent"]
subset_doc = last_versions_by_subset_id[subset_id]
# Store queried docs by subset name
output_dict[subset_doc["name"]] = {
"representations": repre_docs,
"version": version_doc
}
return output_dict
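# --- Illustrative sketch only: the structure get_subsets() is expected to
# --- return for an asset with one published audio subset. Names and ids are
# --- hypothetical.
expected_output = {
    "audioMain": {
        "version": {"_id": "<version id>", "parent": "<subset id>", "name": 3},
        "representations": [
            {"_id": "<repre id>", "name": "wav", "parent": "<version id>"}
        ]
    }
}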

View file

@ -19,12 +19,16 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
if "unreal" in pyblish.api.registered_hosts():
return
assert context.data.get('currentFile'), "Cannot get curren file"
filename = os.path.basename(context.data.get('currentFile'))
if '<shell>' in filename:
return
rootVersion = int(pype.get_version_from_path(filename))
version = pype.get_version_from_path(filename)
assert version, "Cannot determine version"
rootVersion = int(version)
context.data['version'] = rootVersion
self.log.info("{}".format(type(rootVersion)))
self.log.info('Scene Version: %s' % context.data.get('version'))

View file

@ -1,7 +1,8 @@
import copy
import json
import os
import re
import json
import copy
import tempfile
import pyblish
@ -27,7 +28,7 @@ class ExtractBurnin(pype.api.Extractor):
"hiero",
"premiere",
"standalonepublisher",
"harmony"
"harmony",
"fusion"
]
optional = True
@ -158,6 +159,11 @@ class ExtractBurnin(pype.api.Extractor):
filled_anatomy = anatomy.format_all(burnin_data)
burnin_data["anatomy"] = filled_anatomy.get_solved()
# Add source camera name to burnin data
camera_name = repre.get("camera_name")
if camera_name:
burnin_data["camera_name"] = camera_name
first_output = True
files_to_delete = []
@ -223,12 +229,30 @@ class ExtractBurnin(pype.api.Extractor):
# Dump data to string
dumped_script_data = json.dumps(script_data)
# Store dumped json to temporary file
temporary_json_file = tempfile.NamedTemporaryFile(
mode="w", suffix=".json", delete=False
)
temporary_json_file.write(dumped_script_data)
temporary_json_file.close()
temporary_json_filepath = temporary_json_file.name.replace(
"\\", "/"
)
# Prepare subprocess arguments
args = [executable, scriptpath, dumped_script_data]
self.log.debug("Executing: {}".format(args))
args = [
"\"{}\"".format(executable),
"\"{}\"".format(scriptpath),
"\"{}\"".format(temporary_json_filepath)
]
subprcs_cmd = " ".join(args)
self.log.debug("Executing: {}".format(subprcs_cmd))
# Run burnin script
pype.api.subprocess(args, shell=True, logger=self.log)
pype.api.subprocess(subprcs_cmd, shell=True, logger=self.log)
# Remove the temporary json
os.remove(temporary_json_filepath)
for filepath in temp_data["full_input_paths"]:
filepath = filepath.replace("\\", "/")
@ -970,7 +994,7 @@ class ExtractBurnin(pype.api.Extractor):
args = [executable, scriptpath, json_data]
self.log.debug("Executing: {}".format(args))
output = pype.api.subprocess(args, shell=True)
output = pype.api.subprocess(args, shell=True, logger=self.log)
self.log.debug("Output: {}".format(output))
repre_update = {

View file

@ -73,7 +73,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
ffmpeg_args = self.ffmpeg_args or {}
jpeg_items = []
jpeg_items.append(ffmpeg_path)
jpeg_items.append("\"{}\"".format(ffmpeg_path))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov

View file

@ -467,7 +467,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
audio_filters.append(arg)
all_args = []
all_args.append(self.ffmpeg_path)
all_args.append("\"{}\"".format(self.ffmpeg_path))
all_args.extend(input_args)
if video_filters:
all_args.append("-filter:v {}".format(",".join(video_filters)))
@ -651,7 +651,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
# NOTE Skipped using instance's resolution
full_input_path_single_file = temp_data["full_input_path_single_file"]
input_data = pype.lib.ffprobe_streams(full_input_path_single_file)[0]
input_data = pype.lib.ffprobe_streams(
full_input_path_single_file, self.log
)[0]
input_width = int(input_data["width"])
input_height = int(input_data["height"])
@ -1544,7 +1546,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
os.mkdir(stg_dir)
mov_args = [
ffmpeg_path,
"\"{}\"".format(ffmpeg_path),
" ".join(input_args),
" ".join(output_args)
]

View file

@ -26,7 +26,7 @@ class ExtractReviewSlate(pype.api.Extractor):
slate_path = inst_data.get("slateFrame")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
slate_stream = pype.lib.ffprobe_streams(slate_path)[0]
slate_stream = pype.lib.ffprobe_streams(slate_path, self.log)[0]
slate_width = slate_stream["width"]
slate_height = slate_stream["height"]
@ -178,7 +178,7 @@ class ExtractReviewSlate(pype.api.Extractor):
_remove_at_end.append(slate_v_path)
slate_args = [
ffmpeg_path,
"\"{}\"".format(ffmpeg_path),
" ".join(input_args),
" ".join(output_args)
]
@ -299,7 +299,7 @@ class ExtractReviewSlate(pype.api.Extractor):
try:
# Get information about input file via ffprobe tool
streams = pype.lib.ffprobe_streams(full_input_path)
streams = pype.lib.ffprobe_streams(full_input_path, self.log)
except Exception:
self.log.warning(
"Could not get codec data from input.",

View file

@ -5,7 +5,6 @@ import os
import re
import shutil
import sys
from datetime import datetime
from os.path import getsize
import clique
@ -16,6 +15,8 @@ from avalon.vendor import filelink
from pymongo import DeleteOne, InsertOne
import pype.api
from datetime import datetime
from pype.api import config
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
@ -616,12 +617,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# copy file with speedcopy and check if size of files are simetrical
while True:
import shutil
try:
if not shutil._samefile(src, dst):
copyfile(src, dst)
except shutil.SameFileError:
self.log.critical("files are the same {} to {}".format(src,
dst))
else:
self.log.critical(
"files are the same {} to {}".format(src, dst)
)
os.remove(dst)
try:
shutil.copyfile(src, dst)
@ -923,11 +924,20 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
size(optional): size of file in bytes
file_hash(optional): hash of file for synchronization validation
sites(optional): array of published locations,
['studio': {'created_dt':date}] by default
[{'name': 'studio', 'created_dt': date}] by default
keys expected ['studio', 'site1', 'gdrive1']
Returns:
rec: dictionary with filled info
"""
try:
sync_server_presets = config.get_presets()["sync_server"]["config"]
except KeyError:
log.debug(("There are not set presets for SyncServer."
" No credentials provided, no synching possible").
format(str(sync_server_presets)))
local_site = sync_server_presets.get("active_site", "studio").strip()
remote_site = sync_server_presets.get("remote_site")
rec = {
"_id": io.ObjectId(),
@ -942,8 +952,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if sites:
rec["sites"] = sites
else:
meta = {"created_dt": datetime.now()}
rec["sites"] = {"studio": meta}
meta = {"name": local_site, "created_dt": datetime.now()}
rec["sites"] = [meta]
if remote_site:
meta = {"name": remote_site.strip()}
rec["sites"].append(meta)
return rec
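# --- Illustrative sketch only: a file record produced by the branch above
# --- when no explicit 'sites' are passed and the presets define a remote
# --- site. Values are hypothetical.
from datetime import datetime

rec_sketch = {
    "_id": "<ObjectId>",
    "path": "{root}/Test/Assets/Cylinder/publish/look/lookMain/v010/scene.ma",
    "size": 4822,
    "hash": "scene.ma|1592468963,24|4822",
    "sites": [
        {"name": "studio", "created_dt": datetime.now()},  # local copy exists
        {"name": "gdrive"}                                  # to be picked up by the Sync Server
    ]
}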

View file

@ -150,6 +150,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
deadline_pool_secondary = ""
deadline_group = ""
deadline_chunk_size = 1
deadline_priority = None
# regex for finding frame number in string
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
@ -901,6 +902,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
render_job["Props"]["User"] = context.data.get(
"deadlineUser", getpass.getuser())
# Priority is now not handled at all
if self.deadline_priority:
render_job["Props"]["Pri"] = self.deadline_priority
else:
render_job["Props"]["Pri"] = instance.data.get("priority")
render_job["Props"]["Env"] = {
@ -1023,6 +1028,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
version = pype.api.get_latest_version(asset, subset)
if version:
version = int(version["name"]) + 1
else:
version = 1
template_data["subset"] = subset
template_data["family"] = "render"
@ -1030,8 +1037,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
if "folder" in anatomy.templates["render"]:
publish_folder = anatomy_filled["render"]["folder"]
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
@ -1041,7 +1048,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
file_path = anatomy_filled["publish"]["path"]
file_path = anatomy_filled["render"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)

View file

@ -29,6 +29,6 @@ class ValidateFFmpegInstalled(pyblish.api.ContextPlugin):
def process(self, context):
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
self.log.info("ffmpeg path: `{}`".format(ffmpeg_path))
if self.is_tool(ffmpeg_path) is False:
if self.is_tool("{}".format(ffmpeg_path)) is False:
self.log.error("ffmpeg not found in PATH")
raise RuntimeError('ffmpeg not installed.')

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Create render node."""
from avalon import harmony
@ -10,17 +12,15 @@ class CreateRender(harmony.Creator):
node_type = "WRITE"
def __init__(self, *args, **kwargs):
"""Constructor."""
super(CreateRender, self).__init__(*args, **kwargs)
def setup_node(self, node):
sig = harmony.signature()
func = """function %s(args)
{
node.setTextAttr(args[0], "DRAWING_TYPE", 1, "PNG4");
node.setTextAttr(args[0], "DRAWING_NAME", 1, args[1]);
node.setTextAttr(args[0], "MOVIE_PATH", 1, args[1]);
}
%s
""" % (sig, sig)
"""Set render node."""
self_name = self.__class__.__name__
path = "{0}/{0}".format(node.split("/")[-1])
harmony.send({"function": func, "args": [node, path]})
harmony.send(
{
"function": f"PypeHarmony.Creators.{self_name}.create",
"args": [node, path]
})

View file

@ -1,277 +1,81 @@
# -*- coding: utf-8 -*-
"""Loader for image sequences."""
import os
import uuid
from pathlib import Path
import clique
from avalon import api, harmony
import pype.lib
copy_files = """function copyFile(srcFilename, dstFilename)
{
var srcFile = new PermanentFile(srcFilename);
var dstFile = new PermanentFile(dstFilename);
srcFile.copy(dstFile);
}
"""
import_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black
var TGATransparencyMode = 0; //Premultiplied wih Black
var SGITransparencyMode = 0; //Premultiplied wih Black
var LayeredPSDTransparencyMode = 1; //Straight
var FlatPSDTransparencyMode = 2; //Premultiplied wih White
function getUniqueColumnName( column_prefix )
{
var suffix = 0;
// finds if unique name for a column
var column_name = column_prefix;
while(suffix < 2000)
{
if(!column.type(column_name))
break;
suffix = suffix + 1;
column_name = column_prefix + "_" + suffix;
}
return column_name;
}
function import_files(args)
{
var root = args[0];
var files = args[1];
var name = args[2];
var start_frame = args[3];
var vectorFormat = null;
var extension = null;
var filename = files[0];
var pos = filename.lastIndexOf(".");
if( pos < 0 )
return null;
extension = filename.substr(pos+1).toLowerCase();
if(extension == "jpeg")
extension = "jpg";
if(extension == "tvg")
{
vectorFormat = "TVG"
extension ="SCAN"; // element.add() will use this.
}
var elemId = element.add(
name,
"BW",
scene.numberOfUnitsZ(),
extension.toUpperCase(),
vectorFormat
);
if (elemId == -1)
{
// hum, unknown file type most likely -- let's skip it.
return null; // no read to add.
}
var uniqueColumnName = getUniqueColumnName(name);
column.add(uniqueColumnName , "DRAWING");
column.setElementIdOfDrawing(uniqueColumnName, elemId);
var read = node.add(root, name, "READ", 0, 0, 0);
var transparencyAttr = node.getAttr(
read, frame.current(), "READ_TRANSPARENCY"
);
var opacityAttr = node.getAttr(read, frame.current(), "OPACITY");
transparencyAttr.setValue(true);
opacityAttr.setValue(true);
var alignmentAttr = node.getAttr(read, frame.current(), "ALIGNMENT_RULE");
alignmentAttr.setValue("ASIS");
var transparencyModeAttr = node.getAttr(
read, frame.current(), "applyMatteToColor"
);
if (extension == "png")
transparencyModeAttr.setValue(PNGTransparencyMode);
if (extension == "tga")
transparencyModeAttr.setValue(TGATransparencyMode);
if (extension == "sgi")
transparencyModeAttr.setValue(SGITransparencyMode);
if (extension == "psd")
transparencyModeAttr.setValue(FlatPSDTransparencyMode);
if (extension == "jpg")
transparencyModeAttr.setValue(LayeredPSDTransparencyMode);
node.linkAttr(read, "DRAWING.ELEMENT", uniqueColumnName);
if (files.length == 1)
{
// Create a drawing drawing, 'true' indicate that the file exists.
Drawing.create(elemId, 1, true);
// Get the actual path, in tmp folder.
var drawingFilePath = Drawing.filename(elemId, "1");
copyFile(files[0], drawingFilePath);
// Expose the image for the entire frame range.
for( var i =0; i <= frame.numberOf() - 1; ++i)
{
timing = start_frame + i
column.setEntry(uniqueColumnName, 1, timing, "1");
}
} else {
// Create a drawing for each file.
for( var i =0; i <= files.length - 1; ++i)
{
timing = start_frame + i
// Create a drawing drawing, 'true' indicate that the file exists.
Drawing.create(elemId, timing, true);
// Get the actual path, in tmp folder.
var drawingFilePath = Drawing.filename(elemId, timing.toString());
copyFile( files[i], drawingFilePath );
column.setEntry(uniqueColumnName, 1, timing, timing.toString());
}
}
var green_color = new ColorRGBA(0, 255, 0, 255);
node.setColor(read, green_color);
return read;
}
import_files
"""
replace_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black
var TGATransparencyMode = 0; //Premultiplied wih Black
var SGITransparencyMode = 0; //Premultiplied wih Black
var LayeredPSDTransparencyMode = 1; //Straight
var FlatPSDTransparencyMode = 2; //Premultiplied wih White
function replace_files(args)
{
var files = args[0];
MessageLog.trace(files);
MessageLog.trace(files.length);
var _node = args[1];
var start_frame = args[2];
var _column = node.linkedColumn(_node, "DRAWING.ELEMENT");
var elemId = column.getElementIdOfDrawing(_column);
// Delete existing drawings.
var timings = column.getDrawingTimings(_column);
for( var i =0; i <= timings.length - 1; ++i)
{
column.deleteDrawingAt(_column, parseInt(timings[i]));
}
var filename = files[0];
var pos = filename.lastIndexOf(".");
if( pos < 0 )
return null;
var extension = filename.substr(pos+1).toLowerCase();
if(extension == "jpeg")
extension = "jpg";
var transparencyModeAttr = node.getAttr(
_node, frame.current(), "applyMatteToColor"
);
if (extension == "png")
transparencyModeAttr.setValue(PNGTransparencyMode);
if (extension == "tga")
transparencyModeAttr.setValue(TGATransparencyMode);
if (extension == "sgi")
transparencyModeAttr.setValue(SGITransparencyMode);
if (extension == "psd")
transparencyModeAttr.setValue(FlatPSDTransparencyMode);
if (extension == "jpg")
transparencyModeAttr.setValue(LayeredPSDTransparencyMode);
if (files.length == 1)
{
// Create a drawing drawing, 'true' indicate that the file exists.
Drawing.create(elemId, 1, true);
// Get the actual path, in tmp folder.
var drawingFilePath = Drawing.filename(elemId, "1");
copyFile(files[0], drawingFilePath);
MessageLog.trace(files[0]);
MessageLog.trace(drawingFilePath);
// Expose the image for the entire frame range.
for( var i =0; i <= frame.numberOf() - 1; ++i)
{
timing = start_frame + i
column.setEntry(_column, 1, timing, "1");
}
} else {
// Create a drawing for each file.
for( var i =0; i <= files.length - 1; ++i)
{
timing = start_frame + i
// Create a drawing drawing, 'true' indicate that the file exists.
Drawing.create(elemId, timing, true);
// Get the actual path, in tmp folder.
var drawingFilePath = Drawing.filename(elemId, timing.toString());
copyFile( files[i], drawingFilePath );
column.setEntry(_column, 1, timing, timing.toString());
}
}
var green_color = new ColorRGBA(0, 255, 0, 255);
node.setColor(_node, green_color);
}
replace_files
"""
class ImageSequenceLoader(api.Loader):
"""Load images
"""Load image sequences.
Stores the imported asset in a container named after the asset.
"""
families = ["shot", "render", "image", "plate", "reference"]
representations = ["jpeg", "png", "jpg"]
def load(self, context, name=None, namespace=None, data=None):
"""Plugin entry point.
Args:
context (:class:`pyblish.api.Context`): Context.
name (str, optional): Container name.
namespace (str, optional): Container namespace.
data (dict, optional): Additional data passed into loader.
"""
fname = Path(self.fname)
self_name = self.__class__.__name__
collections, remainder = clique.assemble(
os.listdir(os.path.dirname(self.fname))
os.listdir(fname.parent.as_posix())
)
files = []
if collections:
for f in list(collections[0]):
files.append(
os.path.join(
os.path.dirname(self.fname), f
).replace("\\", "/")
)
files.append(fname.parent.joinpath(f).as_posix())
else:
files.append(
os.path.join(
os.path.dirname(self.fname), remainder[0]
).replace("\\", "/")
)
files.append(fname.parent.joinpath(remainder[0]).as_posix())
name = context["subset"]["name"]
name += "_{}".format(uuid.uuid4())
asset = context["asset"]["name"]
subset = context["subset"]["name"]
group_id = str(uuid.uuid4())
read_node = harmony.send(
{
"function": copy_files + import_files,
"args": ["Top", files, name, 1]
"function": f"PypeHarmony.Loaders.{self_name}.importFiles", # noqa: E501
"args": [
files,
asset,
subset,
1,
group_id
]
}
)["result"]
return harmony.containerise(
name,
f"{asset}_{subset}",
namespace,
read_node,
context,
self.__class__.__name__,
self_name,
nodes=[read_node]
)
def update(self, container, representation):
"""Update loaded containers.
Args:
container (dict): Container data.
representation (dict): Representation data.
"""
self_name = self.__class__.__name__
node = harmony.find_node_by_name(container["name"], "READ")
path = api.get_representation_path(representation)
@ -295,50 +99,42 @@ class ImageSequenceLoader(api.Loader):
harmony.send(
{
"function": copy_files + replace_files,
"function": f"PypeHarmony.Loaders.{self_name}.replaceFiles",
"args": [files, node, 1]
}
)
# Colour node.
sig = harmony.signature("copyFile")
func = """function %s(args){
for( var i =0; i <= args[0].length - 1; ++i)
{
var red_color = new ColorRGBA(255, 0, 0, 255);
var green_color = new ColorRGBA(0, 255, 0, 255);
if (args[1] == "red"){
node.setColor(args[0], red_color);
}
if (args[1] == "green"){
node.setColor(args[0], green_color);
}
}
}
%s
""" % (sig, sig)
if pype.lib.is_latest(representation):
harmony.send({"function": func, "args": [node, "green"]})
harmony.send(
{
"function": "PypeHarmony.setColor",
"args": [node, [0, 255, 0, 255]]
})
else:
harmony.send({"function": func, "args": [node, "red"]})
harmony.send(
{
"function": "PypeHarmony.setColor",
"args": [node, [255, 0, 0, 255]]
})
harmony.imprint(
node, {"representation": str(representation["_id"])}
)
def remove(self, container):
node = harmony.find_node_by_name(container["name"], "READ")
"""Remove loaded container.
Args:
container (dict): Container data.
func = """function deleteNode(_node)
{
node.deleteNode(_node, true, true);
}
deleteNode
"""
node = harmony.find_node_by_name(container["name"], "READ")
harmony.send(
{"function": func, "args": [node]}
{"function": "PypeHarmony.deleteNode", "args": [node]}
)
harmony.imprint(node, {}, remove=True)
def switch(self, container, representation):
"""Switch loaded representations."""
self.update(container, representation)


@ -2,13 +2,12 @@ import os
import shutil
from avalon import api, harmony
from avalon.vendor import Qt
class ImportPaletteLoader(api.Loader):
"""Import palettes."""
families = ["harmony.palette"]
families = ["palette"]
representations = ["plt"]
label = "Import Palette"
@ -41,14 +40,14 @@ class ImportPaletteLoader(api.Loader):
harmony.save_scene()
# Don't allow instances with the same name.
message_box = Qt.QtWidgets.QMessageBox()
message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
msg = "Updated {}.".format(subset_name)
msg += " You need to reload the scene to see the changes."
message_box.setText(msg)
message_box.exec_()
harmony.send(
{
"function": "PypeHarmony.message",
"args": msg
})
return name
def remove(self, container):


@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-
"""Load template."""
import tempfile
import zipfile
import os
import shutil
import uuid
from avalon import api, harmony
import pype.lib
class TemplateLoader(api.Loader):
"""Load Harmony template as container.
.. todo::
This must be implemented properly.
"""
families = ["template", "workfile"]
representations = ["*"]
label = "Load Template"
icon = "gift"
def load(self, context, name=None, namespace=None, data=None):
"""Plugin entry point.
Args:
context (:class:`pyblish.api.Context`): Context.
name (str, optional): Container name.
namespace (str, optional): Container namespace.
data (dict, optional): Additional data passed into loader.
"""
# Load template.
self_name = self.__class__.__name__
temp_dir = tempfile.mkdtemp()
zip_file = api.get_representation_path(context["representation"])
template_path = os.path.join(temp_dir, "temp.tpl")
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(template_path)
group_id = "{}".format(uuid.uuid4())
container_group = harmony.send(
{
"function": f"PypeHarmony.Loaders.{self_name}.loadContainer",
"args": [template_path,
context["asset"]["name"],
context["subset"]["name"],
group_id]
}
)["result"]
# Cleanup the temp directory
shutil.rmtree(temp_dir)
# We must validate the group_node
return harmony.containerise(
name,
namespace,
container_group,
context,
self_name
)
def update(self, container, representation):
"""Update loaded containers.
Args:
container (dict): Container data.
representation (dict): Representation data.
"""
node_name = container["name"]
node = harmony.find_node_by_name(node_name, "GROUP")
self_name = self.__class__.__name__
update_and_replace = False
if pype.lib.is_latest(representation):
self._set_green(node)
else:
self._set_red(node)
update_and_replace = harmony.send(
{
"function": f"PypeHarmony.Loaders.{self_name}."
"askForColumnsUpdate",
"args": []
}
)["result"]
if update_and_replace:
# FIXME: This won't work, need to implement it.
harmony.send(
{
"function": f"PypeHarmony.Loaders.{self_name}."
"replaceNode",
"args": []
}
)
else:
self.load(
container["context"], container["name"],
None, container["data"])
harmony.imprint(
node, {"representation": str(representation["_id"])}
)
def remove(self, container):
"""Remove container.
Args:
container (dict): container definition.
"""
node = harmony.find_node_by_name(container["name"], "GROUP")
harmony.send(
{"function": "PypeHarmony.deleteNode", "args": [node]}
)
def switch(self, container, representation):
"""Switch representation containers."""
self.update(container, representation)
def _set_green(self, node):
"""Set node color to green `rgba(0, 255, 0, 255)`."""
harmony.send(
{
"function": "PypeHarmony.setColor",
"args": [node, [0, 255, 0, 255]]
})
def _set_red(self, node):
"""Set node color to red `rgba(255, 0, 0, 255)`."""
harmony.send(
{
"function": "PypeHarmony.setColor",
"args": [node, [255, 0, 0, 255]]
})


@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Collect information about current file."""
import os
import pyblish.api
@ -5,24 +7,16 @@ from avalon import harmony
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
"""Inject the current working file into context."""
order = pyblish.api.CollectorOrder - 0.5
label = "Current File"
hosts = ["harmony"]
def process(self, context):
"""Inject the current working file"""
sig = harmony.signature()
func = """function %s()
{
return (
scene.currentProjectPath() + "/" +
scene.currentVersionName() + ".xstage"
);
}
%s
""" % (sig, sig)
"""Inject the current working file."""
self_name = self.__class__.__name__
current_file = harmony.send({"function": func})["result"]
current_file = harmony.send(
{"function": f"PypeHarmony.Publish.{self_name}.collect"})["result"]
context.data["currentFile"] = os.path.normpath(current_file)


@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Collect instances in Harmony."""
import json
import pyblish.api
@ -8,7 +10,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by nodes metadata.
This collector takes into account assets that are associated with
a composite node and marked with a unique identifier;
a composite node and marked with a unique identifier.
Identifier:
id (str): "pyblish.avalon.instance"
@ -19,10 +21,19 @@ class CollectInstances(pyblish.api.ContextPlugin):
hosts = ["harmony"]
families_mapping = {
"render": ["imagesequence", "review", "ftrack"],
"harmony.template": []
"harmony.template": [],
"palette": ["palette", "ftrack"]
}
pair_media = True
def process(self, context):
"""Plugin entry point.
Args:
context (:class:`pyblish.api.Context`): Context data.
"""
nodes = harmony.send(
{"function": "node.subNodes", "args": ["Top"]}
)["result"]
@ -46,6 +57,11 @@ class CollectInstances(pyblish.api.ContextPlugin):
)["result"]
instance.data["families"] = self.families_mapping[data["family"]]
# If set in plugin, pair the scene Version in ftrack with
# thumbnails and review media.
if (self.pair_media and instance.data["family"] == "scene"):
context.data["scene_instance"] = instance
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
self.log.info(


@ -1,5 +1,7 @@
import json
# -*- coding: utf-8 -*-
"""Collect palettes from Harmony."""
import os
import json
import pyblish.api
from avalon import harmony
@ -13,23 +15,12 @@ class CollectPalettes(pyblish.api.ContextPlugin):
hosts = ["harmony"]
def process(self, context):
sig = harmony.signature()
func = """function %s()
"""Collector entry point."""
self_name = self.__class__.__name__
palettes = harmony.send(
{
var palette_list = PaletteObjectManager.getScenePaletteList();
var palettes = {};
for(var i=0; i < palette_list.numPalettes; ++i)
{
var palette = palette_list.getPaletteByIndex(i);
palettes[palette.getName()] = palette.id;
}
return palettes;
}
%s
""" % (sig, sig)
palettes = harmony.send({"function": func})["result"]
"function": f"PypeHarmony.Publish.{self_name}.getPalettes",
})["result"]
for name, id in palettes.items():
instance = context.create_instance(name)
@ -37,7 +28,7 @@ class CollectPalettes(pyblish.api.ContextPlugin):
"id": id,
"family": "harmony.palette",
"asset": os.environ["AVALON_ASSET"],
"subset": "palette" + name
"subset": "{}{}".format("palette", name)
})
self.log.info(
"Created instance:\n" + json.dumps(


@ -14,26 +14,11 @@ class CollectScene(pyblish.api.ContextPlugin):
hosts = ["harmony"]
def process(self, context):
sig = harmony.signature()
func = """function %s()
{
return [
about.getApplicationPath(),
scene.currentProjectPath(),
scene.currentScene(),
scene.getFrameRate(),
scene.getStartFrame(),
scene.getStopFrame(),
sound.getSoundtrackAll().path(),
scene.defaultResolutionX(),
scene.defaultResolutionY()
]
}
%s
""" % (sig, sig)
"""Plugin entry point."""
result = harmony.send(
{"function": func, "args": []}
{
f"function": "PypeHarmony.getSceneSettings",
"args": []}
)["result"]
context.data["applicationPath"] = result[0]


@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Collect current workfile from Harmony."""
import pyblish.api
import os
@ -10,10 +12,12 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
hosts = ["harmony"]
def process(self, context):
"""Plugin entry point."""
family = "workfile"
task = os.getenv("AVALON_TASK", None)
subset = family + task.capitalize()
sanitized_task_name = task[0].upper() + task[1:]
basename = os.path.basename(context.data["currentFile"])
subset = "{}{}".format(family, sanitized_task_name)
# Create instance
instance = context.create_instance(subset)


@ -1,4 +1,9 @@
# -*- coding: utf-8 -*-
"""Extract palette from Harmony."""
import os
import csv
from PIL import Image, ImageDraw, ImageFont
from avalon import harmony
@ -14,18 +19,53 @@ class ExtractPalette(pype.api.Extractor):
families = ["harmony.palette"]
def process(self, instance):
sig = harmony.signature()
func = """function %s(args)
"""Plugin entry point."""
self_name = self.__class__.__name__
result = harmony.send(
{
var palette_list = PaletteObjectManager.getScenePaletteList();
var palette = palette_list.getPaletteById(args[0]);
return (palette.getPath() + "/" + palette.getName() + ".plt");
"function": f"PypeHarmony.Publish.{self_name}.getPalette",
"args": instance.data["id"]
})["result"]
if not isinstance(result, list):
self.log.error(f"Invalid reply: {result}")
raise AssertionError("Invalid reply from server.")
palette_name = result[0]
palette_file = result[1]
self.log.info(f"Got palette named {palette_name} "
f"and file {palette_file}.")
tmp_thumb_path = os.path.join(os.path.dirname(palette_file),
os.path.basename(palette_file)
.split(".plt")[0] + "_swatches.png"
)
self.log.info(f"Temporary humbnail path {tmp_thumb_path}")
palette_version = str(instance.data.get("version")).zfill(3)
self.log.info(f"Palette version {palette_version}")
if not instance.data.get("representations"):
instance.data["representations"] = []
try:
thumbnail_path = self.create_palette_thumbnail(palette_name,
palette_version,
palette_file,
tmp_thumb_path)
except ValueError:
self.log.error("Unsupported palette type for thumbnail.")
else:
thumbnail = {
"name": "thumbnail",
"ext": "png",
"files": os.path.basename(thumbnail_path),
"stagingDir": os.path.dirname(thumbnail_path),
"tags": ["thumbnail"]
}
%s
""" % (sig, sig)
palette_file = harmony.send(
{"function": func, "args": [instance.data["id"]]}
)["result"]
instance.data["representations"].append(thumbnail)
representation = {
"name": "plt",
@ -33,4 +73,130 @@ class ExtractPalette(pype.api.Extractor):
"files": os.path.basename(palette_file),
"stagingDir": os.path.dirname(palette_file)
}
instance.data["representations"] = [representation]
instance.data["representations"].append(representation)
def create_palette_thumbnail(self,
palette_name,
palette_version,
palette_path,
dst_path):
"""Create thumbnail for palette file.
Args:
palette_name (str): Name of palette.
palette_version (str): Version of palette.
palette_path (str): Path to palette file.
dst_path (str): Thumbnail path.
Returns:
str: Thumbnail path.
"""
colors = {}
with open(palette_path, newline='') as plt:
plt_parser = csv.reader(plt, delimiter=" ")
for i, line in enumerate(plt_parser):
if i == 0:
continue
while ("" in line):
line.remove("")
# self.log.debug(line)
if line[0] not in ["Solid"]:
raise ValueError("Unsupported palette type.")
color_name = line[1].strip('"')
colors[color_name] = {"type": line[0],
"uuid": line[2],
"rgba": (int(line[3]),
int(line[4]),
int(line[5]),
int(line[6])),
}
plt.close()
img_pad_top = 80
label_pad_name = 30
label_pad_rgb = 580
swatch_pad_left = 300
swatch_pad_top = 10
swatch_w = 120
swatch_h = 50
image_w = 800
image_h = (img_pad_top +
(len(colors.keys()) *
swatch_h) +
(swatch_pad_top *
len(colors.keys()))
)
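# Thumbnail height: top padding for the title plus one swatch row
# (swatch height + row padding) per colour in the palette.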
img = Image.new("RGBA", (image_w, image_h), "white")
# For bg of colors with alpha, create checkerboard image
checkers = Image.new("RGB", (swatch_w, swatch_h))
pixels = checkers.load()
# Make pixels white where (row+col) is odd
for i in range(swatch_w):
for j in range(swatch_h):
if (i + j) % 2:
pixels[i, j] = (255, 255, 255)
draw = ImageDraw.Draw(img)
# TODO: This needs to be font included with Pype because
# arial is not available on other platforms than Windows.
title_font = ImageFont.truetype("arial.ttf", 28)
label_font = ImageFont.truetype("arial.ttf", 20)
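# NOTE: a hypothetical cross-platform alternative would be the bundled
# Liberation font added elsewhere in this commit, e.g.
# pype.api.resources.get_liberation_font_path(), instead of relying on
# arial.ttf being installed.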
draw.text((label_pad_name, 20),
"{} (v{})".format(palette_name, palette_version),
"black",
font=title_font)
for i, name in enumerate(colors):
rgba = colors[name]["rgba"]
# @TODO: Fix this so alpha colors are displayed with checkerboard
# if not rgba[3] == "255":
# img.paste(checkers,
# (swatch_pad_left,
# img_pad_top + swatch_pad_top + (i * swatch_h))
# )
#
# half_y = (img_pad_top + swatch_pad_top + (i * swatch_h))/2
#
# draw.rectangle((
# swatch_pad_left, # upper LX
# img_pad_top + swatch_pad_top + (i * swatch_h), # upper LY
# swatch_pad_left + (swatch_w * 2), # lower RX
# half_y), # lower RY
# fill=rgba[:-1], outline=(0, 0, 0), width=2)
# draw.rectangle((
# swatch_pad_left, # upper LX
# half_y, # upper LY
# swatch_pad_left + (swatch_w * 2), # lower RX
# img_pad_top + swatch_h + (i * swatch_h)), # lower RY
# fill=rgba, outline=(0, 0, 0), width=2)
# else:
draw.rectangle((
swatch_pad_left, # upper left x
img_pad_top + swatch_pad_top + (i * swatch_h), # upper left y
swatch_pad_left + (swatch_w * 2), # lower right x
img_pad_top + swatch_h + (i * swatch_h)), # lower right y
fill=rgba, outline=(0, 0, 0), width=2)
draw.text((label_pad_name, img_pad_top + (i * swatch_h) + swatch_pad_top + (swatch_h / 4)), # noqa: E501
name,
"black",
font=label_font)
draw.text((label_pad_rgb, img_pad_top + (i * swatch_h) + swatch_pad_top + (swatch_h / 4)), # noqa: E501
str(rgba),
"black",
font=label_font)
draw = ImageDraw.Draw(img)
img.save(dst_path)
return dst_path


@ -90,7 +90,7 @@ class ExtractRender(pyblish.api.InstancePlugin):
thumbnail_path = os.path.join(path, "thumbnail.png")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
args = [
ffmpeg_path, "-y",
"{}".format(ffmpeg_path), "-y",
"-i", os.path.join(path, list(collections[0])[0]),
"-vf", "scale=300:-1",
"-vframes", "1",


@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Extract template."""
import os
import shutil
@ -15,6 +17,7 @@ class ExtractTemplate(pype.api.Extractor):
families = ["harmony.template"]
def process(self, instance):
"""Plugin entry point."""
staging_dir = self.staging_dir(instance)
filepath = os.path.join(staging_dir, f"{instance.name}.tpl")
@ -62,60 +65,49 @@ class ExtractTemplate(pype.api.Extractor):
"files": f"{instance.name}.zip",
"stagingDir": staging_dir
}
self.log.info(instance.data.get("representations"))
if instance.data.get("representations"):
instance.data["representations"].extend([representation])
else:
instance.data["representations"] = [representation]
def get_backdrops(self, node):
sig = harmony.signature()
func = """function %s(probe_node)
{
var backdrops = Backdrop.backdrops("Top");
var valid_backdrops = [];
for(var i=0; i<backdrops.length; i++)
{
var position = backdrops[i].position;
instance.data["version_name"] = "{}_{}".format(
instance.data["subset"], os.environ["AVALON_TASK"])
var x_valid = false;
var node_x = node.coordX(probe_node);
if (position.x < node_x && node_x < (position.x + position.w)){
x_valid = true
};
def get_backdrops(self, node: str) -> list:
"""Get backdrops for the node.
var y_valid = false;
var node_y = node.coordY(probe_node);
if (position.y < node_y && node_y < (position.y + position.h)){
y_valid = true
};
Args:
node (str): Node path.
if (x_valid && y_valid){
valid_backdrops.push(backdrops[i])
};
}
return valid_backdrops;
}
%s
""" % (sig, sig)
return harmony.send(
{"function": func, "args": [node]}
)["result"]
Returns:
list: list of Backdrops.
def get_dependencies(self, node, dependencies):
sig = harmony.signature()
func = """function %s(args)
{
var target_node = args[0];
var numInput = node.numberOfInputPorts(target_node);
var dependencies = [];
for (var i = 0 ; i < numInput; i++)
{
dependencies.push(node.srcNode(target_node, i));
}
return dependencies;
}
%s
""" % (sig, sig)
"""
self_name = self.__class__.__name__
return harmony.send({
"function": f"PypeHarmony.Publish.{self_name}.getBackdropsByNode",
"args": node})["result"]
def get_dependencies(
self, node: str, dependencies: list = None) -> list:
"""Get node dependencies.
This will return recursive dependency list of given node.
Args:
node (str): Path to the node.
dependencies (list, optional): existing dependency list.
Returns:
list: List of dependent nodes.
"""
current_dependencies = harmony.send(
{"function": func, "args": [node]}
{
"function": "PypeHarmony.getDependencies",
"args": node}
)["result"]
for dependency in current_dependencies:


@ -130,7 +130,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
# check if audio stream is in input video file
ffprob_cmd = (
"{ffprobe_path} -i \"{full_input_path}\" -show_streams "
"\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
" -select_streams a -loglevel error"
).format(**locals())
@ -171,7 +171,8 @@ class ExtractReviewCutUp(pype.api.Extractor):
# try to get video native resolution data
try:
resolution_output = pype.api.subprocess((
"{ffprobe_path} -i \"{full_input_path}\" -v error "
"\"{ffprobe_path}\" -i \"{full_input_path}\""
" -v error "
"-select_streams v:0 -show_entries "
"stream=width,height -of csv=s=x:p=0"
).format(**locals()))
@ -274,7 +275,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
output_args.append("-y \"{}\"".format(full_output_path))
mov_args = [
ffmpeg_path,
"\"{}\"".format(ffmpeg_path),
" ".join(input_args),
" ".join(output_args)
]


@ -1,61 +0,0 @@
import os
import acre
from avalon import api, lib
import pype.api as pype
from pype.aport import lib as aportlib
log = pype.Logger().get_logger(__name__, "aport")
class Aport(api.Action):
name = "aport"
label = "Aport - Avalon's Server"
icon = "retweet"
order = 996
def is_compatible(self, session):
"""Return whether the action is compatible with the session"""
if "AVALON_TASK" in session:
return True
return False
def process(self, session, **kwargs):
"""Implement the behavior for when the action is triggered
Args:
session (dict): environment dictionary
Returns:
Popen instance of newly spawned process
"""
with pype.modified_environ(**session):
# Get executable by name
print(self.name)
app = lib.get_application(self.name)
executable = lib.which(app["executable"])
# Run as server
arguments = []
tools_env = acre.get_tools([self.name])
env = acre.compute(tools_env)
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
os.environ["AVALON_WORKDIR"] = aportlib.get_workdir_template()
env.update(dict(os.environ))
try:
lib.launch(
executable=executable,
args=arguments,
environment=env
)
except Exception as e:
log.error(e)
return


@ -1,83 +0,0 @@
import os
import acre
from avalon import api, lib, io
import pype.api as pype
class PremierePro(api.Action):
name = "premiere_2019"
label = "Premiere Pro"
icon = "premiere_icon"
order = 996
def is_compatible(self, session):
"""Return whether the action is compatible with the session"""
if "AVALON_TASK" in session:
return True
return False
def process(self, session, **kwargs):
"""Implement the behavior for when the action is triggered
Args:
session (dict): environment dictionary
Returns:
Popen instance of newly spawned process
"""
with pype.modified_environ(**session):
# Get executable by name
app = lib.get_application(self.name)
executable = lib.which(app["executable"])
# Run as server
arguments = []
tools_env = acre.get_tools([self.name])
env = acre.compute(tools_env)
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
project_name = env.get("AVALON_PROJECT")
anatomy = pype.Anatomy(project_name)
os.environ['AVALON_PROJECT'] = project_name
io.Session['AVALON_PROJECT'] = project_name
task_name = os.environ.get(
"AVALON_TASK", io.Session["AVALON_TASK"]
)
asset_name = os.environ.get(
"AVALON_ASSET", io.Session["AVALON_ASSET"]
)
application = lib.get_application(
os.environ["AVALON_APP_NAME"]
)
project_doc = io.find_one({"type": "project"})
data = {
"task": task_name,
"asset": asset_name,
"project": {
"name": project_doc["name"],
"code": project_doc["data"].get("code", '')
},
"hierarchy": pype.get_hierarchy(),
"app": application["application_dir"]
}
anatomy_filled = anatomy.format(data)
workdir = anatomy_filled["work"]["folder"]
os.environ["AVALON_WORKDIR"] = workdir
env.update(dict(os.environ))
lib.launch(
executable=executable,
args=arguments,
environment=env
)
return

26
pype/plugins/lib.py Normal file

@ -0,0 +1,26 @@
import re
def get_unique_layer_name(layers, asset_name, subset_name):
"""
Gets all layer names and, if 'name' is already present among them,
increases the numeric suffix by 1 (eg. creates a unique layer name for the Loader)
Args:
layers (list): of namedtuples, expects 'name' field present
asset_name (string): in format asset_subset (Hero)
subset_name (string): (LOD)
Returns:
(string): name_00X (without version)
"""
name = "{}_{}".format(asset_name, subset_name)
names = {}
for layer in layers:
layer_name = re.sub(r'_\d{3}$', '', layer.name)
if layer_name in names.keys():
names[layer_name] = names[layer_name] + 1
else:
names[layer_name] = 1
occurrences = names.get(name, 0)
return "{}_{:0>3d}".format(name, occurrences + 1)


@ -4,14 +4,70 @@ import maya.cmds as cmds
from avalon import api, io
from avalon.maya.pipeline import containerise
from avalon.maya import lib
from Qt import QtWidgets
from Qt import QtWidgets, QtCore
class CameraWindow(QtWidgets.QDialog):
def __init__(self, cameras):
super(CameraWindow, self).__init__()
self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)
self.camera = None
self.widgets = {
"label": QtWidgets.QLabel("Select camera for image plane."),
"list": QtWidgets.QListWidget(),
"warning": QtWidgets.QLabel("No cameras selected!"),
"buttons": QtWidgets.QWidget(),
"okButton": QtWidgets.QPushButton("Ok"),
"cancelButton": QtWidgets.QPushButton("Cancel")
}
# Build warning.
self.widgets["warning"].setVisible(False)
self.widgets["warning"].setStyleSheet("color: red")
# Build list.
for camera in cameras:
self.widgets["list"].addItem(camera)
# Build buttons.
layout = QtWidgets.QHBoxLayout(self.widgets["buttons"])
layout.addWidget(self.widgets["okButton"])
layout.addWidget(self.widgets["cancelButton"])
# Build layout.
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(self.widgets["label"])
layout.addWidget(self.widgets["list"])
layout.addWidget(self.widgets["buttons"])
layout.addWidget(self.widgets["warning"])
self.widgets["okButton"].pressed.connect(self.on_ok_pressed)
self.widgets["cancelButton"].pressed.connect(self.on_cancel_pressed)
self.widgets["list"].itemPressed.connect(self.on_list_itemPressed)
def on_list_itemPressed(self, item):
self.camera = item.text()
def on_ok_pressed(self):
if self.camera is None:
self.widgets["warning"].setVisible(True)
return
self.close()
def on_cancel_pressed(self):
self.camera = None
self.close()
class ImagePlaneLoader(api.Loader):
"""Specific loader of plate for image planes on selected camera."""
families = ["plate", "render"]
label = "Create imagePlane on selected camera."
label = "Load imagePlane."
representations = ["mov", "exr", "preview", "png"]
icon = "image"
color = "orange"
@ -26,43 +82,24 @@ class ImagePlaneLoader(api.Loader):
suffix="_",
)
# Getting camera from selection.
selection = pc.ls(selection=True)
# Get camera from user selection.
camera = None
default_cameras = [
"frontShape", "perspShape", "sideShape", "topShape"
]
cameras = [
x for x in pc.ls(type="camera") if x.name() not in default_cameras
]
camera_names = {x.getParent().name(): x for x in cameras}
camera_names["Create new camera."] = "create_camera"
window = CameraWindow(camera_names.keys())
window.exec_()
camera = camera_names[window.camera]
if len(selection) > 1:
QtWidgets.QMessageBox.critical(
None,
"Error!",
"Multiple nodes selected. Please select only one.",
QtWidgets.QMessageBox.Ok
)
return
if len(selection) < 1:
result = QtWidgets.QMessageBox.critical(
None,
"Error!",
"No camera selected. Do you want to create a camera?",
QtWidgets.QMessageBox.Ok,
QtWidgets.QMessageBox.Cancel
)
if result == QtWidgets.QMessageBox.Ok:
if camera == "create_camera":
camera = pc.createNode("camera")
else:
return
else:
relatives = pc.listRelatives(selection[0], shapes=True)
if pc.ls(relatives, type="camera"):
camera = selection[0]
else:
QtWidgets.QMessageBox.critical(
None,
"Error!",
"Selected node is not a camera.",
QtWidgets.QMessageBox.Ok
)
if camera is None:
return
try:
@ -100,10 +137,16 @@ class ImagePlaneLoader(api.Loader):
# Ensure OpenEXRLoader plugin is loaded.
pc.loadPlugin("OpenEXRLoader.mll", quiet=True)
message = (
"Hold image sequence on first frame?"
"\n{} files available.".format(
len(context["representation"]["files"])
)
)
reply = QtWidgets.QMessageBox.information(
None,
"Frame Hold.",
"Hold image sequence on first frame?",
message,
QtWidgets.QMessageBox.Ok,
QtWidgets.QMessageBox.Cancel
)


@ -20,7 +20,8 @@ class CollectFtrackFamilies(pyblish.api.InstancePlugin):
"model",
"animation",
"look",
"rig"
"rig",
"camera"
]
def process(self, instance):


@ -18,7 +18,12 @@ class CollectRemoveMarked(pyblish.api.ContextPlugin):
def process(self, context):
self.log.debug(context)
# make ftrack publishable
instances_to_remove = []
for instance in context:
if instance.data.get('remove'):
instances_to_remove.append(instance)
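# Remove in a second pass so the context is not mutated while iterating.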
for instance in instances_to_remove:
context.remove(instance)


@ -253,6 +253,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
if self.sync_workfile_version:
data["version"] = context.data["version"]
for instance in context:
if instance.data['family'] == "workfile":
instance.data["version"] = context.data["version"]
# Apply each user defined attribute as data
for attr in cmds.listAttr(layer, userDefined=True) or list():
try:


@ -43,16 +43,21 @@ class CollectReview(pyblish.api.InstancePlugin):
i = 0
for inst in instance.context:
self.log.debug('processing {}'.format(inst))
self.log.debug('processing2 {}'.format(instance.context[i]))
self.log.debug('filtering {}'.format(inst))
data = instance.context[i].data
if inst.name == reviewable_subset[0]:
if inst.name != reviewable_subset[0]:
self.log.debug('subset name does not match {}'.format(
reviewable_subset[0]))
i += 1
continue
if data.get('families'):
data['families'].append('review')
else:
data['families'] = ['review']
self.log.debug('adding review family to {}'.format(reviewable_subset))
self.log.debug('adding review family to {}'.format(
reviewable_subset))
data['review_camera'] = camera
# data["publish"] = False
data['frameStartFtrack'] = instance.data["frameStartHandle"]
@ -69,7 +74,7 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug('data {}'.format(instance.context[i].data))
instance.context[i].data.update(data)
instance.data['remove'] = True
i += 1
self.log.debug('instance data {}'.format(instance.data))
else:
if self.legacy:
instance.data['subset'] = task + 'Review'
@ -82,8 +87,10 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data['subset'] = subset
instance.data['review_camera'] = camera
instance.data['frameStartFtrack'] = instance.data["frameStartHandle"]
instance.data['frameEndFtrack'] = instance.data["frameEndHandle"]
instance.data['frameStartFtrack'] = \
instance.data["frameStartHandle"]
instance.data['frameEndFtrack'] = \
instance.data["frameEndHandle"]
# make ftrack publishable
instance.data["families"] = ['ftrack']


@ -6,7 +6,6 @@ from maya import cmds
import pyblish.api
from pype.hosts.maya import lib
from pype.lib import pairwise
SETTINGS = {"renderDensity",
@ -78,7 +77,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
connections = cmds.ls(connections, long=True) # Ensure long names
inputs = []
for dest, src in pairwise(connections):
for dest, src in lib.pairwise(connections):
source_node, source_attr = src.split(".", 1)
dest_node, dest_attr = dest.split(".", 1)


@ -26,7 +26,15 @@ class ExtractCameraAlembic(pype.api.Extractor):
# get settings
framerange = [instance.data.get("frameStart", 1),
instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
handle_start = instance.data.get("handleStart")
handle_end = instance.data.get("handleEnd")
# TODO: deprecated attribute "handles"
if handle_start is None:
handle_start = instance.data.get("handles", 0)
handle_end = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
@ -55,8 +63,10 @@ class ExtractCameraAlembic(pype.api.Extractor):
job_str = ' -selection -dataFormat "ogawa" '
job_str += ' -attrPrefix cb'
job_str += ' -frameRange {0} {1} '.format(framerange[0] - handles,
framerange[1] + handles)
job_str += ' -frameRange {0} {1} '.format(framerange[0]
- handle_start,
framerange[1]
+ handle_end)
job_str += ' -step {0} '.format(step)
if bake_to_worldspace:


@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
"""Extract camera as Maya Scene."""
import os
import itertools
from maya import cmds
import avalon.maya
import pype.api
from pype.lib import grouper
from pype.hosts.maya import lib
@ -36,6 +36,17 @@ def massage_ma_file(path):
f.close()
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks.
Examples:
grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def unlock(plug):
"""Unlocks attribute and disconnects inputs for a plug.
@ -107,7 +118,18 @@ class ExtractCameraMayaScene(pype.api.Extractor):
framerange = [instance.data.get("frameStart", 1),
instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
handle_start = instance.data.get("handleStart")
handle_end = instance.data.get("handleEnd")
# TODO: deprecated attribute "handles"
if handle_start is None:
handle_start = instance.data.get("handles", 0)
handle_end = instance.data.get("handles", 0)
range_with_handles = [framerange[0] - handle_start,
framerange[1] + handle_end]
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
@ -121,9 +143,6 @@ class ExtractCameraMayaScene(pype.api.Extractor):
cameras = cmds.ls(members, leaf=True, shapes=True, long=True,
dag=True, type="camera")
range_with_handles = [framerange[0] - handles,
framerange[1] + handles]
# validate required settings
assert len(cameras) == 1, "Single camera must be found in extraction"
assert isinstance(step, float), "Step must be a float value"


@ -110,6 +110,9 @@ class ExtractPlayblast(pype.api.Extractor):
if not instance.data.get("keepImages"):
tags.append("delete")
# Add camera node name to representation data
camera_node_name = pm.ls(camera)[0].getTransform().getName()
representation = {
'name': 'png',
'ext': 'png',
@ -119,7 +122,8 @@ class ExtractPlayblast(pype.api.Extractor):
"frameEnd": end,
'fps': fps,
'preview': True,
'tags': tags
'tags': tags,
'camera_name': camera_node_name
}
instance.data["representations"].append(representation)


@ -41,7 +41,7 @@ def preserve_trim(node):
"{}".format(script_start))
def loader_shift(node, frame, relative=True):
def loader_shift(node, frame, relative=False):
"""Shift global in time by i preserving duration
This moves the loader by i frames preserving global duration. When relative
@ -61,11 +61,12 @@ def loader_shift(node, frame, relative=True):
script_start = nuke.root()["first_frame"].value()
if relative:
node['frame_mode'].setValue("start at")
node['frame'].setValue(str(script_start))
else:
node['frame_mode'].setValue("start at")
node['frame'].setValue(str(frame))
return int(script_start)
class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
@ -73,10 +74,10 @@ class LoadSequence(api.Loader):
families = ["render2d", "source", "plate", "render", "prerender", "review"]
representations = ["exr", "dpx", "jpg", "jpeg", "png"]
label = "Load sequence"
order = -10
icon = "code-fork"
color = "orange"
label = "Load Image Sequence"
order = -20
icon = "file-video-o"
color = "white"
def load(self, context, name, namespace, data):
from avalon.nuke import (


@ -34,15 +34,18 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin):
# test if render in family test knob
# and only one item should be available
assert len(family_test) != 1, msg
assert "render" in node[family_test[0]].value(), msg
assert len(family_test) == 1, msg + " > More avalon attributes"
assert "render" in node[family_test[0]].value(), msg + \
" > Not correct family"
# test if `file` knob in node, this way old
# non-group-node write could be detected
assert "file" in node.knobs(), msg
assert "file" not in node.knobs(), msg + \
" > file knob should not be present"
# check if write node is having old render targeting
assert "render_farm" in node.knobs(), msg
assert "render_farm" not in node.knobs(), msg + \
" > old way of setting render target"
@classmethod
def repair(cls, instance):


@ -11,7 +11,7 @@ class ImageLoader(api.Loader):
Stores the imported asset in a container named after the asset.
"""
families = ["image"]
families = ["image", "render"]
representations = ["*"]
def load(self, context, name=None, namespace=None, data=None):


@ -34,8 +34,6 @@ class ExtractImage(pype.api.Extractor):
# limit unnecessary calls to client
if layer.visible and layer.id not in extract_ids:
stub.set_visible(layer.id, False)
if not layer.visible and layer.id in extract_ids:
stub.set_visible(layer.id, True)
save_options = []
if "png" in self.formats:


@ -38,8 +38,6 @@ class ExtractReview(pype.api.Extractor):
# limit unnecessary calls to client
if layer.visible and layer.id not in extract_ids:
stub.set_visible(layer.id, False)
if not layer.visible and layer.id in extract_ids:
stub.set_visible(layer.id, True)
stub.saveAs(output_image_path, 'jpg', True)
@ -56,7 +54,7 @@ class ExtractReview(pype.api.Extractor):
# Generate thumbnail.
thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
args = [
ffmpeg_path, "-y",
"{}".format(ffmpeg_path), "-y",
"-i", output_image_path,
"-vf", "scale=300:-1",
"-vframes", "1",


@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
"""Collect Harmony scenes in Standalone Publisher."""
import copy
import glob
import os
from pprint import pformat
import pyblish.api
class CollectHarmonyScenes(pyblish.api.InstancePlugin):
"""Collect Harmony xstage files."""
order = pyblish.api.CollectorOrder + 0.498
label = "Collect Harmony Scene"
hosts = ["standalonepublisher"]
families = ["harmony.scene"]
# presets
ignored_instance_data_keys = ("name", "label", "stagingDir", "version")
def process(self, instance):
"""Plugin entry point."""
context = instance.context
asset_data = instance.context.data["assetEntity"]
asset_name = instance.data["asset"]
subset_name = instance.data.get("subset", "sceneMain")
anatomy_data = instance.context.data["anatomyData"]
repres = instance.data["representations"]
staging_dir = repres[0]["stagingDir"]
files = repres[0]["files"]
if not files.endswith(".zip"):
# A harmony project folder / .xstage was dropped
instance_name = f"{asset_name}_{subset_name}"
task = instance.data.get("task", "harmonyIngest")
# create new instance
new_instance = context.create_instance(instance_name)
# add original instance data except name key
for key, value in instance.data.items():
# Make sure value is copy since value may be object which
# can be shared across all new created objects
if key not in self.ignored_instance_data_keys:
new_instance.data[key] = copy.deepcopy(value)
self.log.info("Copied data: {}".format(new_instance.data))
# fix anatomy data
anatomy_data_new = copy.deepcopy(anatomy_data)
# updating hierarchy data
anatomy_data_new.update({
"asset": asset_data["name"],
"task": task,
"subset": subset_name
})
new_instance.data["label"] = f"{instance_name}"
new_instance.data["subset"] = subset_name
new_instance.data["extension"] = ".zip"
new_instance.data["anatomyData"] = anatomy_data_new
new_instance.data["publish"] = True
# When a project folder was dropped vs. just an xstage file, find
# the latest xstage file version and update the instance
if not files.endswith(".xstage"):
source_dir = os.path.join(
staging_dir, files
).replace("\\", "/")
latest_file = max(glob.iglob(source_dir + "/*.xstage"),
key=os.path.getctime).replace("\\", "/")
new_instance.data["representations"][0]["stagingDir"] = (
source_dir
)
new_instance.data["representations"][0]["files"] = (
os.path.basename(latest_file)
)
self.log.info(f"Created new instance: {instance_name}")
self.log.debug(f"_ inst_data: {pformat(new_instance.data)}")
# set original instance for removal
self.log.info("Context data: {}".format(context.data))
instance.data["remove"] = True


@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-
"""Collect zips as Harmony scene files."""
import copy
from pprint import pformat
import pyblish.api
class CollectHarmonyZips(pyblish.api.InstancePlugin):
"""Collect Harmony zipped projects."""
order = pyblish.api.CollectorOrder + 0.497
label = "Collect Harmony Zipped Projects"
hosts = ["standalonepublisher"]
families = ["harmony.scene"]
extensions = ["zip"]
# presets
ignored_instance_data_keys = ("name", "label", "stagingDir", "version")
def process(self, instance):
"""Plugin entry point."""
context = instance.context
asset_data = instance.context.data["assetEntity"]
asset_name = instance.data["asset"]
subset_name = instance.data.get("subset", "sceneMain")
anatomy_data = instance.context.data["anatomyData"]
repres = instance.data["representations"]
files = repres[0]["files"]
if files.endswith(".zip"):
# A zip file was dropped
instance_name = f"{asset_name}_{subset_name}"
task = instance.data.get("task", "harmonyIngest")
# create new instance
new_instance = context.create_instance(instance_name)
# add original instance data except name key
for key, value in instance.data.items():
# Make sure value is copy since value may be object which
# can be shared across all new created objects
if key not in self.ignored_instance_data_keys:
new_instance.data[key] = copy.deepcopy(value)
self.log.info("Copied data: {}".format(new_instance.data))
# fix anatomy data
anatomy_data_new = copy.deepcopy(anatomy_data)
# updating hierarchy data
anatomy_data_new.update({
"asset": asset_data["name"],
"task": task,
"subset": subset_name
})
new_instance.data["label"] = f"{instance_name}"
new_instance.data["subset"] = subset_name
new_instance.data["extension"] = ".zip"
new_instance.data["anatomyData"] = anatomy_data_new
new_instance.data["publish"] = True
self.log.info(f"Created new instance: {instance_name}")
self.log.debug(f"_ inst_data: {pformat(new_instance.data)}")
# set original instance for removal
self.log.info("Context data: {}".format(context.data))
instance.data["remove"] = True


@ -18,7 +18,7 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
label = "Collect instance data"
order = pyblish.api.CollectorOrder + 0.49
families = ["render", "plate"]
families = ["render", "plate", "review"]
hosts = ["standalonepublisher"]
def process(self, instance):


@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
"""Collect instances that are marked for removal and remove them."""
import pyblish.api
class CollectRemoveMarked(pyblish.api.ContextPlugin):
"""Clean up instances marked for removal.
Note:
This is a workaround for race conditions when removing instances
that were used to generate other instances.
"""
order = pyblish.api.CollectorOrder + 0.499
label = 'Remove Marked Instances'
def process(self, context):
"""Plugin entry point."""
for instance in context:
if instance.data.get('remove'):
context.remove(instance)


@ -0,0 +1,404 @@
# -*- coding: utf-8 -*-
"""Extract Harmony scene from zip file."""
import glob
import os
import shutil
import six
import sys
import tempfile
import zipfile
import pyblish.api
from avalon import api, io
import pype.api
class ExtractHarmonyZip(pype.api.Extractor):
"""Extract Harmony zip."""
# Pyblish settings
label = "Extract Harmony zip"
order = pyblish.api.ExtractorOrder + 0.02
hosts = ["standalonepublisher"]
families = ["scene"]
# Properties
session = None
task_types = None
task_statuses = None
assetversion_statuses = None
# Presets
create_workfile = True
default_task = "harmonyIngest"
default_task_type = "Ingest"
default_task_status = "Ingested"
assetversion_status = "Ingested"
def process(self, instance):
"""Plugin entry point."""
context = instance.context
self.session = context.data["ftrackSession"]
asset_doc = context.data["assetEntity"]
# asset_name = instance.data["asset"]
subset_name = instance.data["subset"]
instance_name = instance.data["name"]
family = instance.data["family"]
task = context.data["anatomyData"]["task"] or self.default_task
project_entity = instance.context.data["projectEntity"]
ftrack_id = asset_doc["data"]["ftrackId"]
repres = instance.data["representations"]
submitted_staging_dir = repres[0]["stagingDir"]
submitted_files = repres[0]["files"]
# Get all the ftrack entities needed
# Asset Entity
query = 'AssetBuild where id is "{}"'.format(ftrack_id)
asset_entity = self.session.query(query).first()
# Project Entity
query = 'Project where full_name is "{}"'.format(
project_entity["name"]
)
project_entity = self.session.query(query).one()
# Get Task types and Statuses for creation if needed
self.task_types = self._get_all_task_types(project_entity)
self.task_statuses = self._get_all_task_statuses(project_entity)
# Get Statuses of AssetVersions
self.assetversion_statuses = self._get_all_assetversion_statuses(
project_entity
)
# Setup the status that we want for the AssetVersion
if self.assetversion_status:
instance.data["assetversion_status"] = self.assetversion_status
# Create the default_task if it does not exist
if task == self.default_task:
existing_tasks = []
entity_children = asset_entity.get('children', [])
for child in entity_children:
if child.entity_type.lower() == 'task':
existing_tasks.append(child['name'].lower())
if task.lower() in existing_tasks:
print("Task {} already exists".format(task))
else:
self._create_task(
name=task,
task_type=self.default_task_type,
task_status=self.default_task_status,
parent=asset_entity,
)
# Find latest version
latest_version = self._find_last_version(subset_name, asset_doc)
version_number = 1
if latest_version is not None:
version_number += latest_version
self.log.info(
"Next version of instance \"{}\" will be {}".format(
instance_name, version_number
)
)
# update instance info
instance.data["task"] = task
instance.data["version_name"] = "{}_{}".format(subset_name, task)
instance.data["family"] = family
instance.data["subset"] = subset_name
instance.data["version"] = version_number
instance.data["latestVersion"] = latest_version
instance.data["anatomyData"].update({
"subset": subset_name,
"family": family,
"version": version_number
})
# Copy `families` and check if `family` is not in current families
families = instance.data.get("families") or list()
if families:
families = list(set(families))
instance.data["families"] = families
# Prepare staging dir for new instance and zip + sanitize scene name
staging_dir = tempfile.mkdtemp(prefix="pyblish_tmp_")
# Handle if the representation is a .zip and not an .xstage
pre_staged = False
if submitted_files.endswith(".zip"):
submitted_zip_file = os.path.join(submitted_staging_dir,
submitted_files
).replace("\\", "/")
pre_staged = self.sanitize_prezipped_project(instance,
submitted_zip_file,
staging_dir)
# Get the file to work with
source_dir = str(repres[0]["stagingDir"])
source_file = str(repres[0]["files"])
staging_scene_dir = os.path.join(staging_dir, "scene")
staging_scene = os.path.join(staging_scene_dir, source_file)
# If the file is an .xstage / directory, we must stage it
if not pre_staged:
shutil.copytree(source_dir, staging_scene_dir)
# Rename this latest file as 'scene.xstage'
# This is determined in the collector from the latest scene in a
# submitted directory / directory the submitted .xstage is in.
# In the case of a zip file being submitted, this is determined within
# the self.sanitize_project() method in this extractor.
os.rename(staging_scene,
os.path.join(staging_scene_dir, "scene.xstage")
)
# Required to set the current directory where the zip will end up
os.chdir(staging_dir)
# Create the zip file
zip_filepath = shutil.make_archive(os.path.basename(source_dir),
"zip",
staging_scene_dir
)
zip_filename = os.path.basename(zip_filepath)
self.log.info("Zip file: {}".format(zip_filepath))
# Setup representation
new_repre = {
"name": "zip",
"ext": "zip",
"files": zip_filename,
"stagingDir": staging_dir
}
self.log.debug(
"Creating new representation: {}".format(new_repre)
)
instance.data["representations"] = [new_repre]
self.log.debug("Completed prep of zipped Harmony scene: {}"
.format(zip_filepath)
)
# If this extractor is setup to also extract a workfile...
if self.create_workfile:
workfile_path = self.extract_workfile(instance,
staging_scene
)
self.log.debug("Extracted Workfile to: {}".format(workfile_path))
def extract_workfile(self, instance, staging_scene):
"""Extract a valid workfile for this corresponding publish.
Args:
instance (:class:`pyblish.api.Instance`): Instance data.
staging_scene (str): path of staging scene.
Returns:
str: Path to workdir.
"""
# Since the staging scene was renamed to "scene.xstage" for publish
# rename the staging scene in the temp stagingdir
staging_scene = os.path.join(os.path.dirname(staging_scene),
"scene.xstage")
# Setup the data needed to form a valid work path filename
anatomy = pype.api.Anatomy()
project_entity = instance.context.data["projectEntity"]
data = {
"root": api.registered_root(),
"project": {
"name": project_entity["name"],
"code": project_entity["data"].get("code", '')
},
"asset": instance.data["asset"],
"hierarchy": pype.api.get_hierarchy(instance.data["asset"]),
"family": instance.data["family"],
"task": instance.data.get("task"),
"subset": instance.data["subset"],
"version": 1,
"ext": "zip",
}
# Get a valid work filename first with version 1
file_template = anatomy.templates["work"]["file"]
anatomy_filled = anatomy.format(data)
work_path = anatomy_filled["work"]["path"]
# Get the final work filename with the proper version
data["version"] = api.last_workfile_with_version(
os.path.dirname(work_path), file_template, data, [".zip"]
)[1]
anatomy_filled = anatomy.format(data)
work_path = anatomy_filled["work"]["path"]
base_name = os.path.splitext(os.path.basename(work_path))[0]
staging_work_path = os.path.join(os.path.dirname(staging_scene),
base_name + ".xstage"
)
# Rename this latest file after the workfile path filename
os.rename(staging_scene, staging_work_path)
# Required to set the current directory where the zip will end up
os.chdir(os.path.dirname(os.path.dirname(staging_scene)))
# Create the zip file
zip_filepath = shutil.make_archive(base_name,
"zip",
os.path.dirname(staging_scene)
)
self.log.info(staging_scene)
self.log.info(work_path)
self.log.info(staging_work_path)
self.log.info(os.path.dirname(os.path.dirname(staging_scene)))
self.log.info(base_name)
self.log.info(zip_filepath)
# Create the work path on disk if it does not exist
os.makedirs(os.path.dirname(work_path), exist_ok=True)
shutil.copy(zip_filepath, work_path)
return work_path
def sanitize_prezipped_project(
self, instance, zip_filepath, staging_dir):
"""Fix when a zip contains a folder.
Handle zip file root contains folder instead of the project.
Args:
instance (:class:`pyblish.api.Instance`): Instance data.
zip_filepath (str): Path to zip.
staging_dir (str): Path to staging directory.
"""
zip = zipfile.ZipFile(zip_filepath)
zip_contents = zipfile.ZipFile.namelist(zip)
# Determine if any xstage file is in root of zip
project_in_root = [pth for pth in zip_contents
if "/" not in pth and pth.endswith(".xstage")]
staging_scene_dir = os.path.join(staging_dir, "scene")
# The project is nested, so we must extract and move it
if not project_in_root:
staging_tmp_dir = os.path.join(staging_dir, "tmp")
with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
zip_ref.extractall(staging_tmp_dir)
nested_project_folder = os.path.join(staging_tmp_dir,
zip_contents[0]
)
shutil.copytree(nested_project_folder, staging_scene_dir)
else:
# The project is not nested, so we just extract to scene folder
with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
zip_ref.extractall(staging_scene_dir)
latest_file = max(glob.iglob(staging_scene_dir + "/*.xstage"),
key=os.path.getctime).replace("\\", "/")
instance.data["representations"][0]["stagingDir"] = staging_scene_dir
instance.data["representations"][0]["files"] = os.path.basename(
latest_file)
# We have staged the scene already so return True
return True
def _find_last_version(self, subset_name, asset_doc):
"""Find last version of subset."""
subset_doc = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_doc["_id"]
})
if subset_doc is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_doc = io.find_one(
{
"type": "version",
"parent": subset_doc["_id"]
},
sort=[("name", -1)]
)
if version_doc:
return int(version_doc["name"])
return None
def _get_all_task_types(self, project):
"""Get all task types."""
tasks = {}
proj_template = project['project_schema']
temp_task_types = proj_template['_task_type_schema']['types']
for type in temp_task_types:
if type['name'] not in tasks:
tasks[type['name']] = type
return tasks
def _get_all_task_statuses(self, project):
"""Get all statuses of tasks."""
statuses = {}
proj_template = project['project_schema']
temp_task_statuses = proj_template.get_statuses("Task")
for status in temp_task_statuses:
if status['name'] not in statuses:
statuses[status['name']] = status
return statuses
def _get_all_assetversion_statuses(self, project):
"""Get statuses of all asset versions."""
statuses = {}
proj_template = project['project_schema']
temp_task_statuses = proj_template.get_statuses("AssetVersion")
for status in temp_task_statuses:
if status['name'] not in statuses:
statuses[status['name']] = status
return statuses
def _create_task(self, name, task_type, parent, task_status):
"""Create task."""
task_data = {
'name': name,
'parent': parent,
}
self.log.info(task_type)
task_data['type'] = self.task_types[task_type]
task_data['status'] = self.task_statuses[task_status]
self.log.info(task_data)
task = self.session.create('Task', task_data)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
six.reraise(tp, value, tb)
return task


@ -47,7 +47,7 @@ class ExtractShotData(pype.api.Extractor):
start += 0.5
args = [
ffmpeg_path,
"\"{}\"".format(ffmpeg_path),
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)


@ -56,7 +56,9 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
elif is_jpeg:
# use first frame as thumbnail if is sequence of jpegs
full_thumbnail_path = file
full_thumbnail_path = os.path.join(
thumbnail_repre["stagingDir"], file
)
self.log.info(
"For thumbnail is used file: {}".format(full_thumbnail_path)
)
@ -75,7 +77,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
ffmpeg_args = self.ffmpeg_args or {}
jpeg_items = []
jpeg_items.append(ffmpeg_path)
jpeg_items.append("\"{}\"".format(ffmpeg_path))
# override file if already exists
jpeg_items.append("-y")
# add input filters from peresets


@ -0,0 +1,83 @@
from avalon import api
from avalon.vendor import qargparse
from avalon.tvpaint import CommunicatorWrapper
class ImportImage(api.Loader):
"""Load image or image sequence to TVPaint as new layer."""
families = ["render", "image", "background", "plate"]
representations = ["*"]
label = "Import Image"
order = 1
icon = "image"
color = "white"
import_script = (
"filepath = \"{}\"\n"
"layer_name = \"{}\"\n"
"tv_loadsequence filepath {}PARSE layer_id\n"
"tv_layerrename layer_id layer_name"
)
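# Example of the rendered George script (hypothetical values):
#   filepath = "C:/renders/sh010_plate.0001.png"
#   layer_name = "sh010_imageMain_v001"
#   tv_loadsequence filepath "STRETCH" "TIMESTRETCH" "PRELOAD" PARSE layer_id
#   tv_layerrename layer_id layer_name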
defaults = {
"stretch": True,
"timestretch": True,
"preload": True
}
options = [
qargparse.Boolean(
"stretch",
label="Stretch to project size",
default=True,
help="Stretch loaded image/s to project resolution?"
),
qargparse.Boolean(
"timestretch",
label="Stretch to timeline length",
default=True,
help="Clip loaded image/s to timeline length?"
),
qargparse.Boolean(
"preload",
label="Preload loaded image/s",
default=True,
help="Preload image/s?"
)
]
def load(self, context, name, namespace, options):
stretch = options.get("stretch", self.defaults["stretch"])
timestretch = options.get("timestretch", self.defaults["timestretch"])
preload = options.get("preload", self.defaults["preload"])
load_options = []
if stretch:
load_options.append("\"STRETCH\"")
if timestretch:
load_options.append("\"TIMESTRETCH\"")
if preload:
load_options.append("\"PRELOAD\"")
load_options_str = ""
for load_option in load_options:
load_options_str += (load_option + " ")
# Prepare layer name
asset_name = context["asset"]["name"]
version_name = context["version"]["name"]
layer_name = "{}_{}_v{:0>3}".format(
asset_name,
name,
version_name
)
# Fill import script with filename and layer name
# - filename must not contain backslashes
george_script = self.import_script.format(
self.fname.replace("\\", "/"),
layer_name,
load_options_str
)
return CommunicatorWrapper.execute_george_through_file(george_script)


@ -10,12 +10,28 @@ def get_resource(*args):
"""
return os.path.normpath(
os.path.join(
os.path.dirname(__file__),
os.path.dirname(os.path.abspath(__file__)),
*args
)
)
def get_liberation_font_path(bold=False, italic=False):
font_name = "LiberationSans"
suffix = ""
if bold:
suffix += "Bold"
if italic:
suffix += "Italic"
if not suffix:
suffix = "Regular"
filename = "{}-{}.ttf".format(font_name, suffix)
font_path = get_resource("fonts", font_name, filename)
return font_path
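# Usage sketch (exact path depends on the resources directory layout):
#   get_liberation_font_path(bold=True)
#   -> .../resources/fonts/LiberationSans/LiberationSans-Bold.ttf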
def pype_icon_filepath(debug=None):
if debug is None:
debug = bool(os.getenv("PYPE_DEV"))

Binary file not shown (image, 25 KiB).


@ -0,0 +1,77 @@
LICENSE AGREEMENT AND LIMITED PRODUCT WARRANTY LIBERATION FONT SOFTWARE
This agreement governs the use of the Software and any updates to the
Software, regardless of the delivery mechanism. Subject to the following
terms, Red Hat, Inc. ("Red Hat") grants to the user ("Client") a license to
this collective work pursuant to the GNU General Public License v.2 with the
exceptions set forth below and such other terms as our set forth in this End
User License Agreement.
1. The Software and License Exception. LIBERATION font software (the
"Software") consists of TrueType-OpenType formatted font software for
rendering LIBERATION typefaces in sans serif, serif, and monospaced character
styles. You are licensed to use, modify, copy, and distribute the Software
pursuant to the GNU General Public License v.2 with the following exceptions:
1) As a special exception, if you create a document which uses this font, and
embed this font or unaltered portions of this font into the document, this
font does not by itself cause the resulting document to be covered by the GNU
General Public License.  This exception does not however invalidate any other
reasons why the document might be covered by the GNU General Public License. 
If you modify this font, you may extend this exception to your version of the
font, but you are not obligated to do so. If you do not wish to do so, delete
this exception statement from your version.
2) As a further exception, any distribution of the object code of the Software
in a physical product must provide you the right to access and modify the
source code for the Software and to reinstall that modified version of the
Software in object code form on the same physical product on which you
received it.
2. Intellectual Property Rights. The Software and each of its components,
including the source code, documentation, appearance, structure and
organization are owned by Red Hat and others and are protected under copyright
and other laws. Title to the Software and any component, or to any copy,
modification, or merged portion shall remain with the aforementioned, subject
to the applicable license. The "LIBERATION" trademark is a trademark of Red
Hat, Inc. in the U.S. and other countries. This agreement does not permit
Client to distribute modified versions of the Software using Red Hat's
trademarks. If Client makes a redistribution of a modified version of the
Software, then Client must modify the files names to remove any reference to
the Red Hat trademarks and must not use the Red Hat trademarks in any way to
reference or promote the modified Software.
3. Limited Warranty. To the maximum extent permitted under applicable law, the
Software is provided and licensed "as is" without warranty of any kind,
expressed or implied, including the implied warranties of merchantability,
non-infringement or fitness for a particular purpose. Red Hat does not warrant
that the functions contained in the Software will meet Client's requirements
or that the operation of the Software will be entirely error free or appear
precisely as described in the accompanying documentation.
4. Limitation of Remedies and Liability. To the maximum extent permitted by
applicable law, Red Hat or any Red Hat authorized dealer will not be liable to
Client for any incidental or consequential damages, including lost profits or
lost savings arising out of the use or inability to use the Software, even if
Red Hat or such dealer has been advised of the possibility of such damages.
5. Export Control. As required by U.S. law, Client represents and warrants
that it: (a) understands that the Software is subject to export controls under
the U.S. Commerce Department's Export Administration Regulations ("EAR"); (b)
is not located in a prohibited destination country under the EAR or U.S.
sanctions regulations (currently Cuba, Iran, Iraq, Libya, North Korea, Sudan
and Syria); (c) will not export, re-export, or transfer the Software to any
prohibited destination, entity, or individual without the necessary export
license(s) or authorizations(s) from the U.S. Government; (d) will not use or
transfer the Software for use in any sensitive nuclear, chemical or biological
weapons, or missile technology end-uses unless authorized by the U.S.
Government by regulation or specific license; (e) understands and agrees that
if it is in the United States and exports or transfers the Software to
eligible end users, it will, as required by EAR Section 740.17(e), submit
semi-annual reports to the Commerce Department's Bureau of Industry & Security
(BIS), which include the name and address (including country) of each
transferee; and (f) understands that countries other than the United States
may restrict the import, use, or export of encryption products and that it
shall be solely responsible for compliance with any such import, use, or
export restrictions.
6. General. If any provision of this agreement is held to be unenforceable,
that shall not affect the enforceability of the remaining provisions. This
agreement shall be governed by the laws of the State of North Carolina and of
the United States, without regard to any conflict of laws provisions, except
that the United Nations Convention on the International Sale of Goods shall
not apply.
Copyright © 2007 Red Hat, Inc. All rights reserved. LIBERATION is a trademark
of Red Hat, Inc.

View file

@ -191,7 +191,7 @@ def switch(asset_name, filepath=None, new=True):
representations = []
for container in containers:
try:
representation = pype.switch_item(container,
representation = fusion_lib.switch_item(container,
asset_name=asset_name)
representations.append(representation)
except Exception as e:

View file

@ -2,9 +2,10 @@ import os
import sys
import re
import subprocess
import platform
import json
import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
from pype.api import config
from pype.api import config, resources
import pype.lib
@ -13,16 +14,16 @@ ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
FFMPEG = (
'{} -i "%(input)s" %(filters)s %(args)s%(output)s'
'"{}" -i "%(input)s" %(filters)s %(args)s%(output)s'
).format(ffmpeg_path)
FFPROBE = (
'{} -v quiet -print_format json -show_format -show_streams "%(source)s"'
'"{}" -v quiet -print_format json -show_format -show_streams "%(source)s"'
).format(ffprobe_path)
DRAWTEXT = (
"drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
"drawtext=fontfile='%(font)s':text=\\'%(text)s\\':"
"x=%(x)s:y=%(y)s:fontcolor=%(color)s@%(opacity).1f:fontsize=%(size)d"
)
TIMECODE = (
"drawtext=timecode=\\'%(timecode)s\\':text=\\'%(text)s\\'"
@ -212,9 +213,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if frame_start is None:
replacement_final = replacement_size = str(MISSING_KEY_VALUE)
else:
replacement_final = "\\'{}\\'".format(
r'%%{eif\:n+%d\:d}' % frame_start
)
replacement_final = "%{eif:n+" + str(frame_start) + ":d}"
replacement_size = str(frame_end)
final_text = final_text.replace(
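Taken together, the hunks above quote the ffmpeg/ffprobe executable paths (so paths containing spaces keep working), move fontfile to the front of the drawtext template, and swap the escaped-quote frame expression for a plain eif expression. A hedged sketch of what a filled-in drawtext filter now looks like; every sample value below is made up for illustration:

DRAWTEXT = (
    "drawtext=fontfile='%(font)s':text=\\'%(text)s\\':"
    "x=%(x)s:y=%(y)s:fontcolor=%(color)s@%(opacity).1f:fontsize=%(size)d"
)

frame_start = 1001  # illustrative start frame
print(DRAWTEXT % {
    "font": "fonts/LiberationSans-Regular.ttf",
    # ffmpeg expands %{eif:n+1001:d} to the current frame index plus 1001.
    "text": "%{eif:n+" + str(frame_start) + ":d}",
    "x": 10,
    "y": 10,
    "color": "white",
    "opacity": 1.0,
    "size": 42,
})
# Prints (as a single line):
# drawtext=fontfile='fonts/LiberationSans-Regular.ttf':text=\'%{eif:n+1001:d}\':
#     x=10:y=10:fontcolor=white@1.0:fontsize=42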
@ -236,13 +235,32 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
}
timecode_text = options.get("timecode") or ""
text_for_size += timecode_text
data.update(options)
os_system = platform.system().lower()
data_font = data.get("font")
if not data_font:
data_font = (
resources.get_liberation_font_path().replace("\\", "/")
)
elif isinstance(data_font, dict):
data_font = data_font[os_system]
if data_font:
data["font"] = data_font
options["font"] = data_font
if ffmpeg_burnins._is_windows():
data["font"] = (
data_font
.replace(os.sep, r'\\' + os.sep)
.replace(':', r'\:')
)
data.update(
ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
)
if 'font' in data and ffmpeg_burnins._is_windows():
data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
data['font'] = data['font'].replace(':', r'\:')
self.filters['drawtext'].append(draw % data)
if options.get('bg_color') is not None:
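The block above adds a font fallback and per-platform handling: when no font is configured the bundled Liberation Sans is used, a dict value is treated as an OS-keyed mapping, and on Windows the path is escaped for ffmpeg's drawtext. A condensed sketch of that logic as a standalone helper (the function name is made up; the escaping mirrors the lines above):

import os
import platform

from pype.api import resources


def resolve_burnin_font(font=None):
    """Return a drawtext-safe font path, falling back to Liberation Sans."""
    os_system = platform.system().lower()
    if not font:
        font = resources.get_liberation_font_path().replace("\\", "/")
    elif isinstance(font, dict):
        # Presets may store one path per platform,
        # e.g. {"windows": ..., "linux": ..., "darwin": ...}.
        font = font[os_system]
    if os_system == "windows":
        # ffmpeg's drawtext needs separators and the drive colon escaped on Windows.
        font = font.replace(os.sep, r"\\" + os.sep).replace(":", r"\:")
    return font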
@ -308,11 +326,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
_stdout, _stderr = proc.communicate()
if _stdout:
print(_stdout.decode("utf-8"))
for line in _stdout.split(b"\r\n"):
print(line.decode("utf-8"))
# This will probably never happen as ffmpeg use stdout
if _stderr:
print(_stderr.decode("utf-8"))
for line in _stderr.split(b"\r\n"):
print(line.decode("utf-8"))
if proc.returncode != 0:
raise RuntimeError(
@ -474,7 +494,7 @@ def burnins_from_data(
# Replace with missing key value if frame_start_tc is not set
if frame_start_tc is None and has_timecode:
has_timecode = False
log.warning(
print(
"`frame_start` and `frame_start_tc`"
" are not set in entered data."
)
@ -483,7 +503,7 @@ def burnins_from_data(
has_source_timecode = SOURCE_TIMECODE_KEY in value
if source_timecode is None and has_source_timecode:
has_source_timecode = False
log.warning("Source does not have set timecode value.")
print("Source does not have set timecode value.")
value = value.replace(SOURCE_TIMECODE_KEY, MISSING_KEY_VALUE)
key_pattern = re.compile(r"(\{.*?[^{0]*\})")
@ -558,7 +578,10 @@ def burnins_from_data(
if __name__ == "__main__":
print("* Burnin script started")
in_data = json.loads(sys.argv[-1])
in_data_json_path = sys.argv[-1]
with open(in_data_json_path, "r") as file_stream:
in_data = json.load(file_stream)
burnins_from_data(
in_data["input"],
in_data["output"],
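With the __main__ change above, the burnin payload is no longer passed as a JSON string on the command line but as a path to a JSON file. A hedged example of preparing and running such a job; the script path, file name, and any keys beyond "input" and "output" (the only ones visible in the hunk above) are illustrative only:

import json
import subprocess
import sys

payload = {
    "input": "review_h264.mov",     # source movie to stamp
    "output": "review_burnin.mov",  # destination movie with burnins
    # ...plus whatever additional keys burnins_from_data() expects.
}

with open("burnin_job.json", "w") as job_file:
    json.dump(payload, job_file)

# The JSON file path is now the last command-line argument.
subprocess.check_call([sys.executable, "otio_burnin.py", "burnin_job.json"])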

4
pype/tests/README.md Normal file
View file

@ -0,0 +1,4 @@
Tests for Pype
--------------
Trigger them by running:
`pype test --pype`

Some files were not shown because too many files have changed in this diff.