Merge branch 'develop' into feature/PYPE-654-nks-cut-reference-videos

Jakub Jezek 2020-05-06 16:24:37 +02:00
commit 06fbbd6cba
No known key found for this signature in database
GPG key ID: C4B96E101D2A47F3
221 changed files with 39266 additions and 12570 deletions

View file

@@ -1,6 +1,6 @@
[flake8]
# ignore = D203
ignore = BLK100
ignore = BLK100, W504, W503
max-line-length = 79
exclude =
.git,
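
For context, the newly ignored W503 (line break before a binary operator) and W504 (line break after one) are mutually exclusive pycodestyle warnings, so ignoring both accepts either wrapping style; a minimal sketch:

first_value, second_value = 1, 2
# Break before the operator; would trigger W503:
total = (first_value
         + second_value)
# Break after the operator; would trigger W504:
total = (first_value +
         second_value)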

.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View file

@@ -0,0 +1,31 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. windows]
- Host: [e.g. Maya, Nuke, Houdini]
**Additional context**
Add any other context about the problem here.

View file

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

.gitignore vendored
View file

@@ -33,3 +33,5 @@ coverage.xml
##################
node_modules/
package-lock.json
pype/premiere/ppro/js/debug.log

View file

@@ -9,9 +9,9 @@ from pypeapp import config
import logging
log = logging.getLogger(__name__)
__version__ = "2.6.0"
PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS")
STUDIO_PLUGINS_PATH = os.environ.get("PYPE_STUDIO_PLUGINS")
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -84,6 +84,20 @@ def install():
pyblish.register_plugin_path(plugin_path)
avalon.register_plugin_path(avalon.Loader, plugin_path)
avalon.register_plugin_path(avalon.Creator, plugin_path)
avalon.register_plugin_path(
avalon.InventoryAction, plugin_path
)
# Register studio specific plugins
if STUDIO_PLUGINS_PATH and project_name:
for path in STUDIO_PLUGINS_PATH.split(os.pathsep):
if not path:
continue
if os.path.exists(path):
pyblish.register_plugin_path(path)
avalon.register_plugin_path(avalon.Loader, path)
avalon.register_plugin_path(avalon.Creator, path)
avalon.register_plugin_path(avalon.InventoryAction, path)
# apply monkey patched discover to original one
avalon.discover = patched_discover
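
A minimal sketch of how the new studio-plugin registration is driven; the environment variable name matches the diff, while the paths are hypothetical:

import os

# Hypothetical studio plugin roots, joined with the OS path separator.
os.environ["PYPE_STUDIO_PLUGINS"] = os.pathsep.join([
    "/studio/plugins/global",
    "/studio/plugins/feature_film",
])

# install() splits on os.pathsep, skips empty or missing entries, and
# registers each remaining path with pyblish and with the avalon
# Loader, Creator and InventoryAction plugin types.
for path in os.environ["PYPE_STUDIO_PLUGINS"].split(os.pathsep):
    if path and os.path.exists(path):
        print("would register:", path)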

View file

@@ -1,122 +0,0 @@
import os
import sys
from avalon import api as avalon
from pyblish import api as pyblish
from app import api as app
from .. import api
t = app.Templates()
log = api.Logger.getLogger(__name__, "aport")
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
ADDITIONAL_PLUGINS = ['ftrack']
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(
PLUGINS_DIR, "aport", "publish"
).replace("\\", "/")
if os.getenv("PUBLISH_PATH", None):
os.environ["PUBLISH_PATH"] = os.pathsep.join(
os.environ["PUBLISH_PATH"].split(os.pathsep) +
[PUBLISH_PATH]
)
else:
os.environ["PUBLISH_PATH"] = PUBLISH_PATH
LOAD_PATH = os.path.join(PLUGINS_DIR, "aport", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "aport", "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "aport", "inventory")
def register_additional_plugin_paths():
'''Register plugin paths for additional hosts.'''
for host in ADDITIONAL_PLUGINS:
publish_path = os.path.join(
PLUGINS_DIR, host, "publish").replace("\\", "/")
pyblish.register_plugin_path(publish_path)
# adding path to PUBLISH_PATH environment
if os.getenv("PUBLISH_PATH", None):
os.environ["PUBLISH_PATH"] = os.pathsep.join(
os.environ["PUBLISH_PATH"].split(os.pathsep) +
[publish_path]
)
else:
os.environ["PUBLISH_PATH"] = publish_path
log.info(
"Registered additional plugin path: "
"{}".format(publish_path))
def install():
# api.set_avalon_workdir()
log.info("Registering Aport plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# additional plugins
register_additional_plugin_paths()
# Disable all families except for the ones we explicitly want to see
family_states = [
"imagesequence",
"mov"
]
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
# load data from templates
api.load_data_from_templates()
# launch pico server
pico_server_launch()
def uninstall():
log.info("Deregistering Aport plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
# reset data from templates
api.reset_data_from_templates()
def pico_server_launch():
# path = "C:/Users/hubert/CODE/github/pico/examples/everything"
path = os.path.join(
os.path.dirname(__file__),
# "package"
)
os.chdir(path)
print(os.getcwd())
print(os.listdir(path))
try:
args = [sys.executable, "-m", "pico.server",
# "pipeline",
"api"
]
app.forward(
args,
cwd=path
)
except Exception as e:
log.error(e)
log.error(sys.exc_info())
# sys.exit(returncode)

View file

@@ -1,225 +0,0 @@
# api.py
import os
import sys
import pico
from pico import PicoApp
from app.api import forward, Logger
import pipeline as ppl
log = Logger.getLogger(__name__, "aport")
@pico.expose()
def get_session():
ppl.AVALON_PROJECT = os.getenv("AVALON_PROJECT", None)
ppl.AVALON_ASSET = os.getenv("AVALON_ASSET", None)
ppl.AVALON_TASK = os.getenv("AVALON_TASK", None)
ppl.AVALON_SILO = os.getenv("AVALON_SILO", None)
return ppl.get_session()
@pico.expose()
def load_representations(project, representations):
'''Query data from MongoDB for the defined representations.
Args:
project (str): name of the project
representations (list): representations which are required
Returns:
data (dict): representations in last versions
# testing url:
http://localhost:4242/api/load_representations?project=jakub_projectx&representations=[{%22asset%22:%22e09s031_0040%22,%22subset%22:%22referenceDefault%22,%22representation%22:%22mp4%22},%20{%22asset%22:%22e09s031_0030%22,%22subset%22:%22referenceDefault%22,%22representation%22:%22mp4%22}]
# returning:
{"e09s031_0040_referenceDefault":{"_id":"5c6dabaa2af61756b02f7f32","schema":"pype:representation-2.0","type":"representation","parent":"5c6dabaa2af61756b02f7f31","name":"mp4","data":{"path":"C:\\Users\\hubert\\_PYPE_testing\\projects\\jakub_projectx\\thisFolder\\e09\\s031\\e09s031_0040\\publish\\clip\\referenceDefault\\v019\\jkprx_e09s031_0040_referenceDefault_v019.mp4","template":"{publish.root}/{publish.folder}/{version.main}/{publish.file}"},"dependencies":[],"context":{"root":"C:\\Users\\hubert\\_PYPE_testing\\projects","project":{"name":"jakub_projectx","code":"jkprx"},"task":"edit","silo":"thisFolder","asset":"e09s031_0040","family":"clip","subset":"referenceDefault","VERSION":19,"hierarchy":"thisFolder\\e09\\s031","representation":"mp4"}}}
'''
data = {}
# log.info("___project: {}".format(project))
# ppl.io.activate_project(project)
#
# from_mongo = ppl.io.find({"name": repr['representation'],
# "type": "representation"})[:]
for repr in representations:
log.info("asset: {}".format(repr['asset']))
# set context for each asset individually
context(project, repr['asset'], '')
# query data from mongo db for the asset's subset representation
related_repr = [r for r in ppl.io.find({"name": repr['representation'],
"type": "representation",
"context.asset": repr['asset']})[:]]
versions_dict = {r['context']['version']: i
for i, r in enumerate(related_repr)}
versions_list = [v for v in versions_dict.keys()]
sorted(versions_list)
version_index_last = versions_dict[max(versions_list)]
log.info("version_index_last: {}".format(version_index_last))
# create name which will be used on timeline clip
name = '_'.join([repr['asset'], repr['subset']])
# log.info("___related_repr: {}".format(related_repr))
# assign data for the clip representation
version = ppl.io.find_one(
{'_id': related_repr[version_index_last]['parent']})
log.info("version: {}".format(version))
# workaround to fix the path
if '.#####.mxf' in related_repr[version_index_last]['data']['path']:
related_repr[version_index_last]['data']['path'] = related_repr[version_index_last]['data']['path'].replace(
'.#####.mxf', '.mxf')
related_repr[version_index_last]['version'] = version
related_repr[version_index_last]['parentClip'] = repr['parentClip']
data[name] = related_repr[version_index_last]
return data
@pico.expose()
def publish(send_json_path, get_json_path, gui):
"""
Run standalone pyblish and add a link to
the data in an external json file.
It is necessary to run `register_plugin_path` first if a particular
host is needed.
Args:
send_json_path (string): path to a temp json file with
the sending context data
get_json_path (string): path to a temp json file with
the returning context data
Returns:
dict: get_json_path
Raises:
Exception: description
"""
log.info("avalon.session is: \n{}".format(ppl.SESSION))
log.info("PUBLISH_PATH: \n{}".format(os.environ["PUBLISH_PATH"]))
pype_start = os.path.join(os.getenv('PYPE_SETUP_ROOT'),
"app", "pype-start.py")
args = [pype_start,
"--root", os.environ['AVALON_PROJECTS'], "--publish-gui",
"-pp", os.environ["PUBLISH_PATH"],
"-d", "rqst_json_data_path", send_json_path,
"-d", "post_json_data_path", get_json_path
]
log.debug(args)
log.info("_aport.api Variable `AVALON_PROJECTS` had changed to `{0}`.".format(
os.environ['AVALON_PROJECTS']))
forward([
sys.executable, "-u"
] + args,
# cwd=cwd
)
return {"get_json_path": get_json_path}
@pico.expose()
def context(project, asset, task, app='aport'):
os.environ["AVALON_PROJECT"] = ppl.AVALON_PROJECT = project
os.environ["AVALON_ASSET"] = ppl.AVALON_ASSET = asset
os.environ["AVALON_TASK"] = ppl.AVALON_TASK = task
os.environ["AVALON_SILO"] = ppl.AVALON_SILO = ''
ppl.get_session()
# log.info('ppl.SESSION: {}'.format(ppl.SESSION))
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
ppl.update_current_task(task, asset, app)
project_code = ppl.io.find_one({"type": "project"})["data"].get("code", '')
os.environ["AVALON_PROJECTCODE"] = \
ppl.SESSION["AVALON_PROJECTCODE"] = project_code
parents = ppl.io.find_one({"type": 'asset',
"name": ppl.AVALON_ASSET})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents).replace("\\", "/")
os.environ["AVALON_HIERARCHY"] = \
ppl.SESSION["AVALON_HIERARCHY"] = hierarchy
fix_paths = {k: v.replace("\\", "/") for k, v in ppl.SESSION.items()
if isinstance(v, str)}
ppl.SESSION.update(fix_paths)
ppl.SESSION.update({"AVALON_HIERARCHY": hierarchy,
"AVALON_PROJECTCODE": project_code,
"current_dir": os.getcwd().replace("\\", "/")
})
return ppl.SESSION
@pico.expose()
def anatomy_fill(data):
from pype import api as pype
pype.load_data_from_templates()
anatomy = pype.Anatomy
return anatomy.format(data)
@pico.expose()
def deregister_plugin_path():
if os.getenv("PUBLISH_PATH", None):
aport_plugin_path = os.pathsep.join(
[p.replace("\\", "/")
for p in os.environ["PUBLISH_PATH"].split(os.pathsep)
if "aport" in p or
"ftrack" in p])
os.environ["PUBLISH_PATH"] = aport_plugin_path
else:
log.warning("deregister_plugin_path(): No PUBLISH_PATH is registred")
return "Publish path deregistered"
@pico.expose()
def register_plugin_path(publish_path):
deregister_plugin_path()
if os.getenv("PUBLISH_PATH", None):
os.environ["PUBLISH_PATH"] = os.pathsep.join(
os.environ["PUBLISH_PATH"].split(os.pathsep)
+ [publish_path.replace("\\", "/")]
)
else:
os.environ["PUBLISH_PATH"] = publish_path
log.info(os.environ["PUBLISH_PATH"].split(os.pathsep))
return "Publish registered paths: {}".format(
os.environ["PUBLISH_PATH"].split(os.pathsep)
)
app = PicoApp()
app.register_module(__name__)
# remove all Handlers created by pico
for name, handler in [(handler.get_name(), handler)
for handler in Logger.logging.root.handlers[:]]:
if "pype" not in str(name).lower():
Logger.logging.root.removeHandler(handler)
# SPLASH.hide_splash()

View file

@@ -1,26 +0,0 @@
import pype.api as pype
def get_anatomy(**kwarg):
return pype.Anatomy
def format_anatomy(data):
from .templates import (
get_anatomy
)
file = script_name()
anatomy = get_anatomy()
# TODO: perhaps should be in try!
padding = anatomy.render.padding
data.update({
"hierarchy": pype.get_hierarchy(),
"frame": "#" * padding,
"VERSION": pype.get_version_from_workfile(file)
})
# log.info("format_anatomy:anatomy: {}".format(anatomy))
return anatomy.format(data)

View file

@@ -1,130 +0,0 @@
import sys
import os
import getpass
from app.api import Logger
from io_nonsingleton import DbConnector
io = DbConnector()
log = Logger.getLogger(__name__, "aport")
self = sys.modules[__name__]
self.SESSION = None
self._registered_root = {"_": ""}
self.AVALON_PROJECT = os.getenv("AVALON_PROJECT", None)
self.AVALON_ASSET = os.getenv("AVALON_ASSET", None)
self.AVALON_TASK = os.getenv("AVALON_TASK", None)
self.AVALON_SILO = os.getenv("AVALON_SILO", None)
def get_session():
if not self.SESSION:
io.install()
self.SESSION = io.Session
# for k, v in os.environ.items():
# if 'AVALON' in k:
# print(str((k, v)))
return self.SESSION
def update_current_task(task=None, asset=None, app=None):
"""Update active Session to a new task work area.
This updates the live Session to a different `asset`, `task` or `app`.
Args:
task (str): The task to set.
asset (str): The asset to set.
app (str): The app to set.
Returns:
dict: The changed key, values in the current Session.
"""
mapping = {
"AVALON_ASSET": asset,
"AVALON_TASK": task,
"AVALON_APP": app,
}
changed = {key: value for key, value in mapping.items() if value}
if not changed:
return
# Update silo when asset changed
if "AVALON_ASSET" in changed:
asset_document = io.find_one({"name": changed["AVALON_ASSET"],
"type": "asset"})
assert asset_document, "Asset must exist"
silo = asset_document["silo"]
if silo is None:
silo = asset_document["name"]
changed["AVALON_SILO"] = silo
parents = asset_document['data']['parents']
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
changed['AVALON_HIERARCHY'] = hierarchy
# Compute work directory (with the temporary changed session so far)
project = io.find_one({"type": "project"},
projection={"config.template.work": True})
template = project["config"]["template"]["work"]
_session = self.SESSION.copy()
_session.update(changed)
changed["AVALON_WORKDIR"] = _format_work_template(template, _session)
# Update the full session in one go to avoid half updates
self.SESSION.update(changed)
# Update the environment
os.environ.update(changed)
return changed
def _format_work_template(template, session=None):
"""Return a formatted configuration template with a Session.
Note: This *cannot* format the templates for published files since the
session does not hold the context for a published file. Instead use
`get_representation_path` to parse the full path to a published file.
Args:
template (str): The template to format.
session (dict, Optional): The Session to use. If not provided use the
currently active global Session.
Returns:
str: The fully formatted path.
"""
if session is None:
session = self.SESSION
project = io.find_one({'type': 'project'})
return template.format(**{
"root": registered_root(),
"project": {
"name": project.get("name", session["AVALON_PROJECT"]),
"code": project["data"].get("code", ''),
},
"silo": session["AVALON_SILO"],
"hierarchy": session['AVALON_HIERARCHY'],
"asset": session["AVALON_ASSET"],
"task": session["AVALON_TASK"],
"app": session["AVALON_APP"],
"user": session.get("AVALON_USER", getpass.getuser())
})
def registered_root():
"""Return currently registered root"""
return os.path.normpath(
self._registered_root["_"]
or self.SESSION.get("AVALON_PROJECTS") or ""
)

File diff suppressed because it is too large.

View file

@@ -1,149 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Pype extension</title>
<!-- Load the pico Javascript client, always automatically available at /pico.js -->
<script src="/pico.js"></script>
<!-- Or load our module proxy -->
<script src="/api.js"></script>
<script>
if (typeof module === 'object') {
window.module = module;
module = undefined;
}
</script>
<script src="./build.js"></script>
<script>
if (window.module) module = window.module;
</script>
<!-- <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap-theme.min.css" integrity="sha384-fLW2N01lMqjakBkx3l/M9EahuwpSfeNvV63J5ezn3uZzapT0u7EYsXMjQV+0En5r" crossorigin="anonymous">
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.6.0/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.6.0/highlight.min.js"></script>
<script></script> -->
<style type="text/css">
html,
body {
height: 100%;
margin: 0px;
padding: 0px;
}
div {
padding: 5px;
}
#container {
height: 100%;
}
#header {
height: 5%;
}
#main {
height: 70%;
}
#output {
background-color: #333;
color: #aaa;
min-height: 15%;
overflow-y: scroll;
padding: 20px;
position: fixed;
bottom: 0px;
width: 100%;
}
.error {
color: #f00 !important;
}
#examples li {
padding: 10px;
margin: 10px;
background-color: silver;
}
code {
border-radius: 0;
margin: 5px;
white-space: pre !important;
}
#source {
height: 100%;
}
#examples {
height: 100%;
}
#spacer {
height: 20%;
}
.highlight {
background-color: yellow;
}
</style>
</head>
<body onresize="resizePanel()">
<a href="javascript:history.go(0)">Refresh panel</a>
<div id="container">
<div class="row row-eq-height" id="main">
<div class="col-md-6" id="examples">
<ol>
<li id="context">
<h4>Set context here</h4>
<pre><code class="js"></code></pre>
Project<input type="text" name="project" value="jakub_projectx" />Asset<input type="text" name="asset" value="shot01" />task<input type="text" name="task" value="compositing" />app<input type="text" name="app" value="premiera" />
<button class="btn btn-default btn-sm" type="button" onclick="context()">Set context</button>
</li>
<li id="deregister">
<h4>deregister_plugin_path</h4>
<pre><code class="js"></code></pre>
<button class="btn btn-default btn-sm" type="button" onclick="deregister()">Deregister</button>
</li>
<li id="register">
<h4>register_plugin_path</h4>
<pre><code class="js"></code></pre>
Path: <input type="text" name="path" value="C:/Users/hubertCODE/pype-setup/repos/pype-config/pype/plugins/premiere/publish" />
<button class="btn btn-default btn-sm" type="button" onclick="register()">Register path</button>
</li>
<li id="publish">
<h4>Publish</h4>
<pre><code class="js"></code></pre>
Json path: <input type="text" name="path" value="C:/Users/hubert/CODE/pype-setup/repos/pype-config/pype/premiere/example_publish_reqst.json" />
Gui<input type="checkbox" name="gui" value="True" checked>
<button class="btn btn-default btn-sm" type="button" onclick="publish()">Publish</button>
</li>
</ol>
<div id="spacer">
</div>
</div>
<div class="col-md-6" id="source">
<!-- <pre>
<code class="python"></code>
</pre> -->
</div>
</div>
<div class="row" id="output">
</div>
</div>
<script src="script.js"></script>
</body>
</html>

View file

@@ -1,214 +0,0 @@
var api = pico.importModule('api');
var output = document.getElementById('output');
function querySelector(parent){
return function(child){
return document.querySelector(parent).querySelector(child)
};
}
var defs = {}
function jumpTo(name){
var e = defs[name];
document.querySelectorAll('.highlight').forEach(function(el){
el.classList.remove('highlight');
});
e.classList.add('highlight');
return false;
}
function displayResult(r){
output.classList.remove("error");
output.innerText = JSON.stringify(r);
}
function displayError(e){
output.classList.add("error");
output.innerText = e.message;
}
function unindent(code){
var lines = code.split('\n');
var margin = -1;
for(var j=0; j < lines.length; j++){
var l = lines[j];
for(i=0; i < l.length; i++){
if(l[i] != " "){
margin = i;
break;
}
}
if(margin > -1){
break;
}
}
lines = lines.slice(j);
return lines.map(function(s){ return s.substr(margin)}).join('\n');
}
function deregister(){
var $ = querySelector("#deregister");
api.deregister_plugin_path().then(displayResult);
}
function register(){
var $ = querySelector("#register");
var path = $("input[name=path]").value;
api.register_plugin_path(path).then(displayResult);
}
function publish(){
var $ = querySelector("#publish");
var path = $("input[name=path]").value;
var gui = $("input[name=gui]").checked;
api.publish(path, gui).then(displayResult);
}
function context(){
var $ = querySelector("#context");
var project = $("input[name=project]").value;
var asset = $("input[name=asset]").value;
var task = $("input[name=task]").value;
var app = $("input[name=app]").value;
api.context(project,asset,task,app).then(displayResult);
}
//
// function example1(){
// var $ = querySelector("#example1");
// var name = $("input[name=name]").value;
// api.hello(name).then(displayResult);
// }
//
//
// function example2(){
// var $ = querySelector("#example2");
// var x = $("input[name=x]").valueAsNumber;
// var y = $("#example2 input[name=y]").valueAsNumber;
// api.multiply(x, y).then(displayResult);
// }
//
// function example3(){
// var $ = querySelector("#example3");
// var file = $("input[name=upload]").files[0];
// api.upload(file, file.name).then(displayResult).catch(displayError);
// }
//
// function example4(){
// var $ = querySelector("#example4");
// api.my_ip().then(displayResult)
// }
//
// function example5(){
// var $ = querySelector("#example5");
// var username = $("input[name=username]").value;
// var password = $("input[name=password]").value;
// pico.setAuthentication(api, username, password);
// api.current_user().then(displayResult).catch(displayError);
// pico.clearAuthentication(api);
// }
//
// function example6(){
// var $ = querySelector("#example6");
// api.start_session().then(function(){
// api.session_id().then(displayResult).then(function(){
// api.end_session();
// })
// })
// }
//
// function example7(){
// var $ = querySelector("#example7");
// var session_id = "4242";
// pico.setRequestHook(api, 'session', function(req) {
// req.headers.set('X-SESSION-ID', session_id)
// })
// api.session_id2().then(displayResult)
// pico.clearRequestHook(api, 'session');
// }
//
// function example8(){
// var $ = querySelector("#example8");
// api.countdown(10).each(displayResult).then(function(){
// displayResult("Boom!");
// });
// }
//
// function example9(){
// var $ = querySelector("#example9");
// var user = {
// name: "Bob",
// age: 30,
// occupation: "Software Engineer",
// }
// api.user_description(user).then(displayResult);
// }
//
// function example10(){
// var $ = querySelector("#example10");
// api.fail().then(displayResult).catch(displayError);
// }
//
// function example11(){
// var $ = querySelector("#example11");
// api.make_coffee().then(displayResult).catch(displayError);
// }
//
//
// function example12(){
// var $ = querySelector("#example12");
// var form = $("form");
// api.multiply.submitFormData(new FormData(form)).then(displayResult).catch(displayError);
// }
//
// function example13(){
// var $ = querySelector("#example13");
// var data = {
// x: 6,
// y: 7,
// }
// api.multiply.submitJSON(data).then(displayResult).catch(displayError);
// }
// api.show_source().then(function(s){
// document.querySelector('#source code').innerText = s;
// }).then(ready);
function ready(){
// // set the <code> element of each example to the corresponding functions source
// document.querySelectorAll('li pre code.js').forEach(function(e){
// var id = e.parentElement.parentElement.id;
// var f = window[id];
// var code = f.toString().split('\n').slice(2, -1).join('\n');
// e.innerText = unindent(code);
// })
document.querySelectorAll('li pre code.html').forEach(function(e){
var html = e.parentElement.parentElement.querySelector('div.example').innerHTML;
e.innerText = unindent(html);
})
hljs.initHighlighting();
// // find all the elements representing the function definitions in the python source
// document.querySelectorAll('.python .hljs-function .hljs-title').forEach(function(e){
// var a = document.createElement('a');
// a.name = e.innerText;
// e.parentElement.insertBefore(a, e)
// return defs[e.innerText] = e.parentElement;
// });
// convert all 'api.X' strings to hyperlinks to jump to python source
document.querySelectorAll('.js').forEach(function(e){
var code = e.innerHTML;
Object.keys(defs).forEach(function(k){
code = code.replace('api.' + k + '(', '<a href="#' + k + '" onclick="jumpTo(\'' + k + '\')">api.' + k + '</a>(');
})
e.innerHTML = code;
})
}

View file

@@ -1,41 +0,0 @@
from pype import api as pype
log = pype.Logger.getLogger(__name__, "aport")
def get_anatomy(**kwarg):
return pype.Anatomy
def get_dataflow(**kwarg):
log.info(kwarg)
host = kwarg.get("host", "aport")
cls = kwarg.get("class", None)
preset = kwarg.get("preset", None)
assert any([host, cls]), log.error("aport.templates.get_dataflow():"
"Missing mandatory kwargs `host`, `cls`")
aport_dataflow = getattr(pype.Dataflow, str(host), None)
aport_dataflow_node = getattr(aport_dataflow.nodes, str(cls), None)
if preset:
aport_dataflow_node = getattr(aport_dataflow_node, str(preset), None)
log.info("Dataflow: {}".format(aport_dataflow_node))
return aport_dataflow_node
def get_colorspace(**kwarg):
log.info(kwarg)
host = kwarg.get("host", "aport")
cls = kwarg.get("class", None)
preset = kwarg.get("preset", None)
assert any([host, cls]), log.error("aport.templates.get_colorspace():"
"Missing mandatory kwargs `host`, `cls`")
aport_colorspace = getattr(pype.Colorspace, str(host), None)
aport_colorspace_node = getattr(aport_colorspace, str(cls), None)
if preset:
aport_colorspace_node = getattr(aport_colorspace_node, str(preset), None)
log.info("Colorspace: {}".format(aport_colorspace_node))
return aport_colorspace_node

View file

@@ -23,7 +23,9 @@ class AvalonRestApi(RestApi):
if not project_name:
output = {}
for project_name in self.dbcon.tables():
project = self.dbcon[project_name].find_one({"type": "project"})
project = self.dbcon[project_name].find_one({
"type": "project"
})
output[project_name] = project
return CallbackResult(data=self.result_to_json(output))
@@ -44,7 +46,7 @@ class AvalonRestApi(RestApi):
if not self.dbcon.exist_table(_project_name):
abort(404, "Project \"{}\" was not found in database".format(
project_name
_project_name
))
if not _asset:
@@ -65,9 +67,27 @@ class AvalonRestApi(RestApi):
return asset
abort(404, "Asset \"{}\" with {} was not found in project {}".format(
_asset, identificator, project_name
_asset, identificator, _project_name
))
@RestApi.route("/publish/<asset_name>",
url_prefix="/premiere", methods="GET")
def publish(self, request):
"""
http://localhost:8021/premiere/publish/shot021?json_in=this/path/file_in.json&json_out=this/path/file_out.json
"""
asset_name = request.url_data["asset_name"]
query = request.query
data = request.request_data
output = {
"message": "Got your data. Thanks.",
"your_data": data,
"your_query": query,
"your_asset_is": asset_name
}
return CallbackResult(data=self.result_to_json(output))
def result_to_json(self, result):
""" Converts result of MongoDB query to dict without $oid (ObjectId)
keys with help of regex matching.
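
A sketch of calling the new /premiere/publish route, assuming the server runs on the host and port shown in the docstring; the json paths are the docstring's placeholders:

import requests  # third-party HTTP client, assumed available

response = requests.get(
    "http://localhost:8021/premiere/publish/shot021",
    params={
        "json_in": "this/path/file_in.json",
        "json_out": "this/path/file_out.json",
    },
)
# The route currently echoes the request back: message, your_data,
# your_query and your_asset_is.
print(response.json())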

View file

@@ -1,16 +1,10 @@
import logging
from pathlib import Path
import os
import bpy
import sys
import traceback
from avalon import api as avalon
from pyblish import api as pyblish
from .plugin import AssetLoader
logger = logging.getLogger("pype.blender")
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -19,9 +13,16 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create")
ORIGINAL_EXCEPTHOOK = sys.excepthook
def pype_excepthook_handler(*args):
traceback.print_exception(*args)
def install():
"""Install Blender configuration for Avalon."""
sys.excepthook = pype_excepthook_handler
pyblish.register_plugin_path(str(PUBLISH_PATH))
avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
@@ -29,6 +30,7 @@ def install():
def uninstall():
"""Uninstall Blender configuration for Avalon."""
sys.excepthook = ORIGINAL_EXCEPTHOOK
pyblish.deregister_plugin_path(str(PUBLISH_PATH))
avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))

View file

@@ -10,14 +10,43 @@ from avalon import api
VALID_EXTENSIONS = [".blend"]
def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str:
"""Return a consistent name for a model asset."""
def asset_name(
asset: str, subset: str, namespace: Optional[str] = None
) -> str:
"""Return a consistent name for an asset."""
name = f"{asset}_{subset}"
if namespace:
name = f"{namespace}:{name}"
return name
def create_blender_context(active: Optional[bpy.types.Object] = None,
selected: Optional[bpy.types.Object] = None,):
"""Create a new Blender context. If an object is passed as
a parameter, it is set as selected and active.
"""
if not isinstance(selected, list):
selected = [selected]
for win in bpy.context.window_manager.windows:
for area in win.screen.areas:
if area.type == 'VIEW_3D':
for region in area.regions:
if region.type == 'WINDOW':
override_context = {
'window': win,
'screen': win.screen,
'area': area,
'region': region,
'scene': bpy.context.scene,
'active_object': active,
'selected_objects': selected
}
return override_context
raise Exception("Could not create a custom Blender context.")
class AssetLoader(api.Loader):
"""A basic AssetLoader for Blender
@@ -67,7 +96,8 @@ class AssetLoader(api.Loader):
assert obj.library, f"'{obj.name}' is not linked."
libraries.add(obj.library)
assert len(libraries) == 1, "'{container.name}' contains objects from more than 1 library."
assert len(
libraries) == 1, "'{container.name}' contains objects from more than 1 library."
return list(libraries)[0]
@@ -122,7 +152,7 @@ class AssetLoader(api.Loader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
instance_name = model_name(asset, subset, namespace)
instance_name = asset_name(asset, subset, namespace)
return self._get_instance_collection(instance_name, nodes)
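
A usage sketch for the new create_blender_context() helper, assuming it runs inside Blender (so bpy is importable) and that the import path below matches where the diff places the helper:

import bpy
from pype.blender.plugin import create_blender_context  # path assumed

# Build an override dict targeting the first VIEW_3D area's WINDOW
# region, with the active object also marked as selected, then run a
# context-sensitive operator inside that override (pre-3.2 style).
override = create_blender_context(active=bpy.context.active_object)
bpy.ops.object.delete(override)  # illustrative operator call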

View file

@@ -0,0 +1,107 @@
import os
import collections
import ftrack_api
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import get_avalon_attr
class CleanHierarchicalAttrsAction(BaseAction):
identifier = "clean.hierarchical.attr"
label = "Pype Admin"
variant = "- Clean hierarchical custom attributes"
description = "Unset empty hierarchical attribute values."
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = "{}/ftrack/action_icons/PypeAdmin.svg".format(
os.environ.get("PYPE_STATICS_SERVER", "")
)
all_project_entities_query = (
"select id, name, parent_id, link"
" from TypedContext where project_id is \"{}\""
)
cust_attr_query = (
"select value, entity_id from CustomAttributeValue "
"where entity_id in ({}) and configuration_id is \"{}\""
)
def discover(self, session, entities, event):
"""Show only on project entity."""
if len(entities) == 1 and entities[0].entity_type.lower() == "project":
return True
return False
def launch(self, session, entities, event):
project = entities[0]
user_message = "This may take some time"
self.show_message(event, user_message, result=True)
self.log.debug("Preparing entities for cleanup.")
all_entities = session.query(
self.all_project_entities_query.format(project["id"])
).all()
all_entities_ids = [
"\"{}\"".format(entity["id"])
for entity in all_entities
if entity.entity_type.lower() != "task"
]
self.log.debug(
"Collected {} entities to process.".format(len(all_entities_ids))
)
entity_ids_joined = ", ".join(all_entities_ids)
attrs, hier_attrs = get_avalon_attr(session)
for attr in hier_attrs:
configuration_key = attr["key"]
self.log.debug(
"Looking for cleanup of custom attribute \"{}\"".format(
configuration_key
)
)
configuration_id = attr["id"]
call_expr = [{
"action": "query",
"expression": self.cust_attr_query.format(
entity_ids_joined, configuration_id
)
}]
[values] = self.session.call(call_expr)
data = {}
for item in values["data"]:
value = item["value"]
if value is None:
data[item["entity_id"]] = value
if not data:
self.log.debug(
"Nothing to clean for \"{}\".".format(configuration_key)
)
continue
self.log.debug("Cleaning up {} values for \"{}\".".format(
len(data), configuration_key
))
for entity_id, value in data.items():
entity_key = collections.OrderedDict({
"configuration_id": configuration_id,
"entity_id": entity_id
})
session.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
"CustomAttributeValue",
entity_key
)
)
session.commit()
return True
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
CleanHierarchicalAttrsAction(session, plugins_presets).register()
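
For reference, the per-attribute server query the action assembles looks like this once formatted; the ids below are hypothetical:

cust_attr_query = (
    "select value, entity_id from CustomAttributeValue "
    "where entity_id in ({}) and configuration_id is \"{}\""
)
entity_ids_joined = ", ".join('"{}"'.format(i) for i in ("id-1", "id-2"))
print(cust_attr_query.format(entity_ids_joined, "attr-config-id"))
# select value, entity_id from CustomAttributeValue
# where entity_id in ("id-1", "id-2") and configuration_id is "attr-config-id"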

View file

@@ -1,9 +1,44 @@
import sys
import argparse
import logging
import ftrack_api
from pype.ftrack import BaseAction
try:
from functools import cmp_to_key
except Exception:
cmp_to_key = None
def existence_comparison(item_a, item_b):
if not item_a and not item_b:
return 0
if not item_a:
return 1
if not item_b:
return -1
return None
def task_name_sorter(item_a, item_b):
asset_version_a = item_a["asset_version"]
asset_version_b = item_b["asset_version"]
asset_version_comp = existence_comparison(asset_version_a, asset_version_b)
if asset_version_comp is not None:
return asset_version_comp
task_a = asset_version_a["task"]
task_b = asset_version_b["task"]
task_comp = existence_comparison(task_a, task_b)
if task_comp is not None:
return task_comp
if task_a["name"] > task_b["name"]:
return 1
if task_a["name"] < task_b["name"]:
return -1
return 0
if cmp_to_key:
task_name_sorter = cmp_to_key(task_name_sorter)
task_name_kwarg_key = "key" if cmp_to_key else "cmp"
task_name_sort_kwargs = {task_name_kwarg_key: task_name_sorter}
class ClientReviewSort(BaseAction):
@@ -24,7 +59,6 @@ class ClientReviewSort(BaseAction):
return True
def launch(self, session, entities, event):
entity = entities[0]
# Get all objects from Review Session and all 'sort order' possibilities
@@ -36,11 +70,8 @@ class ClientReviewSort(BaseAction):
# Sort criteria
obj_list = sorted(obj_list, key=lambda k: k['version'])
obj_list = sorted(
obj_list, key=lambda k: k['asset_version']['task']['name']
)
obj_list.sort(**task_name_sort_kwargs)
obj_list = sorted(obj_list, key=lambda k: k['name'])
# Set 'sort order' to sorted list, so they are sorted in Ftrack also
for i in range(len(obj_list)):
obj_list[i]['sort_order'] = sort_order_list[i]
@@ -57,42 +88,3 @@ def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
ClientReviewSort(session, plugins_presets).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
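
The cmp_to_key shim added above lets one comparator drive list.sort() on both Python 3 (key=) and legacy Python 2 (cmp=); a self-contained sketch of the same pattern:

try:
    from functools import cmp_to_key
except ImportError:  # very old interpreters only
    cmp_to_key = None

def compare(item_a, item_b):
    # Classic three-way comparator: negative, zero or positive.
    return (item_a > item_b) - (item_a < item_b)

sorter = cmp_to_key(compare) if cmp_to_key else compare
sort_kwargs = {("key" if cmp_to_key else "cmp"): sorter}

items = [3, 1, 2]
items.sort(**sort_kwargs)
print(items)  # [1, 2, 3]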

View file

@@ -49,27 +49,23 @@ class DeleteAssetSubset(BaseAction):
def _launch(self, event):
try:
args = self._translate_event(
self.session, event
)
entities = self._translate_event(event)
if "values" not in event["data"]:
self.dbcon.install()
return self._interface(self.session, *args)
return self._interface(self.session, entities, event)
confirmation = self.confirm_delete(*args)
confirmation = self.confirm_delete(entities, event)
if confirmation:
return confirmation
self.dbcon.install()
response = self.launch(
self.session, *args
self.session, entities, event
)
finally:
self.dbcon.uninstall()
return self._handle_result(
self.session, response, *args
)
return self._handle_result(response)
def interface(self, session, entities, event):
self.show_message(event, "Preparing data...", True)

View file

@@ -55,6 +55,8 @@ class SeedDebugProject(BaseAction):
# Define how much shots will be created for each sequence
default_shots_count = 10
max_entities_created_at_one_commit = 50
existing_projects = None
new_project_item = "< New Project >"
current_project_item = "< Current Project >"
@@ -284,21 +286,28 @@ class SeedDebugProject(BaseAction):
int(asset_count / available_assets) +
(asset_count % available_assets > 0)
)
created_assets = 0
index = 0
created_entities = 0
to_create_length = asset_count + (asset_count * len(self.asset_tasks))
for _asset_name in self.assets:
if created_assets >= asset_count:
if created_entities >= to_create_length:
break
for asset_num in range(1, repetitive_times + 1):
if created_assets >= asset_count:
if created_entities >= asset_count:
break
asset_name = "%s_%02d" % (_asset_name, asset_num)
asset = self.session.create("AssetBuild", {
"name": asset_name,
"parent": main_entity
})
created_assets += 1
self.log.debug("- Assets/{}".format(asset_name))
created_entities += 1
index += 1
if self.temp_commit(index, created_entities, to_create_length):
index = 0
for task_name in self.asset_tasks:
self.session.create("Task", {
"name": task_name,
@@ -309,7 +318,17 @@ class SeedDebugProject(BaseAction):
asset_name, task_name
))
created_entities += 1
index += 1
if self.temp_commit(
index, created_entities, to_create_length
):
index = 0
self.log.debug("*** Commiting Assets")
self.log.debug("Commiting entities. {}/{}".format(
created_entities, to_create_length
))
self.session.commit()
def create_shots(self, project, seq_count, shots_count):
@@ -345,7 +364,14 @@ class SeedDebugProject(BaseAction):
})
self.log.debug("- Shots")
for seq_num in range(1, seq_count+1):
index = 0
created_entities = 0
to_create_length = (
seq_count
+ (seq_count * shots_count)
+ (seq_count * shots_count * len(self.shot_tasks))
)
for seq_num in range(1, seq_count + 1):
seq_name = "sq%03d" % seq_num
seq = self.session.create("Sequence", {
"name": seq_name,
@@ -353,14 +379,24 @@ class SeedDebugProject(BaseAction):
})
self.log.debug("- Shots/{}".format(seq_name))
for shot_num in range(1, shots_count+1):
shot_name = "%ssh%04d" % (seq_name, (shot_num*10))
created_entities += 1
index += 1
if self.temp_commit(index, created_entities, to_create_length):
index = 0
for shot_num in range(1, shots_count + 1):
shot_name = "%ssh%04d" % (seq_name, (shot_num * 10))
shot = self.session.create("Shot", {
"name": shot_name,
"parent": seq
})
self.log.debug("- Shots/{}/{}".format(seq_name, shot_name))
created_entities += 1
index += 1
if self.temp_commit(index, created_entities, to_create_length):
index = 0
for task_name in self.shot_tasks:
self.session.create("Task", {
"name": task_name,
@@ -371,9 +407,27 @@ class SeedDebugProject(BaseAction):
seq_name, shot_name, task_name
))
created_entities += 1
index += 1
if self.temp_commit(
index, created_entities, to_create_length
):
index = 0
self.log.debug("*** Commiting Shots")
self.log.debug("Commiting entities. {}/{}".format(
created_entities, to_create_length
))
self.session.commit()
def temp_commit(self, index, created_entities, to_create_length):
if index < self.max_entities_created_at_one_commit:
return False
self.log.debug("Commiting {} entities. {}/{}".format(
index, created_entities, to_create_length
))
self.session.commit()
return True
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
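
The temp_commit() helper above batches session commits so a large seed does not accumulate one huge uncommitted operation set; a standalone sketch of the pattern with a stand-in session:

MAX_PER_COMMIT = 50  # mirrors max_entities_created_at_one_commit

class StubSession:
    """Stand-in for ftrack_api.Session, for illustration only."""
    def commit(self):
        print("commit")

def temp_commit(session, index, created, total):
    if index < MAX_PER_COMMIT:
        return False
    print("Committing {} entities. {}/{}".format(index, created, total))
    session.commit()
    return True

session = StubSession()
index = created = 0
total = 120
for _ in range(total):
    created += 1
    index += 1
    if temp_commit(session, index, created, total):
        index = 0
session.commit()  # final flush for the remainder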

View file

@@ -5,13 +5,11 @@ import json
from bson.objectid import ObjectId
from pype.ftrack import BaseAction
from pype.ftrack.lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)
from pypeapp import Anatomy
from pype.ftrack.lib.io_nonsingleton import DbConnector
from pype.ftrack.lib.avalon_sync import CustAttrIdKey
class StoreThumbnailsToAvalon(BaseAction):
# Action identifier
@@ -89,7 +87,7 @@ class StoreThumbnailsToAvalon(BaseAction):
"message": msg
}
project = get_project_from_entity(entities[0])
project = self.get_project_from_entity(entities[0])
project_name = project["full_name"]
anatomy = Anatomy(project_name)
@@ -186,7 +184,7 @@ class StoreThumbnailsToAvalon(BaseAction):
).format(entity["id"]))
continue
avalon_ents_result = get_avalon_entities_for_assetversion(
avalon_ents_result = self.get_avalon_entities_for_assetversion(
entity, self.db_con
)
version_full_path = (
@@ -345,6 +343,119 @@ class StoreThumbnailsToAvalon(BaseAction):
file_open.close()
return True
def get_avalon_entities_for_assetversion(self, asset_version, db_con):
output = {
"success": True,
"message": None,
"project": None,
"project_name": None,
"asset": None,
"asset_name": None,
"asset_path": None,
"subset": None,
"subset_name": None,
"version": None,
"version_name": None,
"representations": None
}
db_con.install()
ft_asset = asset_version["asset"]
subset_name = ft_asset["name"]
version = asset_version["version"]
parent = ft_asset["parent"]
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
project = self.get_project_from_entity(asset_version)
project_name = project["full_name"]
output["project_name"] = project_name
output["asset_name"] = parent["name"]
output["asset_path"] = ent_path
output["subset_name"] = subset_name
output["version_name"] = version
db_con.Session["AVALON_PROJECT"] = project_name
avalon_project = db_con.find_one({"type": "project"})
output["project"] = avalon_project
if not avalon_project:
output["success"] = False
output["message"] = (
"Project not synchronized to avalon `{}`".format(project_name)
)
return output
asset_ent = None
asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if asset_mongo_id:
try:
asset_mongo_id = ObjectId(asset_mongo_id)
asset_ent = db_con.find_one({
"type": "asset",
"_id": asset_mongo_id
})
except Exception:
pass
if not asset_ent:
asset_ent = db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
output["asset"] = asset_ent
if not asset_ent:
output["success"] = False
output["message"] = (
"Not synchronized entity to avalon `{}`".format(ent_path)
)
return output
asset_mongo_id = asset_ent["_id"]
subset_ent = db_con.find_one({
"type": "subset",
"parent": asset_mongo_id,
"name": subset_name
})
output["subset"] = subset_ent
if not subset_ent:
output["success"] = False
output["message"] = (
"Subset `{}` does not exist under Asset `{}`"
).format(subset_name, ent_path)
return output
version_ent = db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
output["version"] = version_ent
if not version_ent:
output["success"] = False
output["message"] = (
"Version `{}` does not exist under Subset `{}` | Asset `{}`"
).format(version, subset_name, ent_path)
return output
repre_ents = list(db_con.find({
"type": "representation",
"parent": version_ent["_id"]
}))
output["representations"] = repre_ents
return output
def register(session, plugins_presets={}):
StoreThumbnailsToAvalon(session, plugins_presets).register()

View file

@@ -1,11 +1,15 @@
from . import avalon_sync
from . import credentials
from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *
from .ftrack_base_handler import *
from .ftrack_base_handler import BaseHandler
from .ftrack_event_handler import BaseEvent
from .ftrack_action_handler import BaseAction
from .ftrack_app_handler import AppAction
from .lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)
__all__ = [
"avalon_sync",
"credentials",
"BaseHandler",
"BaseEvent",
"BaseAction",
"AppAction"
]
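
A small illustration of what the explicit exports change for consumers: with __all__ narrowed, a wildcard import binds exactly the listed names rather than everything the submodules happen to define. A hypothetical consumer:

from pype.ftrack.lib import *  # noqa: F401,F403
# Only avalon_sync, credentials, BaseHandler, BaseEvent,
# BaseAction and AppAction are bound here now.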

View file

@@ -806,12 +806,12 @@ class SyncEntitiesFactory:
def set_hierarchical_attribute(self, hier_attrs, sync_ids):
# collect all hierarchical attribute keys
# and prepare default values to project
attribute_names = []
attribute_ids = []
attributes_by_key = {}
attribute_key_by_id = {}
for attr in hier_attrs:
key = attr["key"]
attribute_ids.append(attr["id"])
attribute_names.append(key)
attribute_key_by_id[attr["id"]] = key
attributes_by_key[key] = attr
store_key = "hier_attrs"
if key.startswith("avalon_"):
@@ -824,11 +824,11 @@ class SyncEntitiesFactory:
# Prepare dict with all hier keys and None values
prepare_dict = {}
prepare_dict_avalon = {}
for attr in attribute_names:
if attr.startswith("avalon_"):
prepare_dict_avalon[attr] = None
for key in attributes_by_key.keys():
if key.startswith("avalon_"):
prepare_dict_avalon[key] = None
else:
prepare_dict[attr] = None
prepare_dict[key] = None
for id, entity_dict in self.entities_dict.items():
# Skip project because has stored defaults at the moment
@@ -842,32 +842,32 @@ class SyncEntitiesFactory:
entity_ids_joined = ", ".join([
"\"{}\"".format(id) for id in sync_ids
])
attributes_joined = ", ".join([
"\"{}\"".format(name) for name in attribute_ids
])
call_expr = [{
"action": "query",
"expression": (
"select value, entity_id from CustomAttributeValue "
"where entity_id in ({}) and configuration_id in ({})"
).format(entity_ids_joined, attributes_joined)
}]
if hasattr(self.session, "call"):
[values] = self.session.call(call_expr)
else:
[values] = self.session._call(call_expr)
avalon_hier = []
for value in values["data"]:
if value["value"] is None:
continue
entity_id = value["entity_id"]
key = value["configuration"]["key"]
store_key = "hier_attrs"
if key.startswith("avalon_"):
store_key = "avalon_attrs"
avalon_hier.append(key)
self.entities_dict[entity_id][store_key][key] = value["value"]
for configuration_id in attribute_key_by_id.keys():
call_expr = [{
"action": "query",
"expression": (
"select value, entity_id from CustomAttributeValue "
"where entity_id in ({}) and configuration_id is \"{}\""
).format(entity_ids_joined, configuration_id)
}]
if hasattr(self.session, "call"):
[values] = self.session.call(call_expr)
else:
[values] = self.session._call(call_expr)
for value in values["data"]:
if value["value"] is None:
continue
entity_id = value["entity_id"]
key = attribute_key_by_id[value["configuration_id"]]
if key.startswith("avalon_"):
store_key = "avalon_attrs"
avalon_hier.append(key)
else:
store_key = "hier_attrs"
self.entities_dict[entity_id][store_key][key] = value["value"]
# Get dictionary of non-None hierarchical values to pull down to children
top_id = self.ft_project_id
@@ -888,13 +888,14 @@ class SyncEntitiesFactory:
hier_values, parent_id = hier_down_queue.get()
for child_id in self.entities_dict[parent_id]["children"]:
_hier_values = hier_values.copy()
for name in attribute_names:
store_key = "hier_attrs"
if name.startswith("avalon_"):
for key in attributes_by_key.keys():
if key.startswith("avalon_"):
store_key = "avalon_attrs"
value = self.entities_dict[child_id][store_key][name]
else:
store_key = "hier_attrs"
value = self.entities_dict[child_id][store_key][key]
if value is not None:
_hier_values[name] = value
_hier_values[key] = value
self.entities_dict[child_id]["hier_attrs"].update(_hier_values)
hier_down_queue.put((_hier_values, child_id))

View file

@@ -23,17 +23,13 @@ class BaseAction(BaseHandler):
def __init__(self, session, plugins_presets={}):
'''Expects a ftrack_api.Session instance'''
super().__init__(session, plugins_presets)
if self.label is None:
raise ValueError(
'Action missing label.'
)
raise ValueError('Action missing label.')
elif self.identifier is None:
raise ValueError(
'Action missing identifier.'
)
if self.identifier is None:
raise ValueError('Action missing identifier.')
super().__init__(session, plugins_presets)
def register(self):
'''
@@ -61,66 +57,131 @@ class BaseAction(BaseHandler):
self._launch
)
def _launch(self, event):
args = self._translate_event(
self.session, event
def _discover(self, event):
entities = self._translate_event(event)
accepts = self.discover(self.session, entities, event)
if not accepts:
return
self.log.debug(u'Discovering action with selection: {0}'.format(
event['data'].get('selection', [])
))
return {
'items': [{
'label': self.label,
'variant': self.variant,
'description': self.description,
'actionIdentifier': self.identifier,
'icon': self.icon,
}]
}
def discover(self, session, entities, event):
'''Return true if we can handle the selected entities.
*session* is a `ftrack_api.Session` instance
*entities* is a list of tuples each containing the entity type and the
entity id. If the entity is hierarchical you will always get the
entity type TypedContext; once retrieved through a get operation you
will have the "real" entity type, i.e. Shot, Sequence
or Asset Build.
*event* the unmodified original event
'''
return False
def _interface(self, session, entities, event):
interface = self.interface(session, entities, event)
if not interface:
return
if isinstance(interface, (tuple, list)):
return {"items": interface}
if isinstance(interface, dict):
if (
"items" in interface
or ("success" in interface and "message" in interface)
):
return interface
raise ValueError((
"Invalid interface output expected key: \"items\" or keys:"
" \"success\" and \"message\". Got: \"{}\""
).format(str(interface)))
raise ValueError(
"Invalid interface output type \"{}\"".format(
str(type(interface))
)
)
def interface(self, session, entities, event):
'''Return an interface if applicable, otherwise None
*session* is a `ftrack_api.Session` instance
*entities* is a list of tuples each containing the entity type and
the entity id. If the entity is hierarchical you will always get the
entity type TypedContext; once retrieved through a get operation you
will have the "real" entity type, i.e. Shot, Sequence
or Asset Build.
*event* the unmodified original event
'''
return None
def _launch(self, event):
entities = self._translate_event(event)
preactions_launched = self._handle_preactions(self.session, event)
if preactions_launched is False:
return
interface = self._interface(
self.session, *args
self.session, entities, event
)
if interface:
return interface
response = self.launch(
self.session, *args
self.session, entities, event
)
return self._handle_result(
self.session, response, *args
)
return self._handle_result(response)
def _handle_result(self, session, result, entities, event):
def _handle_result(self, result):
'''Validate the returned result from the action callback'''
if isinstance(result, bool):
if result is True:
result = {
'success': result,
'message': (
'{0} launched successfully.'.format(self.label)
)
}
msg = 'Action {0} finished.'
else:
result = {
'success': result,
'message': (
'{0} launch failed.'.format(self.label)
)
}
msg = 'Action {0} failed.'
elif isinstance(result, dict):
return {
'success': result,
'message': msg.format(self.label)
}
if isinstance(result, dict):
if 'items' in result:
items = result['items']
if not isinstance(items, list):
if not isinstance(result['items'], list):
raise ValueError('Invalid items format, must be list!')
else:
for key in ('success', 'message'):
if key in result:
continue
if key not in result:
raise KeyError('Missing required key: {0}.'.format(key))
return result
raise KeyError(
'Missing required key: {0}.'.format(key)
)
else:
self.log.error(
'Invalid result type must be bool or dictionary!'
)
self.log.warning((
'Invalid result type \"{}\" must be bool or dictionary!'
).format(str(type(result))))
return result
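
A minimal subclass sketch showing how the refactored discover/interface/launch hooks fit together; the action metadata is hypothetical:

from pype.ftrack import BaseAction

class HelloAction(BaseAction):
    identifier = "hello.action"  # hypothetical identifier
    label = "Hello"
    description = "Says hello for a single selected entity."

    def discover(self, session, entities, event):
        # _discover() only advertises the action when this returns True.
        return len(entities) == 1

    def launch(self, session, entities, event):
        # A bare bool is wrapped by _handle_result() into
        # {"success": True, "message": "Action Hello finished."}
        return True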

View file

@@ -1,44 +1,35 @@
import os
import sys
import platform
from avalon import lib as avalonlib
import avalon.lib
import acre
from pype import api as pype
from pype import lib as pypelib
from pypeapp import config
from .ftrack_base_handler import BaseHandler
from .ftrack_action_handler import BaseAction
from pypeapp import Anatomy
class AppAction(BaseHandler):
'''Custom Action base class
class AppAction(BaseAction):
"""Application Action class.
<label> - a descriptive string identifying your action.
<variant> - To group actions together, give them the same
label and specify a unique variant per action.
<identifier> - a unique identifier for app.
<description> - a verbose descriptive text for your action
<icon> - icon in ftrack
'''
Args:
session (ftrack_api.Session): Session where action will be registered.
label (str): A descriptive string identifying your action.
variant (str, optional): To group actions together, give them the same
label and specify a unique variant per action.
identifier (str): A unique identifier for the app.
description (str): A verbose descriptive text for your action.
icon (str): Url path to icon which will be shown in Ftrack web.
"""
type = 'Application'
preactions = ['start.timer']
type = "Application"
preactions = ["start.timer"]
def __init__(
self, session, label, name, executable, variant=None,
icon=None, description=None, preactions=[], plugins_presets={}
):
super().__init__(session, plugins_presets)
'''Expects a ftrack_api.Session instance'''
if label is None:
raise ValueError('Action missing label.')
elif name is None:
raise ValueError('Action missing identifier.')
elif executable is None:
raise ValueError('Action missing executable.')
self.label = label
self.identifier = name
self.executable = executable
@@ -47,11 +38,19 @@ class AppAction(BaseHandler):
self.description = description
self.preactions.extend(preactions)
super().__init__(session, plugins_presets)
if label is None:
raise ValueError("Action missing label.")
if name is None:
raise ValueError("Action missing identifier.")
if executable is None:
raise ValueError("Action missing executable.")
def register(self):
'''Registers the action, subscribing the discover and launch topics.'''
"""Registers the action, subscribing the discover and launch topics."""
discovery_subscription = (
'topic=ftrack.action.discover and source.user.username={0}'
"topic=ftrack.action.discover and source.user.username={0}"
).format(self.session.api_user)
self.session.event_hub.subscribe(
@@ -61,9 +60,9 @@ class AppAction(BaseHandler):
)
launch_subscription = (
'topic=ftrack.action.launch'
' and data.actionIdentifier={0}'
' and source.user.username={1}'
"topic=ftrack.action.launch"
" and data.actionIdentifier={0}"
" and source.user.username={1}"
).format(
self.identifier,
self.session.api_user
@@ -74,7 +73,61 @@ class AppAction(BaseHandler):
)
def discover(self, session, entities, event):
'''Return true if we can handle the selected entities.
"""Return true if we can handle the selected entities.
Args:
session (ftrack_api.Session): Helps to query necessary data.
entities (list): Object of selected entities.
event (ftrack_api.Event): Ftrack event causing discover callback.
"""
if (
len(entities) != 1 or
entities[0].entity_type.lower() != "task"
):
return False
entity = entities[0]
if entity["parent"].entity_type.lower() == "project":
return False
ft_project = self.get_project_from_entity(entity)
database = pypelib.get_avalon_database()
project_name = ft_project["full_name"]
avalon_project = database[project_name].find_one({
"type": "project"
})
if not avalon_project:
return False
project_apps = avalon_project["config"].get("apps", [])
apps = [app["name"] for app in project_apps]
if self.identifier in apps:
return True
return False
def _launch(self, event):
entities = self._translate_event(event)
preactions_launched = self._handle_preactions(
self.session, event
)
if preactions_launched is False:
return
response = self.launch(self.session, entities, event)
return self._handle_result(response)
def launch(self, session, entities, event):
"""Callback method for the custom action.
Return either a bool (True if successful, False if the action failed)
or a dictionary with the keys `message` and `success`; the message
should be a string and will be displayed as feedback to the user,
and success should be a bool.
*session* is a `ftrack_api.Session` instance
@@ -85,90 +138,22 @@ class AppAction(BaseHandler):
or Asset Build.
*event* the unmodified original event
"""
entity = entities[0]
ft_project = self.get_project_from_entity(entity)
project_name = ft_project["full_name"]
database = pypelib.get_avalon_database()
# Get current environments
env_list = [
"AVALON_PROJECT",
"AVALON_SILO",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP",
"AVALON_APP_NAME"
]
env_origin = {}
for env in env_list:
@ -176,37 +161,38 @@ class AppAction(BaseHandler):
# set environments for Avalon
os.environ["AVALON_PROJECT"] = project_name
os.environ["AVALON_SILO"] = entity['ancestors'][0]['name']
os.environ["AVALON_ASSET"] = entity['parent']['name']
os.environ["AVALON_TASK"] = entity['name']
os.environ["AVALON_SILO"] = entity["ancestors"][0]["name"]
os.environ["AVALON_ASSET"] = entity["parent"]["name"]
os.environ["AVALON_TASK"] = entity["name"]
os.environ["AVALON_APP"] = self.identifier.split("_")[0]
os.environ["AVALON_APP_NAME"] = self.identifier
anatomy = Anatomy(project_name)
asset_doc = database[project_name].find_one({
"type": "asset",
"name": entity["parent"]["name"]
})
parents = asset_doc["data"]["parents"]
hierarchy = ""
if parents:
hierarchy = os.path.join(*parents)
os.environ["AVALON_HIERARCHY"] = hierarchy
application = avalon.lib.get_application(os.environ["AVALON_APP_NAME"])
data = {
"root": os.environ.get("PYPE_STUDIO_PROJECTS_MOUNT"),
"project": {
"name": entity['project']['full_name'],
"code": entity['project']['name']
"name": ft_project["full_name"],
"code": ft_project["name"]
},
"task": entity['name'],
"asset": entity['parent']['name'],
"task": entity["name"],
"asset": entity["parent"]["name"],
"app": application["application_dir"],
"hierarchy": hierarchy,
"hierarchy": hierarchy
}
av_project = database[project_name].find_one({"type": 'project'})
@ -258,14 +244,6 @@ class AppAction(BaseHandler):
env = acre.merge(env, current_env=dict(os.environ))
env = acre.append(dict(os.environ), env)
#
# tools_env = acre.get_tools(tools)
# env = acre.compute(dict(tools_env))
# env = acre.merge(env, dict(os.environ))
# os.environ = acre.append(dict(os.environ), env)
# os.environ = acre.compute(os.environ)
# Get path to execute
st_temp_path = os.environ['PYPE_CONFIG']
os_plat = platform.system().lower()
@ -275,6 +253,18 @@ class AppAction(BaseHandler):
# Full path to executable launcher
execfile = None
if application.get("launch_hook"):
hook = application.get("launch_hook")
self.log.info("launching hook: {}".format(hook))
ret_val = pypelib.execute_hook(
application.get("launch_hook"), env=env)
if not ret_val:
return {
'success': False,
'message': "Hook didn't finish successfully {0}"
.format(self.label)
}
if sys.platform == "win32":
for ext in os.environ["PATHEXT"].split(os.pathsep):
@ -286,14 +276,18 @@ class AppAction(BaseHandler):
# Run SW if executable was found
if execfile is not None:
# Store subprocess to a variable. This is due to a Blender launch
# bug. Please make sure Blender >=2.81 can be launched before
# removing the `_popen` variable.
_popen = avalon.lib.launch(
executable=execfile, args=[], environment=env
)
else:
return {
'success': False,
'message': "We didn't found launcher for {0}"
.format(self.label)
}
pass
}
if sys.platform.startswith('linux'):
execfile = os.path.join(path.strip('"'), self.executable)
@ -318,7 +312,7 @@ class AppAction(BaseHandler):
'message': "No executable permission - {}".format(
execfile)
}
else:
self.log.error('Launcher doesn\'t exist - {}'.format(
execfile))
@ -326,10 +320,13 @@ class AppAction(BaseHandler):
'success': False,
'message': "Launcher doesn't exist - {}".format(execfile)
}
# Run SW if executable was found
if execfile is not None:
# Store subprocess to a variable. This is due to a Blender launch
# bug. Please make sure Blender >=2.81 can be launched before
# removing the `_popen` variable.
_popen = avalon.lib.launch(
'/usr/bin/env', args=['bash', execfile], environment=env
)
else:
@ -338,7 +335,6 @@ class AppAction(BaseHandler):
'message': "We didn't found launcher for {0}"
.format(self.label)
}
# Change status of task to In progress
presets = config.get_presets()["ftrack"]["ftrack_config"]

View file

@ -192,50 +192,10 @@ class BaseHandler(object):
raise NotImplementedError()
def _discover(self, event):
items = {
'items': [{
'label': self.label,
'variant': self.variant,
'description': self.description,
'actionIdentifier': self.identifier,
'icon': self.icon,
}]
}
args = self._translate_event(
self.session, event
)
accepts = self.discover(
self.session, *args
)
if accepts is True:
self.log.debug(u'Discovering action with selection: {0}'.format(
event['data'].get('selection', [])))
return items
def discover(self, session, entities, event):
'''Return true if we can handle the selected entities.
*session* is a `ftrack_api.Session` instance
*entities* is a list of tuples each containing the entity type and the entity id.
If the entity is a hierarchical you will always get the entity
type TypedContext, once retrieved through a get operation you
will have the "real" entity type ie. example Shot, Sequence
or Asset Build.
*event* the unmodified original event
'''
return False
def _translate_event(self, session, event):
def _translate_event(self, event, session=None):
'''Return *event* translated structure to be used with the API.'''
if session is None:
session = self.session
_entities = event['data'].get('entities_object', None)
if (
@ -245,25 +205,40 @@ class BaseHandler(object):
) == ftrack_api.symbol.NOT_SET
):
_entities = self._get_entities(event)
event['data']['entities_object'] = _entities
return _entities
def _get_entities(self, event, session=None, ignore=None):
entities = []
selection = event['data'].get('selection')
if not selection:
return entities
if ignore is None:
ignore = []
elif isinstance(ignore, str):
ignore = [ignore]
filtered_selection = []
for entity in selection:
if entity['entityType'] not in ignore:
filtered_selection.append(entity)
if not filtered_selection:
return entities
if session is None:
session = self.session
session._local_cache.clear()
for entity in filtered_selection:
entities.append(session.get(
self._get_entity_type(entity, session),
entity.get('entityId')
))
return entities
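As an aside, the `ignore` handling above can be shown in a standalone sketch; the selection payload and entity types are made up for illustration:

```python
def filter_selection(selection, ignore=None):
    # Mirrors the ignore handling in _get_entities: accept a single
    # entity type name or a list of names to skip.
    if ignore is None:
        ignore = []
    elif isinstance(ignore, str):
        ignore = [ignore]
    return [
        entity for entity in selection
        if entity["entityType"] not in ignore
    ]

# Hypothetical event selection payload.
selection = [
    {"entityType": "task", "entityId": "123"},
    {"entityType": "socialfeed", "entityId": "456"},
]
assert filter_selection(selection, ignore="socialfeed") == selection[:1]
```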
def _get_entity_type(self, entity, session=None):
'''Return translated entity type that can be used with the API.'''
@ -292,30 +267,12 @@ class BaseHandler(object):
)
def _launch(self, event):
self.session.rollback()
self.session._local_cache.clear()
preactions_launched = self._handle_preactions(self.session, event)
if preactions_launched is False:
return
self.launch(self.session, event)
def launch(self, session, event):
'''Callback method for the custom action.
return either a bool (True if successful or False if the action failed)
@ -360,35 +317,7 @@ class BaseHandler(object):
return False
def _interface(self, *args):
interface = self.interface(*args)
if interface:
if (
'items' in interface or
('success' in interface and 'message' in interface)
):
return interface
return {
'items': interface
}
def interface(self, session, entities, event):
'''Return an interface if applicable or None
*session* is a `ftrack_api.Session` instance
*entities* is a list of tuples each containing the entity type and the entity id.
If the entity is hierarchical you will always get the entity
type TypedContext; once retrieved through a get operation you
will have the "real" entity type, i.e. Shot, Sequence
or Asset Build.
*event* the unmodified original event
'''
return None
def _handle_result(self, result):
'''Validate the returned result from the action callback'''
if isinstance(result, bool):
if result is True:
@ -417,11 +346,6 @@ class BaseHandler(object):
'Missing required key: {0}.'.format(key)
)
else:
self.log.error(
'Invalid result type must be bool or dictionary!'
)
return result
def show_message(self, event, input_message, result=False):
@ -623,3 +547,28 @@ class BaseHandler(object):
self.log.debug((
"Publishing event: {}"
).format(str(event.__dict__)))
def get_project_from_entity(self, entity):
low_entity_type = entity.entity_type.lower()
if low_entity_type == "project":
return entity
if "project" in entity:
# reviewsession, task(Task, Shot, Sequence,...)
return entity["project"]
if low_entity_type == "filecomponent":
entity = entity["version"]
low_entity_type = entity.entity_type.lower()
if low_entity_type == "assetversion":
asset = entity["asset"]
if asset:
parent = asset["parent"]
if parent:
return parent["project"]
project_data = entity["link"][0]
return self.session.query(
"Project where id is {}".format(project_data["id"])
).one()
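A hedged usage sketch of `get_project_from_entity`, e.g. from inside a handler callback; `entities` is assumed to come from `_translate_event` on a live ftrack_api session:

```python
# Hypothetical: resolve the parent project for whatever was selected,
# regardless of whether it is a Task, FileComponent or AssetVersion.
for entity in entities:
    project = self.get_project_from_entity(entity)
    self.log.debug("{} -> {}".format(
        entity.entity_type, project["full_name"]
    ))
```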

View file

@ -43,35 +43,10 @@ class BaseEvent(BaseHandler):
priority=self.priority
)
def _launch(self, event):
self.session.rollback()
self.session._local_cache.clear()
self.launch(self.session, event)
def _translate_event(self, event, session=None):
'''Return *event* translated structure to be used with the API.'''
return self._get_entities(
event,
session,
ignore=['socialfeed', 'socialnotification']
)

View file

@ -1,135 +0,0 @@
from bson.objectid import ObjectId
from .avalon_sync import CustAttrIdKey
import avalon.io
def get_project_from_entity(entity):
# TODO add more entities
ent_type_lowered = entity.entity_type.lower()
if ent_type_lowered == "project":
return entity
elif ent_type_lowered == "assetversion":
return entity["asset"]["parent"]["project"]
elif "project" in entity:
return entity["project"]
return None
def get_avalon_entities_for_assetversion(asset_version, db_con=None):
output = {
"success": True,
"message": None,
"project": None,
"project_name": None,
"asset": None,
"asset_name": None,
"asset_path": None,
"subset": None,
"subset_name": None,
"version": None,
"version_name": None,
"representations": None
}
if db_con is None:
db_con = avalon.io
db_con.install()
ft_asset = asset_version["asset"]
subset_name = ft_asset["name"]
version = asset_version["version"]
parent = ft_asset["parent"]
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
project = get_project_from_entity(asset_version)
project_name = project["full_name"]
output["project_name"] = project_name
output["asset_name"] = parent["name"]
output["asset_path"] = ent_path
output["subset_name"] = subset_name
output["version_name"] = version
db_con.Session["AVALON_PROJECT"] = project_name
avalon_project = db_con.find_one({"type": "project"})
output["project"] = avalon_project
if not avalon_project:
output["success"] = False
output["message"] = "Project not synchronized to avalon `{}`".format(
project_name
)
return output
asset_ent = None
asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if asset_mongo_id:
try:
asset_mongo_id = ObjectId(asset_mongo_id)
asset_ent = db_con.find_one({
"type": "asset",
"_id": asset_mongo_id
})
except Exception:
pass
if not asset_ent:
asset_ent = db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
output["asset"] = asset_ent
if not asset_ent:
output["success"] = False
output["message"] = "Not synchronized entity to avalon `{}`".format(
ent_path
)
return output
asset_mongo_id = asset_ent["_id"]
subset_ent = db_con.find_one({
"type": "subset",
"parent": asset_mongo_id,
"name": subset_name
})
output["subset"] = subset_ent
if not subset_ent:
output["success"] = False
output["message"] = (
"Subset `{}` does not exist under Asset `{}`"
).format(subset_name, ent_path)
return output
version_ent = db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
output["version"] = version_ent
if not version_ent:
output["success"] = False
output["message"] = (
"Version `{}` does not exist under Subset `{}` | Asset `{}`"
).format(version, subset_name, ent_path)
return output
repre_ents = list(db_con.find({
"type": "representation",
"parent": version_ent["_id"]
}))
output["representations"] = repre_ents
return output

View file

@ -0,0 +1,42 @@
import os
import traceback
from pype.lib import PypeHook
from pypeapp import Logger
from pype.premiere import lib as prlib
class PremierePrelaunch(PypeHook):
"""
This hook will check if the current workfile path has an Adobe Premiere
project inside. If not, it initializes it and finally passes the
path to the project via an environment variable to the Premiere
launcher shell script.
"""
def __init__(self, logger=None):
if not logger:
self.log = Logger().get_logger(self.__class__.__name__)
else:
self.log = logger
self.signature = "( {} )".format(self.__class__.__name__)
def execute(self, *args, env: dict = None) -> bool:
if not env:
env = os.environ
try:
__import__("pype.premiere")
__import__("pyblish")
except ImportError as e:
print(traceback.format_exc())
print("pyblish: Could not load integration: %s " % e)
else:
# Premiere Setup integration
# importlib.reload(prlib)
prlib.setup(env)
return True

View file

@ -0,0 +1,83 @@
import logging
import os
from pype.lib import PypeHook
from pype.unreal import lib as unreal_lib
from pypeapp import Logger
log = logging.getLogger(__name__)
class UnrealPrelaunch(PypeHook):
"""
This hook will check if the current workfile path has an Unreal
project inside. If not, it initializes it and finally passes the
path to the project via an environment variable to the Unreal
launcher shell script.
"""
def __init__(self, logger=None):
if not logger:
self.log = Logger().get_logger(self.__class__.__name__)
else:
self.log = logger
self.signature = "( {} )".format(self.__class__.__name__)
def execute(self, *args, env: dict = None) -> bool:
if not env:
env = os.environ
asset = env["AVALON_ASSET"]
task = env["AVALON_TASK"]
workdir = env["AVALON_WORKDIR"]
engine_version = env["AVALON_APP_NAME"].split("_")[-1]
project_name = f"{asset}_{task}"
# Unreal is sensitive about project names longer than 20 chars
if len(project_name) > 20:
self.log.warning((f"Project name exceeds 20 characters "
f"({project_name})!"))
# Unreal doesn't accept non-alphabetic characters at the start
# of the project name. This is because the project name is then used
# in various places inside C++ code, where variable names cannot
# start with non-alpha. We prepend 'P' to the project name to solve it.
# 😱
if not project_name[:1].isalpha():
self.log.warning(f"Project name doesn't start with alphabet "
f"character ({project_name}). Appending 'P'")
project_name = f"P{project_name}"
project_path = os.path.join(workdir, project_name)
self.log.info((f"{self.signature} requested UE4 version: "
f"[ {engine_version} ]"))
detected = unreal_lib.get_engine_versions()
detected_str = ', '.join(detected.keys()) or 'none'
self.log.info((f"{self.signature} detected UE4 versions: "
f"[ {detected_str} ]"))
del detected_str
engine_version = ".".join(engine_version.split(".")[:2])
if engine_version not in detected.keys():
self.log.error((f"{self.signature} requested version not "
f"detected [ {engine_version} ]"))
return False
os.makedirs(project_path, exist_ok=True)
project_file = os.path.join(project_path, f"{project_name}.uproject")
engine_path = detected[engine_version]
if not os.path.isfile(project_file):
self.log.info((f"{self.signature} creating unreal "
f"project [ {project_name} ]"))
if env.get("AVALON_UNREAL_PLUGIN"):
os.environ["AVALON_UNREAL_PLUGIN"] = env.get("AVALON_UNREAL_PLUGIN") # noqa: E501
unreal_lib.create_unreal_project(project_name,
engine_version,
project_path,
engine_path=engine_path)
env["PYPE_UNREAL_PROJECT_FILE"] = project_file
env["AVALON_CURRENT_UNREAL_ENGINE"] = engine_path
return True
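The two project-name rules enforced above can be condensed into a small sketch; the function name and sample value are illustrative only:

```python
def sanitize_unreal_project_name(name: str) -> str:
    # Unreal project names should stay within 20 characters; the hook
    # above only warns about this, it does not truncate.
    if len(name) > 20:
        print(f"warning: project name exceeds 20 characters ({name})")
    # Names must start with an alphabetic character; prepend 'P' if not.
    if not name[:1].isalpha():
        name = f"P{name}"
    return name

assert sanitize_unreal_project_name("010_sh010_anim") == "P010_sh010_anim"
```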

View file

@ -1,14 +1,21 @@
import os
import sys
import types
import re
import uuid
import json
import collections
import logging
import itertools
import contextlib
import subprocess
import inspect
from abc import ABCMeta, abstractmethod
from avalon import io
from avalon import io, pipeline
import six
import avalon.api
import avalon
from pypeapp import config
log = logging.getLogger(__name__)
@ -177,7 +184,8 @@ def modified_environ(*remove, **update):
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables
and values to add/update.
"""
env = os.environ
update = update or {}
@ -229,6 +237,8 @@ def is_latest(representation):
"""
version = io.find_one({"_id": representation['parent']})
if version["type"] == "master_version":
return True
# Get highest version under the parent
highest_version = io.find_one({
@ -401,8 +411,8 @@ def switch_item(container,
"parent": version["_id"]}
)
assert representation, ("Could not find representation in the database with"
" the name '%s'" % representation_name)
assert representation, ("Could not find representation in the database "
"with the name '%s'" % representation_name)
avalon.api.switch(container, representation)
@ -489,7 +499,6 @@ def filter_pyblish_plugins(plugins):
`discover()` method.
:type plugins: Dict
"""
from pyblish import api
host = api.current_host()
@ -535,7 +544,9 @@ def get_subsets(asset_name,
"""
Query subsets with filter on name.
The method will return all found subsets and their defined versions
and representations. Version could be specified with a number.
Representations can be filtered.
Arguments:
asset_name (str): asset (shot) name
@ -546,14 +557,13 @@ def get_subsets(asset_name,
Returns:
dict: subsets with version and representations in keys
"""
# query asset from db
asset_io = io.find_one({"type": "asset", "name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
Check correct name: `{}`".format(asset_name)
assert asset_io, (
"Asset not existing. Check correct name: `{}`").format(asset_name)
# create subsets query filter
filter_query = {"type": "subset", "parent": asset_io["_id"]}
@ -567,7 +577,9 @@ def get_subsets(asset_name,
# query all assets
subsets = [s for s in io.find(filter_query)]
assert subsets, "No subsets found. Check correct filter. Try this for start `r'.*'`: asset: `{}`".format(asset_name)
assert subsets, ("No subsets found. Check correct filter. "
"Try this for start `r'.*'`: "
"asset: `{}`").format(asset_name)
output_dict = {}
# Process subsets
@ -620,7 +632,6 @@ class CustomNone:
def __init__(self):
"""Create uuid as identifier for custom None."""
self.identifier = str(uuid.uuid4())
def __bool__(self):
@ -641,3 +652,678 @@ class CustomNone:
def __repr__(self):
"""Representation of custom None."""
return "<CustomNone-{}>".format(str(self.identifier))
def execute_hook(hook, *args, **kwargs):
"""
This will load the hook file, instantiate the class and call the
`execute` method on it. A hook must be in the form:
`$PYPE_ROOT/repos/pype/path/to/hook.py/HookClass`
This will load `hook.py`, instantiate HookClass and then call
`execute(*args, **kwargs)` on it.
:param hook: path to hook class
:type hook: str
"""
class_name = hook.split("/")[-1]
abspath = os.path.join(os.getenv('PYPE_ROOT'),
'repos', 'pype', *hook.split("/")[:-1])
mod_name, mod_ext = os.path.splitext(os.path.basename(abspath))
if not mod_ext == ".py":
return False
module = types.ModuleType(mod_name)
module.__file__ = abspath
try:
with open(abspath) as f:
six.exec_(f.read(), module.__dict__)
sys.modules[abspath] = module
except Exception as exp:
log.exception("loading hook failed: {}".format(exp),
exc_info=True)
return False
obj = getattr(module, class_name)
hook_obj = obj()
ret_val = hook_obj.execute(*args, **kwargs)
return ret_val
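A minimal usage sketch, assuming a hook file exists at the hypothetical path below (the path is illustrative, not a real repository file):

```python
import os

# Path follows the documented `path/to/hook.py/HookClass` form,
# relative to $PYPE_ROOT/repos/pype.
success = execute_hook(
    "hooks/example/example_prelaunch.py/ExamplePrelaunch",
    env=dict(os.environ)
)
if not success:
    log.warning("prelaunch hook failed")
```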
@six.add_metaclass(ABCMeta)
class PypeHook:
def __init__(self):
pass
@abstractmethod
def execute(self, *args, **kwargs):
pass
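A minimal concrete hook, sketched only from the `PypeHook` contract above; the class and its behaviour are illustrative:

```python
class PrintEnvHook(PypeHook):
    """Toy hook: report how many environment variables it received."""

    def execute(self, *args, env: dict = None) -> bool:
        env = env or {}
        print("launching with {} env vars".format(len(env)))
        # A falsy return value makes execute_hook's caller
        # report the launch as failed.
        return True
```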
def get_linked_assets(asset_entity):
"""Return linked assets for `asset_entity`."""
# TODO implement
return []
def map_subsets_by_family(subsets):
subsets_by_family = collections.defaultdict(list)
for subset in subsets:
family = subset["data"].get("family")
if not family:
families = subset["data"].get("families")
if not families:
continue
family = families[0]
subsets_by_family[family].append(subset)
return subsets_by_family
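A small usage example of `map_subsets_by_family` with made-up subset documents:

```python
subsets = [
    {"name": "modelMain", "data": {"family": "model"}},
    {"name": "renderMain", "data": {"families": ["render", "review"]}},
    {"name": "broken", "data": {}},  # skipped: no family information
]
grouped = map_subsets_by_family(subsets)
assert sorted(grouped) == ["model", "render"]
```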
class BuildWorkfile:
"""Wrapper for build workfile process.
Load representations for current context by build presets. Build presets
are host related, since each host has it's loaders.
"""
def process(self):
"""Main method of this wrapper.
Building of workfile is triggered and is possible to implement
post processing of loaded containers if necessary.
"""
containers = self.build_workfile()
return containers
def build_workfile(self):
"""Prepares and load containers into workfile.
Loads latest versions of current and linked assets to workfile by logic
stored in Workfile profiles from presets. Profiles are set by host,
filtered by current task name and used by families.
Each family can specify representation names and loaders for
representations; the first representation that loads successfully
is returned as a container.
At the end you'll get a list of loaded containers per asset.
loaded_containers [{
"asset_entity": <AssetEntity1>,
"containers": [<Container1>, <Container2>, ...]
}, {
"asset_entity": <AssetEntity2>,
"containers": [<Container3>, ...]
}, {
...
}]
"""
# Get current asset name and entity
current_asset_name = io.Session["AVALON_ASSET"]
current_asset_entity = io.find_one({
"type": "asset",
"name": current_asset_name
})
# Skip if asset was not found
if not current_asset_entity:
print("Asset entity with name `{}` was not found".format(
current_asset_name
))
return
# Prepare available loaders
loaders_by_name = {}
for loader in avalon.api.discover(avalon.api.Loader):
loader_name = loader.__name__
if loader_name in loaders_by_name:
raise KeyError(
"Duplicated loader name {0}!".format(loader_name)
)
loaders_by_name[loader_name] = loader
# Skip if there are no loaders
if not loaders_by_name:
log.warning("There are no registered loaders.")
return
# Get current task name
current_task_name = io.Session["AVALON_TASK"]
# Load workfile presets for task
build_presets = self.get_build_presets(current_task_name)
# Skip if there are no presets for the task
if not build_presets:
log.warning(
"Current task `{}` does not have any loading preset.".format(
current_task_name
)
)
return
# Get presets for loading current asset
current_context_profiles = build_presets.get("current_context")
# Get presets for loading linked assets
link_context_profiles = build_presets.get("linked_assets")
# Skip if both are missing
if not current_context_profiles and not link_context_profiles:
log.warning("Current task `{}` has empty loading preset.".format(
current_task_name
))
return
elif not current_context_profiles:
log.warning((
"Current task `{}` doesn't have any loading"
" preset for it's context."
).format(current_task_name))
elif not link_context_profiles:
log.warning((
"Current task `{}` doesn't have any"
"loading preset for it's linked assets."
).format(current_task_name))
# Prepare assets to process by workfile presets
assets = []
current_asset_id = None
if current_context_profiles:
# Add current asset entity if preset has current context set
assets.append(current_asset_entity)
current_asset_id = current_asset_entity["_id"]
if link_context_profiles:
# Find and append linked assets if preset has set linked mapping
link_assets = get_linked_assets(current_asset_entity)
if link_assets:
assets.extend(link_assets)
# Skip if there are no assets. This can happen if only linked mapping
# is set and there are no links for this asset.
if not assets:
log.warning(
"Asset does not have linked assets. Nothing to process."
)
return
# Prepare entities from database for assets
prepared_entities = self._collect_last_version_repres(assets)
# Load containers by prepared entities and presets
loaded_containers = []
# - Current asset containers
if current_asset_id and current_asset_id in prepared_entities:
current_context_data = prepared_entities.pop(current_asset_id)
loaded_data = self.load_containers_by_asset_data(
current_context_data, current_context_profiles, loaders_by_name
)
if loaded_data:
loaded_containers.append(loaded_data)
# - Linked assets container
for linked_asset_data in prepared_entities.values():
loaded_data = self.load_containers_by_asset_data(
linked_asset_data, link_context_profiles, loaders_by_name
)
if loaded_data:
loaded_containers.append(loaded_data)
# Return list of loaded containers
return loaded_containers
def get_build_presets(self, task_name):
""" Returns presets to build workfile for task name.
Presets are loaded for current project set in
io.Session["AVALON_PROJECT"], filtered by registered host
and entered task name.
:param task_name: Task name used for filtering build presets.
:type task_name: str
:return: preset for the entered task
:rtype: dict | None
"""
host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
presets = config.get_presets(io.Session["AVALON_PROJECT"])
# Get presets for host
build_presets = (
presets["plugins"]
.get(host_name, {})
.get("workfile_build")
)
if not build_presets:
return
task_name_low = task_name.lower()
per_task_preset = None
for preset in build_presets:
preset_tasks = preset.get("tasks") or []
preset_tasks_low = [task.lower() for task in preset_tasks]
if task_name_low in preset_tasks_low:
per_task_preset = preset
break
return per_task_preset
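For orientation, a sketch of the preset shape this lookup and the filtering below assume; only the keys mirror what the code reads, the values are made up:

```python
# presets["plugins"][host_name]["workfile_build"]
workfile_build = [
    {
        "tasks": ["compositing"],
        "current_context": [{
            "families": ["render"],
            "repre_names": ["exr", "dpx"],
            "loaders": ["LoadSequence"],
            "subset_name_filters": [r"^render.*"],  # optional
        }],
        "linked_assets": [],
    },
]
```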
def _filter_build_profiles(self, build_profiles, loaders_by_name):
""" Filter build profiles by loaders and prepare process data.
Valid profile must have "loaders", "families" and "repre_names" keys
with valid values.
- "loaders" expects list of strings representing possible loaders.
- "families" expects list of strings for filtering
by main subset family.
- "repre_names" expects list of strings for filtering by
representation name.
Lowered "families" and "repre_names" are prepared for each profile with
all required keys.
:param build_profiles: Profiles for building workfile.
:type build_profiles: dict
:param loaders_by_name: Available loaders per name.
:type loaders_by_name: dict
:return: Filtered and prepared profiles.
:rtype: list
"""
valid_profiles = []
for profile in build_profiles:
# Check loaders
profile_loaders = profile.get("loaders")
if not profile_loaders:
log.warning((
"Build profile has missing loaders configuration: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Check if any loader is available
loaders_match = False
for loader_name in profile_loaders:
if loader_name in loaders_by_name:
loaders_match = True
break
if not loaders_match:
log.warning((
"All loaders from Build profile are not available: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Check families
profile_families = profile.get("families")
if not profile_families:
log.warning((
"Build profile is missing families configuration: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Check representation names
profile_repre_names = profile.get("repre_names")
if not profile_repre_names:
log.warning((
"Build profile is missing"
" representation names filtering: {0}"
).format(json.dumps(profile, indent=4)))
continue
# Prepare lowered families and representation names
profile["families_lowered"] = [
fam.lower() for fam in profile_families
]
profile["repre_names_lowered"] = [
name.lower() for name in profile_repre_names
]
valid_profiles.append(profile)
return valid_profiles
def _prepare_profile_for_subsets(self, subsets, profiles):
"""Select profile for each subset byt it's data.
Profiles are filtered for each subset individually.
Profile is filtered by subset's family, optionally by name regex and
representation names set in profile.
It is possible to not find a matching profile for a subset; in that
case the subset is skipped, and it is possible that none of the
subsets has a matching profile.
:param subsets: Subset documents.
:type subsets: list
:param profiles: Build profiles.
:type profiles: dict
:return: Profile by subset's id.
:rtype: dict
"""
# Prepare subsets
subsets_by_family = map_subsets_by_family(subsets)
profiles_per_subset_id = {}
for family, subsets in subsets_by_family.items():
family_low = family.lower()
for profile in profiles:
# Skip profile if does not contain family
if family_low not in profile["families_lowered"]:
continue
# Precompile name filters as regexes
profile_regexes = profile.get("subset_name_filters")
if profile_regexes:
_profile_regexes = []
for regex in profile_regexes:
_profile_regexes.append(re.compile(regex))
profile_regexes = _profile_regexes
for subset in subsets:
# Verify regex filtering (optional)
if profile_regexes:
valid = False
for pattern in profile_regexes:
if re.match(pattern, subset["name"]):
valid = True
break
if not valid:
continue
profiles_per_subset_id[subset["_id"]] = profile
# break profiles loop on finding the first matching profile
break
return profiles_per_subset_id
def load_containers_by_asset_data(
self, asset_entity_data, build_profiles, loaders_by_name
):
"""Load containers for entered asset entity by Build profiles.
:param asset_entity_data: Prepared data with subsets, last version
and representations for specific asset.
:type asset_entity_data: dict
:param build_profiles: Build profiles.
:type build_profiles: dict
:param loaders_by_name: Available loaders per name.
:type loaders_by_name: dict
:return: Output contains asset document and loaded containers.
:rtype: dict
"""
# Make sure all data are not empty
if not asset_entity_data or not build_profiles or not loaders_by_name:
return
asset_entity = asset_entity_data["asset_entity"]
valid_profiles = self._filter_build_profiles(
build_profiles, loaders_by_name
)
if not valid_profiles:
log.warning(
"There are not valid Workfile profiles. Skipping process."
)
return
log.debug("Valid Workfile profiles: {}".format(valid_profiles))
subsets_by_id = {}
version_by_subset_id = {}
repres_by_version_id = {}
for subset_id, in_data in asset_entity_data["subsets"].items():
subset_entity = in_data["subset_entity"]
subsets_by_id[subset_entity["_id"]] = subset_entity
version_data = in_data["version"]
version_entity = version_data["version_entity"]
version_by_subset_id[subset_id] = version_entity
repres_by_version_id[version_entity["_id"]] = (
version_data["repres"]
)
if not subsets_by_id:
log.warning("There are not subsets for asset {0}".format(
asset_entity["name"]
))
return
profiles_per_subset_id = self._prepare_profile_for_subsets(
subsets_by_id.values(), valid_profiles
)
if not profiles_per_subset_id:
log.warning("There are not valid subsets.")
return
valid_repres_by_subset_id = collections.defaultdict(list)
for subset_id, profile in profiles_per_subset_id.items():
profile_repre_names = profile["repre_names_lowered"]
version_entity = version_by_subset_id[subset_id]
version_id = version_entity["_id"]
repres = repres_by_version_id[version_id]
for repre in repres:
repre_name_low = repre["name"].lower()
if repre_name_low in profile_repre_names:
valid_repres_by_subset_id[subset_id].append(repre)
# DEBUG message
msg = "Valid representations for Asset: `{}`".format(
asset_entity["name"]
)
for subset_id, repres in valid_repres_by_subset_id.items():
subset = subsets_by_id[subset_id]
msg += "\n# Subset Name/ID: `{}`/{}".format(
subset["name"], subset_id
)
for repre in repres:
msg += "\n## Repre name: `{}`".format(repre["name"])
log.debug(msg)
containers = self._load_containers(
valid_repres_by_subset_id, subsets_by_id,
profiles_per_subset_id, loaders_by_name
)
return {
"asset_entity": asset_entity,
"containers": containers
}
def _load_containers(
self, repres_by_subset_id, subsets_by_id,
profiles_per_subset_id, loaders_by_name
):
"""Real load by collected data happens here.
Loading of representations per subset happens here. Each subset can
load one representation. Loading is tried in a specific order.
Representations are tried by names defined in configuration.
If a subset has a representation matching a representation name, each
loader is tried until one succeeds. If none of them succeeds, the
next representation name is tried.
Subset process loop ends when any representation is loaded or
all matching representations were already tried.
:param repres_by_subset_id: Available representations mapped
by their parent (subset) id.
:type repres_by_subset_id: dict
:param subsets_by_id: Subset documents mapped by their id.
:type subsets_by_id: dict
:param profiles_per_subset_id: Build profiles mapped by subset id.
:type profiles_per_subset_id: dict
:param loaders_by_name: Available loaders per name.
:type loaders_by_name: dict
:return: Objects of loaded containers.
:rtype: list
"""
loaded_containers = []
for subset_id, repres in repres_by_subset_id.items():
subset_name = subsets_by_id[subset_id]["name"]
profile = profiles_per_subset_id[subset_id]
loaders_last_idx = len(profile["loaders"]) - 1
repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
repre_by_low_name = {
repre["name"].lower(): repre for repre in repres
}
is_loaded = False
for repre_name_idx, profile_repre_name in enumerate(
profile["repre_names_lowered"]
):
# Break iteration if representation was already loaded
if is_loaded:
break
repre = repre_by_low_name.get(profile_repre_name)
if not repre:
continue
for loader_idx, loader_name in enumerate(profile["loaders"]):
if is_loaded:
break
loader = loaders_by_name.get(loader_name)
if not loader:
continue
try:
container = avalon.api.load(
loader,
repre["_id"],
name=subset_name
)
loaded_containers.append(container)
is_loaded = True
except Exception as exc:
if isinstance(exc, pipeline.IncompatibleLoaderError):
log.info((
"Loader `{}` is not compatible with"
" representation `{}`"
).format(loader_name, repre["name"]))
else:
log.error(
"Unexpected error happened during loading",
exc_info=True
)
msg = "Loading failed."
if loader_idx < loaders_last_idx:
msg += " Trying next loader."
elif repre_name_idx < repre_names_last_idx:
msg += " Trying next representation."
else:
msg += (
" Loading of subset `{}` was not successful."
).format(subset_name)
log.info(msg)
return loaded_containers
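The fallback order implemented above can be summarized in a standalone sketch; the names and the toy `try_load` callback are illustrative:

```python
def pick_first(repre_names, loaders, try_load):
    # Try representation names in priority order; for each, try loaders
    # in priority order; stop at the first successful load.
    for repre_name in repre_names:
        for loader in loaders:
            if try_load(loader, repre_name):
                return repre_name, loader
    return None

assert pick_first(
    ["exr", "mov"], ["NukeLoader", "MovLoader"],
    lambda loader, name: (loader, name) == ("MovLoader", "mov")
) == ("mov", "MovLoader")
```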
def _collect_last_version_repres(self, asset_entities):
"""Collect subsets, versions and representations for asset_entities.
:param asset_entities: Asset entities for which we want to find data
:type asset_entities: list
:return: collected entities
:rtype: dict
Example output:
```
{
{Asset ID}: {
"asset_entity": <AssetEntity>,
"subsets": {
{Subset ID}: {
"subset_entity": <SubsetEntity>,
"version": {
"version_entity": <VersionEntity>,
"repres": [
<RepreEntity1>, <RepreEntity2>, ...
]
}
},
...
}
},
...
}
output[asset_id]["subsets"][subset_id]["version"]["repres"]
```
"""
if not asset_entities:
return {}
asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities}
subsets = list(io.find({
"type": "subset",
"parent": {"$in": asset_entity_by_ids.keys()}
}))
subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
sorted_versions = list(io.find({
"type": "version",
"parent": {"$in": subset_entity_by_ids.keys()}
}).sort("name", -1))
subset_id_with_latest_version = []
last_versions_by_id = {}
for version in sorted_versions:
subset_id = version["parent"]
if subset_id in subset_id_with_latest_version:
continue
subset_id_with_latest_version.append(subset_id)
last_versions_by_id[version["_id"]] = version
repres = io.find({
"type": "representation",
"parent": {"$in": last_versions_by_id.keys()}
})
output = {}
for repre in repres:
version_id = repre["parent"]
version = last_versions_by_id[version_id]
subset_id = version["parent"]
subset = subset_entity_by_ids[subset_id]
asset_id = subset["parent"]
asset = asset_entity_by_ids[asset_id]
if asset_id not in output:
output[asset_id] = {
"asset_entity": asset,
"subsets": {}
}
if subset_id not in output[asset_id]["subsets"]:
output[asset_id]["subsets"][subset_id] = {
"subset_entity": subset,
"version": {
"version_entity": version,
"repres": []
}
}
output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
repre
)
return output

View file

@ -2,8 +2,9 @@ import sys
import os
import logging
from avalon.vendor.Qt import QtWidgets, QtGui
from avalon.maya import pipeline
from ..lib import BuildWorkfile
import maya.cmds as cmds
self = sys.modules[__name__]
@ -21,8 +22,15 @@ def _get_menu():
return menu
def deferred():
def add_build_workfiles_item():
# Add build first workfile
cmds.menuItem(divider=True, parent=pipeline._menu)
cmds.menuItem(
"Build First Workfile",
parent=pipeline._menu,
command=lambda *args: BuildWorkfile().process()
)
log.info("Attempting to install scripts menu..")
@ -30,8 +38,11 @@ def deferred():
import scriptsmenu.launchformaya as launchformaya
import scriptsmenu.scriptsmenu as scriptsmenu
except ImportError:
log.warning("Skipping studio.menu install, because "
"'scriptsmenu' module seems unavailable.")
log.warning(
"Skipping studio.menu install, because "
"'scriptsmenu' module seems unavailable."
)
add_build_workfiles_item()
return
# load configuration of custom menu
@ -39,15 +50,16 @@ def deferred():
config = scriptsmenu.load_configuration(config_path)
# run the launcher for Maya menu
studio_menu = launchformaya.main(
title=self._menu.title(),
objectName=self._menu
)
# apply configuration
studio_menu.build_from_configuration(studio_menu, config)
def uninstall():
menu = _get_menu()
if menu:
log.info("Attempting to uninstall..")
@ -60,9 +72,8 @@ def uninstall():
def install():
if cmds.about(batch=True):
print("Skipping pype.menu initialization in batch mode..")
log.info("Skipping pype.menu initialization in batch mode..")
return
uninstall()

View file

@ -28,7 +28,7 @@ self = sys.modules[__name__]
self._project = None
def on_script_load():
''' Callback for ffmpeg support
'''
if nuke.env['LINUX']:
@ -39,7 +39,7 @@ def onScriptLoad():
nuke.tcl('load movWriter')
def check_inventory_versions():
"""
Actual version identifier of loaded containers
@ -180,8 +180,8 @@ def format_anatomy(data):
padding = int(anatomy.templates['render']['padding'])
except KeyError as e:
msg = ("`padding` key is not in `render` "
"Anatomy template. Please, add it there and restart "
"the pipeline (padding: \"4\"): `{}`").format(e)
"Anatomy template. Please, add it there and restart "
"the pipeline (padding: \"4\"): `{}`").format(e)
log.error(msg)
nuke.message(msg)
@ -215,14 +215,14 @@ def script_name():
def add_button_write_to_read(node):
name = "createReadNode"
label = "Create Read"
label = "[ Create Read ]"
value = "import write_to_read;write_to_read.write_to_read(nuke.thisNode())"
k = nuke.PyScript_Knob(name, label, value)
k.setFlag(0x1000)
node.addKnob(k)
def create_write_node(name, data, input=None, prenodes=None, review=True):
''' Creating write node which is group node
Arguments:
@ -231,6 +231,7 @@ def create_write_node(name, data, input=None, prenodes=None):
input (node): selected node to connect to
prenodes (list, optional): list of lists, definitions for nodes
to be created before write
review (bool): adding review knob
Example:
prenodes = [(
@ -329,8 +330,17 @@ def create_write_node(name, data, input=None, prenodes=None):
# add data to knob
for k, v in properties:
try:
now_node[k].value()
except NameError:
log.warning(
"knob `{}` does not exist on node `{}`".format(
k, now_node["name"].value()
))
continue
if k and v:
now_node[k].setValue(str(v))
# connect to previous node
if set_output_to:
@ -339,14 +349,14 @@ def create_write_node(name, data, input=None, prenodes=None):
input_node = nuke.createNode(
"Input", "name {}".format(node_name))
connections.append({
"node": nuke.toNode(node_name),
"node": nuke.toNode(node_name),
"inputName": node_name})
now_node.setInput(1, input_node)
elif isinstance(set_output_to, str):
input_node = nuke.createNode(
"Input", "name {}".format(node_name))
connections.append({
"node": nuke.toNode(set_output_to),
"node": nuke.toNode(set_output_to),
"inputName": set_output_to})
now_node.setInput(0, input_node)
else:
@ -380,15 +390,8 @@ def create_write_node(name, data, input=None, prenodes=None):
add_rendering_knobs(GN)
if review:
add_review_knob(GN)
# add render button
lnk = nuke.Link_Knob("Render")
@ -396,9 +399,20 @@ def create_write_node(name, data, input=None, prenodes=None):
lnk.setName("Render")
GN.addKnob(lnk)
divider = nuke.Text_Knob('')
GN.addKnob(divider)
# adding write to read button
add_button_write_to_read(GN)
# Deadline tab.
add_deadline_tab(GN)
# set tile color
tile_color = _data.get("tile_color", "0xff0000ff")
GN["tile_color"].setValue(tile_color)
return GN
@ -420,6 +434,17 @@ def add_rendering_knobs(node):
knob = nuke.Boolean_Knob("render_farm", "Render on Farm")
knob.setValue(False)
node.addKnob(knob)
return node
def add_review_knob(node):
''' Adds additional review knob to given node
Arguments:
node (obj): nuke node object to be fixed
Return:
node (obj): with added knob
'''
if "review" not in node.knobs():
knob = nuke.Boolean_Knob("review", "Review")
knob.setValue(True)
@ -692,7 +717,8 @@ class WorkfileSettings(object):
def set_reads_colorspace(self, reads):
""" Setting colorspace to Read nodes
Loops through all read nodes and tries to set colorspace based
on regex rules in presets
"""
changes = dict()
for n in nuke.allNodes():
@ -864,10 +890,10 @@ class WorkfileSettings(object):
if any(x for x in data.values() if x is None):
msg = ("Missing set shot attributes in DB."
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`").format(**data)
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`").format(**data)
log.error(msg)
nuke.message(msg)
@ -886,8 +912,9 @@ class WorkfileSettings(object):
)
except Exception as e:
bbox = None
msg = ("{}:{} \nFormat:Crop need to be set with dots, example: "
"0.0.1920.1080, /nSetting to default").format(__name__, e)
msg = ("{}:{} \nFormat:Crop need to be set with dots, "
"example: 0.0.1920.1080, "
"/nSetting to default").format(__name__, e)
log.error(msg)
nuke.message(msg)
@ -1028,7 +1055,8 @@ class BuildWorkfile(WorkfileSettings):
"""
Building first version of workfile.
Settings are taken from presets and db. It will add all subsets
in last version for defined representations
Arguments:
variable (type): description
@ -1126,7 +1154,7 @@ class BuildWorkfile(WorkfileSettings):
regex_filter=None,
version=None,
representations=["exr", "dpx", "lutJson", "mov",
"preview", "png"]):
"preview", "png", "jpeg", "jpg"]):
"""
A short description.
@ -1256,8 +1284,6 @@ class BuildWorkfile(WorkfileSettings):
representation (dict): avalon db entity
"""
context = representation["context"]
loader_name = "LoadLuts"
loader_plugin = None
@ -1564,10 +1590,9 @@ class ExporterReviewMov(ExporterReview):
self.nodes = {}
# deal with lut now defined in viewer lut
self.viewer_lut_raw = klass.viewer_lut_raw
self.bake_colorspace_fallback = klass.bake_colorspace_fallback
self.bake_colorspace_main = klass.bake_colorspace_main
self.name = name or "baked"
self.ext = ext or "mov"
@ -1628,8 +1653,26 @@ class ExporterReviewMov(ExporterReview):
self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
if not self.viewer_lut_raw:
# OCIODisplay node
dag_node = nuke.createNode("OCIODisplay")
colorspaces = [
self.bake_colorspace_main, self.bake_colorspace_fallback
]
if any(colorspaces):
# OCIOColorSpace with controlled output
dag_node = nuke.createNode("OCIOColorSpace")
for c in colorspaces:
test = dag_node["out_colorspace"].setValue(str(c))
if test:
self.log.info(
"Baking in colorspace... `{}`".format(c))
break
if not test:
dag_node = nuke.createNode("OCIODisplay")
else:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)

View file

@ -0,0 +1,92 @@
import os
import pyblish.api
from avalon import (
io,
api as avalon
)
import json
from pathlib import Path
class CollectContextDataFromAport(pyblish.api.ContextPlugin):
"""
Collecting temp json data sent from a host context
and path for returning json data back to the host itself.
Setting avalon session into correct context
Args:
context (obj): pyblish context session
"""
label = "AdobeCommunicator Collect Context"
order = pyblish.api.CollectorOrder - 0.49
def process(self, context):
self.log.info(
"registred_hosts: `{}`".format(pyblish.api.registered_hosts()))
io.install()
# get json paths from data
input_json_path = os.environ.get("AC_PUBLISH_INPATH")
output_json_path = os.environ.get("AC_PUBLISH_OUTPATH")
rqst_json_data_path = Path(input_json_path)
post_json_data_path = Path(output_json_path)
context.data['post_json_data_path'] = str(post_json_data_path)
# get avalon session data and convert \ to /
_S = avalon.session
projects = Path(_S["AVALON_PROJECTS"]).resolve()
asset = _S["AVALON_ASSET"]
workdir = Path(_S["AVALON_WORKDIR"]).resolve()
_S["AVALON_PROJECTS"] = str(projects)
_S["AVALON_WORKDIR"] = str(workdir)
context.data["avalonSession"] = _S
self.log.info(f"__ avalonSession: `{_S}`")
# get staging directory from received path to json
context.data["stagingDir"] = post_json_data_path.parent
# get data from received json file
with rqst_json_data_path.open(mode='r') as f:
context.data["jsonData"] = json_data = json.load(f)
assert json_data, "No `data` in json file"
# get and check host type
host = json_data.get("host", None)
host_version = json_data.get("hostVersion", None)
assert host, "No `host` data in json file"
assert host_version, "No `hostVersion` data in json file"
context.data["host"] = _S["AVALON_APP"] = host
context.data["hostVersion"] = \
_S["AVALON_APP_VERSION"] = host_version
# get current file
current_file = json_data.get("currentFile", None)
assert current_file, "No `currentFile` data in json file"
context.data["currentFile"] = str(Path(current_file).resolve())
# get project data from avalon
project_data = io.find_one({'type': 'project'})
assert project_data, "No `project_data` data in avalon db"
context.data["projectData"] = project_data
self.log.debug("project_data: {}".format(project_data))
# get asset data from avalon and fix all paths
asset_entity = io.find_one({
"type": 'asset',
"name": asset
})
assert asset_entity, f"No asset `{asset}` found in avalon db"
asset_data = asset_entity["data"]
context.data["assetData"] = asset_data
self.log.debug("asset_data: {}".format(asset_data))
self.log.info("rqst_json_data_path is: {}".format(rqst_json_data_path))
self.log.info("post_json_data_path is: {}".format(post_json_data_path))
# self.log.info("avalon.session is: {}".format(avalon.session))

View file

@ -1,12 +1,5 @@
import os
import json
import pyblish.api
from avalon import (
io,
api as avalon
)
from pype import api as pype
class CollectInstancesFromJson(pyblish.api.ContextPlugin):
@ -26,7 +19,11 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
def process(self, context):
a_session = context.data.get("avalonSession")
_S = context.data["avalonSession"]
asset = _S["AVALON_ASSET"]
task = _S["AVALON_TASK"]
host = _S["AVALON_APP"]
json_data = context.data.get("jsonData", None)
assert json_data, "No `json_data` data in json file"
@ -36,96 +33,91 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
staging_dir = json_data.get("stagingDir", None)
assert staging_dir, "No `stagingDir` path in json file"
presets = context.data["presets"]
rules_tasks = presets["rules_tasks"]
ftrack_types = rules_tasks["ftrackTypes"]
assert ftrack_types, "No `ftrack_types` data in `/templates/presets/[host]/rules_tasks.json` file"
host = context.data["host"]
presets = context.data["presets"][host]
context.data["ftrackTypes"] = ftrack_types
rules_tasks = presets["rules_tasks"]
asset_default = presets["asset_default"]
assert asset_default, "No `asset_default` data in `/templates/presets/[host]/asset_default.json` file"
asset_name = a_session["AVALON_ASSET"]
entity = io.find_one({"name": asset_name,
"type": "asset"})
assert asset_default, ("No `asset_default` data in"
"`/presets/[host]/asset_default.json` file")
# get frame start > first try from asset data
frame_start = context.data["assetData"].get("fstart", None)
frame_start = context.data["assetData"].get("frameStart", None)
if not frame_start:
self.log.debug("frame_start not on assetData")
# get frame start > second try from parent data
frame_start = pype.get_data_hierarchical_attr(entity, "fstart")
if not frame_start:
self.log.debug("frame_start not on any parent entity")
# get frame start > third try from parent data
frame_start = asset_default["fstart"]
self.log.debug("frame_start not on any parent entity")
# get frame start > third try from parent data
frame_start = asset_default["frameStart"]
assert frame_start, "No `frame_start` data found, "
"please set `fstart` on asset"
self.log.debug("frame_start: `{}`".format(frame_start))
# get handles > first try from asset data
handles = context.data["assetData"].get("handles", None)
if not handles:
handle_start = context.data["assetData"].get("handleStart", None)
handle_end = context.data["assetData"].get("handleEnd", None)
if (handle_start is None) or (handle_end is None):
# get frame start > second try from parent data
handles = pype.get_data_hierarchical_attr(entity, "handles")
if not handles:
# get frame start > third try from parent data
handles = asset_default["handles"]
handle_start = asset_default.get("handleStart", None)
handle_end = asset_default.get("handleEnd", None)
assert handles, "No `handles` data found, "
"please set `fstart` on asset"
self.log.debug("handles: `{}`".format(handles))
assert (
(handle_start is not None) and (
handle_end is not None)), (
"No `handle_start, handle_end` data found")
instances = []
task = a_session["AVALON_TASK"]
current_file = os.path.basename(context.data.get("currentFile"))
name, ext = os.path.splitext(current_file)
family = "workfile"
subset_name = "{0}{1}".format(task, 'Default')
instance_name = "{0}_{1}_{2}".format(name,
family,
subset_name)
# Set label
label = "{0} - {1} > {2}".format(name, task, families)
label = "{0} - {1}".format(name, task)
wf_instance = next((inst for inst in instances_data
if inst.get("family", None) in 'workfile'), None)
if wf_instance:
self.log.debug('wf_instance: {}'.format(wf_instance))
version = int(wf_instance.get("version", None))
# get working file into instance for publishing
instance = context.create_instance(instance_name)
instance.data.update(wf_instance)
instance.data.update({
"subset": subset_name,
"stagingDir": staging_dir,
"task": task,
"representations": [{
"files": current_file,
'stagingDir': staging_dir,
'name': "projectfile",
'ext': ext[1:]
}],
"host": host,
"asset": asset,
"label": label,
"name": name,
"family": family,
"families": ["ftrack"],
"publish": True,
"version": version
})
instances.append(instance)
for inst in instances_data:
version = int(inst.get("version", None))
assert version, "No `version` string in json file"
name = asset = inst.get("name", None)
@ -135,14 +127,14 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
assert family, "No `family` key in json_data.instance: {}".format(
inst)
if family == 'workfile':
continue
files_list = inst.get("files", None)
assert files_list, "`files` are empty in json file"
hierarchy = inst.get("hierarchy", None)
assert hierarchy, "No `hierarchy` data in json file"
assert hierarchy, f"No `hierarchy` data in json file for {name}"
parents = inst.get("parents", None)
assert parents, "No `parents` data in json file"
@ -161,17 +153,12 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
# create list of tasks for creation
if not inst.get('tasks', None):
inst['tasks'] = list()
# append task into list for later hierarchy creation
if task not in inst['tasks']:
inst['tasks'].append(task)
host = rules_tasks["taskHost"][task]
subsets = rules_tasks["taskSubsets"][task]
subsets = rules_tasks["taskToSubsets"][task]
for sub in subsets:
self.log.debug(sub)
try:
@ -184,8 +171,8 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
subset_lst.extend([s for s in subsets if s not in subset_lst])
for subset in subset_lst:
if inst["representations"].get(subset, None):
repr = inst["representations"][subset]
if inst["subsetToRepresentations"].get(subset, None):
repr = inst["subsetToRepresentations"][subset]
ext = repr['representation']
else:
continue
@ -197,7 +184,7 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
family = subset
subset_name = "{0}{1}".format(subset, "Main")
elif "reference" in subset:
family ="render"
family = "review"
subset_name = "{0}{1}".format(family, "Reference")
else:
subset_name = "{0}{1}".format(subset, 'Default')
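The rules_tasks preset read earlier in this collector is a plain dict; a shape matching the keys the code looks up ("taskHost", "taskToSubsets"), with illustrative task and subset names:

    rules_tasks = {
        "taskHost": {"animation": "maya", "compositing": "nuke"},
        "taskToSubsets": {
            "animation": ["referenceMain"],
            "compositing": ["plateMain"],
        },
    }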
@@ -209,17 +196,15 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
instance = context.create_instance(name)
files = [f for f in files_list
if subset in f or "thumbnail" in f
]
if subset in f or "thumbnail" in f]
instance.data.update({
"subset": subset_name,
"stagingDir": staging_dir,
"tasks": subset_dict[subset],
"taskTypes": inst['tasksTypes'],
"fstart": frame_start,
"handles": handles,
"host": host,
"frameStart": frame_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
@@ -230,6 +215,8 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
"family": family,
"families": [subset, inst["family"], 'ftrack'],
"jsonData": inst,
"jsonReprSubset": subset,
"jsonReprExt": ext,
"publish": True,
"version": version})
self.log.info(
@@ -238,9 +225,6 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
context.data["instances"] = instances
# Sort/grouped by family (preserving local index)
# context[:] = sorted(context, key=self.sort_by_task)
self.log.debug("context: {}".format(context))
def sort_by_task(self, instance):
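The hunk ends before the method body; a plausible one-line body, purely illustrative (the "task" key is set on each instance by this collector):

    return instance.data.get("task", "")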

View file

@@ -2,7 +2,7 @@
import json
import clique
import pyblish.api
from pypeapp import Anatomy
class ExtractJSON(pyblish.api.ContextPlugin):
""" Extract all instances to a serialized json file. """
@@ -14,28 +14,27 @@ class ExtractJSON(pyblish.api.ContextPlugin):
json_path = context.data['post_json_data_path']
data = dict(self.serialize(context.data()))
# self.log.info(data)
instances_data = []
for instance in context:
iData = {}
for key, value in instance.data.items():
if isinstance(value, clique.Collection):
value = value.format()
try:
json.dumps(value)
iData[key] = value
except KeyError:
msg = "\"{0}\"".format(value)
msg += " in instance.data[\"{0}\"]".format(key)
msg += " could not be serialized."
self.log.debug(msg)
instances_data.append(iData)
data["instances"] = instances_data
# instances_data = []
# for instance in context:
#
# iData = {}
# for key, value in instance.data.items():
# if isinstance(value, clique.Collection):
# value = value.format()
#
# try:
# json.dumps(value)
# iData[key] = value
# except KeyError:
# msg = "\"{0}\"".format(value)
# msg += " in instance.data[\"{0}\"]".format(key)
# msg += " could not be serialized."
# self.log.debug(msg)
#
# instances_data.append(iData)
#
# data["instances"] = instances_data
with open(json_path, "w") as outfile:
outfile.write(json.dumps(data, indent=4, sort_keys=True))
@@ -60,6 +59,9 @@ class ExtractJSON(pyblish.api.ContextPlugin):
# self.log.info("1: {}".format(data))
if isinstance(data, Anatomy):
return
if not isinstance(data, dict):
# self.log.info("2: {}".format(data))
return data
@@ -88,6 +90,9 @@ class ExtractJSON(pyblish.api.ContextPlugin):
# loops if dictionary
data[key] = self.serialize(value)
if isinstance(value, Anatomy):
continue
if isinstance(value, (list or tuple)):
# loops if list or tuple
for i, item in enumerate(value):
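The serialize method above keeps only JSON-serializable values. A standalone sketch of that keep-if-serializable pattern (helper name illustrative; note that json.dumps raises TypeError, not KeyError, for unserializable values):

    import json
    import clique

    def keep_serializable(value):
        # clique collections are stored as their formatted string
        if isinstance(value, clique.Collection):
            value = value.format()
        try:
            json.dumps(value)
            return value
        except TypeError:
            # not JSON-serializable; the caller skips this key
            return None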

View file

@@ -1,104 +0,0 @@
import os
import pyblish.api
from avalon import api as avalon
from pype import api as pype
import json
from pathlib import Path
class CollectContextDataFromAport(pyblish.api.ContextPlugin):
"""
Collecting temp json data sent from a host context
and the path for returning json data back to the host itself.
Sets the avalon session to the correct context.
Args:
context (obj): pyblish context session
"""
label = "Collect Aport Context"
order = pyblish.api.CollectorOrder - 0.49
def process(self, context):
# get json paths from data
rqst_json_data_path = Path(context.data['rqst_json_data_path'])
post_json_data_path = Path(context.data['post_json_data_path'])
# get avalon session data and convert \ to /
session = avalon.session
self.log.info(os.environ['AVALON_PROJECTS'])
projects = Path(session['AVALON_PROJECTS']).resolve()
wd = Path(session['AVALON_WORKDIR']).resolve()
session['AVALON_PROJECTS'] = str(projects)
session['AVALON_WORKDIR'] = str(wd)
context.data["avalonSession"] = session
self.log.debug("avalonSession: {}".format(session))
# get staging directory from received path to json
context.data["stagingDir"] = staging_dir = post_json_data_path.parent
# get data from received json file
with rqst_json_data_path.open(mode='r') as f:
context.data['jsonData'] = json_data = json.load(f)
assert json_data, "No `data` in json file"
# get and check host type
host = json_data.get("host", None)
host_version = json_data.get("hostVersion", None)
assert host, "No `host` data in json file"
assert host_version, "No `hostVersion` data in json file"
context.data["host"] = session["AVALON_APP"] = host
context.data["hostVersion"] = \
session["AVALON_APP_VERSION"] = host_version
# register pyblish for filtering of hosts in plugins
pyblish.api.deregister_all_hosts()
pyblish.api.register_host(host)
# get path to studio templates
templates_dir = os.getenv("PYPE_STUDIO_TEMPLATES", None)
assert templates_dir, "Missing `PYPE_STUDIO_TEMPLATES` in os.environ"
# get presets for host
presets_dir = os.path.join(templates_dir, "presets", host)
assert os.path.exists(
presets_dir), "Required path `{}` doesn't exist".format(presets_dir)
# load all available preset json files
preset_data = dict()
for file in os.listdir(presets_dir):
name, ext = os.path.splitext(file)
with open(os.path.join(presets_dir, file)) as prst:
preset_data[name] = json.load(prst)
context.data['presets'] = preset_data
assert preset_data, "No `presets` data in json file"
self.log.debug("preset_data: {}".format(preset_data))
# get current file
current_file = json_data.get("currentFile", None)
assert current_file, "No `currentFile` data in json file"
context.data["currentFile"] = Path(current_file).resolve()
# get project data from avalon
project_data = pype.get_project_data()
assert project_data, "No `project_data` data in avalon db"
context.data["projectData"] = project_data
self.log.debug("project_data: {}".format(project_data))
# get asset data from avalon and fix all paths
asset_data = pype.get_asset_data()
assert asset_data, "No `asset_data` data in avalon db"
asset_data = {k: v.replace("\\", "/") for k, v in asset_data.items()
if isinstance(v, str)}
context.data["assetData"] = asset_data
self.log.debug("asset_data: {}".format(asset_data))
self.log.info("rqst_json_data_path is: {}".format(rqst_json_data_path))
self.log.info("post_json_data_path is: {}".format(post_json_data_path))
# self.log.info("avalon.session is: {}".format(avalon.session))
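For reference, a minimal request json that satisfies the asserts above; only the keys come from the code, the values are illustrative:

    {
        "host": "premiere",
        "hostVersion": "2020",
        "currentFile": "C:/projects/jobs/edit/edit_v001.pproj"
    }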

View file

@@ -0,0 +1,40 @@
"""Create an animation asset."""
import bpy
from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin
class CreateAction(Creator):
"""Action output for character rigs"""
name = "actionMain"
label = "Action"
family = "action"
icon = "male"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = pype.blender.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
if (obj.animation_data is not None
and obj.animation_data.action is not None):
empty_obj = bpy.data.objects.new(name=name,
object_data=None)
empty_obj.animation_data_create()
empty_obj.animation_data.action = obj.animation_data.action
empty_obj.animation_data.action.name = name
collection.objects.link(empty_obj)
return collection
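lib.imprint stores self.data on the collection as Blender custom properties. A rough standalone equivalent, assuming the avalon property name is "avalon" and using illustrative values:

    import bpy

    collection = bpy.data.collections.new(name="characterA_actionMain")
    bpy.context.scene.collection.children.link(collection)
    collection["avalon"] = {
        "id": "pyblish.avalon.instance",  # id the instance collector looks for
        "family": "action",
        "asset": "characterA",
        "subset": "actionMain",
        "task": "animation",
    }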

View file

@@ -0,0 +1,52 @@
"""Create an animation asset."""
import bpy
from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin
class CreateAnimation(Creator):
"""Animation output for character rigs"""
name = "animationMain"
label = "Animation"
family = "animation"
icon = "male"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = pype.blender.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
# Add the rig object and all the children meshes to
# a set and link them all at the end to avoid duplicates.
# Blender crashes if trying to link an object that is already linked.
# This links automatically the children meshes if they were not
# selected, and doesn't link them twice if they, instead,
# were manually selected by the user.
objects_to_link = set()
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
objects_to_link.add(obj)
if obj.type == 'ARMATURE':
for subobj in obj.children:
objects_to_link.add(subobj)
for obj in objects_to_link:
collection.objects.link(obj)
return collection

View file

@@ -4,6 +4,7 @@ import bpy
from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin
class CreateModel(Creator):
@@ -15,11 +16,10 @@ class CreateModel(Creator):
icon = "cube"
def process(self):
import pype.blender
asset = self.data["asset"]
subset = self.data["subset"]
name = pype.blender.plugin.model_name(asset, subset)
name = pype.blender.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')

View file

@@ -0,0 +1,52 @@
"""Create a rig asset."""
import bpy
from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin
class CreateRig(Creator):
"""Artist-friendly rig with controls to direct motion"""
name = "rigMain"
label = "Rig"
family = "rig"
icon = "wheelchair"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = pype.blender.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
# Add the rig object and all the children meshes to
# a set and link them all at the end to avoid duplicates.
# Blender crashes if trying to link an object that is already linked.
# This links automatically the children meshes if they were not
# selected, and doesn't link them twice if they, instead,
# were manually selected by the user.
objects_to_link = set()
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
objects_to_link.add(obj)
if obj.type == 'ARMATURE':
for subobj in obj.children:
objects_to_link.add(subobj)
for obj in objects_to_link:
collection.objects.link(obj)
return collection

View file

@@ -0,0 +1,304 @@
"""Load an action in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import pype.blender.plugin
logger = logging.getLogger("pype").getChild("blender").getChild("load_action")
class BlendActionLoader(pype.blender.plugin.AssetLoader):
"""Load action from a .blend file.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["action"]
representations = ["blend"]
label = "Link Action"
icon = "code-fork"
color = "orange"
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = pype.blender.plugin.asset_name(asset, subset)
container_name = pype.blender.plugin.asset_name(
asset, subset, namespace
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
collection = bpy.context.scene.collection
collection.children.link(bpy.data.collections[lib_container])
animation_container = collection.children[lib_container].make_local()
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in animation_container.objects:
obj = obj.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
objects_list.append(obj)
animation_container.pop(blender.pipeline.AVALON_PROPERTY)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
bpy.ops.object.select_all(action='DESELECT')
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
f"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
strips = []
for obj in collection_metadata["objects"]:
# Get all the strips that use the action
arm_objs = [
arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
for armature_obj in arm_objs:
if armature_obj.animation_data is not None:
for track in armature_obj.animation_data.nla_tracks:
for strip in track.strips:
if strip.action == obj.animation_data.action:
strips.append(strip)
bpy.data.actions.remove(obj.animation_data.action)
bpy.data.objects.remove(obj)
lib_container = collection_metadata["lib_container"]
bpy.data.collections.remove(bpy.data.collections[lib_container])
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
anim_container = scene.collection.children[lib_container].make_local()
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in anim_container.objects:
obj = obj.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
for strip in strips:
strip.action = anim_data.action
strip.action_frame_end = anim_data.action.frame_range[1]
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": collection.name})
objects_list.append(obj)
anim_container.pop(blender.pipeline.AVALON_PROPERTY)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (avalon-core:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
for obj in objects:
# Get all the strips that use the action
arm_objs = [
arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
for armature_obj in arm_objs:
if armature_obj.animation_data is not None:
for track in armature_obj.animation_data.nla_tracks:
for strip in track.strips:
if strip.action == obj.animation_data.action:
track.strips.remove(strip)
bpy.data.actions.remove(obj.animation_data.action)
bpy.data.objects.remove(obj)
bpy.data.collections.remove(bpy.data.collections[lib_container])
bpy.data.collections.remove(collection)
return True

View file

@@ -0,0 +1,256 @@
"""Load an animation in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import pype.blender.plugin
logger = logging.getLogger("pype").getChild(
"blender").getChild("load_animation")
class BlendAnimationLoader(pype.blender.plugin.AssetLoader):
"""Load animations from a .blend file.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["animation"]
representations = ["blend"]
label = "Link Animation"
icon = "code-fork"
color = "orange"
@staticmethod
def _remove(self, objects, lib_container):
for obj in objects:
if obj.type == 'ARMATURE':
bpy.data.armatures.remove(obj.data)
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
bpy.data.collections.remove(bpy.data.collections[lib_container])
@staticmethod
def _process(self, libpath, lib_container, container_name):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
anim_container = scene.collection.children[lib_container].make_local()
meshes = [obj for obj in anim_container.objects if obj.type == 'MESH']
armatures = [
obj for obj in anim_container.objects if obj.type == 'ARMATURE']
# Should check if there is only an armature?
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in meshes + armatures:
obj = obj.make_local()
obj.data.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
objects_list.append(obj)
anim_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return objects_list
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = pype.blender.plugin.asset_name(asset, subset)
container_name = pype.blender.plugin.asset_name(
asset, subset, namespace
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
objects_list = self._process(
self, libpath, lib_container, container_name)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
f"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
# Get the armature of the rig
armatures = [obj for obj in objects if obj.type == 'ARMATURE']
assert(len(armatures) == 1)
self._remove(self, objects, lib_container)
objects_list = self._process(
self, str(libpath), lib_container, collection.name)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (avalon-core:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(self, objects, lib_container)
bpy.data.collections.remove(collection)
return True

View file

@@ -5,15 +5,14 @@ from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import avalon.blender.pipeline
from avalon import api, blender
import bpy
import pype.blender
from avalon import api
import pype.blender.plugin
logger = logging.getLogger("pype").getChild("blender").getChild("load_model")
class BlendModelLoader(pype.blender.AssetLoader):
class BlendModelLoader(pype.blender.plugin.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file we can simply link the collection that
@@ -32,34 +31,55 @@ class BlendModelLoader(pype.blender.AssetLoader):
color = "orange"
@staticmethod
def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]:
"""Find the collection(s) with name, loaded from libpath.
Note:
It is assumed that only 1 matching collection is found.
"""
for collection in bpy.data.collections:
if collection.name != name:
continue
if collection.library is None:
continue
if not collection.library.filepath:
continue
collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve())
normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve())
if collection_lib_path == normalized_libpath:
return collection
return None
@staticmethod
def _collection_contains_object(
collection: bpy.types.Collection, object: bpy.types.Object
) -> bool:
"""Check if the collection contains the object."""
for obj in collection.objects:
if obj == object:
return True
return False
def _remove(self, objects, lib_container):
for obj in objects:
bpy.data.meshes.remove(obj.data)
bpy.data.collections.remove(bpy.data.collections[lib_container])
@staticmethod
def _process(self, libpath, lib_container, container_name):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
model_container = scene.collection.children[lib_container].make_local()
objects_list = []
for obj in model_container.objects:
obj = obj.make_local()
obj.data.make_local()
for material_slot in obj.material_slots:
material_slot.material.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
objects_list.append(obj)
model_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return objects_list
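The link pattern _process relies on, in isolation; bpy.data.libraries.load is the standard Blender API, the path and collection name here are illustrative:

    import bpy

    libpath = "/mnt/projects/show/publish/model/charA_modelMain_v001.blend"
    with bpy.data.libraries.load(
            libpath, link=True, relative=False) as (data_from, data_to):
        # request the named collection; after the block it exists in
        # bpy.data.collections as a linked (library) datablock
        data_to.collections = ["charA_modelMain"]
    bpy.context.scene.collection.children.link(
        bpy.data.collections["charA_modelMain"])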
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
@@ -76,42 +96,35 @@ class BlendModelLoader(pype.blender.AssetLoader):
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = pype.blender.plugin.model_name(asset, subset)
container_name = pype.blender.plugin.model_name(
lib_container = pype.blender.plugin.asset_name(asset, subset)
container_name = pype.blender.plugin.asset_name(
asset, subset, namespace
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
if not instance_empty.get("avalon"):
instance_empty["avalon"] = dict()
avalon_info = instance_empty["avalon"]
avalon_info.update({"container_name": container_name})
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
container = bpy.data.collections[lib_container]
container.name = container_name
instance_empty.instance_collection = container
container.make_local()
avalon.blender.pipeline.containerise_existing(
container,
collection = bpy.data.collections.new(lib_container)
collection.name = container_name
blender.pipeline.containerise_existing(
collection,
name,
namespace,
context,
self.__class__.__name__,
)
nodes = list(container.objects)
nodes.append(container)
nodes.append(instance_empty)
container_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
objects_list = self._process(
self, libpath, lib_container, container_name)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
nodes = list(collection.objects)
nodes.append(collection)
self[:] = nodes
return nodes
@@ -154,9 +167,13 @@ class BlendModelLoader(pype.blender.AssetLoader):
assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_libpath = (
self._get_library_from_container(collection).filepath
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
@@ -171,58 +188,16 @@ class BlendModelLoader(pype.blender.AssetLoader):
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
# Let Blender's garbage collection take care of removing the library
# itself after removing the objects.
objects_to_remove = set()
collection_objects = list()
collection_objects[:] = collection.objects
for obj in collection_objects:
# Unlink every object
collection.objects.unlink(obj)
remove_obj = True
for coll in [
coll for coll in bpy.data.collections
if coll != collection
]:
if (
coll.objects and
self._collection_contains_object(coll, obj)
):
remove_obj = False
if remove_obj:
objects_to_remove.add(obj)
for obj in objects_to_remove:
# Only delete objects that are not used elsewhere
bpy.data.objects.remove(obj)
self._remove(self, objects, lib_container)
instance_empties = [
obj for obj in collection.users_dupli_group
if obj.name in collection.name
]
if instance_empties:
instance_empty = instance_empties[0]
container_name = instance_empty["avalon"]["container_name"]
objects_list = self._process(
self, str(libpath), lib_container, collection.name)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [container_name]
new_collection = self._get_lib_collection(container_name, libpath)
if new_collection is None:
raise ValueError(
"A matching collection '{container_name}' "
"should have been found in: {libpath}"
)
for obj in new_collection.objects:
collection.objects.link(obj)
bpy.data.collections.remove(new_collection)
# Update the representation on the collection
avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
avalon_prop["representation"] = str(representation["_id"])
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
@@ -245,16 +220,20 @@ class BlendModelLoader(pype.blender.AssetLoader):
assert not (collection.children), (
"Nested collections are not supported."
)
instance_parents = list(collection.users_dupli_group)
instance_objects = list(collection.objects)
for obj in instance_objects + instance_parents:
bpy.data.objects.remove(obj)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(self, objects, lib_container)
bpy.data.collections.remove(collection)
return True
class CacheModelLoader(pype.blender.AssetLoader):
class CacheModelLoader(pype.blender.plugin.AssetLoader):
"""Load cache models.
Stores the imported asset in a collection named after the asset.
@@ -281,7 +260,8 @@ class CacheModelLoader(pype.blender.AssetLoader):
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
raise NotImplementedError("Loading of Alembic files is not yet implemented.")
raise NotImplementedError(
"Loading of Alembic files is not yet implemented.")
# TODO (jasper): implement Alembic import.
libpath = self.fname
@@ -289,7 +269,7 @@ class CacheModelLoader(pype.blender.AssetLoader):
subset = context["subset"]["name"]
# TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
lib_container = container_name = (
pype.blender.plugin.model_name(asset, subset, namespace)
pype.blender.plugin.asset_name(asset, subset, namespace)
)
relative = bpy.context.preferences.filepaths.use_relative_paths

View file

@@ -0,0 +1,256 @@
"""Load a rig asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import pype.blender.plugin
logger = logging.getLogger("pype").getChild("blender").getChild("load_model")
class BlendRigLoader(pype.blender.plugin.AssetLoader):
"""Load rigs from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the rig. There is no further need to 'containerise' it.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["rig"]
representations = ["blend"]
label = "Link Rig"
icon = "code-fork"
color = "orange"
@staticmethod
def _remove(self, objects, lib_container):
for obj in objects:
if obj.type == 'ARMATURE':
bpy.data.armatures.remove(obj.data)
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
bpy.data.collections.remove(bpy.data.collections[lib_container])
@staticmethod
def _process(self, libpath, lib_container, container_name, action):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
rig_container = scene.collection.children[lib_container].make_local()
meshes = [obj for obj in rig_container.objects if obj.type == 'MESH']
armatures = [
obj for obj in rig_container.objects if obj.type == 'ARMATURE']
objects_list = []
assert(len(armatures) == 1)
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in meshes + armatures:
obj = obj.make_local()
obj.data.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
if obj.type == 'ARMATURE' and action is not None:
obj.animation_data.action = action
objects_list.append(obj)
rig_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return objects_list
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = pype.blender.plugin.asset_name(asset, subset)
container_name = pype.blender.plugin.asset_name(
asset, subset, namespace
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
objects_list = self._process(
self, libpath, lib_container, container_name, None)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
f"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
# Get the armature of the rig
armatures = [obj for obj in objects if obj.type == 'ARMATURE']
assert(len(armatures) == 1)
action = armatures[0].animation_data.action
self._remove(self, objects, lib_container)
objects_list = self._process(
self, str(libpath), lib_container, collection.name, action)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (avalon-core:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(self, objects, lib_container)
bpy.data.collections.remove(collection)
return True

View file

@@ -14,3 +14,6 @@ class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file"""
current_file = bpy.data.filepath
context.data['currentFile'] = current_file
assert current_file != '', "Current file is empty. " \
"Save the file before continuing."

View file

@@ -1,22 +1,21 @@
import typing
from typing import Generator
import bpy
import json
import avalon.api
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY
class CollectModel(pyblish.api.ContextPlugin):
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect the data of a model."""
hosts = ["blender"]
label = "Collect Model"
label = "Collect Instances"
order = pyblish.api.CollectorOrder
@staticmethod
def get_model_collections() -> Generator:
def get_collections() -> Generator:
"""Return all instance collections.
Check if the collection is marked as an avalon instance and doesn't have the
@@ -25,13 +24,13 @@ class CollectModel(pyblish.api.ContextPlugin):
"""
for collection in bpy.data.collections:
avalon_prop = collection.get(AVALON_PROPERTY) or dict()
if (avalon_prop.get('family') == 'model'
and not avalon_prop.get('representation')):
if avalon_prop.get('id') == 'pyblish.avalon.instance':
yield collection
def process(self, context):
"""Collect the models from the current Blender scene."""
collections = self.get_model_collections()
collections = self.get_collections()
for collection in collections:
avalon_prop = collection[AVALON_PROPERTY]
asset = avalon_prop['asset']
@@ -50,4 +49,6 @@ class CollectModel(pyblish.api.ContextPlugin):
members = list(collection.objects)
members.append(collection)
instance[:] = members
self.log.debug(instance.data)
self.log.debug(json.dumps(instance.data, indent=4))
for obj in instance:
self.log.debug(obj)

View file

@@ -0,0 +1,95 @@
import os
import pype.api
import pype.blender.plugin
import bpy
class ExtractABC(pype.api.Extractor):
"""Extract as ABC."""
label = "Extract ABC"
hosts = ["blender"]
families = ["model"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
scene = context.scene
view_layer = context.view_layer
# Perform extraction
self.log.info("Performing extraction..")
collections = [
obj for obj in instance if type(obj) is bpy.types.Collection]
assert len(collections) == 1, "There should be one and only one " \
"collection collected for this asset"
old_active_layer_collection = view_layer.active_layer_collection
layers = view_layer.layer_collection.children
# Get the layer collection from the collection we need to export.
# This is needed because in Blender you can only set the active
# collection with the layer collection, and there is no way to get
# the layer collection from the collection
# (though the reverse lookup is available).
layer_collections = [
layer for layer in layers if layer.collection == collections[0]]
assert len(layer_collections) == 1
view_layer.active_layer_collection = layer_collections[0]
old_scale = scene.unit_settings.scale_length
selected = list()
for obj in instance:
try:
obj.select_set(True)
selected.append(obj)
except Exception:
# members that are not selectable objects are skipped
continue
new_context = pype.blender.plugin.create_blender_context(
active=selected[0], selected=selected)
# We set the scale of the scene for the export
scene.unit_settings.scale_length = 0.01
self.log.info(new_context)
# We export the abc
bpy.ops.wm.alembic_export(
new_context,
filepath=filepath,
start=1,
end=1
)
view_layer.active_layer_collection = old_active_layer_collection
scene.unit_settings.scale_length = old_scale
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
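create_blender_context is a pype helper not shown in this diff; a context override for operators such as wm.alembic_export is just a dict, so a minimal sketch under that assumption:

    import bpy

    def create_blender_context(active=None, selected=None):
        # copy the live context and override the members the operator reads
        ctx = bpy.context.copy()
        ctx["active_object"] = active
        ctx["selected_objects"] = selected or []
        return ctx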

View file

@@ -1,47 +1,47 @@
import os
import avalon.blender.workio
import pype.api
class ExtractModel(pype.api.Extractor):
"""Extract as model."""
label = "Model"
hosts = ["blender"]
families = ["model"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Just save the file to a temporary location. At least for now it's no
# problem to have (possibly) extra stuff in the file.
avalon.blender.workio.save_file(filepath, copy=True)
#
# # Store reference for integration
# if "files" not in instance.data:
# instance.data["files"] = list()
#
# # instance.data["files"].append(filename)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s", instance.name, representation)
import os
import avalon.blender.workio
import pype.api
class ExtractBlend(pype.api.Extractor):
"""Extract a blend file."""
label = "Extract Blend"
hosts = ["blender"]
families = ["animation", "model", "rig", "action"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Just save the file to a temporary location. At least for now it's no
# problem to have (possibly) extra stuff in the file.
avalon.blender.workio.save_file(filepath, copy=True)
#
# # Store reference for integration
# if "files" not in instance.data:
# instance.data["files"] = list()
#
# # instance.data["files"].append(filename)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@@ -0,0 +1,81 @@
import os
import pype.api
import bpy
class ExtractFBX(pype.api.Extractor):
"""Extract as FBX."""
label = "Extract FBX"
hosts = ["blender"]
families = ["model", "rig"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
scene = context.scene
view_layer = context.view_layer
# Perform extraction
self.log.info("Performing extraction..")
collections = [
obj for obj in instance if type(obj) is bpy.types.Collection]
assert len(collections) == 1, "There should be one and only one " \
"collection collected for this asset"
old_active_layer_collection = view_layer.active_layer_collection
layers = view_layer.layer_collection.children
# Get the layer collection from the collection we need to export.
# This is needed because in Blender you can only set the active
# collection with the layer collection, and there is no way to get
# the layer collection from the collection
# (though the reverse lookup is available).
layer_collections = [
layer for layer in layers if layer.collection == collections[0]]
assert len(layer_collections) == 1
view_layer.active_layer_collection = layer_collections[0]
old_scale = scene.unit_settings.scale_length
# We set the scale of the scene for the export
scene.unit_settings.scale_length = 0.01
# We export the fbx
bpy.ops.export_scene.fbx(
filepath=filepath,
use_active_collection=True,
mesh_smooth_type='FACE',
add_leaf_bones=False
)
view_layer.active_layer_collection = old_active_layer_collection
scene.unit_settings.scale_length = old_scale
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'fbx',
'ext': 'fbx',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@@ -0,0 +1,139 @@
import os
import pype.api
import bpy
import bpy_extras
import bpy_extras.anim_utils
class ExtractAnimationFBX(pype.api.Extractor):
"""Extract as animation."""
label = "Extract FBX"
hosts = ["blender"]
families = ["animation"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
scene = context.scene
view_layer = context.view_layer
# Perform extraction
self.log.info("Performing extraction..")
collections = [
obj for obj in instance if type(obj) is bpy.types.Collection]
assert len(collections) == 1, "There should be one and only one " \
"collection collected for this asset"
old_active_layer_collection = view_layer.active_layer_collection
layers = view_layer.layer_collection.children
# Get the layer collection from the collection we need to export.
# This is needed because in Blender you can only set the active
# collection with the layer collection, and there is no way to get
# the layer collection from the collection
# (though the reverse lookup is available).
layer_collections = [
layer for layer in layers if layer.collection == collections[0]]
assert len(layer_collections) == 1
view_layer.active_layer_collection = layer_collections[0]
old_scale = scene.unit_settings.scale_length
# We set the scale of the scene for the export
scene.unit_settings.scale_length = 0.01
armatures = [
obj for obj in collections[0].objects if obj.type == 'ARMATURE']
object_action_pairs = []
original_actions = []
starting_frames = []
ending_frames = []
# For each armature, we make a copy of the current action
for obj in armatures:
curr_action = None
copy_action = None
if obj.animation_data and obj.animation_data.action:
curr_action = obj.animation_data.action
copy_action = curr_action.copy()
curr_frame_range = curr_action.frame_range
starting_frames.append(curr_frame_range[0])
ending_frames.append(curr_frame_range[1])
object_action_pairs.append((obj, copy_action))
original_actions.append(curr_action)
# We compute the overall frame range to bake
min_frame = min(starting_frames)
max_frame = max(ending_frames)
# We bake the copy of the current action for each object
bpy_extras.anim_utils.bake_action_objects(
object_action_pairs,
frames=range(int(min_frame), int(max_frame) + 1),  # inclusive end frame
do_object=False,
do_clean=False
)
# We export the fbx
bpy.ops.export_scene.fbx(
filepath=filepath,
use_active_collection=True,
bake_anim_use_nla_strips=False,
bake_anim_use_all_actions=False,
add_leaf_bones=False
)
view_layer.active_layer_collection = old_active_layer_collection
scene.unit_settings.scale_length = old_scale
# We delete the baked action and set the original one back
for i in range(0, len(object_action_pairs)):
pair = object_action_pairs[i]
action = original_actions[i]
if action:
pair[0].animation_data.action = action
if pair[1]:
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'fbx',
'ext': 'fbx',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@@ -35,12 +35,15 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
# Make sure we are in object mode.
bpy.ops.object.mode_set(mode='OBJECT')
if not cls.has_uvs(obj):
invalid.append(obj)
obj for obj in instance]:
try:
if obj.type == 'MESH':
# Make sure we are in object mode.
bpy.ops.object.mode_set(mode='OBJECT')
if not cls.has_uvs(obj):
invalid.append(obj)
except Exception:
# members without a type attribute (e.g. the collection) are skipped
continue
return invalid
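has_uvs is defined elsewhere in this plugin; a minimal sketch of such a check, assuming it only needs to verify that UV layers exist on the mesh:

    @classmethod
    def has_uvs(cls, obj):
        # a mesh with no data or no UV layers fails the check
        return bool(obj.data and obj.data.uv_layers)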
def process(self, instance):

View file

@@ -22,6 +22,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
'setdress': 'setdress',
'pointcache': 'cache',
'render': 'render',
'render2d': 'render',
'nukescript': 'comp',
'write': 'render',
'review': 'mov',
@@ -127,7 +128,10 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
# Add custom attributes for AssetVersion
assetversion_cust_attrs = {}
intent_val = instance.context.data.get("intent", {}).get("value")
intent_val = instance.context.data.get("intent")
if intent_val and isinstance(intent_val, dict):
intent_val = intent_val.get("value")
if intent_val:
assetversion_cust_attrs["intent"] = intent_val

View file

@@ -71,8 +71,13 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
session = instance.context.data["ftrackSession"]
intent_val = instance.context.data.get("intent", {}).get("value")
intent_label = instance.context.data.get("intent", {}).get("label")
intent = instance.context.data.get("intent")
if intent and isinstance(intent, dict):
intent_val = intent.get("value")
intent_label = intent.get("label")
else:
intent_val = intent_label = intent
final_label = None
if intent_val:
final_label = self.get_intent_label(session, intent_val)
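Both shapes of context.data["intent"] that the branch above accepts, with illustrative values:

    intent = {"value": "wip", "label": "Work in Progress"}  # current dict form
    intent = "wip"  # legacy plain-string form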

View file

@@ -89,7 +89,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
# CUSTOM ATTRIBUTES
custom_attributes = entity_data.get('custom_attributes', [])
instances = [
i for i in self.context[:] if i.data['asset'] in entity['name']
i for i in self.context if i.data['asset'] in entity['name']
]
for key in custom_attributes:
assert (key in entity['custom_attributes']), (

View file

@@ -15,7 +15,7 @@ import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder - 0.02
label = "Collect Avalon Entities"
def process(self, context):
@@ -47,7 +47,32 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
context.data["assetEntity"] = asset_entity
data = asset_entity['data']
handles = int(data.get("handles") or 0)
context.data["handles"] = handles
context.data["handleStart"] = int(data.get("handleStart", handles))
context.data["handleEnd"] = int(data.get("handleEnd", handles))
context.data["frameStart"] = data.get("frameStart")
context.data["frameEnd"] = data.get("frameEnd")
handles = data.get("handles") or 0
handle_start = data.get("handleStart")
if handle_start is None:
handle_start = handles
self.log.info((
"Key \"handleStart\" is not set."
" Using value from \"handles\" key {}."
).format(handle_start))
handle_end = data.get("handleEnd")
if handle_end is None:
handle_end = handles
self.log.info((
"Key \"handleEnd\" is not set."
" Using value from \"handles\" key {}."
).format(handle_end))
context.data["handles"] = int(handles)
context.data["handleStart"] = int(handle_start)
context.data["handleEnd"] = int(handle_end)
frame_start_h = data.get("frameStart") - context.data["handleStart"]
frame_end_h = data.get("frameEnd") + context.data["handleEnd"]
context.data["frameStartHandle"] = frame_start_h
context.data["frameEndHandle"] = frame_end_h
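A quick worked example of the values computed above, assuming asset data with frameStart=1001, frameEnd=1100, handles=10 and no explicit handleStart/handleEnd:

    handle_start = handle_end = 10   # fallback from the legacy "handles" key
    frame_start_h = 1001 - 10        # frameStartHandle == 991
    frame_end_h = 1100 + 10          # frameEndHandle == 1110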

View file

@@ -13,7 +13,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
`PYPE_PUBLISH_DATA`. Those files _MUST_ share same context.
"""
order = pyblish.api.CollectorOrder - 0.0001
order = pyblish.api.CollectorOrder - 0.1
targets = ["filesequence"]
label = "Collect rendered frames"

View file

@@ -16,6 +16,9 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
if "standalonepublisher" in context.data.get("host", []):
return
if "unreal" in pyblish.api.registered_hosts():
return
filename = os.path.basename(context.data.get('currentFile'))
if '<shell>' in filename:

View file

@@ -18,7 +18,7 @@ class ExtractBurnin(pype.api.Extractor):
label = "Extract burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
hosts = ["nuke", "maya", "shell", "nukestudio"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
optional = True
def process(self, instance):
@@ -54,9 +54,12 @@ class ExtractBurnin(pype.api.Extractor):
"comment": instance.context.data.get("comment", "")
})
intent = instance.context.data.get("intent", {}).get("label")
if intent:
prep_data["intent"] = intent
intent_label = instance.context.data.get("intent")
if intent_label and isinstance(intent_label, dict):
intent_label = intent_label.get("label")
if intent_label:
prep_data["intent"] = intent_label
# get anatomy project
anatomy = instance.context.data['anatomy']
@@ -65,6 +68,10 @@ class ExtractBurnin(pype.api.Extractor):
for i, repre in enumerate(instance.data["representations"]):
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
if "multipartExr" in repre.get("tags", []):
# ffmpeg doesn't support multipart exrs
continue
if "burnin" not in repre.get("tags", []):
continue
@@ -125,13 +132,14 @@ class ExtractBurnin(pype.api.Extractor):
slate_duration = duration_cp
# exception for slate workflow
if ("slate" in instance.data["families"]):
if "slate" in instance.data["families"]:
if "slate-frame" in repre.get("tags", []):
slate_frame_start = frame_start_cp - 1
slate_frame_end = frame_end_cp
slate_duration = duration_cp + 1
self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start))
self.log.debug("__1 slate_frame_start: {}".format(
slate_frame_start))
_prep_data.update({
"slate_frame_start": slate_frame_start,

View file

@ -36,6 +36,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if not isinstance(repre['files'], list):
continue
if "multipartExr" in tags:
# ffmpeg doesn't support multipart exrs
continue
stagingdir = os.path.normpath(repre.get("stagingDir"))
input_file = repre['files'][0]

View file

@ -20,7 +20,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
label = "Extract Review"
order = pyblish.api.ExtractorOrder + 0.02
families = ["review"]
hosts = ["nuke", "maya", "shell", "nukestudio"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
outputs = {}
ext_filter = []
@ -57,11 +57,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
if repre['ext'] not in self.ext_filter:
continue
tags = repre.get("tags", [])
if "multipartExr" in tags:
# ffmpeg doesn't support multipart exrs
continue
if "thumbnail" in tags:
continue

View file

@ -11,7 +11,9 @@ class ExtractReviewSlate(pype.api.Extractor):
label = "Review with Slate frame"
order = pyblish.api.ExtractorOrder + 0.031
families = ["slate"]
families = ["slate", "review"]
match = pyblish.api.Subset
hosts = ["nuke", "maya", "shell"]
optional = True
@ -34,7 +36,8 @@ class ExtractReviewSlate(pype.api.Extractor):
fps = inst_data.get("fps")
# defining image ratios
resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height
resolution_ratio = ((float(resolution_width) * pixel_aspect) /
resolution_height)
delivery_ratio = float(to_width) / float(to_height)
self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio))
self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio))
@ -89,7 +92,7 @@ class ExtractReviewSlate(pype.api.Extractor):
input_args.extend([
"-r {}".format(fps),
"-t 0.04"]
)
)
# output args
codec_args = repre["_profile"].get('codec', [])
@ -111,7 +114,7 @@ class ExtractReviewSlate(pype.api.Extractor):
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
to_width - width_scale) / 2)
height_scale = to_height
height_half_pad = 0
else:
@ -124,7 +127,7 @@ class ExtractReviewSlate(pype.api.Extractor):
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
(to_height - height_scale) / 2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
@ -135,8 +138,10 @@ class ExtractReviewSlate(pype.api.Extractor):
self.log.debug(
"__ height_half_pad: `{}`".format(height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
scaling_arg = ("scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1").format(
width_scale, height_scale, to_width, to_height,
width_half_pad, height_half_pad
)
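A worked example helps to sanity-check the branching above; this is a standalone sketch of the same arithmetic with assumed sample resolutions, not the plugin code itself:

def scale_and_pad(src_w, src_h, pixel_aspect, to_w, to_h):
    """Compute scale size and symmetric padding for a target frame."""
    resolution_ratio = (float(src_w) * pixel_aspect) / src_h
    delivery_ratio = float(to_w) / float(to_h)
    if resolution_ratio < delivery_ratio:
        # source is narrower than target: fit height, pad width (pillarbox)
        width_scale = int(src_w * pixel_aspect * (float(to_h) / src_h))
        return width_scale, to_h, (to_w - width_scale) // 2, 0
    # source is wider than target: fit width, pad height (letterbox)
    height_scale = int(src_h * (float(to_w) / (src_w * pixel_aspect)))
    return to_w, height_scale, 0, (to_h - height_scale) // 2

# scale_and_pad(2048, 858, 1.0, 1920, 1080) -> (1920, 804, 0, 138):
# a 2.39:1 frame letterboxed into HD, i.e. the filter becomes
# "scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1".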
vf_back = self.add_video_filter_args(

View file

@ -0,0 +1,604 @@
import os
import copy
import clique
import errno
import shutil
from pymongo import InsertOne, ReplaceOne
import pyblish.api
from avalon import api, io, schema
from avalon.vendor import filelink
class IntegrateMasterVersion(pyblish.api.InstancePlugin):
label = "Integrate Master Version"
# Must happen after IntegrateNew
order = pyblish.api.IntegratorOrder + 0.1
optional = True
families = [
"model",
"rig",
"setdress",
"look",
"pointcache",
"animation"
]
# Can specify representation names that will be ignored (lower case)
ignored_representation_names = []
db_representation_context_keys = [
"project", "asset", "task", "subset", "representation",
"family", "hierarchy", "task", "username"
]
# TODO add family filtering
# QUESTION/TODO this process should happen on server if crashed due to
# permissions error on files (files were used or user didn't have perms)
# *but all other plugins must be successfully completed
def process(self, instance):
self.log.debug(
"--- Integration of Master version for subset `{}` begins.".format(
instance.data.get("subset", str(instance))
)
)
published_repres = instance.data.get("published_representations")
if not published_repres:
self.log.debug(
"*** There are not published representations on the instance."
)
return
project_name = api.Session["AVALON_PROJECT"]
# TODO raise error if master not set?
anatomy = instance.context.data["anatomy"]
if "master" not in anatomy.templates:
self.log.warning("!!! Anatomy does not have set `master` key!")
return
if "path" not in anatomy.templates["master"]:
self.log.warning((
"!!! There is not set `path` template in `master` anatomy"
" for project \"{}\"."
).format(project_name))
return
master_template = anatomy.templates["master"]["path"]
self.log.debug("`Master` template check was successful. `{}`".format(
master_template
))
master_publish_dir = self.get_publish_dir(instance)
src_version_entity = instance.data.get("versionEntity")
filtered_repre_ids = []
for repre_id, repre_info in published_repres.items():
repre = repre_info["representation"]
if repre["name"].lower() in self.ignored_representation_names:
self.log.debug(
"Filtering representation with name: `{}`".format(
repre["name"].lower()
)
)
filtered_repre_ids.append(repre_id)
for repre_id in filtered_repre_ids:
published_repres.pop(repre_id, None)
if not published_repres:
self.log.debug(
"*** All published representations were filtered by name."
)
return
if src_version_entity is None:
self.log.debug((
"Published version entity was not sent in representation data."
" Querying entity from database."
))
src_version_entity = (
self.version_from_representations(published_repres)
)
if not src_version_entity:
self.log.warning((
"!!! Can't find origin version in database."
" Skipping Master version publish."
))
return
all_copied_files = []
transfers = instance.data.get("transfers", list())
for _src, dst in transfers:
dst = os.path.normpath(dst)
if dst not in all_copied_files:
all_copied_files.append(dst)
hardlinks = instance.data.get("hardlinks", list())
for _src, dst in hardlinks:
dst = os.path.normpath(dst)
if dst not in all_copied_files:
all_copied_files.append(dst)
all_repre_file_paths = []
for repre_info in published_repres.values():
published_files = repre_info.get("published_files") or []
for file_path in published_files:
file_path = os.path.normpath(file_path)
if file_path not in all_repre_file_paths:
all_repre_file_paths.append(file_path)
# TODO this is not the best practice for getting publish resources
# WARNING due to this we must remove all files from master publish dir
instance_publish_dir = os.path.normpath(
instance.data["publishDir"]
)
other_file_paths_mapping = []
for file_path in all_copied_files:
# Check if it is from publishDir
if not file_path.startswith(instance_publish_dir):
continue
if file_path in all_repre_file_paths:
continue
dst_filepath = file_path.replace(
instance_publish_dir, master_publish_dir
)
other_file_paths_mapping.append((file_path, dst_filepath))
# Current version
old_version, old_repres = (
self.current_master_ents(src_version_entity)
)
old_repres_by_name = {
repre["name"].lower(): repre for repre in old_repres
}
if old_version:
new_version_id = old_version["_id"]
else:
new_version_id = io.ObjectId()
new_master_version = {
"_id": new_version_id,
"version_id": src_version_entity["_id"],
"parent": src_version_entity["parent"],
"type": "master_version",
"schema": "pype:master_version-1.0"
}
schema.validate(new_master_version)
# Don't make changes in database until everything is O.K.
bulk_writes = []
if old_version:
self.log.debug("Replacing old master version.")
bulk_writes.append(
ReplaceOne(
{"_id": new_master_version["_id"]},
new_master_version
)
)
else:
self.log.debug("Creating first master version.")
bulk_writes.append(
InsertOne(new_master_version)
)
# Separate old representations into `to replace` and `to delete`
old_repres_to_replace = {}
old_repres_to_delete = {}
for repre_info in published_repres.values():
repre = repre_info["representation"]
repre_name_low = repre["name"].lower()
if repre_name_low in old_repres_by_name:
old_repres_to_replace[repre_name_low] = (
old_repres_by_name.pop(repre_name_low)
)
if old_repres_by_name:
old_repres_to_delete = old_repres_by_name
archived_repres = list(io.find({
# NOTE: check what the type of an archived representation is
"type": "archived_representation",
"parent": new_version_id
}))
archived_repres_by_name = {}
for repre in archived_repres:
repre_name_low = repre["name"].lower()
archived_repres_by_name[repre_name_low] = repre
backup_master_publish_dir = None
if os.path.exists(master_publish_dir):
backup_master_publish_dir = master_publish_dir + ".BACKUP"
max_idx = 10
idx = 0
_backup_master_publish_dir = backup_master_publish_dir
while os.path.exists(_backup_master_publish_dir):
self.log.debug((
"Backup folder already exists."
" Trying to remove \"{}\""
).format(_backup_master_publish_dir))
try:
shutil.rmtree(_backup_master_publish_dir)
backup_master_publish_dir = _backup_master_publish_dir
break
except Exception:
self.log.info((
"Could not remove previous backup folder."
" Trying to add index to folder name"
))
_backup_master_publish_dir = (
backup_master_publish_dir + str(idx)
)
if not os.path.exists(_backup_master_publish_dir):
backup_master_publish_dir = _backup_master_publish_dir
break
if idx > max_idx:
raise AssertionError((
"Backup folders are fully occupied to max index \"{}\""
).format(max_idx))
break
idx += 1
self.log.debug("Backup folder path is \"{}\"".format(
backup_master_publish_dir
))
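The loop above searches for a usable backup name; the same idea as a compact standalone helper (hypothetical name find_backup_dir, a sketch under the same assumptions):

import os
import shutil

def find_backup_dir(publish_dir, max_idx=10):
    """Pick a backup path next to publish_dir, clearing a stale one if possible."""
    base = publish_dir + ".BACKUP"
    for candidate in [base] + [base + str(i) for i in range(max_idx + 1)]:
        if not os.path.exists(candidate):
            return candidate
        try:
            # a previous backup we are allowed to throw away
            shutil.rmtree(candidate)
            return candidate
        except Exception:
            continue  # locked or in use, try the next index
    raise RuntimeError(
        "All backup folder names up to max index {} are occupied".format(max_idx)
    )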
try:
os.rename(master_publish_dir, backup_master_publish_dir)
except PermissionError:
raise AssertionError((
"Could not create master version because it is not"
" possible to replace current master files."
))
try:
src_to_dst_file_paths = []
for repre_info in published_repres.values():
# Skip if new repre does not have published repre files
published_files = repre_info["published_files"]
if len(published_files) == 0:
continue
# Prepare anatomy data
anatomy_data = repre_info["anatomy_data"]
anatomy_data.pop("version", None)
# Get filled path to repre context
anatomy_filled = anatomy.format(anatomy_data)
template_filled = anatomy_filled["master"]["path"]
repre_data = {
"path": str(template_filled),
"template": master_template
}
repre_context = template_filled.used_values
for key in self.db_representation_context_keys:
if (
key in repre_context or
key not in anatomy_data
):
continue
repre_context[key] = anatomy_data[key]
# Prepare new repre
repre = copy.deepcopy(repre_info["representation"])
repre["parent"] = new_master_version["_id"]
repre["context"] = repre_context
repre["data"] = repre_data
repre.pop("_id", None)
schema.validate(repre)
repre_name_low = repre["name"].lower()
# Replace current representation
if repre_name_low in old_repres_to_replace:
old_repre = old_repres_to_replace.pop(repre_name_low)
repre["_id"] = old_repre["_id"]
bulk_writes.append(
ReplaceOne(
{"_id": old_repre["_id"]},
repre
)
)
# Unarchive representation
elif repre_name_low in archived_repres_by_name:
archived_repre = archived_repres_by_name.pop(
repre_name_low
)
old_id = archived_repre["old_id"]
repre["_id"] = old_id
bulk_writes.append(
ReplaceOne(
{"old_id": old_id},
repre
)
)
# Create representation
else:
repre["_id"] = io.ObjectId()
bulk_writes.append(
InsertOne(repre)
)
# Prepare paths of source and destination files
if len(published_files) == 1:
src_to_dst_file_paths.append(
(published_files[0], template_filled)
)
continue
collections, remainders = clique.assemble(published_files)
if remainders or not collections or len(collections) > 1:
raise Exception((
"Integrity error. Files of published representation "
"is combination of frame collections and single files."
"Collections: `{}` Single files: `{}`"
).format(str(collections), str(remainders)))
src_col = collections[0]
# Get head and tail for collection
frame_splitter = "_-_FRAME_SPLIT_-_"
anatomy_data["frame"] = frame_splitter
_anatomy_filled = anatomy.format(anatomy_data)
_template_filled = _anatomy_filled["master"]["path"]
head, tail = _template_filled.split(frame_splitter)
padding = (
anatomy.templates["render"]["padding"]
)
dst_col = clique.Collection(
head=head, padding=padding, tail=tail
)
dst_col.indexes.clear()
dst_col.indexes.update(src_col.indexes)
for src_file, dst_file in zip(src_col, dst_col):
src_to_dst_file_paths.append(
(src_file, dst_file)
)
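The sentinel trick above (filling the template with a marker instead of a frame number, then splitting on the marker to recover head and tail) generalizes; a minimal sketch with a plain format string standing in for the anatomy "master" path:

import clique

FRAME_SPLITTER = "_-_FRAME_SPLIT_-_"

template = "/proj/master/beauty.{frame}.exr"  # hypothetical template
filled = template.format(frame=FRAME_SPLITTER)
head, tail = filled.split(FRAME_SPLITTER)

# rebuild the destination collection with the source frame numbers
src_col = clique.Collection(head="render.", padding=4, tail=".exr")
src_col.indexes.update([1001, 1002, 1003])
dst_col = clique.Collection(head=head, padding=4, tail=tail)
dst_col.indexes.update(src_col.indexes)

for src_file, dst_file in zip(src_col, dst_col):
    print(src_file, "->", dst_file)
# render.1001.exr -> /proj/master/beauty.1001.exr  (and so on)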
self.path_checks = []
# Copy(hardlink) paths of source and destination files
# TODO should we *only* create hardlinks?
# TODO should we keep files for deletion until this is successful?
for src_path, dst_path in src_to_dst_file_paths:
self.copy_file(src_path, dst_path)
for src_path, dst_path in other_file_paths_mapping:
self.copy_file(src_path, dst_path)
# Archive not replaced old representations
for repre_name_low, repre in old_repres_to_delete.items():
# Replace archived representation (This is backup)
# - having both a repre and an archived repre should not happen
if repre_name_low in archived_repres_by_name:
archived_repre = archived_repres_by_name.pop(
repre_name_low
)
repre["old_id"] = repre["_id"]
repre["_id"] = archived_repre["_id"]
repre["type"] = archived_repre["type"]
bulk_writes.append(
ReplaceOne(
{"_id": archived_repre["_id"]},
repre
)
)
else:
repre["old_id"] = repre["_id"]
repre["_id"] = io.ObjectId()
repre["type"] = "archived_representation"
bulk_writes.append(
InsertOne(repre)
)
if bulk_writes:
io._database[io.Session["AVALON_PROJECT"]].bulk_write(
bulk_writes
)
# Remove backed-up previous master
if (
backup_master_publish_dir is not None and
os.path.exists(backup_master_publish_dir)
):
shutil.rmtree(backup_master_publish_dir)
except Exception:
if (
backup_master_publish_dir is not None and
os.path.exists(backup_master_publish_dir)
):
os.rename(backup_master_publish_dir, master_publish_dir)
self.log.error((
"!!! Creating of Master version failed."
" Previous master version maybe lost some data!"
))
raise
self.log.debug((
"--- Master version integration for subset `{}`"
" seems to be successful."
).format(
instance.data.get("subset", str(instance))
))
def get_all_files_from_path(self, path):
files = []
for (dir_path, dir_names, file_names) in os.walk(path):
for file_name in file_names:
_path = os.path.join(dir_path, file_name)
files.append(_path)
return files
def get_publish_dir(self, instance):
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
if "folder" in anatomy.templates["master"]:
anatomy_filled = anatomy.format(template_data)
publish_folder = anatomy_filled["master"]["folder"]
else:
# This is for cases of deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
anatomy_filled = anatomy.format(template_data)
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
file_path = anatomy_filled["master"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
publish_folder = os.path.normpath(publish_folder)
self.log.debug("Master publish dir: \"{}\"".format(publish_folder))
return publish_folder
def copy_file(self, src_path, dst_path):
# TODO check if drives are the same to decide if we can hardlink
dst_path = self.path_root_check(dst_path)
src_path = self.path_root_check(src_path)
dirname = os.path.dirname(dst_path)
try:
os.makedirs(dirname)
self.log.debug("Folder(s) created: \"{}\"".format(dirname))
except OSError as exc:
if exc.errno != errno.EEXIST:
self.log.error("An unexpected error occurred.", exc_info=True)
raise
self.log.debug("Folder already exists: \"{}\"".format(dirname))
self.log.debug("Copying file \"{}\" to \"{}\"".format(
src_path, dst_path
))
# First try hardlink and copy if paths are cross drive
try:
filelink.create(src_path, dst_path, filelink.HARDLINK)
# Return when successful
return
except OSError as exc:
# re-raise exception if it is not a cross-drive error
if exc.errno != errno.EXDEV:
raise
shutil.copy(src_path, dst_path)
def path_root_check(self, path):
normalized_path = os.path.normpath(path)
forward_slash_path = normalized_path.replace("\\", "/")
drive, _path = os.path.splitdrive(normalized_path)
if os.path.exists(drive + "/"):
key = "drive_check{}".format(drive)
if key not in self.path_checks:
self.log.debug(
"Drive \"{}\" exist. Nothing to change.".format(drive)
)
self.path_checks.append(key)
return normalized_path
path_env_key = "PYPE_STUDIO_PROJECTS_PATH"
mount_env_key = "PYPE_STUDIO_PROJECTS_MOUNT"
missing_envs = []
if path_env_key not in os.environ:
missing_envs.append(path_env_key)
if mount_env_key not in os.environ:
missing_envs.append(mount_env_key)
if missing_envs:
key = "missing_envs"
if key not in self.path_checks:
self.path_checks.append(key)
_add_s = ""
if len(missing_envs) > 1:
_add_s = "s"
self.log.warning((
"Can't replace MOUNT drive path to UNC path due to missing"
" environment variable{}: `{}`. This may cause issues"
" during publishing process."
).format(_add_s, ", ".join(missing_envs)))
return normalized_path
unc_root = os.environ[path_env_key].replace("\\", "/")
mount_root = os.environ[mount_env_key].replace("\\", "/")
# --- Remove slashes at the end of mount and unc roots ---
while unc_root.endswith("/"):
unc_root = unc_root[:-1]
while mount_root.endswith("/"):
mount_root = mount_root[:-1]
# ---
if forward_slash_path.startswith(unc_root):
self.log.debug((
"Path already starts with UNC root: \"{}\""
).format(unc_root))
return normalized_path
if not forward_slash_path.startswith(mount_root):
self.log.warning((
"Path do not start with MOUNT root \"{}\" "
"set in environment variable \"{}\""
).format(unc_root, mount_env_key))
return normalized_path
# Replace Mount root with Unc root
path = unc_root + forward_slash_path[len(mount_root):]
return os.path.normpath(path)
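To illustrate the replacement above, here is how the mapping behaves with assumed environment values (hypothetical paths, a self-contained sketch):

import os

# assumed example values, not real studio configuration:
os.environ["PYPE_STUDIO_PROJECTS_MOUNT"] = "P:/projects"
os.environ["PYPE_STUDIO_PROJECTS_PATH"] = "//fileserver/share/projects"

def to_unc(path):
    """Swap the mount root for the UNC root when the path lives under it."""
    mount = os.environ["PYPE_STUDIO_PROJECTS_MOUNT"].replace("\\", "/").rstrip("/")
    unc = os.environ["PYPE_STUDIO_PROJECTS_PATH"].replace("\\", "/").rstrip("/")
    fwd = os.path.normpath(path).replace("\\", "/")
    if fwd.startswith(mount):
        return os.path.normpath(unc + fwd[len(mount):])
    return os.path.normpath(path)

print(to_unc("P:/projects/show/publish/v001"))
# //fileserver/share/projects/show/publish/v001 (normalized per platform)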
def version_from_representations(self, repres):
for repre in repres:
version = io.find_one({"_id": repre["parent"]})
if version:
return version
def current_master_ents(self, version):
master_version = io.find_one({
"parent": version["parent"],
"type": "master_version"
})
if not master_version:
return (None, [])
master_repres = list(io.find({
"parent": master_version["_id"],
"type": "representation"
}))
return (master_version, master_repres)

View file

@ -5,6 +5,7 @@ import sys
import copy
import clique
import errno
import six
from pymongo import DeleteOne, InsertOne
import pyblish.api
@ -64,6 +65,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"scene",
"vrayproxy",
"render",
"prerender",
"imagesequence",
"review",
"rendersetup",
@ -81,13 +83,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"image"
"source",
"assembly",
"textures"
"fbx",
"textures",
"action"
]
exclude_families = ["clip"]
db_representation_context_keys = [
"project", "asset", "task", "subset", "version", "representation",
"family", "hierarchy", "task", "username"
]
default_template_name = "publish"
def process(self, instance):
@ -162,6 +167,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
)
subset = self.get_subset(asset_entity, instance)
instance.data["subsetEntity"] = subset
version_number = instance.data["version"]
self.log.debug("Next version: v{}".format(version_number))
@ -236,6 +242,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
bulk_writes
)
version = io.find_one({"_id": version_id})
instance.data["versionEntity"] = version
existing_repres = list(io.find({
"parent": version_id,
"type": "archived_representation"
@ -243,9 +252,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data['version'] = version['name']
intent = context.data.get("intent")
if intent is not None:
anatomy_data["intent"] = intent
intent_value = instance.context.data.get("intent")
if intent_value and isinstance(intent_value, dict):
intent_value = intent_value.get("value")
if intent_value:
anatomy_data["intent"] = intent_value
anatomy = instance.context.data['anatomy']
@ -253,15 +265,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
template_name = 'publish'
if 'transfers' not in instance.data:
instance.data['transfers'] = []
published_representations = {}
for idx, repre in enumerate(instance.data["representations"]):
published_files = []
# create template data for Anatomy
template_data = copy.deepcopy(anatomy_data)
if intent is not None:
template_data["intent"] = intent
if intent_value is not None:
template_data["intent"] = intent_value
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
@ -277,8 +292,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
files = repre['files']
if repre.get('stagingDir'):
stagingdir = repre['stagingDir']
if repre.get('anatomy_template'):
template_name = repre['anatomy_template']
template_name = (
repre.get('anatomy_template') or self.default_template_name
)
if repre.get("outputName"):
template_data["output"] = repre['outputName']
@ -365,14 +382,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
published_files.append(dst)
# for adding first frame into db
if not dst_start_frame:
dst_start_frame = dst_padding
# Store used frame value to template data
template_data["frame"] = dst_start_frame
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
dst_tail).replace("..", ".")
dst_tail
).replace("..", ".")
repre['published_path'] = self.unc_convert(dst)
else:
@ -400,9 +422,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data["transfers"].append([src, dst])
published_files.append(dst)
repre['published_path'] = self.unc_convert(dst)
self.log.debug("__ dst: {}".format(dst))
repre["publishedFiles"] = published_files
for key in self.db_representation_context_keys:
value = template_data.get(key)
if not value:
@ -449,6 +474,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("__ destination_list: {}".format(destination_list))
instance.data['destination_list'] = destination_list
representations.append(representation)
published_representations[repre_id] = {
"representation": representation,
"anatomy_data": template_data,
"published_files": published_files
}
self.log.debug("__ representations: {}".format(representations))
# Remove old representations if there are any (before insertion of new)
@ -463,7 +493,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("__ represNAME: {}".format(rep['name']))
self.log.debug("__ represPATH: {}".format(rep['published_path']))
io.insert_many(representations)
instance.data["published_representations"] = representations
instance.data["published_representations"] = (
published_representations
)
# self.log.debug("Representation: {}".format(representations))
self.log.info("Registered {} items".format(len(representations)))
@ -538,7 +570,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# copy file with speedcopy and check if sizes of files are identical
while True:
copyfile(src, dst)
try:
copyfile(src, dst)
except OSError as e:
self.log.critical("Cannot copy {} to {}".format(src, dst))
self.log.critical(e)
six.reraise(*sys.exc_info())
if getsize(src) == getsize(dst):  # compare sizes exactly, not as substrings
break
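The copy-and-verify loop benefits from a bounded retry count; a self-contained sketch of the same pattern (shutil stands in here for speedcopy):

import os
import shutil

def copy_verified(src, dst, attempts=3):
    """Copy src to dst and retry until the file sizes match exactly."""
    for _ in range(attempts):
        shutil.copyfile(src, dst)
        if os.path.getsize(src) == os.path.getsize(dst):
            return
    raise IOError("Size mismatch after {} attempts: {} -> {}".format(
        attempts, src, dst))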
@ -579,7 +616,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"name": subset_name,
"data": {
"families": instance.data.get('families')
},
},
"parent": asset["_id"]
}).inserted_id
@ -633,13 +670,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
families += current_families
self.log.debug("Registered root: {}".format(api.registered_root()))
# create relative source path for DB
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
api.registered_root())
self.log.debug("source: {}".format(source))
source = str(source).replace(
os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
api.registered_root()
)
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
@ -653,9 +694,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"fps": context.data.get(
"fps", instance.data.get("fps"))}
intent = context.data.get("intent")
if intent is not None:
version_data["intent"] = intent
intent_value = instance.context.data.get("intent")
if intent_value and isinstance(intent_value, dict):
intent_value = intent_value.get("value")
if intent_value:
version_data["intent"] = intent_value
# Include optional data if present in
optionals = [

View file

@ -18,17 +18,23 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder + 0.01
families = ["review"]
required_context_keys = [
"project", "asset", "task", "subset", "version"
]
def process(self, instance):
if not os.environ.get("AVALON_THUMBNAIL_ROOT"):
self.log.info("AVALON_THUMBNAIL_ROOT is not set."
" Skipping thumbnail integration.")
self.log.warning(
"AVALON_THUMBNAIL_ROOT is not set."
" Skipping thumbnail integration."
)
return
published_repres = instance.data.get("published_representations")
if not published_repres:
self.log.debug(
"There are not published representation ids on the instance."
"There are no published representations on the instance."
)
return
@ -36,21 +42,22 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
anatomy = instance.context.data["anatomy"]
if "publish" not in anatomy.templates:
raise AssertionError("Anatomy does not have set publish key!")
self.log.warning("Anatomy is missing the \"publish\" key!")
return
if "thumbnail" not in anatomy.templates["publish"]:
raise AssertionError((
"There is not set \"thumbnail\" template for project \"{}\""
self.log.warning((
"There is no \"thumbnail\" template set for the project \"{}\""
).format(project_name))
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
io.install()
return
thumb_repre = None
for repre in published_repres:
thumb_repre_anatomy_data = None
for repre_info in published_repres.values():
repre = repre_info["representation"]
if repre["name"].lower() == "thumbnail":
thumb_repre = repre
thumb_repre_anatomy_data = repre_info["anatomy_data"]
break
if not thumb_repre:
@ -59,6 +66,10 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
)
return
io.install()
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
version = io.find_one({"_id": thumb_repre["parent"]})
if not version:
raise AssertionError(
@ -80,7 +91,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
thumbnail_id = ObjectId()
# Prepare anatomy template fill data
template_data = copy.deepcopy(thumb_repre["context"])
template_data = copy.deepcopy(thumb_repre_anatomy_data)
template_data.update({
"_id": str(thumbnail_id),
"thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"),
@ -89,15 +100,9 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
})
anatomy_filled = anatomy.format(template_data)
final_path = anatomy_filled.get("publish", {}).get("thumbnail")
if not final_path:
raise AssertionError((
"Anatomy template was not filled with entered data"
"\nTemplate: {} "
"\nData: {}"
).format(thumbnail_template, str(template_data)))
template_filled = anatomy_filled["publish"]["thumbnail"]
dst_full_path = os.path.normpath(final_path)
dst_full_path = os.path.normpath(str(template_filled))
self.log.debug(
"Copying file .. {} -> {}".format(src_full_path, dst_full_path)
)
@ -115,13 +120,20 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
template_data.pop("_id")
template_data.pop("thumbnail_root")
repre_context = template_filled.used_values
for key in self.required_context_keys:
value = template_data.get(key)
if not value:
continue
repre_context[key] = template_data[key]
thumbnail_entity = {
"_id": thumbnail_id,
"type": "thumbnail",
"schema": "pype:thumbnail-1.0",
"data": {
"template": thumbnail_template,
"template_data": template_data
"template_data": repre_context
}
}
# Create thumbnail entity

View file

@ -141,7 +141,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
hosts = ["fusion", "maya", "nuke"]
families = ["render.farm", "renderlayer", "imagesequence"]
families = ["render.farm", "prerener", "renderlayer", "imagesequence"]
aov_filter = {"maya": ["beauty"]}
@ -155,6 +155,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"PYPE_METADATA_FILE",
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT",
"AVALON_PROJECT"
]
# pool used to do the publishing job
@ -168,9 +169,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
instance_transfer = {
"slate": ["slateFrame"],
"review": ["lutPath"],
"render.farm": ["bakeScriptPath", "bakeRenderPath",
"bakeWriteNodeName", "version"]
}
"render2d": ["bakeScriptPath", "bakeRenderPath",
"bakeWriteNodeName", "version"]
}
# list of family names to transfer to new family if present
families_transfer = ["render3d", "render2d", "ftrack", "slate"]
@ -195,7 +196,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"]
metadata_path = metadata_path.replace(mount_root, network_root)
metadata_path = os.path.normpath(metadata_path)
# Generate the payload for Deadline submission
payload = {
@ -222,9 +222,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Transfer the environment from the original job to this dependent
# job so they use the same environment
environment = job["Props"].get("Env", {})
environment["PYPE_METADATA_FILE"] = metadata_path
environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"]
environment["PYPE_LOG_NO_COLORS"] = "1"
try:
environment["PYPE_PYTHON_EXE"] = os.environ["PYPE_PYTHON_EXE"]
except KeyError:
# PYPE_PYTHON_EXE not set
pass
i = 0
for index, key in enumerate(environment):
if key.upper() in self.enviro_filter:
@ -276,7 +282,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# if override remove all frames we are expecting to be rendered
# so we'll copy only those missing from current render
if instance.data.get("overrideExistingFrame"):
for frame in range(start, end+1):
for frame in range(start, end + 1):
if frame not in r_col.indexes:
continue
r_col.indexes.remove(frame)
@ -348,10 +354,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
assert len(cols) == 1, "only one image sequence type is expected"
# create subset name `familyTaskSubset_AOV`
subset_name = 'render{}{}{}{}_{}'.format(
group_name = 'render{}{}{}{}'.format(
task[0].upper(), task[1:],
subset[0].upper(), subset[1:],
aov)
subset[0].upper(), subset[1:])
subset_name = '{}_{}'.format(group_name, aov)
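For example, with task "compositing", subset "main" and AOV "beauty" the naming above yields:

task, subset, aov = "compositing", "main", "beauty"
group_name = "render{}{}{}{}".format(
    task[0].upper(), task[1:], subset[0].upper(), subset[1:])
subset_name = "{}_{}".format(group_name, aov)
# group_name == "renderCompositingMain"
# subset_name == "renderCompositingMain_beauty"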
staging = os.path.dirname(list(cols[0])[0])
@ -366,6 +373,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
new_instance = copy(instance_data)
new_instance["subset"] = subset_name
new_instance["subsetGroup"] = group_name
ext = cols[0].tail.lstrip(".")
@ -446,6 +454,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"tags": ["review", "preview"] if preview else [],
}
if instance.get("multipartExr", False):
rep["tags"].append["multipartExr"]
representations.append(rep)
self._solve_families(instance, preview)
@ -458,7 +469,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"ext": ext,
"files": os.path.basename(r),
"stagingDir": os.path.dirname(r),
"anatomy_template": "publish",
"anatomy_template": "publish"
}
if r in bake_render_path:
rep.update({
@ -581,13 +592,26 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"pixelAspect": data.get("pixelAspect", 1),
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False)
}
if "prerender" in instance.data["families"]:
instance_skeleton_data.update({
"family": "prerender",
"families": []})
# transfer specific families from original instance to new render
for item in self.families_transfer:
if item in instance.data.get("families", []):
instance_skeleton_data["families"] += [item]
if "render.farm" in instance.data["families"]:
instance_skeleton_data.update({
"family": "render2d",
"families": ["render"] + [f for f in instance.data["families"]
if "render.farm" not in f]
})
# transfer specific properties from original instance based on
# mapping dictionary `instance_transfer`
for key, values in self.instance_transfer.items():

View file

@ -0,0 +1,11 @@
import avalon.maya
class CreateUnrealStaticMesh(avalon.maya.Creator):
name = "staticMeshMain"
label = "Unreal - Static Mesh"
family = "unrealStaticMesh"
icon = "cube"
def __init__(self, *args, **kwargs):
super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs)

View file

@ -11,6 +11,7 @@ class CreateYetiCache(avalon.maya.Creator):
label = "Yeti Cache"
family = "yeticache"
icon = "pagelines"
defaults = ["Main"]
def __init__(self, *args, **kwargs):
super(CreateYetiCache, self).__init__(*args, **kwargs)

View file

@ -10,6 +10,7 @@ class CreateYetiRig(avalon.maya.Creator):
label = "Yeti Rig"
family = "yetiRig"
icon = "usb"
defaults = ["Main"]
def process(self):

View file

@ -16,7 +16,7 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
from avalon import maya
@ -164,7 +164,7 @@ class AssStandinLoader(api.Loader):
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import avalon.maya.lib as lib

View file

@ -16,19 +16,19 @@ class LookLoader(pype.maya.plugin.ReferenceLoader):
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
def process_reference(self, context, name, namespace, options):
"""
Load and try to assign Lookdev to nodes based on relationship data
Load and try to assign Lookdev to nodes based on relationship data.
Args:
name:
namespace:
context:
data:
options:
Returns:
"""
import maya.cmds as cmds
from avalon import maya

View file

@ -65,8 +65,10 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
roots.add(pm.PyNode(node).getAllParents()[-2])
except: # noqa: E722
pass
for root in roots:
root.setParent(world=True)
if family not in ["layout", "setdress", "mayaAscii"]:
for root in roots:
root.setParent(world=True)
groupNode.zeroTransformPivots()
for root in roots:

View file

@ -6,10 +6,11 @@ from collections import defaultdict
from maya import cmds
from avalon import api
from avalon import api, io
from avalon.maya import lib as avalon_lib, pipeline
from pype.maya import lib
from pypeapp import config
from pprint import pprint
class YetiCacheLoader(api.Loader):
@ -101,12 +102,23 @@ class YetiCacheLoader(api.Loader):
def update(self, container, representation):
io.install()
namespace = container["namespace"]
container_node = container["objectName"]
fur_settings = io.find_one(
{"parent": representation["parent"], "name": "fursettings"}
)
pprint({"parent": representation["parent"], "name": "fursettings"})
pprint(fur_settings)
assert fur_settings is not None, (
"cannot find fursettings representation"
)
settings_fname = api.get_representation_path(fur_settings)
path = api.get_representation_path(representation)
# Get all node data
fname, ext = os.path.splitext(path)
settings_fname = "{}.fursettings".format(fname)
with open(settings_fname, "r") as fp:
settings = json.load(fp)
@ -147,13 +159,14 @@ class YetiCacheLoader(api.Loader):
cmds.delete(to_remove)
# replace frame in filename with %04d
RE_frame = re.compile(r"(\d+)(\.fur)$")
file_name = re.sub(RE_frame, r"%04d\g<2>", os.path.basename(path))
for cb_id, data in meta_data_lookup.items():
# Update cache file name
file_name = data["name"].replace(":", "_")
cache_file_path = "{}.%04d.fur".format(file_name)
data["attrs"]["cacheFileName"] = os.path.join(
path, cache_file_path)
os.path.dirname(path), file_name)
if cb_id not in scene_lookup:
@ -197,6 +210,12 @@ class YetiCacheLoader(api.Loader):
yeti_node = yeti_nodes[0]
for attr, value in data["attrs"].items():
# handle empty attribute strings. Those are reported
# as None, so their type is NoneType and this is not
# supported on attributes in Maya. We change it to
# empty string.
if value is None:
value = ""
lib.set_attribute(attr, value, yeti_node)
cmds.setAttr("{}.representation".format(container_node),

View file

@ -21,7 +21,8 @@ class YetiRigLoader(pype.maya.plugin.ReferenceLoader):
icon = "code-fork"
color = "orange"
def process_reference(self, context, name=None, namespace=None, data=None):
def process_reference(
self, context, name=None, namespace=None, options=None):
import maya.cmds as cmds
from avalon import maya

View file

@ -1,16 +0,0 @@
from maya import cmds
import pyblish.api
class CollectMayaCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Maya Current File"
hosts = ['maya']
def process(self, context):
"""Inject the current working file"""
current_file = cmds.file(query=True, sceneName=True)
context.data['currentFile'] = current_file

View file

@ -1,6 +1,7 @@
from maya import cmds
import pyblish.api
import json
class CollectInstances(pyblish.api.ContextPlugin):
@ -32,6 +33,13 @@ class CollectInstances(pyblish.api.ContextPlugin):
objectset = cmds.ls("*.id", long=True, type="objectSet",
recursive=True, objectsOnly=True)
ctx_frame_start = context.data['frameStart']
ctx_frame_end = context.data['frameEnd']
ctx_handle_start = context.data['handleStart']
ctx_handle_end = context.data['handleEnd']
ctx_frame_start_handle = context.data['frameStartHandle']
ctx_frame_end_handle = context.data['frameEndHandle']
context.data['objectsets'] = objectset
for objset in objectset:
@ -108,14 +116,36 @@ class CollectInstances(pyblish.api.ContextPlugin):
label = "{0} ({1})".format(name,
data["asset"])
if "handles" in data:
data["handleStart"] = data["handles"]
data["handleEnd"] = data["handles"]
# Append start frame and end frame to label if present
if "frameStart" and "frameEnd" in data:
data["frameStartHandle"] = data["frameStart"] - data["handleStart"]
data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]
# if frame range on maya set is the same as full shot range
# adjust the values to match the asset data
if (ctx_frame_start_handle == data["frameStart"]
and ctx_frame_end_handle == data["frameEnd"]): # noqa: W503, E501
data["frameStartHandle"] = ctx_frame_start_handle
data["frameEndHandle"] = ctx_frame_end_handle
data["frameStart"] = ctx_frame_start
data["frameEnd"] = ctx_frame_end
data["handleStart"] = ctx_handle_start
data["handleEnd"] = ctx_handle_end
# if there are user values on start and end frame not matching
# the asset, use them
else:
if "handles" in data:
data["handleStart"] = data["handles"]
data["handleEnd"] = data["handles"]
else:
data["handleStart"] = 0
data["handleEnd"] = 0
data["frameStartHandle"] = data["frameStart"] - data["handleStart"] # noqa: E501
data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] # noqa: E501
if "handles" in data:
data.pop('handles')
label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
int(data["frameEndHandle"]))
@ -127,7 +157,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
self.log.info("Found: \"%s\" " % instance.data["name"])
self.log.debug("DATA: \"%s\" " % instance.data)
self.log.debug(
"DATA: {} ".format(json.dumps(instance.data, indent=4)))
def sort_by_family(instance):
"""Sort by family"""

View file

@ -250,7 +250,8 @@ class CollectLook(pyblish.api.InstancePlugin):
# Remove sets that didn't have any members assigned in the end
# Thus the data will be limited to only what we need.
if not sets[objset]["members"]:
self.log.info("objset {}".format(sets[objset]))
if not sets[objset]["members"] or (not objset.endswith("SG")):
self.log.info("Removing redundant set information: "
"%s" % objset)
sets.pop(objset, None)

View file

@ -41,6 +41,7 @@ import re
import os
import types
import six
import json
from abc import ABCMeta, abstractmethod
from maya import cmds
@ -52,38 +53,40 @@ from avalon import maya, api
import pype.maya.lib as lib
R_SINGLE_FRAME = re.compile(r'^(-?)\d+$')
R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$')
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
R_LAYER_TOKEN = re.compile(
r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE)
r".*%l.*|.*<layer>.*|.*<renderlayer>.*", re.IGNORECASE
)
R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r"_%a|_<aov>|_<renderpass>", re.IGNORECASE)
# to remove unused renderman tokens
R_CLEAN_FRAME_TOKEN = re.compile(r'\.?<f\d>\.?', re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r'\.?<ext>\.?', re.IGNORECASE)
R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d>\.?", re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
R_SUBSTITUTE_LAYER_TOKEN = re.compile(
r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
r"%l|<layer>|<renderlayer>", re.IGNORECASE
)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
RENDERER_NAMES = {
'mentalray': 'MentalRay',
'vray': 'V-Ray',
'arnold': 'Arnold',
'renderman': 'Renderman',
'redshift': 'Redshift'
"mentalray": "MentalRay",
"vray": "V-Ray",
"arnold": "Arnold",
"renderman": "Renderman",
"redshift": "Redshift",
}
# not sure about the renderman image prefix
ImagePrefixes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'rmanGlobals.imageFileFormat',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
"mentalray": "defaultRenderGlobals.imageFilePrefix",
"vray": "vraySettings.fileNamePrefix",
"arnold": "defaultRenderGlobals.imageFilePrefix",
"renderman": "rmanGlobals.imageFileFormat",
"redshift": "defaultRenderGlobals.imageFilePrefix",
}
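With that table, reading the configured prefix for the active renderer is a single attribute lookup; a minimal sketch (assumes a running Maya session and reuses the module-level ImagePrefixes dict):

from maya import cmds  # available only inside a Maya session

def current_image_prefix():
    """Return the image file prefix configured for the active renderer."""
    renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer").lower()
    prefix_attr = ImagePrefixes.get(renderer)
    if prefix_attr is None:
        raise RuntimeError("Unsupported renderer: {}".format(renderer))
    return cmds.getAttr(prefix_attr)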
@ -97,21 +100,23 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
def process(self, context):
render_instance = None
for instance in context:
if 'rendering' in instance.data['families']:
if "rendering" in instance.data["families"]:
render_instance = instance
render_instance.data["remove"] = True
# make sure workfile instance publishing is enabled
if 'workfile' in instance.data['families']:
if "workfile" in instance.data["families"]:
instance.data["publish"] = True
if not render_instance:
self.log.info("No render instance found, skipping render "
"layer collection.")
self.log.info(
"No render instance found, skipping render "
"layer collection."
)
return
render_globals = render_instance
collected_render_layers = render_instance.data['setMembers']
collected_render_layers = render_instance.data["setMembers"]
filepath = context.data["currentFile"].replace("\\", "/")
asset = api.Session["AVALON_ASSET"]
workspace = context.data["workspaceDir"]
@ -126,22 +131,24 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
try:
expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1)
except IndexError:
msg = ("Invalid layer name in set [ {} ]".format(layer))
msg = "Invalid layer name in set [ {} ]".format(layer)
self.log.warning(msg)
continue
self.log.info("processing %s" % layer)
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = ("Render layer [ {} ] is not in "
"Render Setup".format(expected_layer_name))
msg = "Render layer [ {} ] is not in " "Render Setup".format(
expected_layer_name
)
self.log.warning(msg)
continue
# check if layer is renderable
if not maya_render_layers[expected_layer_name].isRenderable():
msg = ("Render layer [ {} ] is not "
"renderable".format(expected_layer_name))
msg = "Render layer [ {} ] is not " "renderable".format(
expected_layer_name
)
self.log.warning(msg)
continue
@ -150,26 +157,34 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
attachTo = []
if sets:
for s in sets:
attachTo.append({
"version": None, # we need integrator to get version
"subset": s,
"family": cmds.getAttr("{}.family".format(s))
})
if "family" not in cmds.listAttr(s):
continue
attachTo.append(
{
"version": None, # we need integrator for that
"subset": s,
"family": cmds.getAttr("{}.family".format(s)),
}
)
self.log.info(" -> attach render to: {}".format(s))
layer_name = "rs_{}".format(expected_layer_name)
# collect all frames we are expecting to be rendered
renderer = cmds.getAttr(
'defaultRenderGlobals.currentRenderer').lower()
"defaultRenderGlobals.currentRenderer"
).lower()
# handle various renderman names
if renderer.startswith('renderman'):
renderer = 'renderman'
if renderer.startswith("renderman"):
renderer = "renderman"
# return all expected files for all cameras and aovs in given
# frame range
exp_files = ExpectedFiles().get(renderer, layer_name)
assert exp_files, ("no file names were generated, this is bug")
exf = ExpectedFiles()
exp_files = exf.get(renderer, layer_name)
self.log.info("multipart: {}".format(exf.multipart))
assert exp_files, "no file names were generated, this is bug"
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV
@ -177,7 +192,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
if attachTo:
assert len(exp_files[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported")
"subset is not supported"
)
# append full path
full_exp_files = []
@ -202,6 +218,28 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
full_paths.append(full_path)
aov_dict["beauty"] = full_paths
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
frame_end_render = int(self.get_render_attribute(
"endFrame", layer=layer_name))
if (int(context.data['frameStartHandle']) == frame_start_render
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
handle_start = context.data['handleStart']
handle_end = context.data['handleEnd']
frame_start = context.data['frameStart']
frame_end = context.data['frameEnd']
frame_start_handle = context.data['frameStartHandle']
frame_end_handle = context.data['frameEndHandle']
else:
handle_start = 0
handle_end = 0
frame_start = frame_start_render
frame_end = frame_end_render
frame_start_handle = frame_start_render
frame_end_handle = frame_end_render
full_exp_files.append(aov_dict)
self.log.info(full_exp_files)
self.log.info("collecting layer: {}".format(layer_name))
@ -210,35 +248,33 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"subset": expected_layer_name,
"attachTo": attachTo,
"setMembers": layer_name,
"multipartExr": exf.multipart,
"publish": True,
"frameStart": int(context.data["assetEntity"]['data']['frameStart']),
"frameEnd": int(context.data["assetEntity"]['data']['frameEnd']),
"frameStartHandle": int(self.get_render_attribute("startFrame",
layer=layer_name)),
"frameEndHandle": int(self.get_render_attribute("endFrame",
layer=layer_name)),
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_start_handle,
"frameEndHandle": frame_end_handle,
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer_name),
"handleStart": int(context.data["assetEntity"]['data']['handleStart']),
"handleEnd": int(context.data["assetEntity"]['data']['handleEnd']),
# instance subset
"family": "renderlayer",
"families": ["renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath,
"expectedFiles": full_exp_files,
"resolutionWidth": cmds.getAttr("defaultResolution.width"),
"resolutionHeight": cmds.getAttr("defaultResolution.height"),
"pixelAspect": cmds.getAttr("defaultResolution.pixelAspect")
"pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"),
}
# Apply each user defined attribute as data
@ -261,13 +297,14 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# Define nice label
label = "{0} ({1})".format(expected_layer_name, data["asset"])
label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
int(data["frameEndHandle"]))
label += " [{0}-{1}]".format(
int(data["frameStartHandle"]), int(data["frameEndHandle"])
)
instance = context.create_instance(expected_layer_name)
instance.data["label"] = label
instance.data.update(data)
pass
self.log.debug("data: {}".format(json.dumps(data, indent=4)))
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without
@ -298,7 +335,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
machine_list = attributes["machineList"]
if machine_list:
key = "Whitelist" if attributes["whitelist"] else "Blacklist"
options['renderGlobals'][key] = machine_list
options["renderGlobals"][key] = machine_list
# Suspend publish job
state = "Suspended" if attributes["suspendPublishJob"] else "Active"
@ -354,32 +391,41 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
return rset.getOverrides()
def get_render_attribute(self, attr, layer):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=layer)
return lib.get_attr_in_layer(
"defaultRenderGlobals.{}".format(attr), layer=layer
)
class ExpectedFiles:
multipart = False
def get(self, renderer, layer):
if renderer.lower() == 'arnold':
return ExpectedFilesArnold(layer).get_files()
elif renderer.lower() == 'vray':
return ExpectedFilesVray(layer).get_files()
elif renderer.lower() == 'redshift':
return ExpectedFilesRedshift(layer).get_files()
elif renderer.lower() == 'mentalray':
return ExpectedFilesMentalray(layer).get_files()
elif renderer.lower() == 'renderman':
return ExpectedFilesRenderman(layer).get_files()
if renderer.lower() == "arnold":
return self._get_files(ExpectedFilesArnold(layer))
elif renderer.lower() == "vray":
return self._get_files(ExpectedFilesVray(layer))
elif renderer.lower() == "redshift":
return self._get_files(ExpectedFilesRedshift(layer))
elif renderer.lower() == "mentalray":
return self._get_files(ExpectedFilesMentalray(layer))
elif renderer.lower() == "renderman":
return self._get_files(ExpectedFilesRenderman(layer))
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer))
"unsupported {}".format(renderer)
)
def _get_files(self, renderer):
files = renderer.get_files()
self.multipart = renderer.multipart
return files
@six.add_metaclass(ABCMeta)
class AExpectedFiles:
renderer = None
layer = None
multipart = False
def __init__(self, layer):
self.layer = layer
@ -393,7 +439,8 @@ class AExpectedFiles:
file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer))
"Unsupported renderer {}".format(self.renderer)
)
return file_prefix
def _get_layer_data(self):
@ -419,7 +466,7 @@ class AExpectedFiles:
if not file_prefix:
raise RuntimeError("Image prefix not set")
default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
default_ext = cmds.getAttr("defaultRenderGlobals.imfPluginKey")
# ________________________________________________
# __________________/ ______________________________________________/
@ -440,10 +487,10 @@ class AExpectedFiles:
layer_name = self.layer
if self.layer.startswith("rs_"):
layer_name = self.layer[3:]
start_frame = int(self.get_render_attribute('startFrame'))
end_frame = int(self.get_render_attribute('endFrame'))
frame_step = int(self.get_render_attribute('byFrameStep'))
padding = int(self.get_render_attribute('extensionPadding'))
start_frame = int(self.get_render_attribute("startFrame"))
end_frame = int(self.get_render_attribute("endFrame"))
frame_step = int(self.get_render_attribute("byFrameStep"))
padding = int(self.get_render_attribute("extensionPadding"))
scene_data = {
"frameStart": start_frame,
@ -456,7 +503,7 @@ class AExpectedFiles:
"renderer": renderer,
"defaultExt": default_ext,
"filePrefix": file_prefix,
"enabledAOVs": enabled_aovs
"enabledAOVs": enabled_aovs,
}
return scene_data
@ -472,21 +519,24 @@ class AExpectedFiles:
# in Redshift
(R_REMOVE_AOV_TOKEN, ""),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, "")
(R_CLEAN_EXT_TOKEN, ""),
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"])):
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
expected_files.append(
'{}.{}.{}'.format(file_prefix,
str(frame).rjust(
layer_data["padding"], "0"),
layer_data["defaultExt"]))
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
layer_data["defaultExt"],
)
)
return expected_files
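Given the substitutions above, a layer_data like the one below produces plain frame-padded names; a worked example with assumed values:

layer_data = {
    "frameStart": 1001, "frameEnd": 1003, "frameStep": 1,
    "padding": 4, "defaultExt": "exr",
}
file_prefix = "shot010_beauty"  # hypothetical prefix after token substitution
expected = [
    "{}.{}.{}".format(file_prefix, str(f).rjust(layer_data["padding"], "0"),
                      layer_data["defaultExt"])
    for f in range(layer_data["frameStart"], layer_data["frameEnd"] + 1,
                   layer_data["frameStep"])
]
# ['shot010_beauty.1001.exr', 'shot010_beauty.1002.exr',
#  'shot010_beauty.1003.exr']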
def _generate_aov_file_sequences(self, layer_data):
@ -502,7 +552,7 @@ class AExpectedFiles:
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
(R_SUBSTITUTE_AOV_TOKEN, aov[0]),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, "")
(R_CLEAN_EXT_TOKEN, ""),
)
for regex, value in mappings:
@ -510,14 +560,17 @@ class AExpectedFiles:
aov_files = []
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"])):
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
aov_files.append(
'{}.{}.{}'.format(
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
aov[1]))
aov[1],
)
)
# if we have more than one renderable camera, append
# camera name to AOV to allow per camera AOVs.
@ -551,17 +604,19 @@ class AExpectedFiles:
return expected_files
def get_renderable_cameras(self):
cam_parents = [cmds.listRelatives(x, ap=True)[-1]
for x in cmds.ls(cameras=True)]
cam_parents = [
cmds.listRelatives(x, ap=True)[-1] for x in cmds.ls(cameras=True)
]
renderable_cameras = []
for cam in cam_parents:
renderable = False
if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))):
if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
renderable = True
for override in self.get_layer_overrides(
'{}.renderable'.format(cam), self.layer):
"{}.renderable".format(cam), self.layer
):
renderable = self.maya_is_true(override)
if renderable:
@ -587,16 +642,18 @@ class AExpectedFiles:
if connections:
for connection in connections:
if connection:
node_name = connection.split('.')[0]
if cmds.nodeType(node_name) == 'renderLayer':
attr_name = '%s.value' % '.'.join(
connection.split('.')[:-1])
node_name = connection.split(".")[0]
if cmds.nodeType(node_name) == "renderLayer":
attr_name = "%s.value" % ".".join(
connection.split(".")[:-1]
)
if node_name == layer:
yield cmds.getAttr(attr_name)
def get_render_attribute(self, attr):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=self.layer)
return lib.get_attr_in_layer(
"defaultRenderGlobals.{}".format(attr), layer=self.layer
)
class ExpectedFilesArnold(AExpectedFiles):
@ -604,25 +661,28 @@ class ExpectedFilesArnold(AExpectedFiles):
# Arnold AOV driver extension mapping
# Is there a better way?
aiDriverExtension = {
'jpeg': 'jpg',
'exr': 'exr',
'deepexr': 'exr',
'png': 'png',
'tiff': 'tif',
'mtoa_shaders': 'ass', # TODO: research what those last two should be
'maya': ''
"jpeg": "jpg",
"exr": "exr",
"deepexr": "exr",
"png": "png",
"tiff": "tif",
"mtoa_shaders": "ass", # TODO: research what those last two should be
"maya": "",
}
def __init__(self, layer):
super(ExpectedFilesArnold, self).__init__(layer)
self.renderer = 'arnold'
self.renderer = "arnold"
def get_aovs(self):
enabled_aovs = []
try:
if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
if not (
cmds.getAttr("defaultArnoldRenderOptions.aovMode")
and not cmds.getAttr("defaultArnoldDriver.mergeAOVs") # noqa: W503, E501
):
# AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet. In
@ -635,46 +695,35 @@ class ExpectedFilesArnold(AExpectedFiles):
# AOVs are set to be rendered separately. We should expect
# <RenderPass> token in path.
ai_aovs = [n for n in cmds.ls(type='aiAOV')]
ai_aovs = [n for n in cmds.ls(type="aiAOV")]
for aov in ai_aovs:
enabled = self.maya_is_true(
cmds.getAttr('{}.enabled'.format(aov)))
ai_driver = cmds.listConnections(
'{}.outputs'.format(aov))[0]
ai_translator = cmds.getAttr(
'{}.aiTranslator'.format(ai_driver))
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
ai_driver = cmds.listConnections("{}.outputs".format(aov))[0]
ai_translator = cmds.getAttr("{}.aiTranslator".format(ai_driver))
try:
aov_ext = self.aiDriverExtension[ai_translator]
except KeyError:
msg = ('Unrecognized arnold '
'driver format for AOV - {}').format(
cmds.getAttr('{}.name'.format(aov))
)
msg = (
"Unrecognized Arnold driver format for AOV - {}"
).format(cmds.getAttr("{}.name".format(aov)))
raise AOVError(msg)
for override in self.get_layer_overrides(
'{}.enabled'.format(aov), self.layer):
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
# If aov RGBA is selected, arnold will translate it to `beauty`
aov_name = cmds.getAttr('%s.name' % aov)
if aov_name == 'RGBA':
aov_name = 'beauty'
enabled_aovs.append(
(
aov_name,
aov_ext
)
)
aov_name = cmds.getAttr("%s.name" % aov)
if aov_name == "RGBA":
aov_name = "beauty"
enabled_aovs.append((aov_name, aov_ext))
# Append 'beauty' as this is Arnold's
# default. If <RenderPass> token is specified and no AOVs are
# defined, this will be used.
enabled_aovs.append(
(
u'beauty',
cmds.getAttr('defaultRenderGlobals.imfPluginKey')
)
(u"beauty", cmds.getAttr("defaultRenderGlobals.imfPluginKey"))
)
return enabled_aovs
@ -688,7 +737,7 @@ class ExpectedFilesVray(AExpectedFiles):
def __init__(self, layer):
super(ExpectedFilesVray, self).__init__(layer)
self.renderer = 'vray'
self.renderer = "vray"
def get_renderer_prefix(self):
prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
@ -703,7 +752,9 @@ class ExpectedFilesVray(AExpectedFiles):
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
) # noqa: E501
return expected_files
@ -712,9 +763,12 @@ class ExpectedFilesVray(AExpectedFiles):
try:
# really? do we set it in vray just by selecting multichannel exr?
if cmds.getAttr(
"vraySettings.imageFormatStr") == "exr (multichannel)":
if (
cmds.getAttr("vraySettings.imageFormatStr")
== "exr (multichannel)" # noqa: W503
):
# AOVs are merged in multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet. In
@ -724,35 +778,39 @@ class ExpectedFilesVray(AExpectedFiles):
# anyway.
return enabled_aovs
default_ext = cmds.getAttr('vraySettings.imageFormatStr')
default_ext = cmds.getAttr("vraySettings.imageFormatStr")
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
vr_aovs = [n for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"])]
vr_aovs = [
n
for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"]
)
]
# todo: find out how to detect multichannel exr for vray
for aov in vr_aovs:
enabled = self.maya_is_true(
cmds.getAttr('{}.enabled'.format(aov)))
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
'{}.enabled'.format(aov), 'rs_{}'.format(self.layer)):
"{}.enabled".format(aov), "rs_{}".format(self.layer)
):
enabled = self.maya_is_true(override)
if enabled:
# todo: find out how vray sets the format for AOVs
enabled_aovs.append(
(
self._get_vray_aov_name(aov),
default_ext)
)
(self._get_vray_aov_name(aov), default_ext))
return enabled_aovs
def _get_vray_aov_name(self, node):
# Get render element pass type
vray_node_attr = next(attr for attr in cmds.listAttr(node)
if attr.startswith("vray_name"))
vray_node_attr = next(
attr
for attr in cmds.listAttr(node)
if attr.startswith("vray_name")
)
pass_type = vray_node_attr.rsplit("_", 1)[-1]
# Support V-Ray extratex explicit name (if set by user)
@ -770,11 +828,11 @@ class ExpectedFilesVray(AExpectedFiles):
class ExpectedFilesRedshift(AExpectedFiles):
# mapping redshift extension dropdown values to strings
ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg']
ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
def __init__(self, layer):
super(ExpectedFilesRedshift, self).__init__(layer)
self.renderer = 'redshift'
self.renderer = "redshift"
def get_renderer_prefix(self):
prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
@ -789,7 +847,9 @@ class ExpectedFilesRedshift(AExpectedFiles):
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
) # noqa: E501
return expected_files
@ -798,8 +858,10 @@ class ExpectedFilesRedshift(AExpectedFiles):
try:
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")):
cmds.getAttr("redshiftOptions.exrForceMultilayer")
):
# AOVs are merged in multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when Render Setting windows was not opened yet. In
@ -810,34 +872,30 @@ class ExpectedFilesRedshift(AExpectedFiles):
return enabled_aovs
default_ext = self.ext_mapping[
cmds.getAttr('redshiftOptions.imageFormat')
cmds.getAttr("redshiftOptions.imageFormat")
]
rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]
rs_aovs = [n for n in cmds.ls(type="RedshiftAOV")]
# todo: find out how to detect multichannel exr for redshift
for aov in rs_aovs:
enabled = self.maya_is_true(
cmds.getAttr('{}.enabled'.format(aov)))
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
'{}.enabled'.format(aov), self.layer):
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append(
(
cmds.getAttr('%s.name' % aov),
default_ext
)
(cmds.getAttr("%s.name" % aov), default_ext)
)
return enabled_aovs
class ExpectedFilesRenderman(AExpectedFiles):
def __init__(self, layer):
super(ExpectedFilesRenderman, self).__init__(layer)
self.renderer = 'renderman'
self.renderer = "renderman"
def get_aovs(self):
enabled_aovs = []
@ -849,19 +907,14 @@ class ExpectedFilesRenderman(AExpectedFiles):
if aov_name == "rmanDefaultDisplay":
aov_name = "beauty"
enabled = self.maya_is_true(
cmds.getAttr("{}.enable".format(aov)))
enabled = self.maya_is_true(cmds.getAttr("{}.enable".format(aov)))
for override in self.get_layer_overrides(
'{}.enable'.format(aov), self.layer):
"{}.enable".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append(
(
aov_name,
default_ext
)
)
enabled_aovs.append((aov_name, default_ext))
return enabled_aovs
@ -881,9 +934,9 @@ class ExpectedFilesRenderman(AExpectedFiles):
for aov, files in expected_files[0].items():
new_files = []
for file in files:
new_file = "{}/{}/{}".format(layer_data["sceneName"],
layer_data["layerName"],
file)
new_file = "{}/{}/{}".format(
layer_data["sceneName"], layer_data["layerName"], file
)
new_files.append(new_file)
new_aovs[aov] = new_files
@ -891,9 +944,8 @@ class ExpectedFilesRenderman(AExpectedFiles):
class ExpectedFilesMentalray(AExpectedFiles):
def __init__(self, layer):
raise UnimplementedRendererException('Mentalray not implemented')
raise UnimplementedRendererException("Mentalray not implemented")
def get_aovs(self):
return []

View file

@ -60,7 +60,7 @@ class CollectReview(pyblish.api.InstancePlugin):
data['frameEndHandle'] = instance.data["frameEndHandle"]
data["frameStart"] = instance.data["frameStart"]
data["frameEnd"] = instance.data["frameEnd"]
data['handles'] = instance.data['handles']
data['handles'] = instance.data.get('handles', None)
data['step'] = instance.data['step']
data['fps'] = instance.data['fps']
cmds.setAttr(str(instance) + '.active', 1)

View file

@ -1,21 +1,20 @@
from maya import cmds
import pyblish.api
import avalon.api
import os
from pype.maya import lib
from pype.maya import cmds
class CollectMayaScene(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.1
order = pyblish.api.CollectorOrder - 0.01
label = "Maya Workfile"
hosts = ['maya']
def process(self, context):
"""Inject the current working file"""
current_file = context.data['currentFile']
current_file = cmds.file(query=True, sceneName=True)
context.data['currentFile'] = current_file
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
@ -24,9 +23,6 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
data = {}
for key, value in lib.collect_animation_data().items():
data[key] = value
# create instance
instance = context.create_instance(name=filename)
subset = 'workfile' + task.capitalize()
@ -38,12 +34,16 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
"publish": True,
"family": 'workfile',
"families": ['workfile'],
"setMembers": [current_file]
"setMembers": [current_file],
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
"handleStart": context.data['handleStart'],
"handleEnd": context.data['handleEnd']
})
data['representations'] = [{
'name': 'ma',
'ext': 'ma',
'name': ext.lstrip("."),
'ext': ext.lstrip("."),
'files': file,
"stagingDir": folder,
}]
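# A small standalone demo of how the representation name/ext are derived
# above; the scene path is hypothetical:
import os
folder, file = os.path.split("/proj/sh010/work/sh010_v001.mb")
filename, ext = os.path.splitext(file)
print(ext.lstrip("."))  # mb -> used as both representation name and ext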

View file

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api
class CollectUnrealStaticMesh(pyblish.api.InstancePlugin):
"""Collect unreal static mesh
Ensures only a single frame (the current frame) is extracted. This
also sets the correct FBX options for later extraction.
Note:
This is a workaround so that the `pype.model` family can use the
same pointcache extractor implementation as animation and pointcaches.
This always enforces the "current" frame to be published.
"""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Model Data"
families = ["unrealStaticMesh"]
def process(self, instance):
# add fbx family to trigger fbx extractor
instance.data["families"].append("fbx")
# set fbx overrides on instance
instance.data["smoothingGroups"] = True
instance.data["smoothMesh"] = True
instance.data["triangulate"] = True
frame = cmds.currentTime(query=True)
instance.data["frameStart"] = frame
instance.data["frameEnd"] = frame

View file

@ -49,6 +49,10 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
attr_data = {}
for attr in SETTINGS:
current = cmds.getAttr("%s.%s" % (shape, attr))
# change None to empty string as Maya doesn't support
# NoneType in attributes
if current is None:
current = ""
attr_data[attr] = current
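# A standalone sketch of the None -> "" sanitization above (the attribute
# names here are hypothetical, not the real Yeti SETTINGS):
settings = {"cacheFileName": None, "renderDensity": 1.0}
attr_data = {k: ("" if v is None else v) for k, v in settings.items()}
print(attr_data)  # {'cacheFileName': '', 'renderDensity': 1.0}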
# Get transform data

View file

@ -56,7 +56,8 @@ class ExtractAnimation(pype.api.Extractor):
"writeCreases": True,
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True)
"worldSpace": instance.data.get("worldSpace", True),
"writeColorSets": instance.data.get("writeColorSets", False)
}
if not instance.data.get("includeParentHierarchy", True):

View file

@ -212,12 +212,11 @@ class ExtractFBX(pype.api.Extractor):
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': 'mov',
'name': 'fbx',
'ext': 'fbx',
'files': filename,
"stagingDir": stagingDir,
}
instance.data["representations"].append(representation)
self.log.info("Extract FBX successful to: {0}".format(path))

View file

@ -3,24 +3,23 @@ import glob
import contextlib
import clique
import capture
#
import pype.maya.lib as lib
import pype.api
#
from maya import cmds, mel
from maya import cmds
import pymel.core as pm
# TODO: move codec settings to presets
class ExtractQuicktime(pype.api.Extractor):
"""Extract Quicktime from viewport capture.
class ExtractPlayblast(pype.api.Extractor):
"""Extract viewport playblast.
Takes the review camera and creates a review playblast (image sequence)
from the viewport capture.
"""
label = "Quicktime"
label = "Extract Playblast"
hosts = ["maya"]
families = ["review"]
optional = True
@ -29,7 +28,7 @@ class ExtractQuicktime(pype.api.Extractor):
self.log.info("Extracting capture..")
# get scene fps
fps = mel.eval('currentTimeUnitToFPS()')
fps = instance.data.get("fps") or instance.context.data.get("fps")
# if start and end frames cannot be determined, get them
# from Maya timeline
@ -39,6 +38,7 @@ class ExtractQuicktime(pype.api.Extractor):
start = cmds.playbackOptions(query=True, animationStartTime=True)
if end is None:
end = cmds.playbackOptions(query=True, animationEndTime=True)
self.log.info("start: {}, end: {}".format(start, end))
# get cameras
@ -47,7 +47,7 @@ class ExtractQuicktime(pype.api.Extractor):
try:
preset = lib.load_capture_preset(data=capture_preset)
except:
except Exception:
preset = {}
self.log.info('using viewport preset: {}'.format(preset))
@ -55,21 +55,12 @@ class ExtractQuicktime(pype.api.Extractor):
preset['format'] = "image"
# preset['compression'] = "qt"
preset['quality'] = 95
preset['compression'] = "jpg"
preset['compression'] = "png"
preset['start_frame'] = start
preset['end_frame'] = end
preset['camera_options'] = {
"displayGateMask": False,
"displayResolution": False,
"displayFilmGate": False,
"displayFieldChart": False,
"displaySafeAction": False,
"displaySafeTitle": False,
"displayFilmPivot": False,
"displayFilmOrigin": False,
"overscan": 1.0,
"depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)),
}
camera_option = preset.get("camera_option", {})
camera_option["depthOfField"] = cmds.getAttr(
"{0}.depthOfField".format(camera))
stagingdir = self.staging_dir(instance)
filename = "{0}".format(instance.name)
@ -90,8 +81,8 @@ class ExtractQuicktime(pype.api.Extractor):
filename = preset.get("filename", "%TEMP%")
# Force viewer to False in call to capture because we have our own
# viewer opening call to allow a signal to trigger between playblast
# and viewer
# viewer opening call to allow a signal to trigger between
# playblast and viewer
preset['viewer'] = False
# Remove panel key since it's internal value to capture_gui
@ -112,8 +103,8 @@ class ExtractQuicktime(pype.api.Extractor):
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': 'mov',
'name': 'png',
'ext': 'png',
'files': collected_frames,
"stagingDir": stagingdir,
"frameStart": start,
@ -133,7 +124,6 @@ class ExtractQuicktime(pype.api.Extractor):
To work around this we just glob.glob() for any file extensions and
assume the latest modified file is the correct file and return it.
"""
# Catch cancelled playblast
if filepath is None:
@ -164,7 +154,6 @@ class ExtractQuicktime(pype.api.Extractor):
return filepath
@contextlib.contextmanager
def maintained_time():
ct = cmds.currentTime(query=True)

View file

@ -25,12 +25,8 @@ class ExtractAlembic(pype.api.Extractor):
nodes = instance[:]
# Collect the start and end including handles
start = instance.data.get("frameStart", 1)
end = instance.data.get("frameEnd", 1)
handles = instance.data.get("handles", 0)
if handles:
start -= handles
end += handles
start = float(instance.data.get("frameStartHandle", 1))
end = float(instance.data.get("frameEndHandle", 1))
attrs = instance.data.get("attr", "").split(";")
attrs = [value for value in attrs if value.strip()]
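# A quick demo of the attribute-string parsing above, with a hypothetical
# value for the instance's "attr" data:
attrs = "visibility;;mySpareAttr".split(";")
attrs = [value for value in attrs if value.strip()]
print(attrs)  # ['visibility', 'mySpareAttr']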

View file

@ -147,7 +147,7 @@ class ExtractYetiRig(pype.api.Extractor):
nodes = instance.data["setMembers"]
resources = instance.data.get("resources", {})
with disconnect_plugs(settings, members):
with yetigraph_attribute_values(destination_folder, resources):
with yetigraph_attribute_values(resources_dir, resources):
with maya.attribute_values(attr_value):
cmds.select(nodes, noExpand=True)
cmds.file(maya_path,

View file

@ -1,18 +1,19 @@
import pyblish.api
import pype.api
from maya import cmds
class ValidateFrameRange(pyblish.api.InstancePlugin):
"""Valides the frame ranges.
Checks the `startFrame`, `endFrame` and `handles` data.
This does NOT ensure there's actual data present.
This is an optional validator checking if the frame range on the instance
matches the one of the asset. It also validates the render frame range of
render layers.
This validates:
- `startFrame` is lower than or equal to the `endFrame`.
- must have both the `startFrame` and `endFrame` data.
- The `handles` value is not lower than zero.
Repair action will change everything to match the asset.
This can be turned off by the artist to allow custom ranges.
"""
label = "Validate Frame Range"
@ -21,25 +22,66 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
"pointcache",
"camera",
"renderlayer",
"colorbleed.vrayproxy"]
"review",
"yeticache"]
optional = True
actions = [pype.api.RepairAction]
def process(self, instance):
context = instance.context
start = instance.data.get("frameStart", None)
end = instance.data.get("frameEnd", None)
handles = instance.data.get("handles", None)
frame_start_handle = int(context.data.get("frameStartHandle"))
frame_end_handle = int(context.data.get("frameEndHandle"))
handles = int(context.data.get("handles"))
handle_start = int(context.data.get("handleStart"))
handle_end = int(context.data.get("handleEnd"))
frame_start = int(context.data.get("frameStart"))
frame_end = int(context.data.get("frameEnd"))
# Check if any of the values are present
if any(value is None for value in [start, end]):
raise ValueError("No time values for this instance. "
"(Missing `startFrame` or `endFrame`)")
inst_start = int(instance.data.get("frameStartHandle"))
inst_end = int(instance.data.get("frameEndHandle"))
self.log.info("Comparing start (%s) and end (%s)" % (start, end))
if start > end:
raise RuntimeError("The start frame is a higher value "
"than the end frame: "
"{0}>{1}".format(start, end))
# basic sanity checks
assert frame_start_handle <= frame_end_handle, (
"start frame must be lower than or equal to end frame")
if handles is not None:
if handles < 0.0:
raise RuntimeError("Handles are set to a negative value")
assert handles >= 0, ("handles cannot have negative values")
# compare with data on instance
errors = []
if inst_start != frame_start_handle:
errors.append("Instance start frame [ {} ] doesn't "
"match the one set on the asset [ {} ]: "
"{}/{}/{}/{} (handle/start/end/handle)".format(
inst_start,
frame_start_handle,
handle_start, frame_start, frame_end, handle_end
))
if inst_end != frame_end_handle:
errors.append("Instance end frame [ {} ] doesn't "
"match the one set on the asset [ {} ]: "
"{}/{}/{}/{} (handle/start/end/handle)".format(
inst_end,
frame_end_handle,
handle_start, frame_start, frame_end, handle_end
))
for e in errors:
self.log.error(e)
assert len(errors) == 0, ("Frame range settings are incorrect")
@classmethod
def repair(cls, instance):
"""
Repair instance container to match asset data.
"""
cmds.setAttr(
"{}.frameStart".format(instance.data["name"]),
instance.context.data.get("frameStartHandle"))
cmds.setAttr(
"{}.frameEnd".format(instance.data["name"]),
instance.context.data.get("frameEndHandle"))
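# A sketch of the comparison this validator performs, reduced to plain
# numbers (all values hypothetical):
frame_start, frame_end = 1001, 1100
handle_start, handle_end = 10, 10
frame_start_handle = frame_start - handle_start  # 991
frame_end_handle = frame_end + handle_end        # 1110
inst_start, inst_end = 991, 1110  # as read from the instance
assert frame_start_handle <= frame_end_handle
assert inst_start == frame_start_handle and inst_end == frame_end_handle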

View file

@ -13,13 +13,17 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""Validates the global render settings
* File Name Prefix must start with: `maya/<Scene>`
all other token are customizable but sane values are:
all other tokens are customizable but sane values for Arnold are:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
<Camera> token is supported also, usefull for multiple renderable
<Camera> token is supported also, useful for multiple renderable
cameras per render layer.
For Redshift omit the <RenderPass> token. Redshift will append it
automatically if AOVs are enabled, and if you use multipart EXR
it doesn't make much sense.
* Frame Padding must be:
* default: 4
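# A hedged illustration of the <RenderPass> token check this validator
# performs (see the R_AOV_TOKEN searches below); the regex here is only
# an assumption for the demo, not the plugin's real pattern:
import re
R_AOV_TOKEN = re.compile(r"<renderpass>", re.IGNORECASE)
prefix = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
print(bool(R_AOV_TOKEN.search(prefix)))  # True -> flagged for Redshift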
@ -127,8 +131,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
# no vray checks implemented yet
pass
elif renderer == "redshift":
# no redshift check implemented yet
pass
if re.search(cls.R_AOV_TOKEN, prefix):
invalid = True
cls.log.error("Do not use AOV token [ {} ] - "
"Redshift automatically append AOV name and "
"it doesn't make much sense with "
"Multipart EXR".format(prefix))
elif renderer == "renderman":
file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat")
dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir")
@ -143,8 +152,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
dir_prefix))
else:
multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
if multichannel:
multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
if multipart:
if re.search(cls.R_AOV_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "

View file

@ -38,7 +38,8 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
if compute:
out_set = next(x for x in instance if x.endswith("out_SET"))
instance_nodes = pc.sets(out_set, query=True)
instance_nodes.extend([x.getShape() for x in instance_nodes])
instance_nodes.extend(
[x.getShape() for x in instance_nodes if x.getShape()])
scene_nodes = pc.ls(type="transform") + pc.ls(type="mesh")
scene_nodes = set(scene_nodes) - set(instance_nodes)

View file

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin):
"""Validate if mesh is made of triangles for Unreal Engine"""
order = pype.api.ValidateMeshOrder
hosts = ["maya"]
families = ["unrealStaticMesh"]
category = "geometry"
label = "Mesh is Triangulated"
actions = [pype.maya.action.SelectInvalidAction]
@classmethod
def get_invalid(cls, instance):
invalid = []
meshes = cmds.ls(instance, type="mesh", long=True)
for mesh in meshes:
faces = cmds.polyEvaluate(mesh, f=True)
tris = cmds.polyEvaluate(mesh, t=True)
if faces != tris:
invalid.append(mesh)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
assert len(invalid) == 0, (
"Found meshes that are not triangulated")

View file

@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
import re
class ValidateUnrealStaticmeshName(pyblish.api.InstancePlugin):
"""Validate name of Unreal Static Mesh
Unreal's naming convention states that a static mesh should start with the
`SM` prefix - SM_[Name]_## (e.g. SM_cube_01). This plugin also validates
other types of meshes - collision meshes:
UBX_[RenderMeshName]_##:
Boxes are created with the Box objects type in
Max or with the Cube polygonal primitive in Maya.
You cannot move the vertices around or deform it
in any way to make it something other than a
rectangular prism, or else it will not work.
UCP_[RenderMeshName]_##:
Capsules are created with the Capsule object type.
The capsule does not need to have many segments
(8 is a good number) at all because it is
converted into a true capsule for collision. Like
boxes, you should not move the individual
vertices around.
USP_[RenderMeshName]_##:
Spheres are created with the Sphere object type.
The sphere does not need to have many segments
(8 is a good number) at all because it is
converted into a true sphere for collision. Like
boxes, you should not move the individual
vertices around.
UCX_[RenderMeshName]_##:
Convex objects can be any completely closed
convex 3D shape. For example, a box can also be
a convex object
This validator also checks if collision mesh [RenderMeshName] matches one
of SM_[RenderMeshName].
"""
optional = True
order = pype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["unrealStaticMesh"]
label = "Unreal StaticMesh Name"
actions = [pype.maya.action.SelectInvalidAction]
regex_mesh = r"SM_(?P<renderName>.*)_(\d{2})"
regex_collision = r"((UBX)|(UCP)|(USP)|(UCX))_(?P<renderName>.*)_(\d{2})"
@classmethod
def get_invalid(cls, instance):
# find out if supplied transform is group or not
def is_group(groupName):
try:
children = cmds.listRelatives(groupName, children=True)
for child in children:
if not cmds.ls(child, transforms=True):
return False
return True
except Exception:
return False
invalid = []
content_instance = instance.data.get("setMembers", None)
if not content_instance:
cls.log.error("Instance has no nodes!")
return True
descendants = cmds.listRelatives(content_instance,
allDescendents=True,
fullPath=True) or []
descendants = cmds.ls(descendants, noIntermediate=True, long=True)
trns = cmds.ls(descendants, long=False, type=('transform'))
# filter out groups
filter = [node for node in trns if not is_group(node)]
# compile regex for testing names
sm_r = re.compile(cls.regex_mesh)
cl_r = re.compile(cls.regex_collision)
sm_names = []
col_names = []
for obj in filter:
sm_m = sm_r.match(obj)
if sm_m is None:
# test if it matches collision mesh
cl_m = cl_r.match(obj)
if cl_m is None:
cls.log.error("invalid mesh name on: {}".format(obj))
invalid.append(obj)
else:
col_names.append((cl_m.group("renderName"), obj))
else:
sm_names.append(sm_m.group("renderName"))
for c_mesh in col_names:
if c_mesh[0] not in sm_names:
cls.log.error(("collision name {} doesn't match any "
"static mesh names.").format(obj))
invalid.append(c_mesh[1])
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Model naming is invalid. See log.")
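# A quick demo of the two naming regexes defined above, on plain strings:
import re
sm_r = re.compile(r"SM_(?P<renderName>.*)_(\d{2})")
cl_r = re.compile(r"((UBX)|(UCP)|(USP)|(UCX))_(?P<renderName>.*)_(\d{2})")
print(sm_r.match("SM_cube_01").group("renderName"))   # cube
print(cl_r.match("UCX_cube_01").group("renderName"))  # cube
print(sm_r.match("pCube1"))                           # None -> invalid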

View file

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api
import pype.api
class ValidateUnrealUpAxis(pyblish.api.ContextPlugin):
"""Validate if Z is set as up axis in Maya"""
optional = True
order = pype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["unrealStaticMesh"]
label = "Unreal Up-Axis check"
actions = [pype.api.RepairAction]
def process(self, context):
assert cmds.upAxis(q=True, axis=True) == "z", (
"Invalid axis set as up axis"
)
@classmethod
def repair(cls, instance):
cmds.upAxis(axis="z", rotateView=True)
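# The same query/repair calls as a standalone snippet (requires a running
# Maya session):
from maya import cmds
if cmds.upAxis(q=True, axis=True) != "z":
    cmds.upAxis(axis="z", rotateView=True)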

View file

@ -1,103 +1,10 @@
from collections import OrderedDict
from pype.nuke import plugin
from pype.nuke import (
plugin,
lib as pnlib)
import nuke
class CreateWriteRender(plugin.PypeCreator):
# change this to template preset
name = "WriteRender"
label = "Create Write Render"
hosts = ["nuke"]
n_class = "write"
family = "render"
icon = "sign-out"
defaults = ["Main", "Mask"]
def __init__(self, *args, **kwargs):
super(CreateWriteRender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
if not (len(nodes) < 2):
msg = ("Select only one node. The node you want to connect to, "
"or tick off `Use selection`")
log.error(msg)
nuke.message(msg)
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
if self.presets.get('fpath_template'):
self.log.info("Adding template path from preset")
write_data.update(
{"fpath_template": self.presets["fpath_template"]}
)
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
write_node = pnlib.create_write_node(
self.data["subset"],
write_data,
input=selected_node)
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
return write_node
class CreateWritePrerender(plugin.PypeCreator):
# change this to template preset
name = "WritePrerender"
@ -125,8 +32,6 @@ class CreateWritePrerender(plugin.PypeCreator):
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
@ -137,8 +42,17 @@ class CreateWritePrerender(plugin.PypeCreator):
nodes = self.nodes
if not (len(nodes) < 2):
msg = ("Select only one node. The node you want to connect to, "
"or tick off `Use selection`")
msg = ("Select only one node. The node "
"you want to connect to, "
"or tick off `Use selection`")
self.log.error(msg)
nuke.message(msg)
if len(nodes) == 0:
msg = (
"No nodes selected. Please select a single node to connect"
" to or tick off `Use selection`"
)
self.log.error(msg)
nuke.message(msg)
@ -174,13 +88,15 @@ class CreateWritePrerender(plugin.PypeCreator):
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}"})
"fpath_template": ("{work}/prerenders/nuke/{subset}"
"/{subset}.{frame}.{ext}")})
write_node = pnlib.create_write_node(
self.data["subset"],
write_data,
input=selected_node,
prenodes=[])
prenodes=[],
review=False)
# relinking to collected connections
for i, input in enumerate(inputs):

View file

@ -0,0 +1,109 @@
from collections import OrderedDict
from pype.nuke import (
plugin,
lib as pnlib)
import nuke
class CreateWriteRender(plugin.PypeCreator):
# change this to template preset
name = "WriteRender"
label = "Create Write Render"
hosts = ["nuke"]
n_class = "write"
family = "render"
icon = "sign-out"
defaults = ["Main", "Mask"]
def __init__(self, *args, **kwargs):
super(CreateWriteRender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
if not (len(nodes) < 2):
msg = ("Select only one node. "
"The node you want to connect to, "
"or tick off `Use selection`")
self.log.error(msg)
nuke.message(msg)
if len(nodes) == 0:
msg = (
"No nodes selected. Please select a single node to connect"
" to or tick off `Use selection`"
)
self.log.error(msg)
nuke.message(msg)
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
if self.presets.get('fpath_template'):
self.log.info("Adding template path from preset")
write_data.update(
{"fpath_template": self.presets["fpath_template"]}
)
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": ("{work}/renders/nuke/{subset}"
"/{subset}.{frame}.{ext}")})
write_node = pnlib.create_write_node(
self.data["subset"],
write_data,
input=selected_node)
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
return write_node
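# A hedged illustration of how the fpath_template above is expected to
# resolve; the key values are hypothetical:
tmpl = "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"
print(tmpl.format(work="/proj/sh010/work", subset="renderMain",
                  frame="%04d", ext="exr"))
# -> /proj/sh010/work/renders/nuke/renderMain/renderMain.%04d.exr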

View file

@ -92,6 +92,7 @@ class LoadMov(api.Loader):
"source",
"plate",
"render",
"prerender",
"review"] + presets["families"]
representations = [

View file

@ -70,7 +70,7 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
families = ["render2d", "source", "plate", "render"]
families = ["render2d", "source", "plate", "render", "prerender"]
representations = ["exr", "dpx", "jpg", "jpeg", "png"]
label = "Load sequence"
@ -87,7 +87,7 @@ class LoadSequence(api.Loader):
version = context['version']
version_data = version.get("data", {})
repr_id = context["representation"]["_id"]
self.log.info("version_data: {}\n".format(version_data))
self.log.debug(
"Representation id `{}` ".format(repr_id))

View file

@ -52,6 +52,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
# establish families
family = avalon_knob_data["family"]
families_ak = avalon_knob_data.get("families")
families = list()
# except disabled nodes but exclude backdrops in test
@ -68,16 +69,14 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
# Add all nodes in group instances.
if node.Class() == "Group":
# only alter families for render family
if ("render" in family):
# check if node is not disabled
families.append(avalon_knob_data["families"])
if "write" in families_ak:
if node["render"].value():
self.log.info("flagged for render")
add_family = "render.local"
add_family = "{}.local".format(family)
# dealing with local/farm rendering
if node["render_farm"].value():
self.log.info("adding render farm family")
add_family = "render.farm"
add_family = "{}.farm".format(family)
instance.data["transfer"] = False
families.append(add_family)
else:
@ -89,9 +88,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
instance.append(i)
node.end()
family = avalon_knob_data["family"]
families = list()
families_ak = avalon_knob_data.get("families")
self.log.debug("__ families: `{}`".format(families))
if families_ak:
families.append(families_ak)
@ -104,22 +101,6 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
resolution_height = format.height()
pixel_aspect = format.pixelAspect()
if node.Class() not in "Read":
if "render" not in node.knobs().keys():
pass
elif node["render"].value():
self.log.info("flagged for render")
add_family = "render.local"
# dealing with local/farm rendering
if node["render_farm"].value():
self.log.info("adding render farm family")
add_family = "render.farm"
instance.data["transfer"] = False
families.append(add_family)
else:
# add family into families
families.insert(0, family)
instance.data.update({
"subset": subset,
"asset": os.environ["AVALON_ASSET"],

View file

@ -8,7 +8,7 @@ class CollectSlate(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.09
label = "Collect Slate Node"
hosts = ["nuke"]
families = ["write"]
families = ["render", "render.local", "render.farm"]
def process(self, instance):
node = instance[0]

View file

@ -1,7 +1,6 @@
import os
import nuke
import pyblish.api
import pype.api as pype
@pyblish.api.log
@ -13,9 +12,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
hosts = ["nuke", "nukeassist"]
families = ["write"]
# preset attributes
sync_workfile_version = True
def process(self, instance):
# adding 2d focused rendering
instance.data["families"].append("render2d")
families = instance.data["families"]
node = None
for x in instance:
@ -53,10 +54,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# get version to instance for integration
instance.data['version'] = instance.context.data["version"]
if not next((f for f in families
if "prerender" in f),
None) and self.sync_workfile_version:
# get version to instance for integration
instance.data['version'] = instance.context.data["version"]
self.log.debug('Write Version: %s' % instance.data('version'))
self.log.debug('Write Version: %s' % instance.data('version'))
# create label
name = node.name()
@ -67,7 +71,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
int(last_frame)
)
if 'render' in instance.data['families']:
if [fm for fm in families
if fm in ["render", "prerender"]]:
if "representations" not in instance.data:
instance.data["representations"] = list()
@ -95,7 +100,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
# this will only run if slate frame is not already
# rendered from previews publishes
if "slate" in instance.data["families"] \
and (frame_length == collected_frames_len):
and (frame_length == collected_frames_len) \
and ("prerender" not in instance.data["families"]):
frame_slate_str = "%0{}d".format(
len(str(last_frame))) % (first_frame - 1)
slate_frame = collected_frames[0].replace(
@ -124,6 +130,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
deadlinePriority = group_node["deadlinePriority"].value()
families = [f for f in instance.data["families"] if "write" not in f]
instance.data.update({
"versionData": version_data,
"path": path,
@ -144,4 +151,12 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"deadlinePriority": deadlinePriority
})
if "prerender" in families:
instance.data.update({
"family": "prerender",
"families": []
})
self.log.debug("families: {}".format(families))
self.log.debug("instance.data: {}".format(instance.data))

Some files were not shown because too many files have changed in this diff