Mirror of https://github.com/ynput/ayon-core.git (synced 2026-01-02 08:54:53 +01:00)

Commit 677a71dbba: add batch render layer for deadline submission
321 changed files with 8186 additions and 3768 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 18 changes)
@@ -35,6 +35,15 @@ body:
       label: Version
       description: What version are you running? Look to OpenPype Tray
       options:
+        - 3.16.0-nightly.1
+        - 3.15.12
+        - 3.15.12-nightly.4
+        - 3.15.12-nightly.3
+        - 3.15.12-nightly.2
+        - 3.15.12-nightly.1
+        - 3.15.11
+        - 3.15.11-nightly.5
+        - 3.15.11-nightly.4
         - 3.15.11-nightly.3
         - 3.15.11-nightly.2
         - 3.15.11-nightly.1
@@ -126,15 +135,6 @@ body:
         - 3.14.5-nightly.3
         - 3.14.5-nightly.2
         - 3.14.5-nightly.1
-        - 3.14.4
-        - 3.14.4-nightly.4
-        - 3.14.4-nightly.3
-        - 3.14.4-nightly.2
-        - 3.14.4-nightly.1
-        - 3.14.3
-        - 3.14.3-nightly.7
-        - 3.14.3-nightly.6
-        - 3.14.3-nightly.5
     validations:
       required: true
   - type: dropdown
CHANGELOG.md (1022 changes)
File diff suppressed because it is too large.
@@ -49,7 +49,7 @@ def deprecated(new_destination):


 @deprecated("openpype.pipeline.publish.get_errored_instances_from_context")
-def get_errored_instances_from_context(context):
+def get_errored_instances_from_context(context, plugin=None):
     """
     Deprecated:
         Since 3.14.* will be removed in 3.16.* or later.

@@ -57,7 +57,7 @@ def get_errored_instances_from_context(context):
     from openpype.pipeline.publish import get_errored_instances_from_context

-    return get_errored_instances_from_context(context)
+    return get_errored_instances_from_context(context, plugin=plugin)


 @deprecated("openpype.pipeline.publish.get_errored_plugins_from_context")

@@ -97,11 +97,9 @@ class RepairAction(pyblish.api.Action):
         # Get the errored instances
         self.log.info("Finding failed instances..")
-        errored_instances = get_errored_instances_from_context(context)
-
-        # Apply pyblish.logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
-        for instance in instances:
+        errored_instances = get_errored_instances_from_context(context,
+                                                               plugin=plugin)
+        for instance in errored_instances:
             plugin.repair(instance)
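Reviewer note: the deprecated shim now forwards a `plugin` argument, so callers no longer run `pyblish.api.instances_by_plugin` themselves. A minimal sketch of what the plugin-aware helper in openpype.pipeline.publish presumably does, inferred from the RepairAction change above; the shape of `context.data["results"]` follows pyblish conventions and is an assumption here, not a quote of the actual implementation:

import pyblish.api


def get_errored_instances_from_context(context, plugin=None):
    """Return instances that errored during publishing.

    When `plugin` is given, narrow the result to instances processed
    by that plug-in (what RepairAction relies on above).
    """
    instances = []
    for result in context.data.get("results", []):
        # skip results without an error or without a bound instance
        if result["error"] is None or result["instance"] is None:
            continue
        instances.append(result["instance"])

    if plugin is not None:
        instances = pyblish.api.instances_by_plugin(instances, plugin)
    return instances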
@@ -220,7 +220,6 @@ def new_representation_doc(
         "parent": version_id,
         "name": name,
         "data": data,
-
         # Imprint shortcut to context for performance reasons.
         "context": context
     }

@@ -708,7 +707,11 @@ class OperationsSession(object):
         return operation


-def create_project(project_name, project_code, library_project=False):
+def create_project(
+    project_name,
+    project_code,
+    library_project=False,
+):
     """Create project using OpenPype settings.

     This project creation function is not validating project document on

@@ -752,7 +755,7 @@ def create_project(project_name, project_code, library_project=False):
         "name": project_name,
         "data": {
             "code": project_code,
-            "library_project": library_project
+            "library_project": library_project,
         },
         "schema": CURRENT_PROJECT_SCHEMA
     }
Binary file not shown.
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.25"
+<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.26"
     ExtensionBundleName="com.openpype.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
 <ExtensionList>
     <Extension Id="com.openpype.AE.panel" Version="1.0" />
@@ -104,6 +104,39 @@
         });
     </script>

+    <script type=text/javascript>
+        $(function() {
+            $("a#create-placeholder-button").bind("click", function() {
+                RPC.call('AfterEffects.create_placeholder_route').then(function (data) {
+                }, function (error) {
+                    alert(error);
+                });
+            });
+        });
+    </script>
+
+    <script type=text/javascript>
+        $(function() {
+            $("a#update-placeholder-button").bind("click", function() {
+                RPC.call('AfterEffects.update_placeholder_route').then(function (data) {
+                }, function (error) {
+                    alert(error);
+                });
+            });
+        });
+    </script>
+
+    <script type=text/javascript>
+        $(function() {
+            $("a#build-workfile-button").bind("click", function() {
+                RPC.call('AfterEffects.build_workfile_template_route').then(function (data) {
+                }, function (error) {
+                    alert(error);
+                });
+            });
+        });
+    </script>
+
     <script type=text/javascript>
         $(function() {
             $("a#experimental-button").bind("click", function() {

@@ -127,9 +160,15 @@
     <div><a href=# id=loader-button><button class="hostFontSize">Load...</button></a></div>
     <div><a href=# id=publish-button><button class="hostFontSize">Publish...</button></a></div>
     <div><a href=# id=sceneinventory-button><button class="hostFontSize">Manage...</button></a></div>
     <div><a href=# id=separator0><button class="hostFontSize"> </button></a></div>
     <div><a href=# id=setresolution-button><button class="hostFontSize">Set Resolution</button></a></div>
     <div><a href=# id=setframes-button><button class="hostFontSize">Set Frame Range</button></a></div>
     <div><a href=# id=setall-button><button class="hostFontSize">Apply All Settings</button></a></div>
+    <div><a href=# id=separator1><button class="hostFontSize"> </button></a></div>
+    <div><a href=# id=create-placeholder-button><button class="hostFontSize">Create placeholder</button></a></div>
+    <div><a href=# id=update-placeholder-button><button class="hostFontSize">Update placeholder</button></a></div>
+    <div><a href=# id=build-workfile-button><button class="hostFontSize">Build Workfile from template</button></a></div>
+    <div><a href=# id=separator3><button class="hostFontSize"> </button></a></div>
     <div><a href=# id=experimental-button><button class="hostFontSize">Experimental Tools...</button></a></div>
 </div>
@@ -107,6 +107,17 @@ function main(websocket_url){
         });
     });

+    RPC.addRoute('AfterEffects.add_item', function (data) {
+        log.warn('Server called client route "add_item":', data);
+        var escapedName = EscapeStringForJSX(data.name);
+        return runEvalScript("addItem('" + escapedName +"', " +
+            "'" + data.item_type + "')")
+            .then(function(result){
+                log.warn("get_items: " + result);
+                return result;
+            });
+    });
+
     RPC.addRoute('AfterEffects.get_items', function (data) {
         log.warn('Server called client route "get_items":', data);
         return runEvalScript("getItems(" + data.comps + "," +

@@ -118,6 +129,15 @@ function main(websocket_url){
         });
     });

+    RPC.addRoute('AfterEffects.select_items', function (data) {
+        log.warn('Server called client route "select_items":', data);
+        return runEvalScript("selectItems(" + JSON.stringify(data.items) + ")")
+            .then(function(result){
+                log.warn("select_items: " + result);
+                return result;
+            });
+    });
+
     RPC.addRoute('AfterEffects.get_selected_items', function (data) {
         log.warn('Server called client route "get_selected_items":', data);

@@ -280,7 +300,7 @@ function main(websocket_url){
     RPC.addRoute('AfterEffects.add_item_as_layer', function (data) {
         log.warn('Server called client route "add_item_as_layer":', data);
         return runEvalScript("addItemAsLayerToComp(" + data.comp_id + ", " +
-            data.item_id + "," +
+            data.item_id + "," +
             " null )")
             .then(function(result){
                 log.warn("addItemAsLayerToComp: " + result);

@@ -288,6 +308,16 @@ function main(websocket_url){
         });
     });

+    RPC.addRoute('AfterEffects.add_item_instead_placeholder', function (data) {
+        log.warn('Server called client route "add_item_instead_placeholder":', data);
+        return runEvalScript("addItemInstead(" + data.placeholder_item_id + ", " +
+            data.item_id + ")")
+            .then(function(result){
+                log.warn("add_item_instead_placeholder: " + result);
+                return result;
+            });
+    });
+
     RPC.addRoute('AfterEffects.render', function (data) {
         log.warn('Server called client route "render":', data);
         var escapedPath = EscapeStringForJSX(data.folder_url);

@@ -312,6 +342,20 @@ function main(websocket_url){
         });
     });

+    RPC.addRoute('AfterEffects.add_placeholder', function (data) {
+        log.warn('Server called client route "add_placeholder":', data);
+        var escapedName = EscapeStringForJSX(data.name);
+        return runEvalScript("addPlaceholder('" + escapedName +"',"+
+            data.width + ',' +
+            data.height + ',' +
+            data.fps + ',' +
+            data.duration + ")")
+            .then(function(result){
+                log.warn("add_placeholder: " + result);
+                return result;
+            });
+    });
+
     RPC.addRoute('AfterEffects.close', function (data) {
         log.warn('Server called client route "close":', data);
         return runEvalScript("close()");
@@ -112,6 +112,32 @@ function getActiveDocumentFullName(){
         return _prepareError("No file open currently");
     }
 }

+function addItem(name, item_type){
+    /**
+     * Adds comp or folder to project items.
+     *
+     * Could be called when creating publishable instance to prepare
+     * composition (and render queue).
+     *
+     * Args:
+     *     name (str): composition name
+     *     item_type (str): COMP|FOLDER
+     * Returns:
+     *     SingleItemValue: eg {"result": VALUE}
+     */
+    if (item_type == "COMP"){
+        // dummy values, will be rewritten later
+        item = app.project.items.addComp(name, 1920, 1060, 1, 10, 25);
+    }else if (item_type == "FOLDER"){
+        item = app.project.items.addFolder(name);
+    }else{
+        return _prepareError("Only 'COMP' or 'FOLDER' can be created");
+    }
+    return _prepareSingleValue(item.id);
+}
+
 function getItems(comps, folders, footages){
     /**
      * Returns JSON representation of compositions and

@@ -139,6 +165,24 @@ function getItems(comps, folders, footages){
 }

+function selectItems(items){
+    /**
+     * Select all items from `items`, deselect other.
+     *
+     * Args:
+     *     items (list)
+     */
+    for (i = 1; i <= app.project.items.length; ++i){
+        item = app.project.items[i];
+        if (items.indexOf(item.id) > -1){
+            item.selected = true;
+        }else{
+            item.selected = false;
+        }
+    }
+}
+
 function getSelectedItems(comps, folders, footages){
     /**
      * Returns list of selected items from Project menu

@@ -280,12 +324,12 @@ function setLabelColor(comp_id, color_idx){
     }
 }

-function replaceItem(comp_id, path, item_name){
+function replaceItem(item_id, path, item_name){
     /**
      * Replaces loaded file with new file and updates name
      *
      * Args:
-     *     comp_id (int): id of composition, not a index!
+     *     item_id (int): id of composition, not a index!
      *     path (string): absolute path to new file
      *     item_name (string): new composition name
      */

@@ -295,7 +339,7 @@ function replaceItem(comp_id, path, item_name){
     if (!fp.exists){
         return _prepareError("File " + path + " not found.");
     }
-    var item = app.project.itemByID(comp_id);
+    var item = app.project.itemByID(item_id);
     if (item){
         try{
             if (isFileSequence(item)) {

@@ -311,7 +355,7 @@ function replaceItem(comp_id, path, item_name){
             fp.close();
         }
     }else{
-        return _prepareError("There is no composition with "+ comp_id);
+        return _prepareError("There is no item with "+ item_id);
     }
     app.endUndoGroup();
 }

@@ -821,6 +865,67 @@ function printMsg(msg){
     alert(msg);
 }

+function addPlaceholder(name, width, height, fps, duration){
+    /** Add AE PlaceholderItem to Project list.
+     *
+     * PlaceholderItem chosen as it doesn't require existing file and
+     * might potentially allow nice functionality in the future.
+     */
+    app.beginUndoGroup('change comp properties');
+    try{
+        item = app.project.importPlaceholder(name, width, height,
+            fps, duration);
+
+        return _prepareSingleValue(item.id);
+    }catch (error) {
+        writeLn(_prepareError("Cannot add placeholder " + error.toString()));
+    }
+    app.endUndoGroup();
+}
+
+function addItemInstead(placeholder_item_id, item_id){
+    /** Add new loaded item in place of load placeholder.
+     *
+     * Each placeholder could be placed multiple times into multiple
+     * composition. This loops through all compositions and
+     * places loaded item under placeholder.
+     * Placeholder item gets deleted later separately according
+     * to configuration in Settings.
+     *
+     * Args:
+     *     placeholder_item_id (int)
+     *     item_id (int)
+     */
+    var item = app.project.itemByID(item_id);
+    if (!item){
+        return _prepareError("There is no item with "+ item_id);
+    }
+
+    app.beginUndoGroup('Add loaded items');
+    for (i = 1; i <= app.project.items.length; ++i){
+        var comp = app.project.items[i];
+        if (!(comp instanceof CompItem)){
+            continue
+        }
+
+        var i = 1;
+        while (i <= comp.numLayers) {
+            var layer = comp.layer(i);
+            var layer_source = layer.source;
+            if (layer_source && layer_source.id == placeholder_item_id){
+                var new_layer = comp.layers.add(item);
+                new_layer.moveAfter(layer);
+                // copy all(?) properties to new layer
+                layer.property("ADBE Transform Group").copyToComp(new_layer);
+                i = i + 1;
+            }
+            i = i + 1;
+        }
+    }
+    app.endUndoGroup();
+}
+
 function _prepareSingleValue(value){
     return JSON.stringify({"result": value})
 }
@@ -357,3 +357,33 @@ class AfterEffectsRoute(WebSocketRoute):

         # Required return statement.
         return "nothing"

+    def create_placeholder_route(self):
+        from openpype.hosts.aftereffects.api.workfile_template_builder import \
+            create_placeholder
+        partial_method = functools.partial(create_placeholder)
+
+        ProcessLauncher.execute_in_main_thread(partial_method)
+
+        # Required return statement.
+        return "nothing"
+
+    def update_placeholder_route(self):
+        from openpype.hosts.aftereffects.api.workfile_template_builder import \
+            update_placeholder
+        partial_method = functools.partial(update_placeholder)
+
+        ProcessLauncher.execute_in_main_thread(partial_method)
+
+        # Required return statement.
+        return "nothing"
+
+    def build_workfile_template_route(self):
+        from openpype.hosts.aftereffects.api.workfile_template_builder import \
+            build_workfile_template
+        partial_method = functools.partial(build_workfile_template)
+
+        ProcessLauncher.execute_in_main_thread(partial_method)
+
+        # Required return statement.
+        return "nothing"
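Reviewer note: all three new routes share one shape: a call-time import (presumably to avoid a circular import at module load), a functools.partial wrapper, and a hop to the main (Qt) thread. A hedged generalization of that pattern, with `entry_point` as an illustrative stand-in; `ProcessLauncher` is the class the routes above already use:

import functools

def _dispatch_to_main_thread(entry_point, *args, **kwargs):
    # wrap so ProcessLauncher can invoke it later without arguments
    partial_method = functools.partial(entry_point, *args, **kwargs)
    ProcessLauncher.execute_in_main_thread(partial_method)
    # websocket route handlers must return something serializable
    return "nothing"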
@@ -10,6 +10,10 @@ from openpype.pipeline import (
     register_creator_plugin_path,
     AVALON_CONTAINER_ID,
 )
+from openpype.hosts.aftereffects.api.workfile_template_builder import (
+    AEPlaceholderLoadPlugin,
+    AEPlaceholderCreatePlugin
+)
 from openpype.pipeline.load import any_outdated_containers
 import openpype.hosts.aftereffects

@@ -116,6 +120,12 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
             item["id"] = "publish_context"
             self.stub.imprint(item["id"], item)

+    def get_workfile_build_placeholder_plugins(self):
+        return [
+            AEPlaceholderLoadPlugin,
+            AEPlaceholderCreatePlugin
+        ]
+
     # created instances section
     def list_instances(self):
         """List all created instances from current workfile which
@@ -1,7 +1,11 @@
+import six
+from abc import ABCMeta
+
 from openpype.pipeline import LoaderPlugin
 from .launch_logic import get_stub


+@six.add_metaclass(ABCMeta)
 class AfterEffectsLoader(LoaderPlugin):
     @staticmethod
     def get_stub():
openpype/hosts/aftereffects/api/workfile_template_builder.py (new file, 271 lines)
@@ -0,0 +1,271 @@
import os.path
import uuid
import shutil

from openpype.pipeline import registered_host
from openpype.tools.workfile_template_build import (
    WorkfileBuildPlaceholderDialog,
)
from openpype.pipeline.workfile.workfile_template_builder import (
    AbstractTemplateBuilder,
    PlaceholderPlugin,
    LoadPlaceholderItem,
    CreatePlaceholderItem,
    PlaceholderLoadMixin,
    PlaceholderCreateMixin
)
from openpype.hosts.aftereffects.api import get_stub
from openpype.hosts.aftereffects.api.lib import set_settings

PLACEHOLDER_SET = "PLACEHOLDERS_SET"
PLACEHOLDER_ID = "openpype.placeholder"


class AETemplateBuilder(AbstractTemplateBuilder):
    """Concrete implementation of AbstractTemplateBuilder for AE"""

    def import_template(self, path):
        """Import template into current scene.
        Block if a template is already loaded.

        Args:
            path (str): A path to current template (usually given by
                get_template_preset implementation)

        Returns:
            bool: Whether the template was successfully imported or not
        """
        stub = get_stub()
        if not os.path.exists(path):
            stub.print_msg(f"Template file on {path} doesn't exist.")
            return

        stub.save()
        workfile_path = stub.get_active_document_full_name()
        shutil.copy2(path, workfile_path)
        stub.open(workfile_path)

        return True


class AEPlaceholderPlugin(PlaceholderPlugin):
    """Contains generic methods for all PlaceholderPlugins."""

    def collect_placeholders(self):
        """Collect info from file metadata about created placeholders.

        Returns:
            (list) (LoadPlaceholderItem)
        """
        output = []
        scene_placeholders = self._collect_scene_placeholders()
        for item in scene_placeholders:
            if item.get("plugin_identifier") != self.identifier:
                continue

            if isinstance(self, AEPlaceholderLoadPlugin):
                item = LoadPlaceholderItem(item["uuid"],
                                           item["data"],
                                           self)
            elif isinstance(self, AEPlaceholderCreatePlugin):
                item = CreatePlaceholderItem(item["uuid"],
                                             item["data"],
                                             self)
            else:
                raise NotImplementedError(f"Not implemented for {type(self)}")

            output.append(item)

        return output

    def update_placeholder(self, placeholder_item, placeholder_data):
        """Resave changed properties for placeholders"""
        item_id, metadata_item = self._get_item(placeholder_item)
        stub = get_stub()
        if not item_id:
            stub.print_msg("Cannot find item for "
                           f"{placeholder_item.scene_identifier}")
            return
        metadata_item["data"] = placeholder_data
        stub.imprint(item_id, metadata_item)

    def _get_item(self, placeholder_item):
        """Returns item id and item metadata for placeholder from file meta"""
        stub = get_stub()
        placeholder_uuid = placeholder_item.scene_identifier
        for metadata_item in stub.get_metadata():
            if not metadata_item.get("is_placeholder"):
                continue
            if placeholder_uuid in metadata_item.get("uuid"):
                return metadata_item["members"][0], metadata_item
        return None, None

    def _collect_scene_placeholders(self):
        """Cache placeholder data to shared data.

        Returns:
            (list) of dicts
        """
        placeholder_items = self.builder.get_shared_populate_data(
            "placeholder_items"
        )
        if not placeholder_items:
            placeholder_items = []
            for item in get_stub().get_metadata():
                if not item.get("is_placeholder"):
                    continue
                placeholder_items.append(item)

            self.builder.set_shared_populate_data(
                "placeholder_items", placeholder_items
            )
        return placeholder_items

    def _imprint_item(self, item_id, name, placeholder_data, stub):
        if not item_id:
            raise ValueError("Couldn't create a placeholder")
        container_data = {
            "id": "openpype.placeholder",
            "name": name,
            "is_placeholder": True,
            "plugin_identifier": self.identifier,
            "uuid": str(uuid.uuid4()),  # scene_identifier
            "data": placeholder_data,
            "members": [item_id]
        }
        stub.imprint(item_id, container_data)


class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin):
    """Adds Create placeholder.

    This adds composition and runs Create
    """
    identifier = "aftereffects.create"
    label = "AfterEffects create"

    def create_placeholder(self, placeholder_data):
        stub = get_stub()
        name = "CREATEPLACEHOLDER"
        item_id = stub.add_item(name, "COMP")

        self._imprint_item(item_id, name, placeholder_data, stub)

    def populate_placeholder(self, placeholder):
        """Replace 'placeholder' with publishable instance.

        Renames prepared composition name, creates publishable instance, sets
        frame/duration settings according to DB.
        """
        pre_create_data = {"use_selection": True}
        item_id, item = self._get_item(placeholder)
        get_stub().select_items([item_id])
        self.populate_create_placeholder(placeholder, pre_create_data)

        # apply settings for populated composition
        item_id, metadata_item = self._get_item(placeholder)
        set_settings(True, True, [item_id])

    def get_placeholder_options(self, options=None):
        return self.get_create_plugin_options(options)


class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
    identifier = "aftereffects.load"
    label = "AfterEffects load"

    def create_placeholder(self, placeholder_data):
        """Creates AE's Placeholder item in Project items list.

        Sets dummy resolution/duration/fps settings, will be replaced when
        populated.
        """
        stub = get_stub()
        name = "LOADERPLACEHOLDER"
        item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)

        self._imprint_item(item_id, name, placeholder_data, stub)

    def populate_placeholder(self, placeholder):
        """Use Openpype Loader from `placeholder` to create new FootageItems

        New FootageItems are created, files are imported.
        """
        self.populate_load_placeholder(placeholder)
        errors = placeholder.get_errors()
        stub = get_stub()
        if errors:
            stub.print_msg("\n".join(errors))
        else:
            if not placeholder.data["keep_placeholder"]:
                metadata = stub.get_metadata()
                for item in metadata:
                    if not item.get("is_placeholder"):
                        continue
                    scene_identifier = item.get("uuid")
                    if (scene_identifier and
                            scene_identifier == placeholder.scene_identifier):
                        stub.delete_item(item["members"][0])
                stub.remove_instance(placeholder.scene_identifier, metadata)

    def get_placeholder_options(self, options=None):
        return self.get_load_plugin_options(options)

    def load_succeed(self, placeholder, container):
        placeholder_item_id, _ = self._get_item(placeholder)
        item_id = container.id
        get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)


def build_workfile_template(*args, **kwargs):
    builder = AETemplateBuilder(registered_host())
    builder.build_template(*args, **kwargs)


def update_workfile_template(*args):
    builder = AETemplateBuilder(registered_host())
    builder.rebuild_template()


def create_placeholder(*args):
    """Called when new workfile placeholder should be created."""
    host = registered_host()
    builder = AETemplateBuilder(host)
    window = WorkfileBuildPlaceholderDialog(host, builder)
    window.exec_()


def update_placeholder(*args):
    """Called after placeholder item is selected to modify it."""
    host = registered_host()
    builder = AETemplateBuilder(host)

    stub = get_stub()
    selected_items = stub.get_selected_items(True, True, True)

    if len(selected_items) != 1:
        stub.print_msg("Please select just 1 placeholder")
        return

    selected_id = selected_items[0].id
    placeholder_item = None

    placeholder_items_by_id = {
        placeholder_item.scene_identifier: placeholder_item
        for placeholder_item in builder.get_placeholders()
    }
    for metadata_item in stub.get_metadata():
        if not metadata_item.get("is_placeholder"):
            continue
        if selected_id in metadata_item.get("members"):
            placeholder_item = placeholder_items_by_id.get(
                metadata_item["uuid"])
            break

    if not placeholder_item:
        stub.print_msg("Didn't find placeholder metadata. "
                       "Remove and re-create placeholder.")
        return

    window = WorkfileBuildPlaceholderDialog(host, builder)
    window.set_update_mode(placeholder_item)
    window.exec_()
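Reviewer note: a hedged usage sketch of the new module's entry points as they would be driven from the panel routes above. Only the imported names come from this diff; wiring the host up front is an assumption about the surrounding session:

from openpype.pipeline import install_host
from openpype.hosts.aftereffects.api import AfterEffectsHost
from openpype.hosts.aftereffects.api.workfile_template_builder import (
    build_workfile_template,
    create_placeholder,
    update_placeholder,
)

install_host(AfterEffectsHost())

# 1. artist drops a placeholder into the template workfile
create_placeholder()        # opens WorkfileBuildPlaceholderDialog

# 2. artist tweaks an existing placeholder (exactly one item selected)
update_placeholder()

# 3. building a workfile resolves every placeholder through the
#    AEPlaceholderLoadPlugin / AEPlaceholderCreatePlugin pair
build_workfile_template()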
@@ -35,6 +35,8 @@ class AEItem(object):
     instance_id = attr.ib(default=None)  # New Publisher
     width = attr.ib(default=None)
     height = attr.ib(default=None)
+    is_placeholder = attr.ib(default=False)
+    uuid = attr.ib(default=False)


 class AfterEffectsServerStub():

@@ -220,6 +222,16 @@ class AfterEffectsServerStub():
         )
         return self._to_records(self._handle_return(res))

+    def select_items(self, items):
+        """
+            Select items in Project list
+        Args:
+            items (list): of int item ids
+        """
+        self.websocketserver.call(
+            self.client.call('AfterEffects.select_items', items=items))
+
     def get_selected_items(self, comps, folders=False, footages=False):
         """
             Same as get_items but using selected items only

@@ -240,6 +252,21 @@ class AfterEffectsServerStub():
         )
         return self._to_records(self._handle_return(res))

+    def add_item(self, name, item_type):
+        """
+            Adds either composition or folder to project item list.
+
+        Args:
+            name (str)
+            item_type (str): COMP|FOLDER
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.add_item',
+                                         name=name,
+                                         item_type=item_type))
+
+        return self._handle_return(res)
+
     def get_item(self, item_id):
         """
             Returns metadata for particular 'item_id' or None

@@ -316,7 +343,7 @@ class AfterEffectsServerStub():

         return self._handle_return(res)

-    def remove_instance(self, instance_id):
+    def remove_instance(self, instance_id, metadata=None):
         """
             Removes instance with 'instance_id' from file's metadata and
             saves them.

@@ -328,7 +355,10 @@ class AfterEffectsServerStub():
         """
         cleaned_data = []

-        for instance in self.get_metadata():
+        if metadata is None:
+            metadata = self.get_metadata()
+
+        for instance in metadata:
             inst_id = instance.get("instance_id") or instance.get("uuid")
             if inst_id != instance_id:
                 cleaned_data.append(instance)

@@ -534,6 +564,47 @@ class AfterEffectsServerStub():
         if records:
             return records.pop()

+    def add_item_instead_placeholder(self, placeholder_item_id, item_id):
+        """
+            Adds item_id to layers where placeholder_item_id is present.
+
+            1 placeholder could result in multiple loaded containers (eg items)
+
+        Args:
+            placeholder_item_id (int): id of placeholder item
+            item_id (int): loaded FootageItem id
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.add_item_instead_placeholder',  # noqa
+                                         placeholder_item_id=placeholder_item_id,  # noqa
+                                         item_id=item_id))
+
+        return self._handle_return(res)
+
+    def add_placeholder(self, name, width, height, fps, duration):
+        """
+            Adds new FootageItem as a placeholder for workfile builder
+
+            Placeholder requires width etc, currently probably only hardcoded
+            values.
+
+        Args:
+            name (str)
+            width (int)
+            height (int)
+            fps (float)
+            duration (int)
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.add_placeholder',
+                                         name=name,
+                                         width=width,
+                                         height=height,
+                                         fps=fps,
+                                         duration=duration))
+
+        return self._handle_return(res)
+
     def render(self, folder_url, comp_id):
         """
             Render all renderqueueitem to 'folder_url'

@@ -632,7 +703,8 @@ class AfterEffectsServerStub():
                          d.get('file_name'),
                          d.get("instance_id"),
                          d.get("width"),
-                         d.get("height"))
+                         d.get("height"),
+                         d.get("is_placeholder"))

             ret.append(item)
         return ret
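Reviewer note: a hedged end-to-end sketch of the stub methods this diff adds; the ids and names are illustrative, only the method names and their dummy placeholder values come from the diff:

from openpype.hosts.aftereffects.api import get_stub

stub = get_stub()

# create a composition to publish from, then select it in the Project list
comp_id = stub.add_item("renderCompositionMain", "COMP")
stub.select_items([comp_id])

# placeholder FootageItem with the same dummy values the Load plugin uses
placeholder_id = stub.add_placeholder("LOADERPLACEHOLDER", 1920, 1060, 25, 10)

# after a Loader imported a real item, swap it into every comp that
# references the placeholder
stub.add_item_instead_placeholder(placeholder_id, comp_id)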
@@ -1,17 +1,15 @@
 import re

 from openpype.pipeline import get_representation_path
-from openpype.hosts.aftereffects.api import (
-    AfterEffectsLoader,
-    containerise
-)
+from openpype.hosts.aftereffects import api
+
 from openpype.hosts.aftereffects.api.lib import (
     get_background_layers,
     get_unique_layer_name,
 )


-class BackgroundLoader(AfterEffectsLoader):
+class BackgroundLoader(api.AfterEffectsLoader):
     """
         Load images from Background family
         Creates for each background separate folder with all imported images

@@ -21,6 +19,7 @@ class BackgroundLoader(AfterEffectsLoader):
         For each load container is created and stored in project (.aep)
         metadata
     """
+    label = "Load JSON Background"
     families = ["background"]
     representations = ["json"]

@@ -48,7 +47,7 @@ class BackgroundLoader(AfterEffectsLoader):
         self[:] = [comp]
         namespace = namespace or comp_name

-        return containerise(
+        return api.containerise(
             name,
             namespace,
             comp,
@@ -1,14 +1,11 @@
 import re

 from openpype.pipeline import get_representation_path
-from openpype.hosts.aftereffects.api import (
-    AfterEffectsLoader,
-    containerise
-)
+from openpype.hosts.aftereffects import api
 from openpype.hosts.aftereffects.api.lib import get_unique_layer_name


-class FileLoader(AfterEffectsLoader):
+class FileLoader(api.AfterEffectsLoader):
     """Load images

     Stores the imported asset in a container named after the asset.

@@ -64,7 +61,7 @@ class FileLoader(AfterEffectsLoader):
         self[:] = [comp]
         namespace = namespace or comp_name

-        return containerise(
+        return api.containerise(
             name,
             namespace,
             comp,
@@ -12,13 +12,13 @@ class SelectInvalidAction(pyblish.api.Action):
     icon = "search"

     def process(self, context, plugin):
-        errored_instances = get_errored_instances_from_context(context)
-        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
+        errored_instances = get_errored_instances_from_context(context,
+                                                               plugin=plugin)

         # Get the invalid nodes for the plug-ins
         self.log.info("Finding invalid nodes...")
         invalid = list()
-        for instance in instances:
+        for instance in errored_instances:
             invalid_nodes = plugin.get_invalid(instance)
             if invalid_nodes:
                 if isinstance(invalid_nodes, (list, tuple)):
@@ -134,6 +134,27 @@ def append_user_scripts():
         traceback.print_exc()


+def set_app_templates_path():
+    # Blender requires the app templates to be in `BLENDER_USER_SCRIPTS`.
+    # After running Blender, we set that variable to our custom path, so
+    # that the user can use their custom app templates.
+
+    # We look among the scripts paths for one of the paths that contains
+    # the app templates. The path must contain the subfolder
+    # `startup/bl_app_templates_user`.
+    paths = os.environ.get("OPENPYPE_BLENDER_USER_SCRIPTS").split(os.pathsep)
+
+    app_templates_path = None
+    for path in paths:
+        if os.path.isdir(
+                os.path.join(path, "startup", "bl_app_templates_user")):
+            app_templates_path = path
+            break
+
+    if app_templates_path and os.path.isdir(app_templates_path):
+        os.environ["BLENDER_USER_SCRIPTS"] = app_templates_path
+
+
 def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
     r"""Write `data` to `node` as userDefined attributes
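Reviewer note: a small illustration of the discovery rule set_app_templates_path() implements above; the paths are made up:

import os

os.environ["OPENPYPE_BLENDER_USER_SCRIPTS"] = os.pathsep.join([
    "/studio/scripts/common",    # no startup/bl_app_templates_user inside
    "/studio/scripts/blender",   # contains startup/bl_app_templates_user
])
# After set_app_templates_path() runs, Blender reads its app templates
# from the second entry:
# os.environ["BLENDER_USER_SCRIPTS"] == "/studio/scripts/blender"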
@@ -60,6 +60,7 @@ def install():
     register_creator_plugin_path(str(CREATE_PATH))

     lib.append_user_scripts()
+    lib.set_app_templates_path()

     register_event_callback("new", on_new)
     register_event_callback("open", on_open)
@@ -18,15 +18,13 @@ class SelectInvalidAction(pyblish.api.Action):
     icon = "search"  # Icon from Awesome Icon

     def process(self, context, plugin):
-        errored_instances = get_errored_instances_from_context(context)
-
-        # Apply pyblish.logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
+        errored_instances = get_errored_instances_from_context(context,
+                                                               plugin=plugin)

         # Get the invalid nodes for the plug-ins
         self.log.info("Finding invalid nodes..")
         invalid = list()
-        for instance in instances:
+        for instance in errored_instances:
             invalid_nodes = plugin.get_invalid(instance)
             if invalid_nodes:
                 if isinstance(invalid_nodes, (list, tuple)):
@@ -21,8 +21,13 @@ from .pipeline import (
     reset_selection
 )

+from .constants import (
+    OPENPYPE_TAG_NAME,
+    DEFAULT_SEQUENCE_NAME,
+    DEFAULT_BIN_NAME
+)
+
 from .lib import (
-    pype_tag_name,
     flatten,
     get_track_items,
     get_current_project,

@@ -82,8 +87,12 @@ __all__ = [
     "file_extensions",
     "work_root",

+    # Constants
+    "OPENPYPE_TAG_NAME",
+    "DEFAULT_SEQUENCE_NAME",
+    "DEFAULT_BIN_NAME",
+
     # Lib functions
-    "pype_tag_name",
     "flatten",
     "get_track_items",
     "get_current_project",
openpype/hosts/hiero/api/constants.py (new file, 3 lines)

@@ -0,0 +1,3 @@
OPENPYPE_TAG_NAME = "openpypeData"
DEFAULT_SEQUENCE_NAME = "openpypeSequence"
DEFAULT_BIN_NAME = "openpypeBin"
@@ -5,7 +5,6 @@ Host specific functions where host api is connected
 from copy import deepcopy
 import os
 import re
-import sys
 import platform
 import functools
 import warnings

@@ -29,12 +28,22 @@ from openpype.pipeline import (
 from openpype.pipeline.load import filter_containers
 from openpype.lib import Logger
 from . import tags
+from .constants import (
+    OPENPYPE_TAG_NAME,
+    DEFAULT_SEQUENCE_NAME,
+    DEFAULT_BIN_NAME
+)
 from openpype.pipeline.colorspace import (
     get_imageio_config
 )


+class _CTX:
+    has_been_setup = False
+    has_menu = False
+    parent_gui = None
+
+
 class DeprecatedWarning(DeprecationWarning):
     pass

@@ -82,23 +91,14 @@ def deprecated(new_destination):

 log = Logger.get_logger(__name__)

-self = sys.modules[__name__]
-self._has_been_setup = False
-self._has_menu = False
-self._registered_gui = None
-self._parent = None
-self.pype_tag_name = "openpypeData"
-self.default_sequence_name = "openpypeSequence"
-self.default_bin_name = "openpypeBin"
-
-
-def flatten(_list):
-    for item in _list:
-        if isinstance(item, (list, tuple)):
-            for sub_item in flatten(item):
+def flatten(list_):
+    for item_ in list_:
+        if isinstance(item_, (list, tuple)):
+            for sub_item in flatten(item_):
                 yield sub_item
         else:
-            yield item
+            yield item_


 def get_current_project(remove_untitled=False):

@@ -131,7 +131,7 @@ def get_current_sequence(name=None, new=False):

     if new:
         # create new
-        name = name or self.default_sequence_name
+        name = name or DEFAULT_SEQUENCE_NAME
         sequence = hiero.core.Sequence(name)
         root_bin.addItem(hiero.core.BinItem(sequence))
     elif name:

@@ -345,7 +345,7 @@ def get_track_item_tags(track_item):
     # collect all tags which are not openpype tag
     returning_tag_data.extend(
         tag for tag in _tags
-        if tag.name() != self.pype_tag_name
+        if tag.name() != OPENPYPE_TAG_NAME
     )

     return returning_tag_data

@@ -385,7 +385,7 @@ def set_track_openpype_tag(track, data=None):
     # if pype tag available then update with input data
     tag = tags.create_tag(
         "{}_{}".format(
-            self.pype_tag_name,
+            OPENPYPE_TAG_NAME,
             _get_tag_unique_hash()
         ),
         tag_data

@@ -412,7 +412,7 @@ def get_track_openpype_tag(track):
         return None
     for tag in _tags:
         # return only correct tag defined by global name
-        if self.pype_tag_name in tag.name():
+        if OPENPYPE_TAG_NAME in tag.name():
             return tag


@@ -484,7 +484,7 @@ def get_trackitem_openpype_tag(track_item):
         return None
     for tag in _tags:
         # return only correct tag defined by global name
-        if self.pype_tag_name in tag.name():
+        if OPENPYPE_TAG_NAME in tag.name():
             return tag


@@ -516,7 +516,7 @@ def set_trackitem_openpype_tag(track_item, data=None):
     # if pype tag available then update with input data
     tag = tags.create_tag(
         "{}_{}".format(
-            self.pype_tag_name,
+            OPENPYPE_TAG_NAME,
             _get_tag_unique_hash()
         ),
         tag_data

@@ -698,29 +698,29 @@ def setup(console=False, port=None, menu=True):
         menu (bool, optional): Display file menu in Hiero.
     """

-    if self._has_been_setup:
+    if _CTX.has_been_setup:
         teardown()

     add_submission()

     if menu:
         add_to_filemenu()
-        self._has_menu = True
+        _CTX.has_menu = True

-    self._has_been_setup = True
+    _CTX.has_been_setup = True
     log.debug("pyblish: Loaded successfully.")


 def teardown():
     """Remove integration"""
-    if not self._has_been_setup:
+    if not _CTX.has_been_setup:
         return

-    if self._has_menu:
+    if _CTX.has_menu:
         remove_from_filemenu()
-        self._has_menu = False
+        _CTX.has_menu = False

-    self._has_been_setup = False
+    _CTX.has_been_setup = False
     log.debug("pyblish: Integration torn down successfully")


@@ -928,7 +928,7 @@ def create_bin(path=None, project=None):
     # get the first loaded project
     project = project or get_current_project()

-    path = path or self.default_bin_name
+    path = path or DEFAULT_BIN_NAME

     path = path.replace("\\", "/").split("/")


@@ -1311,11 +1311,11 @@ def before_project_save(event):

 def get_main_window():
     """Acquire Nuke's main window"""
-    if self._parent is None:
+    if _CTX.parent_gui is None:
         top_widgets = QtWidgets.QApplication.topLevelWidgets()
         name = "Foundry::UI::DockMainWindow"
         main_window = next(widget for widget in top_widgets if
                            widget.inherits("QMainWindow") and
                            widget.metaObject().className() == name)
-        self._parent = main_window
-    return self._parent
+        _CTX.parent_gui = main_window
+    return _CTX.parent_gui
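Reviewer note: this file's refactor swaps mutable module state (the `self = sys.modules[__name__]` idiom) for a plain class used purely as a namespace. A minimal sketch of why that works without any `global` declarations:

class _CTX:
    has_been_setup = False


def setup():
    if _CTX.has_been_setup:   # read module-level state
        return
    # class attributes can be rebound in place from any scope
    _CTX.has_been_setup = True


setup()
assert _CTX.has_been_setup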
@@ -3,20 +3,18 @@

 import os
 import re
-import sys
 import ast
 import opentimelineio as otio
 from . import utils
 import hiero.core
 import hiero.ui

-self = sys.modules[__name__]
-self.track_types = {
+TRACK_TYPE_MAP = {
     hiero.core.VideoTrack: otio.schema.TrackKind.Video,
     hiero.core.AudioTrack: otio.schema.TrackKind.Audio
 }
-self.project_fps = None
-self.marker_color_map = {
+MARKER_COLOR_MAP = {
     "magenta": otio.schema.MarkerColor.MAGENTA,
     "red": otio.schema.MarkerColor.RED,
     "yellow": otio.schema.MarkerColor.YELLOW,

@@ -24,30 +22,21 @@
     "cyan": otio.schema.MarkerColor.CYAN,
     "blue": otio.schema.MarkerColor.BLUE,
 }
-self.timeline = None
-self.include_tags = True


-def flatten(_list):
-    for item in _list:
-        if isinstance(item, (list, tuple)):
-            for sub_item in flatten(item):
+class CTX:
+    project_fps = None
+    timeline = None
+    include_tags = True
+
+
+def flatten(list_):
+    for item_ in list_:
+        if isinstance(item_, (list, tuple)):
+            for sub_item in flatten(item_):
                 yield sub_item
         else:
-            yield item
-
-
-def get_current_hiero_project(remove_untitled=False):
-    projects = flatten(hiero.core.projects())
-    if not remove_untitled:
-        return next(iter(projects))
-
-    # if remove_untitled
-    for proj in projects:
-        if "Untitled" in proj.name():
-            proj.close()
-        else:
-            return proj
+            yield item_


 def create_otio_rational_time(frame, fps):

@@ -152,7 +141,7 @@ def create_otio_reference(clip):
     file_head = media_source.filenameHead()
     is_sequence = not media_source.singleFile()
     frame_duration = media_source.duration()
-    fps = utils.get_rate(clip) or self.project_fps
+    fps = utils.get_rate(clip) or CTX.project_fps
     extension = os.path.splitext(path)[-1]

     if is_sequence:

@@ -217,8 +206,8 @@ def get_marker_color(tag):
     res = re.search(pat, icon)
     if res:
         color = res.groupdict().get('color')
-        if color.lower() in self.marker_color_map:
-            return self.marker_color_map[color.lower()]
+        if color.lower() in MARKER_COLOR_MAP:
+            return MARKER_COLOR_MAP[color.lower()]

     return otio.schema.MarkerColor.RED


@@ -232,7 +221,7 @@ def create_otio_markers(otio_item, item):
             # Hiero adds this tag to a lot of clips
             continue

-        frame_rate = utils.get_rate(item) or self.project_fps
+        frame_rate = utils.get_rate(item) or CTX.project_fps

         marked_range = otio.opentime.TimeRange(
             start_time=otio.opentime.RationalTime(

@@ -279,7 +268,7 @@ def create_otio_clip(track_item):

     duration = int(track_item.duration())

-    fps = utils.get_rate(track_item) or self.project_fps
+    fps = utils.get_rate(track_item) or CTX.project_fps
     name = track_item.name()

     media_reference = create_otio_reference(clip)

@@ -296,7 +285,7 @@ def create_otio_clip(track_item):
     )

     # Add tags as markers
-    if self.include_tags:
+    if CTX.include_tags:
         create_otio_markers(otio_clip, track_item)
         create_otio_markers(otio_clip, track_item.source())


@@ -319,13 +308,13 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):


 def _create_otio_timeline():
-    project = get_current_hiero_project(remove_untitled=False)
-    metadata = _get_metadata(self.timeline)
+    project = CTX.timeline.project()
+    metadata = _get_metadata(CTX.timeline)

     metadata.update({
-        "openpype.timeline.width": int(self.timeline.format().width()),
-        "openpype.timeline.height": int(self.timeline.format().height()),
-        "openpype.timeline.pixelAspect": int(self.timeline.format().pixelAspect()),  # noqa
+        "openpype.timeline.width": int(CTX.timeline.format().width()),
+        "openpype.timeline.height": int(CTX.timeline.format().height()),
+        "openpype.timeline.pixelAspect": int(CTX.timeline.format().pixelAspect()),  # noqa
         "openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),  # noqa
         "openpype.project.lutSetting16Bit": project.lutSetting16Bit(),
         "openpype.project.lutSetting8Bit": project.lutSetting8Bit(),

@@ -339,10 +328,10 @@ def _create_otio_timeline():
     })

     start_time = create_otio_rational_time(
-        self.timeline.timecodeStart(), self.project_fps)
+        CTX.timeline.timecodeStart(), CTX.project_fps)

     return otio.schema.Timeline(
-        name=self.timeline.name(),
+        name=CTX.timeline.name(),
         global_start_time=start_time,
         metadata=metadata
     )

@@ -351,7 +340,7 @@ def _create_otio_timeline():
 def create_otio_track(track_type, track_name):
     return otio.schema.Track(
         name=track_name,
-        kind=self.track_types[track_type]
+        kind=TRACK_TYPE_MAP[track_type]
     )


@@ -363,7 +352,7 @@ def add_otio_gap(track_item, otio_track, prev_out):
     gap = otio.opentime.TimeRange(
         duration=otio.opentime.RationalTime(
             gap_length,
-            self.project_fps
+            CTX.project_fps
         )
     )
     otio_gap = otio.schema.Gap(source_range=gap)

@@ -396,14 +385,14 @@ def create_otio_timeline():
         return track_item.parent().items()[itemindex - 1]

     # get current timeline
-    self.timeline = hiero.ui.activeSequence()
-    self.project_fps = self.timeline.framerate().toFloat()
+    CTX.timeline = hiero.ui.activeSequence()
+    CTX.project_fps = CTX.timeline.framerate().toFloat()

     # convert timeline to otio
     otio_timeline = _create_otio_timeline()

     # loop all defined track types
-    for track in self.timeline.items():
+    for track in CTX.timeline.items():
         # skip if track is disabled
         if not track.isEnabled():
             continue

@@ -441,7 +430,7 @@ def create_otio_timeline():
         otio_track.append(otio_clip)

         # Add tags as markers
-        if self.include_tags:
+        if CTX.include_tags:
             create_otio_markers(otio_track, track)

         # add track to otio timeline
@@ -310,7 +310,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):

         # add pypedata marker to otio_clip metadata
         for marker in otio_clip.markers:
-            if phiero.pype_tag_name in marker.name:
+            if phiero.OPENPYPE_TAG_NAME in marker.name:
                 otio_clip.metadata.update(marker.metadata)
         return {"otioClip": otio_clip}
@@ -8,7 +8,6 @@ from qtpy.QtGui import QPixmap
 import hiero.ui

 from openpype.pipeline import legacy_io
-from openpype.hosts.hiero import api as phiero
 from openpype.hosts.hiero.api.otio import hiero_export


@@ -22,8 +21,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):

         asset = legacy_io.Session["AVALON_ASSET"]
         subset = "workfile"
-        project = phiero.get_current_project()
         active_timeline = hiero.ui.activeSequence()
+        project = active_timeline.project()
         fps = active_timeline.framerate().toFloat()

         # adding otio timeline to context
@@ -17,15 +17,13 @@ class SelectInvalidAction(pyblish.api.Action):

     def process(self, context, plugin):

-        errored_instances = get_errored_instances_from_context(context)
-
-        # Apply pyblish.logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
+        errored_instances = get_errored_instances_from_context(context,
+                                                               plugin=plugin)

         # Get the invalid nodes for the plug-ins
         self.log.info("Finding invalid nodes..")
         invalid = list()
-        for instance in instances:
+        for instance in errored_instances:
             invalid_nodes = plugin.get_invalid(instance)
             if invalid_nodes:
                 if isinstance(invalid_nodes, (list, tuple)):

@@ -44,3 +42,42 @@ class SelectInvalidAction(pyblish.api.Action):
                 node.setCurrent(True)
         else:
             self.log.info("No invalid nodes found.")
+
+
+class SelectROPAction(pyblish.api.Action):
+    """Select ROP.
+
+    It's used to select the associated ROPs with the errored instances.
+    """
+
+    label = "Select ROP"
+    on = "failed"  # This action is only available on a failed plug-in
+    icon = "mdi.cursor-default-click"
+
+    def process(self, context, plugin):
+        errored_instances = get_errored_instances_from_context(context, plugin)
+
+        # Get the invalid nodes for the plug-ins
+        self.log.info("Finding ROP nodes..")
+        rop_nodes = list()
+        for instance in errored_instances:
+            node_path = instance.data.get("instance_node")
+            if not node_path:
+                continue
+
+            node = hou.node(node_path)
+            if not node:
+                continue
+
+            rop_nodes.append(node)
+
+        hou.clearAllSelected()
+        if rop_nodes:
+            self.log.info("Selecting ROP nodes: {}".format(
+                ", ".join(node.path() for node in rop_nodes)
+            ))
+            for node in rop_nodes:
+                node.setSelected(True)
+                node.setCurrent(True)
+        else:
+            self.log.info("No ROP nodes found.")
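Reviewer note: a hedged sketch of how a Houdini publish plug-in would expose the new action. Only the import path comes from this diff; the validator itself is illustrative:

import pyblish.api
from openpype.hosts.houdini.api.action import SelectROPAction


class ValidateSomethingExample(pyblish.api.InstancePlugin):
    """Illustrative plug-in only, not part of this commit."""
    order = pyblish.api.ValidatorOrder
    hosts = ["houdini"]
    label = "Validate Something (example)"
    actions = [SelectROPAction]  # offered on failure, selects errored ROPs

    def process(self, instance):
        raise RuntimeError("fail so the action becomes available")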
@@ -633,23 +633,8 @@ def evalParmNoFrame(node, parm, pad_character="#"):

 def get_color_management_preferences():
     """Get default OCIO preferences"""
-    data = {
-        "config": hou.Color.ocio_configPath()
+    return {
+        "config": hou.Color.ocio_configPath(),
+        "display": hou.Color.ocio_defaultDisplay(),
+        "view": hou.Color.ocio_defaultView()
     }
-
-    # Get default display and view from OCIO
-    display = hou.Color.ocio_defaultDisplay()
-    disp_regex = re.compile(r"^(?P<name>.+-)(?P<display>.+)$")
-    disp_match = disp_regex.match(display)
-
-    view = hou.Color.ocio_defaultView()
-    view_regex = re.compile(r"^(?P<name>.+- )(?P<view>.+)$")
-    view_match = view_regex.match(view)
-    data.update({
-        "display": disp_match.group("display"),
-        "view": view_match.group("view")
-    })
-
-    return data
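Reviewer note: a behavior nuance worth flagging: the removed code stripped a "name - " prefix from the default display/view with regexes, while the simplified version returns hou's values verbatim. An illustrative result (values depend entirely on the active OCIO config, the path below is made up):

prefs = get_color_management_preferences()
# e.g. {
#     "config": "/shows/demo/ocio/config.ocio",
#     "display": "ACES",
#     "view": "sRGB",
# }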
@@ -93,7 +93,7 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
         import hdefereval  # noqa, hdefereval is only available in ui mode
         hdefereval.executeDeferred(creator_node_shelves.install)

-    def has_unsaved_changes(self):
+    def workfile_has_unsaved_changes(self):
         return hou.hipFile.hasUnsavedChanges()

     def get_workfile_extensions(self):
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating pointcache alembics."""
 from openpype.hosts.houdini.api import plugin
-from openpype.pipeline import CreatedInstance

 import hou


@@ -14,15 +13,13 @@ class CreatePointCache(plugin.HoudiniCreator):
     icon = "gears"

     def create(self, subset_name, instance_data, pre_create_data):
-        import hou
-
         instance_data.pop("active", None)
         instance_data.update({"node_type": "alembic"})

         instance = super(CreatePointCache, self).create(
             subset_name,
             instance_data,
-            pre_create_data)  # type: CreatedInstance
+            pre_create_data)

         instance_node = hou.node(instance.get("instance_node"))
         parms = {

@@ -37,13 +34,44 @@ class CreatePointCache(plugin.HoudiniCreator):
         }

         if self.selected_nodes:
-            parms["sop_path"] = self.selected_nodes[0].path()
-
-            # try to find output node
-            for child in self.selected_nodes[0].children():
-                if child.type().name() == "output":
-                    parms["sop_path"] = child.path()
-                    break
+            selected_node = self.selected_nodes[0]
+
+            # Although Houdini allows ObjNode path on `sop_path` for the
+            # the ROP node we prefer it set to the SopNode path explicitly
+
+            # Allow sop level paths (e.g. /obj/geo1/box1)
+            if isinstance(selected_node, hou.SopNode):
+                parms["sop_path"] = selected_node.path()
+                self.log.debug(
+                    "Valid SopNode selection, 'SOP Path' in ROP will be set to '%s'."
+                    % selected_node.path()
+                )
+
+            # Allow object level paths to Geometry nodes (e.g. /obj/geo1)
+            # but do not allow other object level nodes types like cameras, etc.
+            elif isinstance(selected_node, hou.ObjNode) and \
+                    selected_node.type().name() in ["geo"]:
+
+                # get the output node with the minimum
+                # 'outputidx' or the node with display flag
+                sop_path = self.get_obj_output(selected_node)
+
+                if sop_path:
+                    parms["sop_path"] = sop_path.path()
+                    self.log.debug(
+                        "Valid ObjNode selection, 'SOP Path' in ROP will be set to "
+                        "the child path '%s'."
+                        % sop_path.path()
+                    )
+
+            if not parms.get("sop_path", None):
+                self.log.debug(
+                    "Selection isn't valid. 'SOP Path' in ROP will be empty."
+                )
+        else:
+            self.log.debug(
+                "No Selection. 'SOP Path' in ROP will be empty."
+            )

         instance_node.setParms(parms)
         instance_node.parm("trange").set(1)

@@ -57,3 +85,23 @@ class CreatePointCache(plugin.HoudiniCreator):
             hou.ropNodeTypeCategory(),
             hou.sopNodeTypeCategory()
         ]
+
+    def get_obj_output(self, obj_node):
+        """Find output node with the smallest 'outputidx'."""
+
+        outputs = obj_node.subnetOutputs()
+
+        # if obj_node is empty
+        if not outputs:
+            return
+
+        # if obj_node has one output child whether its
+        # sop output node or a node with the render flag
+        elif len(outputs) == 1:
+            return outputs[0]
+
+        # if there are more than one, then it have multiple ouput nodes
+        # return the one with the minimum 'outputidx'
+        else:
+            return min(outputs,
+                       key=lambda node: node.evalParm('outputidx'))
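Reviewer note: a hedged standalone sketch of the selection-to-sop_path rule the creator now applies; node paths are illustrative and this mirrors the decision tree above rather than quoting it:

import hou


def resolve_sop_path(selected_node):
    """Mirror of the creator's decision tree, for illustration."""
    if isinstance(selected_node, hou.SopNode):
        return selected_node.path()            # e.g. /obj/geo1/box1
    if isinstance(selected_node, hou.ObjNode) \
            and selected_node.type().name() == "geo":
        outputs = selected_node.subnetOutputs()
        if outputs:
            # prefer the output node with the smallest outputidx
            return min(
                outputs, key=lambda n: n.evalParm("outputidx")
            ).path()
    return None                                # ROP's SOP Path stays empty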
@@ -3,12 +3,12 @@
 import hou  # noqa

 from openpype.hosts.houdini.api import plugin
-from openpype.pipeline import CreatedInstance
+from openpype.lib import EnumDef


 class CreateRedshiftROP(plugin.HoudiniCreator):
     """Redshift ROP"""

     identifier = "io.openpype.creators.houdini.redshift_rop"
     label = "Redshift ROP"
     family = "redshift_rop"

@@ -28,7 +28,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
         instance = super(CreateRedshiftROP, self).create(
             subset_name,
             instance_data,
-            pre_create_data)  # type: CreatedInstance
+            pre_create_data)

         instance_node = hou.node(instance.get("instance_node"))


@@ -57,6 +57,8 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
             fmt="${aov}.$F4.{ext}".format(aov="AOV", ext=ext)
         )

+        ext_format_index = {"exr": 0, "tif": 1, "jpg": 2, "png": 3}
+
         parms = {
             # Render frame range
             "trange": 1,

@@ -64,6 +66,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
             "RS_outputFileNamePrefix": filepath,
             "RS_outputMultilayerMode": "1",  # no multi-layered exr
             "RS_outputBeautyAOVSuffix": "beauty",
+            "RS_outputFileFormat": ext_format_index[ext],
         }

         if self.selected_nodes:

@@ -93,8 +96,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
     def get_pre_create_attr_defs(self):
        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
        image_format_enum = [
-            "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
-            "rad", "rat", "rta", "sgi", "tga", "tif",
+            "exr", "tif", "jpg", "png",
        ]

        return attrs + [
@ -19,7 +19,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
            "Collected filename from current scene name."
        )

        if host.has_unsaved_changes():
        if host.workfile_has_unsaved_changes():
            self.log.info("Saving current file: {}".format(current_file))
            host.save_workfile(current_file)
        else:
@ -73,6 +73,14 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
        cls.log.debug("Checking Primitive to Detail pattern: %s" % pattern)
        cls.log.debug("Checking with path attribute: %s" % path_attr)

        if not hasattr(output_node, "geometry"):
            # In the case someone has explicitly set an Object
            # node instead of a SOP node in Geometry context
            # then for now we ignore - this allows us to also
            # export object transforms.
            cls.log.warning("No geometry output node found, skipping check..")
            return

        # Check if the primitive attribute exists
        frame = instance.data.get("frameStart", 0)
        geo = output_node.geometryAtFrame(frame)
@ -60,6 +60,14 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):

        cls.log.debug("Checking for attribute: %s" % path_attr)

        if not hasattr(output_node, "geometry"):
            # In the case someone has explicitly set an Object
            # node instead of a SOP node in Geometry context
            # then for now we ignore - this allows us to also
            # export object transforms.
            cls.log.warning("No geometry output node found, skipping check..")
            return

        # Check if the primitive attribute exists
        frame = instance.data.get("frameStart", 0)
        geo = output_node.geometryAtFrame(frame)
@ -1,6 +1,12 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.hosts.houdini.api.action import (
    SelectInvalidAction,
    SelectROPAction,
)

import hou


class ValidateSopOutputNode(pyblish.api.InstancePlugin):

@ -19,6 +25,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
    families = ["pointcache", "vdbcache"]
    hosts = ["houdini"]
    label = "Validate Output Node"
    actions = [SelectROPAction, SelectInvalidAction]

    def process(self, instance):

@ -31,9 +38,6 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):

    @classmethod
    def get_invalid(cls, instance):

        import hou

        output_node = instance.data.get("output_node")

        if output_node is None:

@ -43,7 +47,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
                "Ensure a valid SOP output path is set." % node.path()
            )

            return [node.path()]
            return [node]

        # Output node must be a Sop node.
        if not isinstance(output_node, hou.SopNode):

@ -53,7 +57,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
                "instead found category type: %s"
                % (output_node.path(), output_node.type().category().name())
            )
            return [output_node.path()]
            return [output_node]

        # For the sake of completeness also assert the category type
        # is Sop to avoid potential edge case scenarios even though

@ -73,11 +77,11 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
        except hou.Error as exc:
            cls.log.error("Cook failed: %s" % exc)
            cls.log.error(output_node.errors()[0])
            return [output_node.path()]
            return [output_node]

        # Ensure the output node has at least Geometry data
        if not output_node.geometry():
            cls.log.error(
                "Output node `%s` has no geometry data." % output_node.path()
            )
            return [output_node.path()]
            return [output_node]
@ -53,6 +53,8 @@ def update_mode_context(mode):

def get_geometry_at_frame(sop_node, frame, force=True):
    """Return geometry at frame but force a cooked value."""
    if not hasattr(sop_node, "geometry"):
        return
    with update_mode_context(hou.updateMode.AutoUpdate):
        sop_node.cook(force=force, frame_range=(frame, frame))
    return sop_node.geometryAtFrame(frame)
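A short usage sketch for the helper above (the SOP path is an assumption; runs only inside Houdini):

import hou

sop = hou.node("/obj/geo1/output0")  # hypothetical output SOP
geo = get_geometry_at_frame(sop, frame=1001)
if geo is not None:
    # the geometry has been force-cooked, so it can be inspected reliably
    print(len(geo.prims()))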
@ -1,30 +1,27 @@
# -*- coding: utf-8 -*-
"""Library of functions useful for 3dsmax pipeline."""
import json
import six
from pymxs import runtime as rt
from typing import Union
import contextlib
import json
from typing import Any, Dict, Union

import six
from openpype.pipeline.context_tools import (
    get_current_project_asset,
    get_current_project
)

    get_current_project, get_current_project_asset,)
from pymxs import runtime as rt

JSON_PREFIX = "JSON::"


def imprint(node_name: str, data: dict) -> bool:
    node = rt.getNodeByName(node_name)
    node = rt.GetNodeByName(node_name)
    if not node:
        return False

    for k, v in data.items():
        if isinstance(v, (dict, list)):
            rt.setUserProp(node, k, f'{JSON_PREFIX}{json.dumps(v)}')
            rt.SetUserProp(node, k, f"{JSON_PREFIX}{json.dumps(v)}")
        else:
            rt.setUserProp(node, k, v)
            rt.SetUserProp(node, k, v)

    return True
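A round-trip sketch of the user-property storage above (inside 3ds Max via pymxs; the node is an assumption). Dict and list values are stored behind the JSON:: prefix and decoded again by read(), shown further below:

from pymxs import runtime as rt

node = rt.Container(name="demo_container")  # hypothetical node
imprint(node.name, {"creator_identifier": "demo", "families": ["model"]})
# the raw user property keeps the JSON:: prefix
print(rt.GetUserProp(node, "families"))   # JSON::["model"]
# read() strips the prefix and decodes the JSON payload again
print(read(node)["families"])             # ['model']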
@ -44,7 +41,7 @@ def lsattr(
    Returns:
        list of nodes.
    """
    root = rt.rootnode if root is None else rt.getNodeByName(root)
    root = rt.RootNode if root is None else rt.GetNodeByName(root)

    def output_node(node, nodes):
        nodes.append(node)

@ -55,16 +52,16 @@ def lsattr(
    output_node(root, nodes)
    return [
        n for n in nodes
        if rt.getUserProp(n, attr) == value
        if rt.GetUserProp(n, attr) == value
    ] if value else [
        n for n in nodes
        if rt.getUserProp(n, attr)
        if rt.GetUserProp(n, attr)
    ]


def read(container) -> dict:
    data = {}
    props = rt.getUserPropBuffer(container)
    props = rt.GetUserPropBuffer(container)
    # this shouldn't happen but let's guard against it anyway
    if not props:
        return data

@ -79,29 +76,33 @@ def read(container) -> dict:
        value = value.strip()
        if isinstance(value.strip(), six.string_types) and \
                value.startswith(JSON_PREFIX):
            try:
            with contextlib.suppress(json.JSONDecodeError):
                value = json.loads(value[len(JSON_PREFIX):])
            except json.JSONDecodeError:
                # not a json
                pass

        # default value behavior
        # convert maxscript boolean values
        if value == "true":
            value = True
        elif value == "false":
            value = False

        data[key.strip()] = value

    data["instance_node"] = container.name
    data["instance_node"] = container.Name

    return data


@contextlib.contextmanager
def maintained_selection():
    previous_selection = rt.getCurrentSelection()
    previous_selection = rt.GetCurrentSelection()
    try:
        yield
    finally:
        if previous_selection:
            rt.select(previous_selection)
            rt.Select(previous_selection)
        else:
            rt.select()
            rt.Select()
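A usage sketch for the context manager above (the node name is an assumption; runs inside 3ds Max):

from pymxs import runtime as rt

rt.Select(rt.GetNodeByName("box001"))  # hypothetical node
with maintained_selection():
    rt.ClearSelection()                # scratch selection work in here
# on exit the previous selection is restored, even if an error was raised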
def get_all_children(parent, node_type=None):

@ -123,7 +124,7 @@ def get_all_children(parent, node_type=None):
        return children
    child_list = list_children(parent)

    return ([x for x in child_list if rt.superClassOf(x) == node_type]
    return ([x for x in child_list if rt.SuperClassOf(x) == node_type]
            if node_type else child_list)


@ -182,7 +183,7 @@ def set_scene_resolution(width: int, height: int):
    """
    # make sure the render dialog is closed
    # for the update of resolution
    # Changing the Render Setup dialog settingsshould be done
    # Changing the Render Setup dialog settings should be done
    # with the actual Render Setup dialog in a closed state.
    if rt.renderSceneDialog.isOpen():
        rt.renderSceneDialog.close()

@ -190,6 +191,7 @@ def set_scene_resolution(width: int, height: int):
    rt.renderWidth = width
    rt.renderHeight = height


def reset_scene_resolution():
    """Apply the scene resolution from the project definition

@ -212,7 +214,7 @@ def reset_scene_resolution():
    set_scene_resolution(width, height)


def get_frame_range() -> dict:
def get_frame_range() -> Union[Dict[str, Any], None]:
    """Get the current asset's frame range and handles.

    Returns:

@ -256,10 +258,7 @@ def reset_frame_range(fps: bool = True):
        frame_range["handleStart"]
    )
    frame_end_handle = frame_range["frameEnd"] + int(frame_range["handleEnd"])
    frange_cmd = (
        f"animationRange = interval {frame_start_handle} {frame_end_handle}"
    )
    rt.execute(frange_cmd)
    set_timeline(frame_start_handle, frame_end_handle)
    set_render_frame_range(frame_start_handle, frame_end_handle)


@ -289,5 +288,27 @@ def get_max_version():
    #(25000, 62, 0, 25, 0, 0, 997, 2023, "")
    max_info[7] = max version date
    """
    max_info = rt.maxversion()
    max_info = rt.MaxVersion()
    return max_info[7]


@contextlib.contextmanager
def viewport_camera(camera):
    original = rt.viewport.getCamera()
    if not original:
        # if there is no original camera
        # use the current camera as original
        original = rt.getNodeByName(camera)
    review_camera = rt.getNodeByName(camera)
    try:
        rt.viewport.setCamera(review_camera)
        yield
    finally:
        rt.viewport.setCamera(original)


def set_timeline(frameStart, frameEnd):
    """Set frame range for timeline editor in Max."""
    rt.animationRange = rt.interval(frameStart, frameEnd)
    return rt.animationRange
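For illustration, how the two helpers added above combine (frame values and camera name are assumptions; runs inside 3ds Max):

from pymxs import runtime as rt

set_timeline(1001, 1100)
print(rt.animationRange)            # (interval 1001 1100)

with viewport_camera("renderCam"):  # hypothetical camera node name
    rt.redrawViews()                # viewport looks through renderCam here
# the previously active viewport camera is restored afterwards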
@ -33,6 +33,100 @@ class RenderProducts(object):
                output_file, start_frame, end_frame, img_fmt
            )
        }

    def get_multiple_beauty(self, outputs, cameras):
        beauty_output_frames = dict()
        for output, camera in zip(outputs, cameras):
            filename, ext = os.path.splitext(output)
            start_frame = int(rt.rendStart)
            end_frame = int(rt.rendEnd) + 1
            new_beauty = self.get_expected_beauty(
                filename, start_frame, end_frame, ext
            )
            beauty_output_frames.update({
                f"{camera}_beauty": new_beauty
            })
        return beauty_output_frames

    def get_multiple_aovs(self, outputs, cameras):
        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]
        aovs_frames = {}
        for output, camera in zip(outputs, cameras):
            filename, ext = os.path.splitext(output)
            start_frame = int(rt.rendStart)
            end_frame = int(rt.rendEnd) + 1

            if renderer in [
                "ART_Renderer",
                "V_Ray_6_Hotfix_3",
                "V_Ray_GPU_6_Hotfix_3",
                "Default_Scanline_Renderer",
                "Quicksilver_Hardware_Renderer",
            ]:
                render_name = self.get_render_elements_name()
                if render_name:
                    for name in render_name:
                        aovs_frames.update({
                            f"{camera}_{name}": (
                                self.get_expected_render_elements(
                                    filename, name, start_frame,
                                    end_frame, ext)
                            )
                        })
            elif renderer == "Redshift_Renderer":
                render_name = self.get_render_elements_name()
                if render_name:
                    rs_aov_files = rt.Execute("renderers.current.separateAovFiles")
                    # this doesn't work, always returns False
                    # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles
                    if ext == "exr" and not rs_aov_files:
                        for name in render_name:
                            if name == "RsCryptomatte":
                                aovs_frames.update({
                                    f"{camera}_{name}": (
                                        self.get_expected_render_elements(
                                            filename, name, start_frame,
                                            end_frame, ext)
                                    )
                                })
                    else:
                        for name in render_name:
                            aovs_frames.update({
                                f"{camera}_{name}": (
                                    self.get_expected_render_elements(
                                        filename, name, start_frame,
                                        end_frame, ext)
                                )
                            })

            elif renderer == "Arnold":
                render_name = self.get_arnold_product_name()
                if render_name:
                    for name in render_name:
                        aovs_frames.update({
                            f"{camera}_{name}": (
                                self.get_expected_arnold_product(
                                    filename, name, start_frame,
                                    end_frame, ext)
                            )
                        })
            elif renderer in [
                "V_Ray_6_Hotfix_3",
                "V_Ray_GPU_6_Hotfix_3"
            ]:
                if ext != "exr":
                    render_name = self.get_render_elements_name()
                    if render_name:
                        for name in render_name:
                            aovs_frames.update({
                                f"{camera}_{name}": (
                                    self.get_expected_render_elements(
                                        filename, name, start_frame,
                                        end_frame, ext)
                                )
                            })

        return aovs_frames

    def get_aovs(self, container):
        render_dir = os.path.dirname(rt.rendOutputFilename)

@ -124,7 +218,7 @@ class RenderProducts(object):
        """Get all the Arnold AOVs name"""
        aov_name = []

        amw = rt.MaxtoAOps.AOVsManagerWindow()
        amw = rt.MaxToAOps.AOVsManagerWindow()
        aov_mgr = rt.renderers.current.AOVManager
        # Check if there is any aov group set in AOV manager
        aov_group_num = len(aov_mgr.drivers)
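For context, a sketch of the shape these helpers return: one key per camera/product pair mapped to a list of expected frame files (paths, camera names, and AOV names are assumptions):

products = RenderProducts()
outputs = ["C:/renders/renderMain_camA..exr"]  # from create_batch_render_layer
cameras = ["camA"]

expected = products.get_multiple_beauty(outputs, cameras)
# {'camA_beauty': ['C:/renders/renderMain_camA.1001.exr', ...]}
expected.update(products.get_multiple_aovs(outputs, cameras))
# adds e.g. {'camA_RsCryptomatte': [...]} depending on the current renderer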
@ -34,10 +34,6 @@ class RenderSettings(object):
            legacy_io.Session["AVALON_PROJECT"]
        )

    def set_render_camera(self, selection):
        # to avoid Attribute Error from pymxs wrapper
        return rt.viewport.setCamera(selection[0])

    def render_output(self, container):
        folder = rt.maxFilePath
        # hard-coded, should be customized in the setting

@ -70,7 +66,7 @@ class RenderSettings(object):
        output = os.path.join(output_dir, container)
        try:
            aov_separator = self._aov_chars[(
                self._project_settings["maya"]
                self._project_settings["max"]
                ["RenderSettings"]
                ["aov_separator"]
            )]

@ -162,3 +158,41 @@ class RenderSettings(object):
            orig_render_elem.append(render_element)

        return orig_render_elem

    def get_batch_render_elements(self, container,
                                  output_dir, cameras):
        render_element_list = list()
        output = os.path.join(output_dir, container)
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 0:
            return
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        for cam in cameras:
            for i in range(render_elem_num):
                renderlayer_name = render_elem.GetRenderElement(i)
                target, renderpass = str(renderlayer_name).split(":")
                aov_name = "{0}_{1}_{2}..{3}".format(
                    output, cam, renderpass, img_fmt)
                render_element_list.append(aov_name)
        return render_element_list

    def create_batch_render_layer(self, container,
                                  output_dir, cameras):
        outputs = list()
        output = os.path.join(output_dir, container)
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        for cam in cameras:
            camera = rt.getNodeByName(cam)
            layer_no = rt.batchRenderMgr.FindView(cam)
            renderlayer = None
            if layer_no is None:
                renderlayer = rt.batchRenderMgr.CreateView(camera)
            else:
                renderlayer = rt.batchRenderMgr.GetView(layer_no)
            # use camera name as renderlayer name
            renderlayer.name = cam
            renderlayer.outputFilename = "{0}_{1}..{2}".format(
                output, cam, img_fmt)
            outputs.append(renderlayer.outputFilename)
        return outputs
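A usage sketch for the batch-render helpers above (container and camera names are assumptions; runs inside 3ds Max):

from pymxs import runtime as rt

settings = RenderSettings()
outputs = settings.create_batch_render_layer(
    "renderMain", "C:/renders", ["camA", "camB"])
# one batch render view per camera, named after it
print(rt.batchRenderMgr.numViews)   # 2
print(outputs[0])                   # e.g. C:/renders/renderMain_camA..png
aovs = settings.get_batch_render_elements(
    "renderMain", "C:/renders", ["camA", "camB"])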
@ -1,15 +1,109 @@
# -*- coding: utf-8 -*-
"""3dsmax specific Avalon/Pyblish plugin definitions."""
from pymxs import runtime as rt
import six
from abc import ABCMeta
from openpype.pipeline import (
    CreatorError,
    Creator,
    CreatedInstance
)

import six
from pymxs import runtime as rt

from openpype.lib import BoolDef
from .lib import imprint, read, lsattr
from openpype.pipeline import CreatedInstance, Creator, CreatorError

from .lib import imprint, lsattr, read

MS_CUSTOM_ATTRIB = """attributes "openPypeData"
(
    parameters main rollout:OPparams
    (
        all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on
    )

    rollout OPparams "OP Parameters"
    (
        listbox list_node "Node References" items:#()
        button button_add "Add to Container"
        button button_del "Delete from Container"

        fn node_to_name the_node =
        (
            handle = the_node.handle
            obj_name = the_node.name
            handle_name = obj_name + "<" + handle as string + ">"
            return handle_name
        )

        on button_add pressed do
        (
            current_selection = selectByName title:"Select Objects to add to
the Container" buttontext:"Add"
            if current_selection == undefined then return False
            temp_arr = #()
            i_node_arr = #()
            for c in current_selection do
            (
                handle_name = node_to_name c
                node_ref = NodeTransformMonitor node:c
                idx = finditem list_node.items handle_name
                if idx do (
                    continue
                )
                append temp_arr handle_name
                append i_node_arr node_ref
            )
            all_handles = join i_node_arr all_handles
            list_node.items = join temp_arr list_node.items
        )

        on button_del pressed do
        (
            current_selection = selectByName title:"Select Objects to remove
from the Container" buttontext:"Remove"
            if current_selection == undefined then return False
            temp_arr = #()
            i_node_arr = #()
            new_i_node_arr = #()
            new_temp_arr = #()

            for c in current_selection do
            (
                node_ref = NodeTransformMonitor node:c as string
                handle_name = node_to_name c
                tmp_all_handles = #()
                for i in all_handles do
                (
                    tmp = i as string
                    append tmp_all_handles tmp
                )
                idx = finditem tmp_all_handles node_ref
                if idx do
                (
                    new_i_node_arr = DeleteItem all_handles idx
                )
                idx = finditem list_node.items handle_name
                if idx do
                (
                    new_temp_arr = DeleteItem list_node.items idx
                )
            )
            all_handles = join i_node_arr new_i_node_arr
            list_node.items = join temp_arr new_temp_arr
        )

        on OPparams open do
        (
            if all_handles.count != 0 then
            (
                temp_arr = #()
                for x in all_handles do
                (
                    handle_name = node_to_name x.node
                    append temp_arr handle_name
                )
                list_node.items = temp_arr
            )
        )
    )
)"""


class OpenPypeCreatorError(CreatorError):
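The MaxScript block above defines an openPypeData custom attribute whose all_handles tab stores node references; a minimal sketch of attaching it and reading members back (node names are assumptions; runs inside 3ds Max):

from pymxs import runtime as rt

container = rt.Container(name="demo_instance")   # hypothetical container
attrs = rt.Execute(MS_CUSTOM_ATTRIB)             # compile the attribute block
rt.custAttributes.add(container.baseObject, attrs)

# store a reference to a scene node, then resolve it back
monitor = rt.NodeTransformMonitor(node=rt.GetNodeByName("box001"))
rt.setProperty(container.openPypeData, "all_handles", [monitor])
members = [handle.node for handle in container.openPypeData.all_handles]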
@ -20,28 +114,40 @@ class MaxCreatorBase(object):

    @staticmethod
    def cache_subsets(shared_data):
        if shared_data.get("max_cached_subsets") is None:
            shared_data["max_cached_subsets"] = {}
            cached_instances = lsattr("id", "pyblish.avalon.instance")
            for i in cached_instances:
                creator_id = rt.getUserProp(i, "creator_identifier")
                if creator_id not in shared_data["max_cached_subsets"]:
                    shared_data["max_cached_subsets"][creator_id] = [i.name]
                else:
                    shared_data[
                        "max_cached_subsets"][creator_id].append(i.name)  # noqa
        if shared_data.get("max_cached_subsets") is not None:
            return shared_data

        shared_data["max_cached_subsets"] = {}
        cached_instances = lsattr("id", "pyblish.avalon.instance")
        for i in cached_instances:
            creator_id = rt.GetUserProp(i, "creator_identifier")
            if creator_id not in shared_data["max_cached_subsets"]:
                shared_data["max_cached_subsets"][creator_id] = [i.name]
            else:
                shared_data[
                    "max_cached_subsets"][creator_id].append(i.name)
        return shared_data

    @staticmethod
    def create_instance_node(node_name: str, parent: str = ""):
        parent_node = rt.getNodeByName(parent) if parent else rt.rootScene
        if not parent_node:
            raise OpenPypeCreatorError(f"Specified parent {parent} not found")
    def create_instance_node(node):
        """Create instance node.

        container = rt.container(name=node_name)
        container.Parent = parent_node
        If the supplied node is an existing node, it will be used to hold
        the instance, otherwise a new Container node will be created.

        return container
        Args:
            node (rt.MXSWrapperBase, str): Node or node name to use.

        Returns:
            instance
        """
        if isinstance(node, str):
            node = rt.Container(name=node)

        attrs = rt.Execute(MS_CUSTOM_ATTRIB)
        rt.custAttributes.add(node.baseObject, attrs)

        return node


@six.add_metaclass(ABCMeta)

@ -50,7 +156,7 @@ class MaxCreator(Creator, MaxCreatorBase):

    def create(self, subset_name, instance_data, pre_create_data):
        if pre_create_data.get("use_selection"):
            self.selected_nodes = rt.getCurrentSelection()
            self.selected_nodes = rt.GetCurrentSelection()

        instance_node = self.create_instance_node(subset_name)
        instance_data["instance_node"] = instance_node.name

@ -60,8 +166,16 @@ class MaxCreator(Creator, MaxCreatorBase):
            instance_data,
            self
        )
        for node in self.selected_nodes:
            node.Parent = instance_node
        if pre_create_data.get("use_selection"):

            node_list = []
            for i in self.selected_nodes:
                node_ref = rt.NodeTransformMonitor(node=i)
                node_list.append(node_ref)

            # Setting the property
            rt.setProperty(
                instance_node.openPypeData, "all_handles", node_list)

        self._add_instance_to_context(instance)
        imprint(instance_node.name, instance.data_to_store())

@ -70,10 +184,9 @@ class MaxCreator(Creator, MaxCreatorBase):

    def collect_instances(self):
        self.cache_subsets(self.collection_shared_data)
        for instance in self.collection_shared_data[
                "max_cached_subsets"].get(self.identifier, []):
        for instance in self.collection_shared_data["max_cached_subsets"].get(self.identifier, []):  # noqa
            created_instance = CreatedInstance.from_existing(
                read(rt.getNodeByName(instance)), self
                read(rt.GetNodeByName(instance)), self
            )
            self._add_instance_to_context(created_instance)

@ -98,12 +211,12 @@ class MaxCreator(Creator, MaxCreatorBase):

        """
        for instance in instances:
            instance_node = rt.getNodeByName(
            instance_node = rt.GetNodeByName(
                instance.data.get("instance_node"))
            if instance_node:
                rt.select(instance_node)
                rt.execute(f'for o in selection do for c in o.children do c.parent = undefined')  # noqa
                rt.delete(instance_node)
                count = rt.custAttributes.count(instance_node)
                rt.custAttributes.delete(instance_node, count)
                rt.Delete(instance_node)

            self._remove_instance_from_context(instance)
@ -1,26 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance


class CreateCamera(plugin.MaxCreator):
    """Creator plugin for Camera."""
    identifier = "io.openpype.creators.max.camera"
    label = "Camera"
    family = "camera"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = list(rt.selection)
        instance = super(CreateCamera, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container = rt.getNodeByName(instance.data.get("instance_node"))
        # TODO: Disable "Add to Containers?" Panel
        # parent the selected cameras into the container
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))
@ -1,26 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating raw max scene."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance


class CreateMaxScene(plugin.MaxCreator):
    """Creator plugin for 3ds max scenes."""
    identifier = "io.openpype.creators.max.maxScene"
    label = "Max Scene"
    family = "maxScene"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = list(rt.selection)
        instance = super(CreateMaxScene, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container = rt.getNodeByName(instance.data.get("instance_node"))
        # TODO: Disable "Add to Containers?" Panel
        # parent the selected cameras into the container
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))
@ -1,28 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for model."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance


class CreateModel(plugin.MaxCreator):
    """Creator plugin for Model."""
    identifier = "io.openpype.creators.max.model"
    label = "Model"
    family = "model"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        instance = super(CreateModel, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container = rt.getNodeByName(instance.data.get("instance_node"))
        # TODO: Disable "Add to Containers?" Panel
        # parent the selected cameras into the container
        sel_obj = None
        if self.selected_nodes:
            sel_obj = list(self.selected_nodes)
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))
@ -1,22 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance


class CreatePointCache(plugin.MaxCreator):
    """Creator plugin for Point caches."""
    identifier = "io.openpype.creators.max.pointcache"
    label = "Point Cache"
    family = "pointcache"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        # from pymxs import runtime as rt

        _ = super(CreatePointCache, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance

        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))
@ -1,26 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating point cloud."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance


class CreatePointCloud(plugin.MaxCreator):
    """Creator plugin for Point Clouds."""
    identifier = "io.openpype.creators.max.pointcloud"
    label = "Point Cloud"
    family = "pointcloud"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = list(rt.selection)
        instance = super(CreatePointCloud, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container = rt.getNodeByName(instance.data.get("instance_node"))
        # TODO: Disable "Add to Containers?" Panel
        # parent the selected cameras into the container
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))
@ -9,10 +9,3 @@ class CreateRedshiftProxy(plugin.MaxCreator):
    label = "Redshift Proxy"
    family = "redshiftproxy"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):

        _ = super(CreateRedshiftProxy, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
@ -2,11 +2,11 @@
"""Creator plugin for creating camera."""
import os
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings


class CreateRender(plugin.MaxCreator):
    """Creator plugin for Renders."""
    identifier = "io.openpype.creators.max.render"
    label = "Render"
    family = "maxrender"

@ -14,40 +14,18 @@ class CreateRender(plugin.MaxCreator):

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = [
            c for c in rt.Objects
            if rt.classOf(c) in rt.Camera.classes]

        file = rt.maxFileName
        filename, _ = os.path.splitext(file)
        instance_data["AssetName"] = filename
        num_of_renderlayer = rt.batchRenderMgr.numViews
        if num_of_renderlayer > 0:
            rt.batchRenderMgr.DeleteView(num_of_renderlayer)

        instance = super(CreateRender, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        container_name = instance.data.get("instance_node")
        # TODO: Disable "Add to Containers?" Panel
        # parent the selected cameras into the container
        if self.selected_nodes:
            # set viewport camera for
            # rendering (mandatory for deadline)
            sel_obj = [
                c for c in rt.getCurrentSelection()
                if rt.classOf(c) in rt.Camera.classes]

            if not sel_obj:
                raise RuntimeError("Please add at least one camera to the scene "
                                   "before creating the render instance")

            RenderSettings().set_render_camera(sel_obj)
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))

        # make sure the render dialog is closed
        # for the update of resolution
        # Changing the Render Setup dialog settings should be done
        # with the actual Render Setup dialog in a closed state.

        # set output paths for rendering (mandatory for deadline)
        RenderSettings().render_output(container_name)
57
openpype/hosts/max/plugins/create/create_review.py
Normal file

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating review in Max."""
from openpype.hosts.max.api import plugin
from openpype.lib import BoolDef, EnumDef, NumberDef


class CreateReview(plugin.MaxCreator):
    """Review in 3dsMax"""

    identifier = "io.openpype.creators.max.review"
    label = "Review"
    family = "review"
    icon = "video-camera"

    def create(self, subset_name, instance_data, pre_create_data):

        instance_data["imageFormat"] = pre_create_data.get("imageFormat")
        instance_data["keepImages"] = pre_create_data.get("keepImages")
        instance_data["percentSize"] = pre_create_data.get("percentSize")
        instance_data["rndLevel"] = pre_create_data.get("rndLevel")

        super(CreateReview, self).create(
            subset_name,
            instance_data,
            pre_create_data)

    def get_pre_create_attr_defs(self):
        attrs = super(CreateReview, self).get_pre_create_attr_defs()

        image_format_enum = [
            "bmp", "cin", "exr", "jpg", "hdr", "rgb", "png",
            "rla", "rpf", "dds", "sgi", "tga", "tif", "vrimg"
        ]

        rndLevel_enum = [
            "smoothhighlights", "smooth", "facethighlights",
            "facet", "flat", "litwireframe", "wireframe", "box"
        ]

        return attrs + [
            BoolDef("keepImages",
                    label="Keep Image Sequences",
                    default=False),
            EnumDef("imageFormat",
                    image_format_enum,
                    default="png",
                    label="Image Format Options"),
            NumberDef("percentSize",
                      label="Percent of Output",
                      default=100,
                      minimum=1,
                      decimals=0),
            EnumDef("rndLevel",
                    rndLevel_enum,
                    default="smoothhighlights",
                    label="Preference")
        ]
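For context, a sketch of how the pre-create attribute definitions above surface at create time (values are assumptions):

pre_create_data = {
    "use_selection": True,
    "imageFormat": "png",
    "keepImages": False,
    "percentSize": 100,
    "rndLevel": "smoothhighlights",
}
# CreateReview.create() copies these into instance_data, so the collector
# later reads e.g. instance_data["imageFormat"] == "png" for the preview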
@ -1,14 +1,12 @@
import os
from openpype.pipeline import (
    load,
    get_representation_path
)

from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.pipeline import get_representation_path, load


class FbxLoader(load.LoaderPlugin):
    """Fbx Loader"""
    """Fbx Loader."""

    families = ["camera"]
    representations = ["fbx"]

@ -24,17 +22,17 @@ class FbxLoader(load.LoaderPlugin):
        rt.FBXImporterSetParam("Camera", True)
        rt.FBXImporterSetParam("AxisConversionMethod", True)
        rt.FBXImporterSetParam("Preserveinstances", True)
        rt.importFile(
        rt.ImportFile(
            filepath,
            rt.name("noPrompt"),
            using=rt.FBXIMP)

        container = rt.getNodeByName(f"{name}")
        container = rt.GetNodeByName(f"{name}")
        if not container:
            container = rt.container()
            container = rt.Container()
            container.name = f"{name}"

        for selection in rt.getCurrentSelection():
        for selection in rt.GetCurrentSelection():
            selection.Parent = container

        return containerise(

@ -44,18 +42,33 @@ class FbxLoader(load.LoaderPlugin):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])
        node = rt.GetNodeByName(container["instance_node"])
        rt.Select(node.Children)
        fbx_reimport_cmd = (
            f"""

        fbx_objects = self.get_container_children(node)
        for fbx_object in fbx_objects:
            fbx_object.source = path
FBXImporterSetParam "Animation" true
FBXImporterSetParam "Cameras" true
FBXImporterSetParam "AxisConversionMethod" true
FbxExporterSetParam "UpAxis" "Y"
FbxExporterSetParam "Preserveinstances" true

importFile @"{path}" #noPrompt using:FBXIMP
        """)
        rt.Execute(fbx_reimport_cmd)

        with maintained_selection():
            rt.Select(node)

        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)
@ -1,13 +1,12 @@
import os
from openpype.pipeline import (
    load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise

from openpype.hosts.max.api import lib
from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load


class MaxSceneLoader(load.LoaderPlugin):
    """Max Scene Loader"""
    """Max Scene Loader."""

    families = ["camera",
                "maxScene",

@ -23,23 +22,11 @@ class MaxSceneLoader(load.LoaderPlugin):
        path = os.path.normpath(self.fname)
        # import the max scene by using "merge file"
        path = path.replace('\\', '/')

        merge_before = {
            c for c in rt.rootNode.Children
            if rt.classOf(c) == rt.Container
        }
        rt.mergeMaxFile(path)

        merge_after = {
            c for c in rt.rootNode.Children
            if rt.classOf(c) == rt.Container
        }
        max_containers = merge_after.difference(merge_before)

        if len(max_containers) != 1:
            self.log.error("Something failed when loading.")

        max_container = max_containers.pop()
        rt.MergeMaxFile(path)
        max_objects = rt.getLastMergedNodes()
        max_container = rt.Container(name=f"{name}")
        for max_object in max_objects:
            max_object.Parent = max_container

        return containerise(
            name, [max_container], context, loader=self.__class__.__name__)

@ -48,17 +35,27 @@ class MaxSceneLoader(load.LoaderPlugin):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])
        max_objects = node.Children
        node_name = container["instance_node"]

        rt.MergeMaxFile(path,
                        rt.Name("noRedraw"),
                        rt.Name("deleteOldDups"),
                        rt.Name("useSceneMtlDups"))

        max_objects = rt.getLastMergedNodes()
        container_node = rt.GetNodeByName(node_name)
        for max_object in max_objects:
            max_object.source = path
            max_object.Parent = container_node

        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)
@ -54,22 +54,22 @@ class ModelAbcLoader(load.LoaderPlugin):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])
        rt.select(node.Children)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Select(node.Children)

        for alembic in rt.selection:
            abc = rt.getNodeByName(alembic.name)
            rt.select(abc.Children)
            for abc_con in rt.selection:
                container = rt.getNodeByName(abc_con.name)
        for alembic in rt.Selection:
            abc = rt.GetNodeByName(alembic.name)
            rt.Select(abc.Children)
            for abc_con in rt.Selection:
                container = rt.GetNodeByName(abc_con.name)
                container.source = path
                rt.select(container.Children)
                for abc_obj in rt.selection:
                    alembic_obj = rt.getNodeByName(abc_obj.name)
                rt.Select(container.Children)
                for abc_obj in rt.Selection:
                    alembic_obj = rt.GetNodeByName(abc_obj.name)
                    alembic_obj.source = path

        with maintained_selection():
            rt.select(node)
            rt.Select(node)

        lib.imprint(
            container["instance_node"],

@ -82,8 +82,8 @@ class ModelAbcLoader(load.LoaderPlugin):
    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)

    @staticmethod
    def get_container_children(parent, type_name):

@ -98,7 +98,7 @@ class ModelAbcLoader(load.LoaderPlugin):

        filtered = []
        for child in list_children(parent):
            class_type = str(rt.classOf(child.baseObject))
            class_type = str(rt.ClassOf(child.baseObject))
            if class_type == type_name:
                filtered.append(child)
@ -6,7 +6,7 @@ from openpype.hosts.max.api.lib import maintained_selection


class FbxModelLoader(load.LoaderPlugin):
    """Fbx Model Loader"""
    """Fbx Model Loader."""

    families = ["model"]
    representations = ["fbx"]

@ -23,12 +23,12 @@ class FbxModelLoader(load.LoaderPlugin):
        rt.FBXImporterSetParam("Preserveinstances", True)
        rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP)

        container = rt.getNodeByName(f"{name}")
        container = rt.GetNodeByName(name)
        if not container:
            container = rt.container()
            container.name = f"{name}"
            container = rt.Container()
            container.name = name

        for selection in rt.getCurrentSelection():
        for selection in rt.GetCurrentSelection():
            selection.Parent = container

        return containerise(

@ -37,7 +37,6 @@ class FbxModelLoader(load.LoaderPlugin):

    def update(self, container, representation):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])
        rt.select(node.Children)

@ -50,7 +49,7 @@ class FbxModelLoader(load.LoaderPlugin):
        rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP)

        with maintained_selection():
            rt.select(node)
            rt.Select(node)

        lib.imprint(
            container["instance_node"],

@ -63,5 +62,5 @@ class FbxModelLoader(load.LoaderPlugin):
    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)
@ -1,15 +1,13 @@
import os
from openpype.pipeline import (
    load,
    get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise

from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load


class ObjLoader(load.LoaderPlugin):
    """Obj Loader"""
    """Obj Loader."""

    families = ["model"]
    representations = ["obj"]

@ -21,18 +19,18 @@ class ObjLoader(load.LoaderPlugin):
        from pymxs import runtime as rt

        filepath = os.path.normpath(self.fname)
        self.log.debug(f"Executing command to import..")
        self.log.debug("Executing command to import..")

        rt.execute(f'importFile @"{filepath}" #noPrompt using:ObjImp')
        rt.Execute(f'importFile @"{filepath}" #noPrompt using:ObjImp')
        # create "missing" container for obj import
        container = rt.container()
        container.name = f"{name}"
        container = rt.Container()
        container.name = name

        # get current selection
        for selection in rt.getCurrentSelection():
        for selection in rt.GetCurrentSelection():
            selection.Parent = container

        asset = rt.getNodeByName(f"{name}")
        asset = rt.GetNodeByName(name)

        return containerise(
            name, [asset], context, loader=self.__class__.__name__)

@ -42,27 +40,30 @@ class ObjLoader(load.LoaderPlugin):

        path = get_representation_path(representation)
        node_name = container["instance_node"]
        node = rt.getNodeByName(node_name)
        node = rt.GetNodeByName(node_name)

        instance_name, _ = node_name.split("_")
        container = rt.getNodeByName(instance_name)
        for n in container.Children:
            rt.delete(n)
        container = rt.GetNodeByName(instance_name)
        for child in container.Children:
            rt.Delete(child)

        rt.execute(f'importFile @"{path}" #noPrompt using:ObjImp')
        rt.Execute(f'importFile @"{path}" #noPrompt using:ObjImp')
        # get current selection
        for selection in rt.getCurrentSelection():
        for selection in rt.GetCurrentSelection():
            selection.Parent = container

        with maintained_selection():
            rt.select(node)
            rt.Select(node)

        lib.imprint(node_name, {
            "representation": str(representation["_id"])
        })

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)
@ -1,10 +1,9 @@
import os
from openpype.pipeline import (
    load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise

from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load


class ModelUSDLoader(load.LoaderPlugin):

@ -19,6 +18,7 @@ class ModelUSDLoader(load.LoaderPlugin):

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt

        # asset_filepath
        filepath = os.path.normpath(self.fname)
        import_options = rt.USDImporter.CreateOptions()

@ -27,11 +27,11 @@ class ModelUSDLoader(load.LoaderPlugin):
        log_filepath = filepath.replace(ext, "txt")

        rt.LogPath = log_filepath
        rt.LogLevel = rt.name('info')
        rt.LogLevel = rt.Name("info")
        rt.USDImporter.importFile(filepath,
                                  importOptions=import_options)

        asset = rt.getNodeByName(f"{name}")
        asset = rt.GetNodeByName(name)

        return containerise(
            name, [asset], context, loader=self.__class__.__name__)

@ -41,11 +41,11 @@ class ModelUSDLoader(load.LoaderPlugin):

        path = get_representation_path(representation)
        node_name = container["instance_node"]
        node = rt.getNodeByName(node_name)
        node = rt.GetNodeByName(node_name)
        for n in node.Children:
            for r in n.Children:
                rt.delete(r)
            rt.delete(n)
                rt.Delete(r)
            rt.Delete(n)
        instance_name, _ = node_name.split("_")

        import_options = rt.USDImporter.CreateOptions()

@ -54,15 +54,15 @@ class ModelUSDLoader(load.LoaderPlugin):
        log_filepath = path.replace(ext, "txt")

        rt.LogPath = log_filepath
        rt.LogLevel = rt.name('info')
        rt.LogLevel = rt.Name("info")
        rt.USDImporter.importFile(path,
                                  importOptions=import_options)

        asset = rt.getNodeByName(f"{instance_name}")
        asset = rt.GetNodeByName(instance_name)
        asset.Parent = node

        with maintained_selection():
            rt.select(node)
            rt.Select(node)

        lib.imprint(node_name, {
            "representation": str(representation["_id"])

@ -74,5 +74,5 @@ class ModelUSDLoader(load.LoaderPlugin):
    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)
@ -6,8 +6,8 @@ Because of limited api, alembics can be only loaded, but not easily updated.
"""
import os
from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib


class AbcLoader(load.LoaderPlugin):

@ -48,6 +48,10 @@ class AbcLoader(load.LoaderPlugin):

        abc_container = abc_containers.pop()

        for abc in rt.GetCurrentSelection():
            for cam_shape in abc.Children:
                cam_shape.playbackType = 2

        return containerise(
            name, [abc_container], context, loader=self.__class__.__name__
        )

@ -56,7 +60,7 @@ class AbcLoader(load.LoaderPlugin):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])
        node = rt.GetNodeByName(container["instance_node"])

        alembic_objects = self.get_container_children(node, "AlembicObject")
        for alembic_object in alembic_objects:

@ -67,14 +71,28 @@ class AbcLoader(load.LoaderPlugin):
            {"representation": str(representation["_id"])},
        )

        with maintained_selection():
            rt.Select(node.Children)

            for alembic in rt.Selection:
                abc = rt.GetNodeByName(alembic.name)
                rt.Select(abc.Children)
                for abc_con in rt.Selection:
                    container = rt.GetNodeByName(abc_con.name)
                    container.source = path
                    rt.Select(container.Children)
                    for abc_obj in rt.Selection:
                        alembic_obj = rt.GetNodeByName(abc_obj.name)
                        alembic_obj.source = path

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)

    @staticmethod
    def get_container_children(parent, type_name):
@ -1,13 +1,12 @@
import os
from openpype.pipeline import (
    load, get_representation_path
)

from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.pipeline import get_representation_path, load


class PointCloudLoader(load.LoaderPlugin):
    """Point Cloud Loader"""
    """Point Cloud Loader."""

    families = ["pointcloud"]
    representations = ["prt"]

@ -23,7 +22,7 @@ class PointCloudLoader(load.LoaderPlugin):
        obj = rt.tyCache()
        obj.filename = filepath

        prt_container = rt.getNodeByName(f"{obj.name}")
        prt_container = rt.GetNodeByName(obj.name)

        return containerise(
            name, [prt_container], context, loader=self.__class__.__name__)

@ -33,19 +32,23 @@ class PointCloudLoader(load.LoaderPlugin):
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.getNodeByName(container["instance_node"])
        node = rt.GetNodeByName(container["instance_node"])
        with maintained_selection():
            rt.Select(node.Children)
            for prt in rt.Selection:
                prt_object = rt.GetNodeByName(prt.name)
                prt_object.filename = path

        prt_objects = self.get_container_children(node)
        for prt_object in prt_objects:
            prt_object.source = path
        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        """remove the container"""
        from pymxs import runtime as rt

        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)
22
openpype/hosts/max/plugins/publish/collect_members.py
Normal file

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
"""Collect instance members."""
import pyblish.api
from pymxs import runtime as rt


class CollectMembers(pyblish.api.InstancePlugin):
    """Collect Set Members."""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect Instance Members"
    hosts = ['max']

    def process(self, instance):

        if instance.data.get("instance_node"):
            container = rt.GetNodeByName(instance.data["instance_node"])
            instance.data["members"] = [
                member.node for member
                in container.openPypeData.all_handles
            ]
            self.log.debug("{}".format(instance.data["members"]))
@@ -7,6 +7,7 @@ from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
from openpype.hosts.max.api import colorspace
from openpype.hosts.max.api.lib import get_max_version, get_current_renderer
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name


@@ -25,8 +26,22 @@ class CollectRender(pyblish.api.InstancePlugin):
        file = rt.maxFileName
        current_file = os.path.join(folder, file)
        filepath = current_file.replace("\\", "/")

        container_name = instance.data.get("instance_node")
        context.data['currentFile'] = current_file
        cameras = instance.data.get("members")
        sel_cam = [
            c.name for c in cameras
            if rt.classOf(c) in rt.Camera.classes]
        render_dir = os.path.dirname(rt.rendOutputFilename)
        outputs = RenderSettings().create_batch_render_layer(
            container_name, render_dir, sel_cam
        )
        aov_outputs = RenderSettings().get_batch_render_elements(
            container_name, render_dir, sel_cam
        )
        files_aov = RenderProducts().get_multiple_beauty(outputs, cameras)
        aovs = RenderProducts().get_multiple_aovs(outputs, cameras)
        files_aov.update(aovs)
        asset = get_current_asset_name()

        files_by_aov = RenderProducts().get_beauty(instance.name)

@@ -37,8 +52,8 @@ class CollectRender(pyblish.api.InstancePlugin):
        if "expectedFiles" not in instance.data:
            instance.data["expectedFiles"] = list()
            instance.data["files"] = list()
            instance.data["expectedFiles"].append(files_by_aov)
            instance.data["files"].append(files_by_aov)
            instance.data["expectedFiles"].append(files_aov)
            instance.data["files"].append(files_aov)

        img_format = RenderProducts().image_format()
        project_name = context.data["projectName"]

@@ -48,9 +63,6 @@ class CollectRender(pyblish.api.InstancePlugin):
            instance.name,
            asset_id)
        self.log.debug("version_doc: {0}".format(version_doc))
        sel_obj = [
            c.name for c in rt.Objects
            if rt.classOf(c) in rt.Camera.classes]

        version_int = 1
        if version_doc:

@@ -82,11 +94,13 @@ class CollectRender(pyblish.api.InstancePlugin):
            "renderer": renderer,
            "source": filepath,
            "plugin": "3dsmax",
            "cameras": sel_obj,
            "cameras": sel_cam,
            "frameStart": int(rt.rendStart),
            "frameEnd": int(rt.rendEnd),
            "version": version_int,
            "farm": True
            "farm": True,
            "renderoutput": outputs,
            "aovoutput": aov_outputs
        }
        instance.data.update(data)
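The `expectedFiles` entries appended above map beauty/AOV names to per-frame file lists. As an illustrative, hypothetical example of the shape produced by `RenderProducts` for one camera with a single AOV (actual keys and paths depend on the render setup):

```python
# Hypothetical payload appended to instance.data["expectedFiles"];
# real keys and paths come from RenderProducts / RenderSettings.
expected_files = {
    "Camera001_beauty": [
        "renders/3dsmax/renderMain/Camera001/renderMain.0001.exr",
        "renders/3dsmax/renderMain/Camera001/renderMain.0002.exr",
    ],
    "Camera001_RsDiffuseFilter": [
        "renders/3dsmax/renderMain/Camera001/renderMain_RsDiffuseFilter.0001.exr",
        "renders/3dsmax/renderMain/Camera001/renderMain_RsDiffuseFilter.0002.exr",
    ],
}
```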
92
openpype/hosts/max/plugins/publish/collect_review.py
Normal file

@@ -0,0 +1,92 @@
# dont forget getting the focal length for burnin
"""Collect Review"""
import pyblish.api

from pymxs import runtime as rt
from openpype.lib import BoolDef
from openpype.pipeline.publish import OpenPypePyblishPluginMixin


class CollectReview(pyblish.api.InstancePlugin,
                    OpenPypePyblishPluginMixin):
    """Collect Review Data for Preview Animation"""

    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect Review Data"
    hosts = ['max']
    families = ["review"]

    def process(self, instance):
        nodes = instance.data["members"]
        focal_length = None
        camera_name = None
        for node in nodes:
            if rt.classOf(node) in rt.Camera.classes:
                camera_name = node.name
                focal_length = node.fov

        attr_values = self.get_attr_values_from_data(instance.data)
        data = {
            "review_camera": camera_name,
            "frameStart": instance.context.data["frameStart"],
            "frameEnd": instance.context.data["frameEnd"],
            "fps": instance.context.data["fps"],
            "dspGeometry": attr_values.get("dspGeometry"),
            "dspShapes": attr_values.get("dspShapes"),
            "dspLights": attr_values.get("dspLights"),
            "dspCameras": attr_values.get("dspCameras"),
            "dspHelpers": attr_values.get("dspHelpers"),
            "dspParticles": attr_values.get("dspParticles"),
            "dspBones": attr_values.get("dspBones"),
            "dspBkg": attr_values.get("dspBkg"),
            "dspGrid": attr_values.get("dspGrid"),
            "dspSafeFrame": attr_values.get("dspSafeFrame"),
            "dspFrameNums": attr_values.get("dspFrameNums")
        }
        # Enable ftrack functionality
        instance.data.setdefault("families", []).append('ftrack')

        burnin_members = instance.data.setdefault("burninDataMembers", {})
        burnin_members["focalLength"] = focal_length

        self.log.debug(f"data:{data}")
        instance.data.update(data)

    @classmethod
    def get_attribute_defs(cls):

        return [
            BoolDef("dspGeometry",
                    label="Geometry",
                    default=True),
            BoolDef("dspShapes",
                    label="Shapes",
                    default=False),
            BoolDef("dspLights",
                    label="Lights",
                    default=False),
            BoolDef("dspCameras",
                    label="Cameras",
                    default=False),
            BoolDef("dspHelpers",
                    label="Helpers",
                    default=False),
            BoolDef("dspParticles",
                    label="Particle Systems",
                    default=True),
            BoolDef("dspBones",
                    label="Bone Objects",
                    default=False),
            BoolDef("dspBkg",
                    label="Background",
                    default=True),
            BoolDef("dspGrid",
                    label="Active Grid",
                    default=False),
            BoolDef("dspSafeFrame",
                    label="Safe Frames",
                    default=False),
            BoolDef("dspFrameNums",
                    label="Frame Numbers",
                    default=False)
        ]
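The `BoolDef` toggles above surface as checkboxes in the publisher UI; `get_attr_values_from_data` then reads back whatever the artist picked. A minimal sketch of that round trip, assuming the publisher's usual storage layout under `publish_attributes` (the instance dict below is hypothetical):

```python
# The publisher stores per-plugin attribute values under the plugin's
# class name inside "publish_attributes" on the instance data.
instance_data = {
    "publish_attributes": {
        "CollectReview": {"dspGeometry": True, "dspCameras": False}
    }
}

# Equivalent of self.get_attr_values_from_data(instance.data) above:
attr_values = instance_data["publish_attributes"].get("CollectReview", {})
assert attr_values.get("dspGeometry") is True
assert attr_values.get("dspCameras") is False
```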
@@ -1,14 +1,14 @@
import os

import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children

from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import OptionalPyblishPluginMixin, publish


class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
    """
    Extract Camera with AlembicExport
    """
    """Extract Camera with AlembicExport."""

    order = pyblish.api.ExtractorOrder - 0.1
    label = "Extract Alembic Camera"

@@ -31,20 +31,21 @@ class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
        path = os.path.join(stagingdir, filename)

        # We run the render
        self.log.info("Writing alembic '%s' to '%s'" % (filename, stagingdir))
        self.log.info(f"Writing alembic '{filename}' to '{stagingdir}'")

        rt.AlembicExport.ArchiveType = rt.name("ogawa")
        rt.AlembicExport.CoordinateSystem = rt.name("maya")
        rt.AlembicExport.ArchiveType = rt.Name("ogawa")
        rt.AlembicExport.CoordinateSystem = rt.Name("maya")
        rt.AlembicExport.StartFrame = start
        rt.AlembicExport.EndFrame = end
        rt.AlembicExport.CustomAttributes = True

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            rt.exportFile(
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.ExportFile(
                path,
                rt.name("noPrompt"),
                rt.Name("noPrompt"),
                selectedOnly=True,
                using=rt.AlembicExport,
            )

@@ -58,6 +59,8 @@ class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
            "ext": "abc",
            "files": filename,
            "stagingDir": stagingdir,
            "frameStart": start,
            "frameEnd": end,
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
        self.log.info(f"Extracted instance '{instance.name}' to: {path}")
@@ -1,14 +1,14 @@
import os

import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children

from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import OptionalPyblishPluginMixin, publish


class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
    """
    Extract Camera with FbxExporter
    """
    """Extract Camera with FbxExporter."""

    order = pyblish.api.ExtractorOrder - 0.2
    label = "Extract Fbx Camera"

@@ -26,7 +26,7 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
        filename = "{name}.fbx".format(**instance.data)

        filepath = os.path.join(stagingdir, filename)
        self.log.info("Writing fbx file '%s' to '%s'" % (filename, filepath))
        self.log.info(f"Writing fbx file '{filename}' to '{filepath}'")

        rt.FBXExporterSetParam("Animation", True)
        rt.FBXExporterSetParam("Cameras", True)

@@ -36,10 +36,11 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            rt.exportFile(
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.ExportFile(
                filepath,
                rt.name("noPrompt"),
                rt.Name("noPrompt"),
                selectedOnly=True,
                using=rt.FBXEXP,
            )

@@ -55,6 +56,4 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)
        self.log.info(
            "Extracted instance '%s' to: %s" % (instance.name, filepath)
        )
        self.log.info(f"Extracted instance '{instance.name}' to: {filepath}")
@@ -2,7 +2,6 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import get_all_children


class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin):

@@ -33,7 +32,7 @@ class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin):
        if "representations" not in instance.data:
            instance.data["representations"] = []

        nodes = get_all_children(rt.getNodeByName(container))
        nodes = instance.data["members"]
        rt.saveNodes(nodes, max_path, quiet=True)

        self.log.info("Performing Extraction ...")
@@ -2,7 +2,7 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection


class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin):

@@ -40,7 +40,8 @@ class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin):

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.exportFile(
                filepath,
                rt.name("noPrompt"),
@@ -2,7 +2,7 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection


class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):

@@ -22,6 +22,7 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):

        container = instance.data["instance_node"]

        self.log.info("Extracting Geometry ...")

        stagingdir = self.staging_dir(instance)

@@ -39,7 +40,8 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.exportFile(
                filepath,
                rt.name("noPrompt"),
@@ -2,7 +2,7 @@ import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection


class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin):

@@ -31,7 +31,8 @@ class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin):

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.exportFile(
                filepath,
                rt.name("noPrompt"),
@@ -1,20 +1,15 @@
import os

import pyblish.api
from openpype.pipeline import (
    publish,
    OptionalPyblishPluginMixin
)
from pymxs import runtime as rt
from openpype.hosts.max.api import (
    maintained_selection
)

from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import OptionalPyblishPluginMixin, publish


class ExtractModelUSD(publish.Extractor,
                      OptionalPyblishPluginMixin):
    """
    Extract Geometry in USDA Format
    """
    """Extract Geometry in USDA Format."""

    order = pyblish.api.ExtractorOrder - 0.05
    label = "Extract Geometry (USD)"

@@ -26,31 +21,28 @@ class ExtractModelUSD(publish.Extractor,
        if not self.is_active(instance.data):
            return

        container = instance.data["instance_node"]

        self.log.info("Extracting Geometry ...")

        stagingdir = self.staging_dir(instance)
        asset_filename = "{name}.usda".format(**instance.data)
        asset_filepath = os.path.join(stagingdir,
                                      asset_filename)
        self.log.info("Writing USD '%s' to '%s'" % (asset_filepath,
                                                    stagingdir))
        self.log.info(f"Writing USD '{asset_filepath}' to '{stagingdir}'")

        log_filename = "{name}.txt".format(**instance.data)
        log_filepath = os.path.join(stagingdir,
                                    log_filename)
        self.log.info("Writing log '%s' to '%s'" % (log_filepath,
                                                    stagingdir))
        self.log.info(f"Writing log '{log_filepath}' to '{stagingdir}'")

        # get the nodes which need to be exported
        export_options = self.get_export_options(log_filepath)
        with maintained_selection():
            # select and export
            node_list = self.get_node_list(container)
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.USDExporter.ExportFile(asset_filepath,
                                      exportOptions=export_options,
                                      contentSource=rt.name("selected"),
                                      contentSource=rt.Name("selected"),
                                      nodeList=node_list)

        self.log.info("Performing Extraction ...")

@@ -73,25 +65,11 @@ class ExtractModelUSD(publish.Extractor,
        }
        instance.data["representations"].append(log_representation)

        self.log.info("Extracted instance '%s' to: %s" % (instance.name,
                                                          asset_filepath))
        self.log.info(
            f"Extracted instance '{instance.name}' to: {asset_filepath}")

    def get_node_list(self, container):
        """
        Get the target nodes which are
        the children of the container
        """
        node_list = []

        container_node = rt.getNodeByName(container)
        target_node = container_node.Children
        rt.select(target_node)
        for sel in rt.selection:
            node_list.append(sel)

        return node_list

    def get_export_options(self, log_path):
    @staticmethod
    def get_export_options(log_path):
        """Set Export Options for USD Exporter"""

        export_options = rt.USDExporter.createOptions()

@@ -101,13 +79,13 @@ class ExtractModelUSD(publish.Extractor,
        export_options.Lights = False
        export_options.Cameras = False
        export_options.Materials = False
        export_options.MeshFormat = rt.name('fromScene')
        export_options.FileFormat = rt.name('ascii')
        export_options.UpAxis = rt.name('y')
        export_options.LogLevel = rt.name('info')
        export_options.MeshFormat = rt.Name('fromScene')
        export_options.FileFormat = rt.Name('ascii')
        export_options.UpAxis = rt.Name('y')
        export_options.LogLevel = rt.Name('info')
        export_options.LogPath = log_path
        export_options.PreserveEdgeOrientation = True
        export_options.TimeMode = rt.name('current')
        export_options.TimeMode = rt.Name('current')

        rt.USDexporter.UIOptions = export_options
@@ -41,7 +41,7 @@ import os
import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection, get_all_children
from openpype.hosts.max.api import maintained_selection


class ExtractAlembic(publish.Extractor):

@@ -56,7 +56,7 @@ class ExtractAlembic(publish.Extractor):

        container = instance.data["instance_node"]

        self.log.info("Extracting pointcache ...")
        self.log.debug("Extracting pointcache ...")

        parent_dir = self.staging_dir(instance)
        file_name = "{name}.abc".format(**instance.data)

@@ -72,7 +72,8 @@ class ExtractAlembic(publish.Extractor):

        with maintained_selection():
            # select and export
            rt.select(get_all_children(rt.getNodeByName(container)))
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.exportFile(
                path,
                rt.name("noPrompt"),
@@ -1,42 +1,34 @@
import os

import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import (
    maintained_selection
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io


def get_setting(project_setting=None):
    project_setting = get_project_settings(
        legacy_io.Session["AVALON_PROJECT"]
    )
    return (project_setting["max"]["PointCloud"])
from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import publish


class ExtractPointCloud(publish.Extractor):
    """
    Extract PRT format with tyFlow operators
    Extract PRT format with tyFlow operators.

    Notes:
        Currently only works for the default partition setting

    Args:
        export_particle(): sets up all job arguments for attributes
        to be exported in MAXscript
        self.export_particle(): sets up all job arguments for attributes
            to be exported in MAXscript

        get_operators(): get the export_particle operator
        self.get_operators(): get the export_particle operator

        get_custom_attr(): get all custom channel attributes from Openpype
        setting and sets it as job arguments before exporting
        self.get_custom_attr(): get all custom channel attributes from Openpype
            setting and sets it as job arguments before exporting

        get_files(): get the files with tyFlow naming convention
        before publishing
        self.get_files(): get the files with tyFlow naming convention
            before publishing

        partition_output_name(): get the naming with partition settings.
        get_partition(): get partition value
        self.partition_output_name(): get the naming with partition settings.

        self.get_partition(): get partition value

    """

@@ -46,9 +38,9 @@ class ExtractPointCloud(publish.Extractor):
    families = ["pointcloud"]

    def process(self, instance):
        self.settings = self.get_setting(instance)
        start = int(instance.context.data.get("frameStart"))
        end = int(instance.context.data.get("frameEnd"))
        container = instance.data["instance_node"]
        self.log.info("Extracting PRT...")

        stagingdir = self.staging_dir(instance)

@@ -56,22 +48,25 @@ class ExtractPointCloud(publish.Extractor):
        path = os.path.join(stagingdir, filename)

        with maintained_selection():
            job_args = self.export_particle(container,
            job_args = self.export_particle(instance.data["members"],
                                            start,
                                            end,
                                            path)

            for job in job_args:
                rt.execute(job)
                rt.Execute(job)

        self.log.info("Performing Extraction ...")
        if "representations" not in instance.data:
            instance.data["representations"] = []

        self.log.info("Writing PRT with TyFlow Plugin...")
        filenames = self.get_files(container, path, start, end)
        self.log.debug("filenames: {0}".format(filenames))
        filenames = self.get_files(
            instance.data["members"], path, start, end)
        self.log.debug(f"filenames: {filenames}")

        partition = self.partition_output_name(container)
        partition = self.partition_output_name(
            instance.data["members"])

        representation = {
            'name': 'prt',

@@ -81,67 +76,84 @@ class ExtractPointCloud(publish.Extractor):
            "outputName": partition  # partition value
        }
        instance.data["representations"].append(representation)
        self.log.info("Extracted instance '%s' to: %s" % (instance.name,
                                                          path))
        self.log.info(f"Extracted instance '{instance.name}' to: {path}")

    def export_particle(self,
                        container,
                        members,
                        start,
                        end,
                        filepath):
        """Sets up all job arguments for attributes.

        Those attributes are to be exported in MAX Script.

        Args:
            members (list): Member nodes of the instance.
            start (int): Start frame.
            end (int): End frame.
            filepath (str): Path to PRT file.

        Returns:
            list of arguments for MAX Script.

        """
        job_args = []
        opt_list = self.get_operators(container)
        opt_list = self.get_operators(members)
        for operator in opt_list:
            start_frame = "{0}.frameStart={1}".format(operator,
                                                      start)
            start_frame = f"{operator}.frameStart={start}"
            job_args.append(start_frame)
            end_frame = "{0}.frameEnd={1}".format(operator,
                                                  end)
            end_frame = f"{operator}.frameEnd={end}"
            job_args.append(end_frame)
            filepath = filepath.replace("\\", "/")
            prt_filename = '{0}.PRTFilename="{1}"'.format(operator,
                                                          filepath)

            prt_filename = f'{operator}.PRTFilename="{filepath}"'
            job_args.append(prt_filename)
            # Partition
            mode = "{0}.PRTPartitionsMode=2".format(operator)
            mode = f"{operator}.PRTPartitionsMode=2"
            job_args.append(mode)

            additional_args = self.get_custom_attr(operator)
            for args in additional_args:
                job_args.append(args)

            prt_export = "{0}.exportPRT()".format(operator)
            job_args.extend(iter(additional_args))
            prt_export = f"{operator}.exportPRT()"
            job_args.append(prt_export)

        return job_args

    def get_operators(self, container):
        """Get Export Particles Operator"""
    @staticmethod
    def get_operators(members):
        """Get Export Particles Operator.

        Args:
            members (list): Instance members.

        Returns:
            list of particle operators

        """
        opt_list = []
        node = rt.getNodebyName(container)
        selection_list = list(node.Children)
        for sel in selection_list:
            obj = sel.baseobject
            # TODO: to see if it can be used maxscript instead
            anim_names = rt.getsubanimnames(obj)
        for member in members:
            obj = member.baseobject
            # TODO: to see if it can be used maxscript instead
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                sub_anim = rt.getsubanim(obj, anim_name)
                boolean = rt.isProperty(sub_anim, "Export_Particles")
                event_name = sub_anim.name
                sub_anim = rt.GetSubAnim(obj, anim_name)
                boolean = rt.IsProperty(sub_anim, "Export_Particles")
                if boolean:
                    opt = "${0}.{1}.export_particles".format(sel.name,
                                                             event_name)
                    opt_list.append(opt)
                    event_name = sub_anim.Name
                    opt = f"${member.Name}.{event_name}.export_particles"
                    opt_list.append(opt)

        return opt_list

    @staticmethod
    def get_setting(instance):
        project_setting = instance.context.data["project_settings"]
        return project_setting["max"]["PointCloud"]

    def get_custom_attr(self, operator):
        """Get Custom Attributes"""

        custom_attr_list = []
        attr_settings = get_setting()["attribute"]
        attr_settings = self.settings["attribute"]
        for key, value in attr_settings.items():
            custom_attr = "{0}.PRTChannels_{1}=True".format(operator,
                                                            value)

@@ -157,14 +169,25 @@ class ExtractPointCloud(publish.Extractor):
                  path,
                  start_frame,
                  end_frame):
        """
        Note:
            Set the filenames accordingly to the tyFlow file
            naming extension for the publishing purpose
        """Get file names for tyFlow.

        Actual File Output from tyFlow:
        Set the filenames accordingly to the tyFlow file
        naming extension for the publishing purpose

        Actual File Output from tyFlow::
            <SceneFile>__part<PartitionStart>of<PartitionCount>.<frame>.prt

        e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt

        Args:
            container: Instance node.
            path (str): Output directory.
            start_frame (int): Start frame.
            end_frame (int): End frame.

        Returns:
            list of filenames

        """
        filenames = []
        filename = os.path.basename(path)

@@ -181,27 +204,36 @@ class ExtractPointCloud(publish.Extractor):
        return filenames

    def partition_output_name(self, container):
        """
        Notes:
            Partition output name set for mapping
            the published file output
        """Get partition output name.

        Partition output name set for mapping
        the published file output.

        Todo:
            Customizes the setting for the output.

        Args:
            container: Instance node.

        Returns:
            str: Partition name.

        todo:
            Customizes the setting for the output
        """
        partition_count, partition_start = self.get_partition(container)
        partition = "_part{:03}of{}".format(partition_start,
                                            partition_count)

        return partition
        return f"_part{partition_start:03}of{partition_count}"

    def get_partition(self, container):
        """
        Get Partition Value
        """Get Partition value.

        Args:
            container: Instance node.

        """
        opt_list = self.get_operators(container)
        # TODO: This looks strange? Iterating over
        # the opt_list but returning from inside?
        for operator in opt_list:
            count = rt.execute(f'{operator}.PRTPartitionsCount')
            start = rt.execute(f'{operator}.PRTPartitionsFrom')
            count = rt.Execute(f'{operator}.PRTPartitionsCount')
            start = rt.Execute(f'{operator}.PRTPartitionsFrom')

            return count, start
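Concretely, the f-strings in `export_particle` assemble plain MAXScript statements that are evaluated one by one via `rt.Execute`. A sketch of the strings produced for a hypothetical operator path `$PRT_Flow.Event_001.export_particles`:

```python
operator = "$PRT_Flow.Event_001.export_particles"  # hypothetical node path
start, end = 1001, 1010
filepath = "C:/publish/pointcloudMain.prt"

# Mirrors the job list built by export_particle() above; each entry is
# a complete MAXScript statement suitable for rt.Execute().
job_args = [
    f"{operator}.frameStart={start}",
    f"{operator}.frameEnd={end}",
    f'{operator}.PRTFilename="{filepath}"',
    f"{operator}.PRTPartitionsMode=2",
    f"{operator}.exportPRT()",
]
```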
@@ -30,8 +30,8 @@ class ExtractRedshiftProxy(publish.Extractor):

        with maintained_selection():
            # select and export
            con = rt.getNodeByName(container)
            rt.select(con.Children)
            node_list = instance.data["members"]
            rt.Select(node_list)
            # Redshift rsProxy command
            # rsProxy fp selected compress connectivity startFrame endFrame
            # camera warnExisting transformPivotToOrigin
102
openpype/hosts/max/plugins/publish/extract_review_animation.py
Normal file

@@ -0,0 +1,102 @@
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version


class ExtractReviewAnimation(publish.Extractor):
    """
    Extract Review by Review Animation
    """

    order = pyblish.api.ExtractorOrder + 0.001
    label = "Extract Review Animation"
    hosts = ["max"]
    families = ["review"]

    def process(self, instance):
        staging_dir = self.staging_dir(instance)
        ext = instance.data.get("imageFormat")
        filename = "{0}..{1}".format(instance.name, ext)
        start = int(instance.data["frameStart"])
        end = int(instance.data["frameEnd"])
        fps = int(instance.data["fps"])
        filepath = os.path.join(staging_dir, filename)
        filepath = filepath.replace("\\", "/")
        filenames = self.get_files(
            instance.name, start, end, ext)

        self.log.debug(
            "Writing Review Animation to"
            " '%s' to '%s'" % (filename, staging_dir))

        review_camera = instance.data["review_camera"]
        with viewport_camera(review_camera):
            preview_arg = self.set_preview_arg(
                instance, filepath, start, end, fps)
            rt.execute(preview_arg)

        tags = ["review"]
        if not instance.data.get("keepImages"):
            tags.append("delete")

        self.log.debug("Performing Extraction ...")

        representation = {
            "name": instance.data["imageFormat"],
            "ext": instance.data["imageFormat"],
            "files": filenames,
            "stagingDir": staging_dir,
            "frameStart": instance.data["frameStart"],
            "frameEnd": instance.data["frameEnd"],
            "tags": tags,
            "preview": True,
            "camera_name": review_camera
        }
        self.log.debug(f"{representation}")

        if "representations" not in instance.data:
            instance.data["representations"] = []
        instance.data["representations"].append(representation)

    def get_files(self, filename, start, end, ext):
        file_list = []
        for frame in range(int(start), int(end) + 1):
            actual_name = "{}.{:04}.{}".format(
                filename, frame, ext)
            file_list.append(actual_name)

        return file_list

    def set_preview_arg(self, instance, filepath,
                        start, end, fps):
        job_args = list()
        default_option = f'CreatePreview filename:"{filepath}"'
        job_args.append(default_option)
        frame_option = f"outputAVI:false start:{start} end:{end} fps:{fps}"  # noqa
        job_args.append(frame_option)
        rndLevel = instance.data.get("rndLevel")
        if rndLevel:
            option = f"rndLevel:#{rndLevel}"
            job_args.append(option)
        options = [
            "percentSize", "dspGeometry", "dspShapes",
            "dspLights", "dspCameras", "dspHelpers", "dspParticles",
            "dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
        ]

        for key in options:
            enabled = instance.data.get(key)
            if enabled:
                job_args.append(f"{key}:{enabled}")

        if get_max_version() == 2024:
            # hardcoded for current stage
            auto_play_option = "autoPlay:false"
            job_args.append(auto_play_option)

        job_str = " ".join(job_args)
        self.log.debug(job_str)

        return job_str
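For reference, the joined `job_str` produced above is a single MAXScript `CreatePreview` statement. With hypothetical values it would look roughly like this (enabled display toggles appear as `True` because the flag values mirror the Python booleans):

```python
# Hypothetical result of set_preview_arg() for frames 1001-1010 at 25 fps:
job_str = (
    'CreatePreview filename:"C:/staging/reviewMain..png" '
    "outputAVI:false start:1001 end:1010 fps:25 "
    "dspGeometry:True dspBkg:True autoPlay:false"
)
# rt.execute(job_str) evaluates this in 3ds Max and writes one image
# per frame into the staging directory.
```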
91
openpype/hosts/max/plugins/publish/extract_thumbnail.py
Normal file

@@ -0,0 +1,91 @@
import os
import tempfile
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version


class ExtractThumbnail(publish.Extractor):
    """
    Extract Thumbnail for Review
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Thumbnail"
    hosts = ["max"]
    families = ["review"]

    def process(self, instance):
        # TODO: Create temp directory for thumbnail
        # - this is to avoid "override" of source file
        tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
        self.log.debug(
            f"Create temp directory {tmp_staging} for thumbnail"
        )
        fps = int(instance.data["fps"])
        frame = int(instance.data["frameStart"])
        instance.context.data["cleanupFullPaths"].append(tmp_staging)
        filename = "{name}_thumbnail..png".format(**instance.data)
        filepath = os.path.join(tmp_staging, filename)
        filepath = filepath.replace("\\", "/")
        thumbnail = self.get_filename(instance.name, frame)

        self.log.debug(
            "Writing Thumbnail to"
            " '%s' to '%s'" % (filename, tmp_staging))
        review_camera = instance.data["review_camera"]
        with viewport_camera(review_camera):
            preview_arg = self.set_preview_arg(
                instance, filepath, fps, frame)
            rt.execute(preview_arg)

        representation = {
            "name": "thumbnail",
            "ext": "png",
            "files": thumbnail,
            "stagingDir": tmp_staging,
            "thumbnail": True
        }

        self.log.debug(f"{representation}")

        if "representations" not in instance.data:
            instance.data["representations"] = []
        instance.data["representations"].append(representation)

    def get_filename(self, filename, target_frame):
        thumbnail_name = "{}_thumbnail.{:04}.png".format(
            filename, target_frame
        )
        return thumbnail_name

    def set_preview_arg(self, instance, filepath, fps, frame):
        job_args = list()
        default_option = f'CreatePreview filename:"{filepath}"'
        job_args.append(default_option)
        frame_option = f"outputAVI:false start:{frame} end:{frame} fps:{fps}"  # noqa
        job_args.append(frame_option)
        rndLevel = instance.data.get("rndLevel")
        if rndLevel:
            option = f"rndLevel:#{rndLevel}"
            job_args.append(option)
        options = [
            "percentSize", "dspGeometry", "dspShapes",
            "dspLights", "dspCameras", "dspHelpers", "dspParticles",
            "dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
        ]

        for key in options:
            enabled = instance.data.get(key)
            if enabled:
                job_args.append(f"{key}:{enabled}")
        if get_max_version() == 2024:
            # hardcoded for current stage
            auto_play_option = "autoPlay:false"
            job_args.append(auto_play_option)

        job_str = " ".join(job_args)
        self.log.debug(job_str)

        return job_str
@@ -0,0 +1,48 @@
import pyblish.api

from pymxs import runtime as rt
from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder,
    PublishValidationError
)
from openpype.hosts.max.api.lib import get_frame_range, set_timeline


class ValidateAnimationTimeline(pyblish.api.InstancePlugin):
    """
    Validates Animation Timeline for Preview Animation in Max
    """

    label = "Animation Timeline for Review"
    order = ValidateContentsOrder
    families = ["review"]
    hosts = ["max"]
    actions = [RepairAction]

    def process(self, instance):
        frame_range = get_frame_range()
        frame_start_handle = frame_range["frameStart"] - int(
            frame_range["handleStart"]
        )
        frame_end_handle = frame_range["frameEnd"] + int(
            frame_range["handleEnd"]
        )
        if rt.animationRange.start != frame_start_handle or (
            rt.animationRange.end != frame_end_handle
        ):
            raise PublishValidationError("Incorrect animation timeline "
                                         "set for preview animation.. "
                                         "\nYou can use repair action to "
                                         "the correct animation timeline")

    @classmethod
    def repair(cls, instance):
        frame_range = get_frame_range()
        frame_start_handle = frame_range["frameStart"] - int(
            frame_range["handleStart"]
        )
        frame_end_handle = frame_range["frameEnd"] + int(
            frame_range["handleEnd"]
        )
        set_timeline(frame_start_handle, frame_end_handle)
@@ -11,7 +11,7 @@ class ValidateCameraContent(pyblish.api.InstancePlugin):
    """

    order = pyblish.api.ValidatorOrder
    families = ["camera"]
    families = ["camera", "review"]
    hosts = ["max"]
    label = "Camera Contents"
    camera_type = ["$Free_Camera", "$Target_Camera",

@@ -20,28 +20,23 @@ class ValidateCameraContent(pyblish.api.InstancePlugin):
    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError("Camera instance must only include"
                                         "camera (and camera target)")
            raise PublishValidationError(("Camera instance must only include"
                                          "camera (and camera target). "
                                          f"Invalid content {invalid}"))

    def get_invalid(self, instance):
        """
        Get invalid nodes if the instance is not camera
        """
        invalid = list()
        invalid = []
        container = instance.data["instance_node"]
        self.log.info("Validating look content for "
                      "{}".format(container))
        self.log.info(f"Validating camera content for {container}")

        con = rt.getNodeByName(container)
        selection_list = list(con.Children)
        selection_list = instance.data["members"]
        for sel in selection_list:
            # to avoid Attribute Error from pymxs wrapper
            sel_tmp = str(sel)
            found = False
            for cam in self.camera_type:
                if sel_tmp.startswith(cam):
                    found = True
                    break
            found = any(sel_tmp.startswith(cam) for cam in self.camera_type)
            if not found:
                self.log.error("Camera not found")
                invalid.append(sel)
@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt

from openpype.pipeline import PublishValidationError


class ValidateModelContent(pyblish.api.InstancePlugin):
    """Validates Model instance contents.

@@ -19,26 +20,25 @@ class ValidateModelContent(pyblish.api.InstancePlugin):
    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError("Model instance must only include"
                                         "Geometry and Editable Mesh")
            raise PublishValidationError(("Model instance must only include"
                                          "Geometry and Editable Mesh. "
                                          f"Invalid types on: {invalid}"))

    def get_invalid(self, instance):
        """
        Get invalid nodes if the instance is not camera
        """
        invalid = list()
        invalid = []
        container = instance.data["instance_node"]
        self.log.info("Validating look content for "
                      "{}".format(container))
        self.log.info(f"Validating model content for {container}")

        con = rt.getNodeByName(container)
        selection_list = list(con.Children) or rt.getCurrentSelection()
        selection_list = instance.data["members"]
        for sel in selection_list:
            if rt.classOf(sel) in rt.Camera.classes:
            if rt.ClassOf(sel) in rt.Camera.classes:
                invalid.append(sel)
            if rt.classOf(sel) in rt.Light.classes:
            if rt.ClassOf(sel) in rt.Light.classes:
                invalid.append(sel)
            if rt.classOf(sel) in rt.Shape.classes:
            if rt.ClassOf(sel) in rt.Shape.classes:
                invalid.append(sel)

        return invalid
@@ -13,11 +13,11 @@ class ValidateMaxContents(pyblish.api.InstancePlugin):
    order = pyblish.api.ValidatorOrder
    families = ["camera",
                "maxScene",
                "maxrender"]
                "maxrender",
                "review"]
    hosts = ["max"]
    label = "Max Scene Contents"

    def process(self, instance):
        container = rt.getNodeByName(instance.data["instance_node"])
        if not list(container.Children):
        if not instance.data["members"]:
            raise PublishValidationError("No content found in the container")
@@ -9,11 +9,11 @@ def get_setting(project_setting=None):
    project_setting = get_project_settings(
        legacy_io.Session["AVALON_PROJECT"]
    )
    return (project_setting["max"]["PointCloud"])
    return project_setting["max"]["PointCloud"]


class ValidatePointCloud(pyblish.api.InstancePlugin):
    """Validate that workfile was saved."""
    """Validate that work file was saved."""

    order = pyblish.api.ValidatorOrder
    families = ["pointcloud"]

@@ -34,39 +34,42 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
            of export_particle operator

        """
        invalid = self.get_tyFlow_object(instance)
        if invalid:
            raise PublishValidationError("Non tyFlow object "
                                         "found: {}".format(invalid))
        invalid = self.get_tyFlow_operator(instance)
        if invalid:
            raise PublishValidationError("tyFlow ExportParticle operator "
                                         "not found: {}".format(invalid))
        report = []

        invalid = self.validate_export_mode(instance)
        if invalid:
            raise PublishValidationError("The export mode is not at PRT")
        invalid_object = self.get_tyflow_object(instance)
        if invalid_object:
            report.append(f"Non tyFlow object found: {invalid_object}")

        invalid = self.validate_partition_value(instance)
        if invalid:
            raise PublishValidationError("tyFlow Partition setting is "
                                         "not at the default value")
        invalid = self.validate_custom_attribute(instance)
        if invalid:
            raise PublishValidationError("Custom Attribute not found "
                                         ":{}".format(invalid))
        invalid_operator = self.get_tyflow_operator(instance)
        if invalid_operator:
            report.append((
                "tyFlow ExportParticle operator not "
                f"found: {invalid_operator}"))

    def get_tyFlow_object(self, instance):
        if self.validate_export_mode(instance):
            report.append("The export mode is not at PRT")

        if self.validate_partition_value(instance):
            report.append(("tyFlow Partition setting is "
                           "not at the default value"))

        invalid_attribute = self.validate_custom_attribute(instance)
        if invalid_attribute:
            report.append(("Custom Attribute not found "
                           f":{invalid_attribute}"))

        if report:
            raise PublishValidationError(f"{report}")

    def get_tyflow_object(self, instance):
        invalid = []
        container = instance.data["instance_node"]
        self.log.info("Validating tyFlow container "
                      "for {}".format(container))
        self.log.info(f"Validating tyFlow container for {container}")

        con = rt.getNodeByName(container)
        selection_list = list(con.Children)
        selection_list = instance.data["members"]
        for sel in selection_list:
            sel_tmp = str(sel)
            if rt.classOf(sel) in [rt.tyFlow,
            if rt.ClassOf(sel) in [rt.tyFlow,
                                   rt.Editable_Mesh]:
                if "tyFlow" not in sel_tmp:
                    invalid.append(sel)

@@ -75,23 +78,20 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):

        return invalid

    def get_tyFlow_operator(self, instance):
    def get_tyflow_operator(self, instance):
        invalid = []
        container = instance.data["instance_node"]
        self.log.info("Validating tyFlow object "
                      "for {}".format(container))

        con = rt.getNodeByName(container)
        selection_list = list(con.Children)
        self.log.info(f"Validating tyFlow object for {container}")
        selection_list = instance.data["members"]
        bool_list = []
        for sel in selection_list:
            obj = sel.baseobject
            anim_names = rt.getsubanimnames(obj)
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                # get all the names of the related tyFlow nodes
                sub_anim = rt.getsubanim(obj, anim_name)
                sub_anim = rt.GetSubAnim(obj, anim_name)
                # check if there is export particle operator
                boolean = rt.isProperty(sub_anim, "Export_Particles")
                boolean = rt.IsProperty(sub_anim, "Export_Particles")
                bool_list.append(str(boolean))
        # if the export_particles property is not there
        # it means there is not a "Export Particle" operator

@@ -104,21 +104,18 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
    def validate_custom_attribute(self, instance):
        invalid = []
        container = instance.data["instance_node"]
        self.log.info("Validating tyFlow custom "
                      "attributes for {}".format(container))
        self.log.info(
            f"Validating tyFlow custom attributes for {container}")

        con = rt.getNodeByName(container)
        selection_list = list(con.Children)
        selection_list = instance.data["members"]
        for sel in selection_list:
            obj = sel.baseobject
            anim_names = rt.getsubanimnames(obj)
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                # get all the names of the related tyFlow nodes
                sub_anim = rt.getsubanim(obj, anim_name)
                # check if there is export particle operator
                boolean = rt.isProperty(sub_anim, "Export_Particles")
                event_name = sub_anim.name
                if boolean:
                sub_anim = rt.GetSubAnim(obj, anim_name)
                if rt.IsProperty(sub_anim, "Export_Particles"):
                    event_name = sub_anim.name
                    opt = "${0}.{1}.export_particles".format(sel.name,
                                                             event_name)
                    attributes = get_setting()["attribute"]

@@ -126,39 +123,36 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
                        custom_attr = "{0}.PRTChannels_{1}".format(opt,
                                                                   value)
                        try:
                            rt.execute(custom_attr)
                            rt.Execute(custom_attr)
                        except RuntimeError:
                            invalid.add(key)
                            invalid.append(key)

        return invalid

    def validate_partition_value(self, instance):
        invalid = []
        container = instance.data["instance_node"]
        self.log.info("Validating tyFlow partition "
                      "value for {}".format(container))
        self.log.info(
            f"Validating tyFlow partition value for {container}")

        con = rt.getNodeByName(container)
        selection_list = list(con.Children)
        selection_list = instance.data["members"]
        for sel in selection_list:
            obj = sel.baseobject
            anim_names = rt.getsubanimnames(obj)
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                # get all the names of the related tyFlow nodes
                sub_anim = rt.getsubanim(obj, anim_name)
                # check if there is export particle operator
                boolean = rt.isProperty(sub_anim, "Export_Particles")
                event_name = sub_anim.name
                if boolean:
                sub_anim = rt.GetSubAnim(obj, anim_name)
                if rt.IsProperty(sub_anim, "Export_Particles"):
                    event_name = sub_anim.name
                    opt = "${0}.{1}.export_particles".format(sel.name,
                                                             event_name)
                    count = rt.execute(f'{opt}.PRTPartitionsCount')
                    count = rt.Execute(f'{opt}.PRTPartitionsCount')
                    if count != 100:
                        invalid.append(count)
                    start = rt.execute(f'{opt}.PRTPartitionsFrom')
                    start = rt.Execute(f'{opt}.PRTPartitionsFrom')
                    if start != 1:
                        invalid.append(start)
                    end = rt.execute(f'{opt}.PRTPartitionsTo')
                    end = rt.Execute(f'{opt}.PRTPartitionsTo')
                    if end != 1:
                        invalid.append(end)

@@ -167,24 +161,23 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
    def validate_export_mode(self, instance):
        invalid = []
        container = instance.data["instance_node"]
        self.log.info("Validating tyFlow export "
                      "mode for {}".format(container))
        self.log.info(
            f"Validating tyFlow export mode for {container}")

        con = rt.getNodeByName(container)
        con = rt.GetNodeByName(container)
        selection_list = list(con.Children)
        for sel in selection_list:
            obj = sel.baseobject
            anim_names = rt.getsubanimnames(obj)
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                # get all the names of the related tyFlow nodes
                sub_anim = rt.getsubanim(obj, anim_name)
                sub_anim = rt.GetSubAnim(obj, anim_name)
                # check if there is export particle operator
                boolean = rt.isProperty(sub_anim, "Export_Particles")
                boolean = rt.IsProperty(sub_anim, "Export_Particles")
                event_name = sub_anim.name
                if boolean:
                    opt = "${0}.{1}.export_particles".format(sel.name,
                                                             event_name)
                    export_mode = rt.execute(f'{opt}.exportMode')
                    opt = f"${sel.name}.{event_name}.export_particles"
                    export_mode = rt.Execute(f'{opt}.exportMode')
                    if export_mode != 1:
                        invalid.append(export_mode)
@@ -1,36 +1,37 @@
# -*- coding: utf-8 -*-
import pyblish.api
"""Validator for USD plugin."""
from openpype.pipeline import PublishValidationError
from pyblish.api import InstancePlugin, ValidatorOrder
from pymxs import runtime as rt


class ValidateUSDPlugin(pyblish.api.InstancePlugin):
    """Validates if USD plugin is installed or loaded in Max
    """
def get_plugins() -> list:
    """Get plugin list from 3ds max."""
    manager = rt.PluginManager
    count = manager.pluginDllCount
    plugin_info_list = []
    for p in range(1, count + 1):
        plugin_info = manager.pluginDllName(p)
        plugin_info_list.append(plugin_info)

    order = pyblish.api.ValidatorOrder - 0.01
    return plugin_info_list


class ValidateUSDPlugin(InstancePlugin):
    """Validates if USD plugin is installed or loaded in 3ds max."""

    order = ValidatorOrder - 0.01
    families = ["model"]
    hosts = ["max"]
    label = "USD Plugin"

    def process(self, instance):
        plugin_mgr = rt.pluginManager
        plugin_count = plugin_mgr.pluginDllCount
        plugin_info = self.get_plugins(plugin_mgr,
                                       plugin_count)
        """Plugin entry point."""

        plugin_info = get_plugins()
        usd_import = "usdimport.dli"
        if usd_import not in plugin_info:
            raise PublishValidationError("USD Plugin {}"
                                         " not found".format(usd_import))
            raise PublishValidationError(f"USD Plugin {usd_import} not found")
        usd_export = "usdexport.dle"
        if usd_export not in plugin_info:
            raise PublishValidationError("USD Plugin {}"
                                         " not found".format(usd_export))

    def get_plugins(self, manager, count):
        plugin_info_list = list()
        for p in range(1, count + 1):
            plugin_info = manager.pluginDllName(p)
            plugin_info_list.append(plugin_info)

        return plugin_info_list
            raise PublishValidationError(f"USD Plugin {usd_export} not found")
@@ -111,15 +111,13 @@ class SelectInvalidAction(pyblish.api.Action):
        except ImportError:
            raise ImportError("Current host is not Maya")

        errored_instances = get_errored_instances_from_context(context)

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
        errored_instances = get_errored_instances_from_context(context,
                                                               plugin=plugin)

        # Get the invalid nodes for the plug-ins
        self.log.info("Finding invalid nodes..")
        invalid = list()
        for instance in instances:
        for instance in errored_instances:
            invalid_nodes = plugin.get_invalid(instance)
            if invalid_nodes:
                if isinstance(invalid_nodes, (list, tuple)):
@@ -3,7 +3,6 @@
import os
from pprint import pformat
import sys
import platform
import uuid
import re
@@ -33,13 +32,11 @@ from openpype.pipeline import (
    load_container,
    registered_host,
)
from openpype.pipeline.create import (
    legacy_create,
    get_legacy_creator_by_name,
)
from openpype.lib import NumberDef
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.pipeline.create import CreateContext
from openpype.pipeline.context_tools import (
    get_current_asset_name,
    get_current_project_asset,
    get_current_project_name,
    get_current_task_name
)
@@ -123,16 +120,14 @@ FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}

RENDERLIKE_INSTANCE_FAMILIES = ["rendering", "vrayscene"]

DISPLAY_LIGHTS_VALUES = [
    "project_settings", "default", "all", "selected", "flat", "none"
]
DISPLAY_LIGHTS_LABELS = [
    "Use Project Settings",
    "Default Lighting",
    "All Lights",
    "Selected Lights",
    "Flat Lighting",
    "No Lights"

DISPLAY_LIGHTS_ENUM = [
    {"label": "Use Project Settings", "value": "project_settings"},
    {"label": "Default Lighting", "value": "default"},
    {"label": "All Lights", "value": "all"},
    {"label": "Selected Lights", "value": "selected"},
    {"label": "Flat Lighting", "value": "flat"},
    {"label": "No Lights", "value": "none"}
]
@@ -344,8 +339,8 @@ def pairwise(iterable):
    return zip(a, a)


def collect_animation_data(fps=False):
    """Get the basic animation data
def collect_animation_defs(fps=False):
    """Get the basic animation attribute defintions for the publisher.

    Returns:
        OrderedDict

@@ -364,17 +359,42 @@
    handle_end = frame_end_handle - frame_end

    # build attributes
    data = OrderedDict()
    data["frameStart"] = frame_start
    data["frameEnd"] = frame_end
    data["handleStart"] = handle_start
    data["handleEnd"] = handle_end
    data["step"] = 1.0
    defs = [
        NumberDef("frameStart",
                  label="Frame Start",
                  default=frame_start,
                  decimals=0),
        NumberDef("frameEnd",
                  label="Frame End",
                  default=frame_end,
                  decimals=0),
        NumberDef("handleStart",
                  label="Handle Start",
                  default=handle_start,
                  decimals=0),
        NumberDef("handleEnd",
                  label="Handle End",
                  default=handle_end,
                  decimals=0),
        NumberDef("step",
                  label="Step size",
                  tooltip="A smaller step size means more samples and larger "
                          "output files.\n"
                          "A 1.0 step size is a single sample every frame.\n"
                          "A 0.5 step size is two samples per frame.\n"
                          "A 0.2 step size is five samples per frame.",
                  default=1.0,
                  decimals=3),
    ]

    if fps:
        data["fps"] = mel.eval('currentTimeUnitToFPS()')
        current_fps = mel.eval('currentTimeUnitToFPS()')
        fps_def = NumberDef(
            "fps", label="FPS", default=current_fps, decimals=5
        )
        defs.append(fps_def)

    return data
    return defs


def imprint(node, data):
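A small sketch of how the new attribute definitions might be consumed, for instance from a Maya creator plug-in (this must run inside Maya, and only assumes the `key`/`label`/`default` fields that `NumberDef` exposes):

```python
from openpype.hosts.maya.api.lib import collect_animation_defs

# Each item is a NumberDef that the publisher renders as a numeric field;
# the artist's chosen values end up on the created instance.
for attr_def in collect_animation_defs(fps=True):
    print(attr_def.key, attr_def.label, attr_def.default)
```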
@@ -460,10 +480,10 @@ def lsattrs(attrs):
        attrs (dict): Name and value pairs of expected matches

    Example:
        >> # Return nodes with an `age` of five.
        >> lsattr({"age": "five"})
        >> # Return nodes with both `age` and `color` of five and blue.
        >> lsattr({"age": "five", "color": "blue"})
        >>> # Return nodes with an `age` of five.
        >>> lsattrs({"age": "five"})
        >>> # Return nodes with both `age` and `color` of five and blue.
        >>> lsattrs({"age": "five", "color": "blue"})

    Return:
        list: matching nodes.
@ -1523,7 +1543,15 @@ def set_attribute(attribute, value, node):
|
|||
cmds.addAttr(node, longName=attribute, **kwargs)
|
||||
|
||||
node_attr = "{}.{}".format(node, attribute)
|
||||
if "dataType" in kwargs:
|
||||
enum_type = cmds.attributeQuery(attribute, node=node, enum=True)
|
||||
if enum_type and value_type == "str":
|
||||
enum_string_values = cmds.attributeQuery(
|
||||
attribute, node=node, listEnum=True
|
||||
)[0].split(":")
|
||||
cmds.setAttr(
|
||||
"{}.{}".format(node, attribute), enum_string_values.index(value)
|
||||
)
|
||||
elif "dataType" in kwargs:
|
||||
attr_type = kwargs["dataType"]
|
||||
cmds.setAttr(node_attr, value, type=attr_type)
|
||||
else:
|
||||
|
|
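A small sketch of the enum-by-string behaviour the `set_attribute` hunk above adds; it assumes a running Maya session, and the node and attribute names are made up for illustration.

    from maya import cmds

    node = cmds.createNode("transform", name="exampleNode")
    cmds.addAttr(node, longName="quality", attributeType="enum",
                 enumName="low:medium:high")

    # Resolve the string label to its enum index, as set_attribute() does
    labels = cmds.attributeQuery(
        "quality", node=node, listEnum=True)[0].split(":")
    cmds.setAttr("{}.quality".format(node), labels.index("medium"))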
@@ -2297,8 +2325,8 @@ def reset_frame_range(playback=True, render=True, fps=True):
        cmds.currentTime(frame_start)

    if render:
        cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
        cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
        cmds.setAttr("defaultRenderGlobals.startFrame", animation_start)
        cmds.setAttr("defaultRenderGlobals.endFrame", animation_end)


def reset_scene_resolution():
@@ -2811,19 +2839,22 @@ def get_attr_in_layer(attr, layer):

def fix_incompatible_containers():
    """Backwards compatibility: old containers to use new ReferenceLoader"""

    old_loaders = {
        "MayaAsciiLoader",
        "AbcLoader",
        "ModelLoader",
        "CameraLoader",
        "RigLoader",
        "FBXLoader"
    }
    host = registered_host()
    for container in host.ls():
        loader = container['loader']

        print(container['loader'])

        if loader in ["MayaAsciiLoader",
                      "AbcLoader",
                      "ModelLoader",
                      "CameraLoader",
                      "RigLoader",
                      "FBXLoader"]:
        if loader in old_loaders:
            log.info(
                "Converting legacy container loader {} to "
                "ReferenceLoader: {}".format(loader, container["objectName"])
            )
            cmds.setAttr(container["objectName"] + ".loader",
                         "ReferenceLoader", type="string")
@@ -2951,7 +2982,7 @@ def _get_render_instances():
        list: list of instances

    """
    objectset = cmds.ls("*.id", long=True, type="objectSet",
    objectset = cmds.ls("*.id", long=True, exactType="objectSet",
                        recursive=True, objectsOnly=True)

    instances = []
@@ -3238,36 +3269,21 @@ def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None):


def set_colorspace():
    """Set Colorspace from project configuration
    """
    """Set Colorspace from project configuration"""

    # set color spaces for rendering space and view transforms
    def _colormanage(**kwargs):
        """Wrapper around `cmds.colorManagementPrefs`.

        This logs errors instead of raising an error so color management
        settings get applied as much as possible.

        """
        assert len(kwargs) == 1, "Must receive one keyword argument"
        try:
            cmds.colorManagementPrefs(edit=True, **kwargs)
            log.debug("Setting Color Management Preference: {}".format(kwargs))
        except RuntimeError as exc:
            log.error(exc)

    project_name = os.getenv("AVALON_PROJECT")
    project_name = get_current_project_name()
    imageio = get_project_settings(project_name)["maya"]["imageio"]

    # ocio compatibility variables
    ocio_v2_maya_version = 2022
    maya_version = int(cmds.about(version=True))
    ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version
    is_ocio_set = bool(os.environ.get("OCIO"))

    root_dict = {}
    use_workfile_settings = imageio.get("workfile", {}).get("enabled")

    if use_workfile_settings:
        root_dict = imageio["workfile"]
    else:
        # TODO: deprecated code from 3.15.5 - remove
        # Maya 2022+ introduces new OCIO v2 color management settings that
        # can override the old color management preferences. OpenPype has
@@ -3290,40 +3306,63 @@ def set_colorspace():
    if not isinstance(root_dict, dict):
        msg = "set_colorspace(): argument should be dictionary"
        log.error(msg)
        return

    else:
        root_dict = imageio["workfile"]
    # backward compatibility
    # TODO: deprecated code from 3.15.5 - remove with deprecated code above
    view_name = root_dict.get("viewTransform")
    if view_name is None:
        view_name = root_dict.get("viewName")

    log.debug(">> root_dict: {}".format(pformat(root_dict)))
    if not root_dict:
        return

    if root_dict:
        # enable color management
        cmds.colorManagementPrefs(e=True, cmEnabled=True)
        cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True)
    # set color spaces for rendering space and view transforms
    def _colormanage(**kwargs):
        """Wrapper around `cmds.colorManagementPrefs`.

        # backward compatibility
        # TODO: deprecated code from 3.15.5 - refactor to use new settings
        view_name = root_dict.get("viewTransform")
        if view_name is None:
            view_name = root_dict.get("viewName")
        This logs errors instead of raising an error so color management
        settings get applied as much as possible.

        if use_ocio_v2:
            # Use Maya 2022+ default OCIO v2 config
        """
        assert len(kwargs) == 1, "Must receive one keyword argument"
        try:
            cmds.colorManagementPrefs(edit=True, **kwargs)
            log.debug("Setting Color Management Preference: {}".format(kwargs))
        except RuntimeError as exc:
            log.error(exc)

    # enable color management
    cmds.colorManagementPrefs(edit=True, cmEnabled=True)
    cmds.colorManagementPrefs(edit=True, ocioRulesEnabled=True)

    if use_ocio_v2:
        log.info("Using Maya OCIO v2")
        if not is_ocio_set:
            # Set the Maya 2022+ default OCIO v2 config file path
            log.info("Setting default Maya OCIO v2 config")
            cmds.colorManagementPrefs(edit=True, configFilePath="")
            # Note: Setting "" as value also sets this default however
            # introduces a bug where launching a file on startup will prompt
            # to save the empty scene before it, so we set using the path.
            # This value has been the same for 2022, 2023 and 2024
            path = "<MAYA_RESOURCES>/OCIO-configs/Maya2022-default/config.ocio"
            cmds.colorManagementPrefs(edit=True, configFilePath=path)

        # set rendering space and view transform
        _colormanage(renderingSpaceName=root_dict["renderSpace"])
        _colormanage(viewName=view_name)
        _colormanage(displayName=root_dict["displayName"])
    else:
        # set rendering space and view transform
        _colormanage(renderingSpaceName=root_dict["renderSpace"])
        _colormanage(viewName=view_name)
        _colormanage(displayName=root_dict["displayName"])
    else:
        log.info("Using Maya OCIO v1 (legacy)")
        if not is_ocio_set:
            # Set the Maya default config file path
            log.info("Setting default Maya OCIO v1 legacy config")
            cmds.colorManagementPrefs(edit=True, configFilePath="legacy")

        # set rendering space and view transform
        _colormanage(renderingSpaceName=root_dict["renderSpace"])
        _colormanage(viewTransformName=view_name)
        # set rendering space and view transform
        _colormanage(renderingSpaceName=root_dict["renderSpace"])
        _colormanage(viewTransformName=view_name)


@contextlib.contextmanager
@@ -3966,6 +4005,71 @@ def get_capture_preset(task_name, task_type, subset, project_settings, log):
    return capture_preset or {}


def get_reference_node(members, log=None):
    """Get the reference node from the container members

    Args:
        members: list of node names

    Returns:
        str: Reference node name.

    """

    # Collect the references without .placeHolderList[] attributes as
    # unique entries (objects only) and skipping the sharedReferenceNode.
    references = set()
    for ref in cmds.ls(members, exactType="reference", objectsOnly=True):

        # Ignore any `:sharedReferenceNode`
        if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
            continue

        # Ignore _UNKNOWN_REF_NODE_ (PLN-160)
        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
            continue

        references.add(ref)

    assert references, "No reference node found in container"

    # Get highest reference node (least parents)
    highest = min(references,
                  key=lambda x: len(get_reference_node_parents(x)))

    # Warn the user when we're taking the highest reference node
    if len(references) > 1:
        if not log:
            log = logging.getLogger(__name__)

        log.warning("More than one reference node found in "
                    "container, using highest reference node: "
                    "%s (in: %s)", highest, list(references))

    return highest


def get_reference_node_parents(ref):
    """Return all parent reference nodes of reference node

    Args:
        ref (str): reference node.

    Returns:
        list: The upstream parent reference nodes.

    """
    parent = cmds.referenceQuery(ref,
                                 referenceNode=True,
                                 parent=True)
    parents = []
    while parent:
        parents.append(parent)
        parent = cmds.referenceQuery(parent,
                                     referenceNode=True,
                                     parent=True)
    return parents
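A pure-Python illustration of the selection rule in `get_reference_node` above: among the candidate reference nodes, the one with the fewest upstream parents wins. The parent mapping here is fabricated stand-in data, not Maya output.

    parents_by_ref = {
        "assetA_RN": [],                      # top-level reference
        "assetA_textures_RN": ["assetA_RN"],  # nested one level deep
    }

    highest = min(parents_by_ref, key=lambda ref: len(parents_by_ref[ref]))
    assert highest == "assetA_RN"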
def create_rig_animation_instance(
    nodes, context, namespace, options=None, log=None
):

@@ -4003,12 +4107,10 @@ def create_rig_animation_instance(
    )
    assert roots, "No root nodes in rig, this is a bug."

    asset = legacy_io.Session["AVALON_ASSET"]
    dependency = str(context["representation"]["_id"])

    custom_subset = options.get("animationSubsetName")
    if custom_subset:
        formatting_data = {
            # TODO remove 'asset_type' and replace 'asset_name' with 'asset'
            "asset_name": context['asset']['name'],
            "asset_type": context['asset']['type'],
            "subset": context['subset']['name'],

@@ -4026,14 +4128,17 @@ def create_rig_animation_instance(
    if log:
        log.info("Creating subset: {}".format(namespace))

    # Fill creator identifier
    creator_identifier = "io.openpype.creators.maya.animation"

    host = registered_host()
    create_context = CreateContext(host)

    # Create the animation instance
    creator_plugin = get_legacy_creator_by_name("CreateAnimation")
    with maintained_selection():
        cmds.select([output, controls] + roots, noExpand=True)
        legacy_create(
            creator_plugin,
            name=namespace,
            asset=asset,
            options={"useSelection": True},
            data={"dependencies": dependency}
        create_context.create(
            creator_identifier=creator_identifier,
            variant=namespace,
            pre_create_data={"use_selection": True}
        )

@@ -177,7 +177,7 @@ def get(layer, render_instance=None):
    }.get(renderer_name.lower(), None)
    if renderer is None:
        raise UnsupportedRendererException(
            "unsupported {}".format(renderer_name)
            "Unsupported renderer: {}".format(renderer_name)
        )

    return renderer(layer, render_instance)
@@ -274,12 +274,14 @@ class ARenderProducts:
                "Unsupported renderer {}".format(self.renderer)
            )

        # Note: When this attribute is never set (e.g. on maya launch) then
        # this can return None even though it is a string attribute
        prefix = self._get_attr(prefix_attr)

        if not prefix:
            # Fall back to scene name by default
            log.debug("Image prefix not set, using <Scene>")
            file_prefix = "<Scene>"
            log.warning("Image prefix not set, using <Scene>")
            prefix = "<Scene>"

        return prefix

@@ -66,10 +66,12 @@ def install():

    cmds.menuItem(divider=True)

    # Create default items
    cmds.menuItem(
        "Create...",
        command=lambda *args: host_tools.show_creator(parent=parent_widget)
        command=lambda *args: host_tools.show_publisher(
            parent=parent_widget,
            tab="create"
        )
    )

    cmds.menuItem(

@@ -82,8 +84,9 @@ def install():

    cmds.menuItem(
        "Publish...",
        command=lambda *args: host_tools.show_publish(
            parent=parent_widget
        command=lambda *args: host_tools.show_publisher(
            parent=parent_widget,
            tab="publish"
        ),
        image=pyblish_icon
    )

@@ -1,7 +1,10 @@
import json
import base64
import os
import errno
import logging
import contextlib
import shutil

from maya import utils, cmds, OpenMaya
import maya.api.OpenMaya as om

@@ -13,6 +16,7 @@ from openpype.host import (
    HostBase,
    IWorkfileHost,
    ILoadHost,
    IPublishHost,
    HostDirmap,
)
from openpype.tools.utils import host_tools

@@ -63,7 +67,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
AVALON_CONTAINERS = ":AVALON_CONTAINERS"


class MayaHost(HostBase, IWorkfileHost, ILoadHost):
class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
    name = "maya"

    def __init__(self):

@@ -113,7 +117,10 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
        register_event_callback("taskChanged", on_task_changed)
        register_event_callback("workfile.open.before", before_workfile_open)
        register_event_callback("workfile.save.before", before_workfile_save)
        register_event_callback("workfile.save.before", after_workfile_save)
        register_event_callback(
            "workfile.save.before", workfile_save_before_xgen
        )
        register_event_callback("workfile.save.after", after_workfile_save)

    def open_workfile(self, filepath):
        return open_file(filepath)

@@ -146,6 +153,20 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
        with lib.maintained_selection():
            yield

    def get_context_data(self):
        data = cmds.fileInfo("OpenPypeContext", query=True)
        if not data:
            return {}

        data = data[0]  # Maya seems to return a list
        decoded = base64.b64decode(data).decode("utf-8")
        return json.loads(decoded)

    def update_context_data(self, data, changes):
        json_str = json.dumps(data)
        encoded = base64.b64encode(json_str.encode("utf-8"))
        return cmds.fileInfo("OpenPypeContext", encoded)
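The `fileInfo` payload round trip used by `get_context_data` and `update_context_data` above, shown without Maya: context data is JSON-encoded and then base64-encoded so it survives as a plain string value on the file node.

    import base64
    import json

    context = {"project_name": "demo", "publish_attributes": {}}

    # Write side (update_context_data)
    encoded = base64.b64encode(json.dumps(context).encode("utf-8"))

    # Read side (get_context_data)
    decoded = json.loads(base64.b64decode(encoded).decode("utf-8"))
    assert decoded == context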
    def _register_callbacks(self):
        for handler, event in self._op_events.copy().items():
            if event is None:

@@ -480,18 +501,16 @@ def on_init():
    # Force load objExport plug-in (requested by artists)
    cmds.loadPlugin("objExport", quiet=True)

    from .customize import (
        override_component_mask_commands,
        override_toolbox_ui
    )
    safe_deferred(override_component_mask_commands)

    launch_workfiles = os.environ.get("WORKFILES_STARTUP")

    if launch_workfiles:
        safe_deferred(host_tools.show_workfiles)

    if not lib.IS_HEADLESS:
        launch_workfiles = os.environ.get("WORKFILES_STARTUP")
        if launch_workfiles:
            safe_deferred(host_tools.show_workfiles)

        from .customize import (
            override_component_mask_commands,
            override_toolbox_ui
        )
        safe_deferred(override_component_mask_commands)
        safe_deferred(override_toolbox_ui)

@@ -549,37 +568,29 @@ def on_save():
    Any transform of a mesh, without an existing ID, is given one
    automatically on file save.
    """

    log.info("Running callback on save..")
    # remove lockfile if users jumps over from one scene to another
    _remove_workfile_lock()

    # # Update current task for the current scene
    # update_task_from_path(cmds.file(query=True, sceneName=True))

    # Generate ids of the current context on nodes in the scene
    nodes = lib.get_id_required_nodes(referenced_nodes=False)
    for node, new_id in lib.generate_ids(nodes):
        lib.set_id(node, new_id, overwrite=False)


def _update_render_layer_observers():
    # Helper to trigger update for all renderlayer observer logic
    lib.remove_render_layer_observer()
    lib.add_render_layer_observer()
    lib.add_render_layer_change_observer()


def on_open():
    """On scene open let's assume the containers have changed."""

    from qtpy import QtWidgets
    from openpype.widgets import popup

    cmds.evalDeferred(
        "from openpype.hosts.maya.api import lib;"
        "lib.remove_render_layer_observer()")
    cmds.evalDeferred(
        "from openpype.hosts.maya.api import lib;"
        "lib.add_render_layer_observer()")
    cmds.evalDeferred(
        "from openpype.hosts.maya.api import lib;"
        "lib.add_render_layer_change_observer()")
    # # Update current task for the current scene
    # update_task_from_path(cmds.file(query=True, sceneName=True))
    utils.executeDeferred(_update_render_layer_observers)

    # Validate FPS after update_task_from_path to
    # ensure it is using correct FPS for the asset

@@ -590,10 +601,7 @@ def on_open():
    log.warning("Scene has outdated content.")

    # Find maya main window
    top_level_widgets = {w.objectName(): w for w in
                         QtWidgets.QApplication.topLevelWidgets()}
    parent = top_level_widgets.get("MayaWindow", None)

    parent = lib.get_main_window()
    if parent is None:
        log.info("Skipping outdated content pop-up "
                 "because Maya window can't be found.")
@@ -618,16 +626,9 @@ def on_new():
    """Set project resolution and fps when create a new file"""
    log.info("Running callback on new..")
    with lib.suspended_refresh():
        cmds.evalDeferred(
            "from openpype.hosts.maya.api import lib;"
            "lib.remove_render_layer_observer()")
        cmds.evalDeferred(
            "from openpype.hosts.maya.api import lib;"
            "lib.add_render_layer_observer()")
        cmds.evalDeferred(
            "from openpype.hosts.maya.api import lib;"
            "lib.add_render_layer_change_observer()")
        lib.set_context_settings()

    utils.executeDeferred(_update_render_layer_observers)
    _remove_workfile_lock()

|
@ -681,6 +682,91 @@ def before_workfile_save(event):
|
|||
create_workspace_mel(workdir_path, project_name)
|
||||
|
||||
|
||||
def workfile_save_before_xgen(event):
|
||||
"""Manage Xgen external files when switching context.
|
||||
|
||||
Xgen has various external files that needs to be unique and relative to the
|
||||
workfile, so we need to copy and potentially overwrite these files when
|
||||
switching context.
|
||||
|
||||
Args:
|
||||
event (Event) - openpype/lib/events.py
|
||||
"""
|
||||
if not cmds.pluginInfo("xgenToolkit", query=True, loaded=True):
|
||||
return
|
||||
|
||||
import xgenm
|
||||
|
||||
current_work_dir = legacy_io.Session["AVALON_WORKDIR"].replace("\\", "/")
|
||||
expected_work_dir = event.data["workdir_path"].replace("\\", "/")
|
||||
if current_work_dir == expected_work_dir:
|
||||
return
|
||||
|
||||
palettes = cmds.ls(type="xgmPalette", long=True)
|
||||
if not palettes:
|
||||
return
|
||||
|
||||
transfers = []
|
||||
overwrites = []
|
||||
attribute_changes = {}
|
||||
attrs = ["xgFileName", "xgBaseFile"]
|
||||
for palette in palettes:
|
||||
sanitized_palette = palette.replace("|", "")
|
||||
project_path = xgenm.getAttr("xgProjectPath", sanitized_palette)
|
||||
_, maya_extension = os.path.splitext(event.data["filename"])
|
||||
|
||||
for attr in attrs:
|
||||
node_attr = "{}.{}".format(palette, attr)
|
||||
attr_value = cmds.getAttr(node_attr)
|
||||
|
||||
if not attr_value:
|
||||
continue
|
||||
|
||||
source = os.path.join(project_path, attr_value)
|
||||
|
||||
attr_value = event.data["filename"].replace(
|
||||
maya_extension,
|
||||
"__{}{}".format(
|
||||
sanitized_palette.replace(":", "__"),
|
||||
os.path.splitext(attr_value)[1]
|
||||
)
|
||||
)
|
||||
target = os.path.join(expected_work_dir, attr_value)
|
||||
|
||||
transfers.append((source, target))
|
||||
attribute_changes[node_attr] = attr_value
|
||||
|
||||
relative_path = xgenm.getAttr(
|
||||
"xgDataPath", sanitized_palette
|
||||
).split(os.pathsep)[0]
|
||||
absolute_path = relative_path.replace("${PROJECT}", project_path)
|
||||
for root, _, files in os.walk(absolute_path):
|
||||
for f in files:
|
||||
source = os.path.join(root, f).replace("\\", "/")
|
||||
target = source.replace(project_path, expected_work_dir + "/")
|
||||
transfers.append((source, target))
|
||||
if os.path.exists(target):
|
||||
overwrites.append(target)
|
||||
|
||||
# Ask user about overwriting files.
|
||||
if overwrites:
|
||||
log.warning(
|
||||
"WARNING! Potential loss of data.\n\n"
|
||||
"Found duplicate Xgen files in new context.\n{}".format(
|
||||
"\n".join(overwrites)
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
for source, destination in transfers:
|
||||
if not os.path.exists(os.path.dirname(destination)):
|
||||
os.makedirs(os.path.dirname(destination))
|
||||
shutil.copy(source, destination)
|
||||
|
||||
for attribute, value in attribute_changes.items():
|
||||
cmds.setAttr(attribute, value, type="string")
|
||||
|
||||
|
||||
def after_workfile_save(event):
|
||||
workfile_name = event["filename"]
|
||||
if (
|
||||
|
|
|
|||
|
|
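A pure-Python sketch of the per-palette rename scheme in `workfile_save_before_xgen` above: the palette file is keyed to the new workfile name so each context gets a unique copy. The file and palette names below are fabricated examples.

    import os

    workfile = "shot010_animation_v002.ma"
    palette = "|char_hair"  # Maya node name; '|' is stripped below

    _, maya_ext = os.path.splitext(workfile)
    xg_file = "collection.xgen"

    new_name = workfile.replace(
        maya_ext,
        "__{}{}".format(palette.replace("|", "").replace(":", "__"),
                        os.path.splitext(xg_file)[1])
    )
    assert new_name == "shot010_animation_v002__char_hair.xgen"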
@@ -1,87 +1,57 @@
import json
import os
import re

from maya import cmds
from abc import ABCMeta

import qargparse
import six
from maya import cmds
from maya.app.renderSetup.model import renderSetup

from openpype.lib import Logger
from openpype.lib import BoolDef, Logger
from openpype.pipeline import AVALON_CONTAINER_ID, Anatomy, CreatedInstance
from openpype.pipeline import Creator as NewCreator
from openpype.pipeline import (
    LegacyCreator,
    LoaderPlugin,
    get_representation_path,
    AVALON_CONTAINER_ID,
    Anatomy,
)
    CreatorError, LegacyCreator, LoaderPlugin, get_representation_path,
    legacy_io)
from openpype.pipeline.load import LoadError
from openpype.settings import get_project_settings
from .pipeline import containerise

from . import lib
from .lib import imprint, read
from .pipeline import containerise

log = Logger.get_logger()


def get_reference_node(members, log=None):
def _get_attr(node, attr, default=None):
    """Helper to get attribute which allows attribute to not exist."""
    if not cmds.attributeQuery(attr, node=node, exists=True):
        return default
    return cmds.getAttr("{}.{}".format(node, attr))


# Backwards compatibility: these functions have been moved to lib.
def get_reference_node(*args, **kwargs):
    """Get the reference node from the container members

    Args:
        members: list of node names

    Returns:
        str: Reference node name.

    Deprecated:
        This function was moved and will be removed in 3.16.x.
    """

    # Collect the references without .placeHolderList[] attributes as
    # unique entries (objects only) and skipping the sharedReferenceNode.
    references = set()
    for ref in cmds.ls(members, exactType="reference", objectsOnly=True):

        # Ignore any `:sharedReferenceNode`
        if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
            continue

        # Ignore _UNKNOWN_REF_NODE_ (PLN-160)
        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
            continue

        references.add(ref)

    assert references, "No reference node found in container"

    # Get highest reference node (least parents)
    highest = min(references,
                  key=lambda x: len(get_reference_node_parents(x)))

    # Warn the user when we're taking the highest reference node
    if len(references) > 1:
        if not log:
            log = Logger.get_logger(__name__)

        log.warning("More than one reference node found in "
                    "container, using highest reference node: "
                    "%s (in: %s)", highest, list(references))

    return highest
    msg = "Function 'get_reference_node' has been moved."
    log.warning(msg)
    cmds.warning(msg)
    return lib.get_reference_node(*args, **kwargs)


def get_reference_node_parents(ref):
    """Return all parent reference nodes of reference node

    Args:
        ref (str): reference node.

    Returns:
        list: The upstream parent reference nodes.

def get_reference_node_parents(*args, **kwargs):
    """
    parent = cmds.referenceQuery(ref,
                                 referenceNode=True,
                                 parent=True)
    parents = []
    while parent:
        parents.append(parent)
        parent = cmds.referenceQuery(parent,
                                     referenceNode=True,
                                     parent=True)
    return parents
    Deprecated:
        This function was moved and will be removed in 3.16.x.
    """
    msg = "Function 'get_reference_node_parents' has been moved."
    log.warning(msg)
    cmds.warning(msg)
    return lib.get_reference_node_parents(*args, **kwargs)


class Creator(LegacyCreator):
@@ -100,6 +70,379 @@ class Creator(LegacyCreator):
        return instance


@six.add_metaclass(ABCMeta)
class MayaCreatorBase(object):

    @staticmethod
    def cache_subsets(shared_data):
        """Cache instances for Creators to shared data.

        Create `maya_cached_subsets` key when needed in shared data and
        fill it with all collected instances from the scene under its
        respective creator identifiers.

        If legacy instances are detected in the scene, create
        `maya_cached_legacy_subsets` there and fill it with
        all legacy subsets under family as a key.

        Args:
            Dict[str, Any]: Shared data.

        Return:
            Dict[str, Any]: Shared data dictionary.

        """
        if shared_data.get("maya_cached_subsets") is None:
            cache = dict()
            cache_legacy = dict()

            for node in cmds.ls(type="objectSet"):

                if _get_attr(node, attr="id") != "pyblish.avalon.instance":
                    continue

                creator_id = _get_attr(node, attr="creator_identifier")
                if creator_id is not None:
                    # creator instance
                    cache.setdefault(creator_id, []).append(node)
                else:
                    # legacy instance
                    family = _get_attr(node, attr="family")
                    if family is None:
                        # must be a broken instance
                        continue

                    cache_legacy.setdefault(family, []).append(node)

            shared_data["maya_cached_subsets"] = cache
            shared_data["maya_cached_legacy_subsets"] = cache_legacy
        return shared_data

    def imprint_instance_node(self, node, data):

        # We never store the instance_node as value on the node since
        # it's the node name itself
        data.pop("instance_node", None)

        # We store creator attributes at the root level and assume they
        # will not clash in names with `subset`, `task`, etc. and other
        # default names. This is just so these attributes in many cases
        # are still editable in the maya UI by artists.
        # pop to move to end of dict to sort attributes last on the node
        creator_attributes = data.pop("creator_attributes", {})
        data.update(creator_attributes)

        # We know the "publish_attributes" will be complex data of
        # settings per plugins, we'll store this as a flattened json structure
        # pop to move to end of dict to sort attributes last on the node
        data["publish_attributes"] = json.dumps(
            data.pop("publish_attributes", {})
        )

        # Since we flattened the data structure for creator attributes we want
        # to correctly detect which flattened attributes should end back in the
        # creator attributes when reading the data from the node, so we store
        # the relevant keys as a string
        data["__creator_attributes_keys"] = ",".join(creator_attributes.keys())

        # Kill any existing attributes just so we can imprint cleanly again
        for attr in data.keys():
            if cmds.attributeQuery(attr, node=node, exists=True):
                cmds.deleteAttr("{}.{}".format(node, attr))

        return imprint(node, data)

    def read_instance_node(self, node):
        node_data = read(node)

        # Never care about a cbId attribute on the object set
        # being read as 'data'
        node_data.pop("cbId", None)

        # Move the relevant attributes into "creator_attributes" that
        # we flattened originally
        node_data["creator_attributes"] = {}
        creator_attribute_keys = node_data.pop("__creator_attributes_keys",
                                               "").split(",")
        for key in creator_attribute_keys:
            if key in node_data:
                node_data["creator_attributes"][key] = node_data.pop(key)

        publish_attributes = node_data.get("publish_attributes")
        if publish_attributes:
            node_data["publish_attributes"] = json.loads(publish_attributes)

        # Explicitly re-parse the node name
        node_data["instance_node"] = node

        return node_data
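A pure-Python sketch of the imprint/read round trip defined above: creator attributes are flattened to the node's top level on write, and restored into "creator_attributes" on read via the stored key list.

    import json

    data = {
        "subset": "animationMain",
        "creator_attributes": {"frameStart": 1001, "frameEnd": 1100},
        "publish_attributes": {"ValidateMeshUVs": {"active": True}},
    }

    # Write side (imprint_instance_node)
    flat = dict(data)
    creator_attributes = flat.pop("creator_attributes")
    flat.update(creator_attributes)
    flat["publish_attributes"] = json.dumps(flat["publish_attributes"])
    flat["__creator_attributes_keys"] = ",".join(creator_attributes)

    # Read side (read_instance_node)
    keys = flat.pop("__creator_attributes_keys").split(",")
    restored = dict(flat)
    restored["creator_attributes"] = {k: restored.pop(k) for k in keys}
    restored["publish_attributes"] = json.loads(restored["publish_attributes"])
    assert restored == data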
@six.add_metaclass(ABCMeta)
class MayaCreator(NewCreator, MayaCreatorBase):

    def create(self, subset_name, instance_data, pre_create_data):

        members = list()
        if pre_create_data.get("use_selection"):
            members = cmds.ls(selection=True)

        with lib.undo_chunk():
            instance_node = cmds.sets(members, name=subset_name)
            instance_data["instance_node"] = instance_node
            instance = CreatedInstance(
                self.family,
                subset_name,
                instance_data,
                self)
            self._add_instance_to_context(instance)

            self.imprint_instance_node(instance_node,
                                       data=instance.data_to_store())
            return instance

    def collect_instances(self):
        self.cache_subsets(self.collection_shared_data)
        cached_subsets = self.collection_shared_data["maya_cached_subsets"]
        for node in cached_subsets.get(self.identifier, []):
            node_data = self.read_instance_node(node)

            created_instance = CreatedInstance.from_existing(node_data, self)
            self._add_instance_to_context(created_instance)

    def update_instances(self, update_list):
        for created_inst, _changes in update_list:
            data = created_inst.data_to_store()
            node = data.get("instance_node")

            self.imprint_instance_node(node, data)

    def remove_instances(self, instances):
        """Remove specified instance from the scene.

        This is only removing `id` parameter so instance is no longer
        instance, because it might contain valuable data for artist.

        """
        for instance in instances:
            node = instance.data.get("instance_node")
            if node:
                cmds.delete(node)

            self._remove_instance_from_context(instance)

    def get_pre_create_attr_defs(self):
        return [
            BoolDef("use_selection",
                    label="Use selection",
                    default=True)
        ]


def ensure_namespace(namespace):
    """Make sure the namespace exists.

    Args:
        namespace (str): The preferred namespace name.

    Returns:
        str: The generated or existing namespace

    """
    exists = cmds.namespace(exists=namespace)
    if exists:
        return namespace
    else:
        return cmds.namespace(add=namespace)


class RenderlayerCreator(NewCreator, MayaCreatorBase):
    """Creator which creates an instance per renderlayer in the workfile.

    Create and manages renderlayer subset per renderLayer in workfile.
    This generates a singleton node in the scene which, if it exists, tells the
    Creator to collect Maya rendersetup renderlayers as individual instances.
    As such, triggering create doesn't actually create the instance node per
    layer but only the node which tells the Creator it may now collect
    an instance per renderlayer.

    """

    # These are required to be overridden in subclass
    singleton_node_name = ""

    # These are optional to be overridden in subclass
    layer_instance_prefix = None

    def _get_singleton_node(self, return_all=False):
        nodes = lib.lsattr("pre_creator_identifier", self.identifier)
        if nodes:
            return nodes if return_all else nodes[0]

    def create(self, subset_name, instance_data, pre_create_data):
        # A Renderlayer is never explicitly created using the create method.
        # Instead, renderlayers from the scene are collected. Thus "create"
        # would only ever be called to say, 'hey, please refresh collect'
        self.create_singleton_node()

        # if no render layers are present, create default one with
        # asterisk selector
        rs = renderSetup.instance()
        if not rs.getRenderLayers():
            render_layer = rs.createRenderLayer("Main")
            collection = render_layer.createCollection("defaultCollection")
            collection.getSelector().setPattern('*')

        # By RenderLayerCreator.create we make it so that the renderlayer
        # instances directly appear even though it just collects scene
        # renderlayers. This doesn't actually 'create' any scene contents.
        self.collect_instances()

    def create_singleton_node(self):
        if self._get_singleton_node():
            raise CreatorError("A Render instance already exists - only "
                               "one can be configured.")

        with lib.undo_chunk():
            node = cmds.sets(empty=True, name=self.singleton_node_name)
            lib.imprint(node, data={
                "pre_creator_identifier": self.identifier
            })

        return node

    def collect_instances(self):

        # We only collect if the global render instance exists
        if not self._get_singleton_node():
            return

        rs = renderSetup.instance()
        layers = rs.getRenderLayers()
        for layer in layers:
            layer_instance_node = self.find_layer_instance_node(layer)
            if layer_instance_node:
                data = self.read_instance_node(layer_instance_node)
                instance = CreatedInstance.from_existing(data, creator=self)
            else:
                # No existing scene instance node for this layer. Note that
                # this instance will not have the `instance_node` data yet
                # until it's been saved/persisted at least once.
                # TODO: Correctly define the subset name using templates
                prefix = self.layer_instance_prefix or self.family
                subset_name = "{}{}".format(prefix, layer.name())
                instance_data = {
                    "asset": legacy_io.Session["AVALON_ASSET"],
                    "task": legacy_io.Session["AVALON_TASK"],
                    "variant": layer.name(),
                }
                instance = CreatedInstance(
                    family=self.family,
                    subset_name=subset_name,
                    data=instance_data,
                    creator=self
                )

            instance.transient_data["layer"] = layer
            self._add_instance_to_context(instance)

    def find_layer_instance_node(self, layer):
        connected_sets = cmds.listConnections(
            "{}.message".format(layer.name()),
            source=False,
            destination=True,
            type="objectSet"
        ) or []

        for node in connected_sets:
            if not cmds.attributeQuery("creator_identifier",
                                       node=node,
                                       exists=True):
                continue

            creator_identifier = cmds.getAttr(node + ".creator_identifier")
            if creator_identifier == self.identifier:
                self.log.info(f"Found node: {node}")
                return node

    def _create_layer_instance_node(self, layer):

        # We only collect if a CreateRender instance exists
        create_render_set = self._get_singleton_node()
        if not create_render_set:
            raise CreatorError("Creating a renderlayer instance node is not "
                               "allowed if no 'CreateRender' instance exists")

        namespace = "_{}".format(self.singleton_node_name)
        namespace = ensure_namespace(namespace)

        name = "{}:{}".format(namespace, layer.name())
        render_set = cmds.sets(name=name, empty=True)

        # Keep an active link with the renderlayer so we can retrieve it
        # later by a physical maya connection instead of relying on the layer
        # name
        cmds.addAttr(render_set, longName="renderlayer", at="message")
        cmds.connectAttr("{}.message".format(layer.name()),
                         "{}.renderlayer".format(render_set), force=True)

        # Add the set to the 'CreateRender' set.
        cmds.sets(render_set, forceElement=create_render_set)

        return render_set

    def update_instances(self, update_list):
        # We only generate the persisting layer data into the scene once
        # we save with the UI on e.g. validate or publish
        for instance, _changes in update_list:
            instance_node = instance.data.get("instance_node")

            # Ensure a node exists to persist the data to
            if not instance_node:
                layer = instance.transient_data["layer"]
                instance_node = self._create_layer_instance_node(layer)
                instance.data["instance_node"] = instance_node

            self.imprint_instance_node(instance_node,
                                       data=instance.data_to_store())

    def imprint_instance_node(self, node, data):
        # Do not ever try to update the `renderlayer` since it'll try
        # to remove the attribute and recreate it but fail to keep it a
        # message attribute link. We only ever imprint that on the initial
        # node creation.
        # TODO: Improve how this is handled
        data.pop("renderlayer", None)
        data.get("creator_attributes", {}).pop("renderlayer", None)

        return super(RenderlayerCreator, self).imprint_instance_node(node,
                                                                     data=data)

    def remove_instances(self, instances):
        """Remove specified instances from the scene.

        This is only removing `id` parameter so instance is no longer
        instance, because it might contain valuable data for artist.

        """
        # Instead of removing the single instance or renderlayers we instead
        # remove the CreateRender node this creator relies on to decide whether
        # it should collect anything at all.
        nodes = self._get_singleton_node(return_all=True)
        if nodes:
            cmds.delete(nodes)

        # Remove ALL the instances even if only one gets deleted
        for instance in list(self.create_context.instances):
            if instance.get("creator_identifier") == self.identifier:
                self._remove_instance_from_context(instance)

                # Remove the stored settings per renderlayer too
                node = instance.data.get("instance_node")
                if node and cmds.objExists(node):
                    cmds.delete(node)
class Loader(LoaderPlugin):
    hosts = ["maya"]


@@ -205,7 +548,7 @@ class ReferenceLoader(Loader):
        if not nodes:
            return

        ref_node = get_reference_node(nodes, self.log)
        ref_node = lib.get_reference_node(nodes, self.log)
        container = containerise(
            name=name,
            namespace=namespace,

@@ -226,6 +569,7 @@ class ReferenceLoader(Loader):

    def update(self, container, representation):
        from maya import cmds

        from openpype.hosts.maya.api.lib import get_container_members

        node = container["objectName"]

@@ -234,7 +578,7 @@ class ReferenceLoader(Loader):

        # Get reference node from container members
        members = get_container_members(node)
        reference_node = get_reference_node(members, self.log)
        reference_node = lib.get_reference_node(members, self.log)
        namespace = cmds.referenceQuery(reference_node, namespace=True)

        file_type = {

@@ -382,7 +726,7 @@ class ReferenceLoader(Loader):

        # Assume asset has been referenced
        members = cmds.sets(node, query=True)
        reference_node = get_reference_node(members, self.log)
        reference_node = lib.get_reference_node(members, self.log)

        assert reference_node, ("Imported container not supported; "
                                "container must be referenced.")

@@ -15,7 +15,6 @@ import contextlib
from maya import cmds
from maya.app.renderSetup.model import renderSetup

# from colorbleed.maya import lib
from .lib import pairwise

@@ -14,7 +14,7 @@ from openpype.tools.workfile_template_build import (
    WorkfileBuildPlaceholderDialog,
)

from .lib import read, imprint
from .lib import read, imprint, get_reference_node, get_main_window

PLACEHOLDER_SET = "PLACEHOLDERS_SET"

@@ -173,44 +173,37 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):

    def create_placeholder(self, placeholder_data):
        selection = cmds.ls(selection=True)
        if not selection:
            raise ValueError("Nothing is selected")
        if len(selection) > 1:
            raise ValueError("More then one item are selected")

        parent = selection[0] if selection else None

        placeholder_data["plugin_identifier"] = self.identifier

        placeholder_name = self._create_placeholder_name(placeholder_data)

        placeholder = cmds.spaceLocator(name=placeholder_name)[0]
        # TODO: this can crash if selection can't be used
        cmds.parent(placeholder, selection[0])
        if parent:
            placeholder = cmds.parent(placeholder, selection[0])[0]

        # get the long name of the placeholder (with the groups)
        placeholder_full_name = (
            cmds.ls(selection[0], long=True)[0]
            + "|"
            + placeholder.replace("|", "")
        )

        imprint(placeholder_full_name, placeholder_data)
        imprint(placeholder, placeholder_data)

        # Add helper attributes to keep placeholder info
        cmds.addAttr(
            placeholder_full_name,
            placeholder,
            longName="parent",
            hidden=True,
            dataType="string"
        )
        cmds.addAttr(
            placeholder_full_name,
            placeholder,
            longName="index",
            hidden=True,
            attributeType="short",
            defaultValue=-1
        )

        cmds.setAttr(placeholder_full_name + ".parent", "", type="string")
        cmds.setAttr(placeholder + ".parent", "", type="string")

    def update_placeholder(self, placeholder_item, placeholder_data):
        node_name = placeholder_item.scene_identifier

@@ -233,7 +226,7 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
            if placeholder_data.get("plugin_identifier") != self.identifier:
                continue

            # TODO do data validations and maybe updgrades if are invalid
            # TODO do data validations and maybe upgrades if they are invalid
            output.append(
                LoadPlaceholderItem(node_name, placeholder_data, self)
            )

@@ -250,15 +243,19 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
    def get_placeholder_options(self, options=None):
        return self.get_load_plugin_options(options)

    def cleanup_placeholder(self, placeholder, failed):
    def post_placeholder_process(self, placeholder, failed):
        """Hide placeholder, add them to placeholder set
        """
        node = placeholder._scene_identifier
        node = placeholder.scene_identifier

        cmds.sets(node, addElement=PLACEHOLDER_SET)
        cmds.hide(node)
        cmds.setAttr(node + ".hiddenInOutliner", True)

    def delete_placeholder(self, placeholder):
        """Remove placeholder if building was successful"""
        cmds.delete(placeholder.scene_identifier)

    def load_succeed(self, placeholder, container):
        self._parent_in_hierarchy(placeholder, container)

@@ -275,9 +272,24 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
            return

        roots = cmds.sets(container, q=True)
        ref_node = None
        try:
            ref_node = get_reference_node(roots)
        except AssertionError as e:
            self.log.info(e.args[0])

        nodes_to_parent = []
        for root in roots:
            if ref_node:
                ref_root = cmds.referenceQuery(root, nodes=True)[0]
                ref_root = (
                    cmds.listRelatives(ref_root, parent=True, path=True) or
                    [ref_root]
                )
                nodes_to_parent.extend(ref_root)
                continue
            if root.endswith("_RN"):
                # Backwards compatibility for hardcoded reference names.
                refRoot = cmds.referenceQuery(root, n=True)[0]
                refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
                nodes_to_parent.extend(refRoot)

@@ -294,10 +306,17 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
            matrix=True,
            worldSpace=True
        )
        scene_parent = cmds.listRelatives(
            placeholder.scene_identifier, parent=True, fullPath=True
        )
        for node in set(nodes_to_parent):
            cmds.reorder(node, front=True)
            cmds.reorder(node, relative=placeholder.data["index"])
            cmds.xform(node, matrix=placeholder_form, ws=True)
            if scene_parent:
                cmds.parent(node, scene_parent)
            else:
                cmds.parent(node, world=True)

        holding_sets = cmds.listSets(object=placeholder.scene_identifier)
        if not holding_sets:

@@ -319,8 +338,9 @@ def update_workfile_template(*args):
def create_placeholder(*args):
    host = registered_host()
    builder = MayaTemplateBuilder(host)
    window = WorkfileBuildPlaceholderDialog(host, builder)
    window.exec_()
    window = WorkfileBuildPlaceholderDialog(host, builder,
                                            parent=get_main_window())
    window.show()


def update_placeholder(*args):

@@ -343,6 +363,7 @@ def update_placeholder(*args):
        raise ValueError("Too many selected nodes")

    placeholder_item = placeholder_items[0]
    window = WorkfileBuildPlaceholderDialog(host, builder)
    window = WorkfileBuildPlaceholderDialog(host, builder,
                                            parent=get_main_window())
    window.set_update_mode(placeholder_item)
    window.exec_()

165
openpype/hosts/maya/plugins/create/convert_legacy.py
Normal file
@@ -0,0 +1,165 @@
from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
from openpype.hosts.maya.api import plugin
from openpype.hosts.maya.api.lib import read

from maya import cmds
from maya.app.renderSetup.model import renderSetup


class MayaLegacyConvertor(SubsetConvertorPlugin,
                          plugin.MayaCreatorBase):
    """Find and convert any legacy subsets in the scene.

    This Convertor will find all legacy subsets in the scene and will
    transform them to the current system. Since the old subsets don't
    retain any information about their original creators, the only mapping
    we can do is based on their families.

    Its limitation is that you can have multiple creators creating subsets
    of the same family and there is no way to handle it. This code should
    nevertheless cover all creators that came with OpenPype.

    """
    identifier = "io.openpype.creators.maya.legacy"

    # Cases where the identifier or new family doesn't correspond to the
    # original family on the legacy instances
    special_family_conversions = {
        "rendering": "io.openpype.creators.maya.renderlayer",
    }

    def find_instances(self):

        self.cache_subsets(self.collection_shared_data)
        legacy = self.collection_shared_data.get("maya_cached_legacy_subsets")
        if not legacy:
            return

        self.add_convertor_item("Convert legacy instances")

    def convert(self):
        self.remove_convertor_item()

        # We can't use the collected shared data cache here
        # we re-query it here directly to convert all found.
        cache = {}
        self.cache_subsets(cache)
        legacy = cache.get("maya_cached_legacy_subsets")
        if not legacy:
            return

        # From all current new style manual creators find the mapping
        # from family to identifier
        family_to_id = {}
        for identifier, creator in self.create_context.manual_creators.items():
            family = getattr(creator, "family", None)
            if not family:
                continue

            if family in family_to_id:
                # We have a clash of family -> identifier. Multiple
                # new style creators use the same family
                self.log.warning("Clash on family->identifier: "
                                 "{}".format(identifier))
            family_to_id[family] = identifier

        family_to_id.update(self.special_family_conversions)

        # We also embed the current 'task' into the instance since legacy
        # instances didn't store that data on the instances. The old style
        # logic was thus to be live to the current task to begin with.
        data = dict()
        data["task"] = self.create_context.get_current_task_name()

        for family, instance_nodes in legacy.items():
            if family not in family_to_id:
                self.log.warning(
                    "Unable to convert legacy instance with family '{}'"
                    " because there is no matching new creator's family"
                    "".format(family)
                )
                continue

            creator_id = family_to_id[family]
            creator = self.create_context.manual_creators[creator_id]
            data["creator_identifier"] = creator_id

            if isinstance(creator, plugin.RenderlayerCreator):
                self._convert_per_renderlayer(instance_nodes, data, creator)
            else:
                self._convert_regular(instance_nodes, data)

    def _convert_regular(self, instance_nodes, data):
        # We only imprint the creator identifier for it to identify
        # as the new style creator
        for instance_node in instance_nodes:
            self.imprint_instance_node(instance_node,
                                       data=data.copy())

    def _convert_per_renderlayer(self, instance_nodes, data, creator):
        # Split the instance into an instance per layer
        rs = renderSetup.instance()
        layers = rs.getRenderLayers()
        if not layers:
            self.log.error(
                "Can't convert legacy renderlayer instance because no existing"
                " renderSetup layers exist in the scene."
            )
            return

        creator_attribute_names = {
            attr_def.key for attr_def in creator.get_instance_attr_defs()
        }

        for instance_node in instance_nodes:

            # Ensure we have the new style singleton node generated
            # TODO: Make function public
            singleton_node = creator._get_singleton_node()
            if singleton_node:
                self.log.error(
                    "Can't convert legacy renderlayer instance '{}' because"
                    " new style instance '{}' already exists".format(
                        instance_node,
                        singleton_node
                    )
                )
                continue

            creator.create_singleton_node()

            # We are creating new nodes to replace the original instance
            # Copy the attributes of the original instance to the new node
            original_data = read(instance_node)

            # The family gets converted to the new family (this is due to
            # "rendering" family being converted to "renderlayer" family)
            original_data["family"] = creator.family

            # Convert to creator attributes when relevant
            creator_attributes = {}
            for key in list(original_data.keys()):
                # Iterate in order of the original attributes to preserve order
                # in the output creator attributes
                if key in creator_attribute_names:
                    creator_attributes[key] = original_data.pop(key)
            original_data["creator_attributes"] = creator_attributes

            # For layer in maya layers
            for layer in layers:
                layer_instance_node = creator.find_layer_instance_node(layer)
                if not layer_instance_node:
                    # TODO: Make function public
                    layer_instance_node = creator._create_layer_instance_node(
                        layer
                    )

                # Transfer the main attributes of the original instance
                layer_data = original_data.copy()
                layer_data.update(data)

                self.imprint_instance_node(layer_instance_node,
                                           data=layer_data)

            # Delete the legacy instance node
            cmds.delete(instance_node)
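A pure-Python sketch of the family-to-identifier mapping that `convert()` builds above; the creator stubs and identifiers here are illustrative only.

    from collections import namedtuple

    Creator = namedtuple("Creator", ["family"])
    manual_creators = {
        "io.openpype.creators.maya.model": Creator("model"),
        "io.openpype.creators.maya.animation": Creator("animation"),
    }

    family_to_id = {
        creator.family: identifier
        for identifier, creator in manual_creators.items()
    }
    # Special cases override the generic mapping, as in the convertor above
    family_to_id["rendering"] = "io.openpype.creators.maya.renderlayer"

    assert family_to_id["animation"] == "io.openpype.creators.maya.animation"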
@@ -2,9 +2,13 @@ from openpype.hosts.maya.api import (
     lib,
     plugin
 )
+from openpype.lib import (
+    BoolDef,
+    TextDef
+)


-class CreateAnimation(plugin.Creator):
+class CreateAnimation(plugin.MayaCreator):
     """Animation output for character rigs"""

     # We hide the animation creator from the UI since the creation of it
@@ -13,48 +17,71 @@ class CreateAnimation(plugin.Creator):
     # Note: This setting is actually applied from project settings
     enabled = False

+    identifier = "io.openpype.creators.maya.animation"
     name = "animationDefault"
     label = "Animation"
     family = "animation"
     icon = "male"

     write_color_sets = False
     write_face_sets = False
     include_parent_hierarchy = False
     include_user_defined_attributes = False

-    def __init__(self, *args, **kwargs):
-        super(CreateAnimation, self).__init__(*args, **kwargs)
+    # TODO: Would be great if we could visually hide this from the creator
+    # by default but do allow to generate it through code.

-        # create an ordered dict with the existing data first
+    def get_instance_attr_defs(self):

-        # get basic animation data : start / end / handles / steps
-        for key, value in lib.collect_animation_data().items():
-            self.data[key] = value
+        defs = lib.collect_animation_defs()

-        # Write vertex colors with the geometry.
-        self.data["writeColorSets"] = self.write_color_sets
-        self.data["writeFaceSets"] = self.write_face_sets
-
-        # Include only renderable visible shapes.
-        # Skips locators and empty transforms
-        self.data["renderableOnly"] = False
-
-        # Include only nodes that are visible at least once during the
-        # frame range.
-        self.data["visibleOnly"] = False
-
-        # Include the groups above the out_SET content
-        self.data["includeParentHierarchy"] = self.include_parent_hierarchy
-
-        # Default to exporting world-space
-        self.data["worldSpace"] = True
+        defs.extend([
+            BoolDef("writeColorSets",
+                    label="Write vertex colors",
+                    tooltip="Write vertex colors with the geometry",
+                    default=self.write_color_sets),
+            BoolDef("writeFaceSets",
+                    label="Write face sets",
+                    tooltip="Write face sets with the geometry",
+                    default=self.write_face_sets),
+            BoolDef("writeNormals",
+                    label="Write normals",
+                    tooltip="Write normals with the deforming geometry",
+                    default=True),
+            BoolDef("renderableOnly",
+                    label="Renderable Only",
+                    tooltip="Only export renderable visible shapes",
+                    default=False),
+            BoolDef("visibleOnly",
+                    label="Visible Only",
+                    tooltip="Only export dag objects visible during "
+                            "frame range",
+                    default=False),
+            BoolDef("includeParentHierarchy",
+                    label="Include Parent Hierarchy",
+                    tooltip="Whether to include parent hierarchy of nodes in "
+                            "the publish instance",
+                    default=self.include_parent_hierarchy),
+            BoolDef("worldSpace",
+                    label="World-Space Export",
+                    default=True),
+            BoolDef("includeUserDefinedAttributes",
+                    label="Include User Defined Attributes",
+                    default=self.include_user_defined_attributes),
+            TextDef("attr",
+                    label="Custom Attributes",
+                    default="",
+                    placeholder="attr1, attr2"),
+            TextDef("attrPrefix",
+                    label="Custom Attributes Prefix",
+                    placeholder="prefix1, prefix2")
+        ])

         # TODO: Implement these on a Deadline plug-in instead?
         """
         # Default to not send to farm.
         self.data["farm"] = False
         self.data["priority"] = 50
         """

-        # Default to write normals.
-        self.data["writeNormals"] = True
-
-        value = self.include_user_defined_attributes
-        self.data["includeUserDefinedAttributes"] = value
+        return defs

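Every creator in this commit follows the same migration: values that the old plug-ins seeded into self.data inside __init__ are now declared as attribute definitions returned from get_instance_attr_defs(), so the new publisher can render and validate them. A minimal sketch of the pattern, assuming only the plugin.MayaCreator, BoolDef and TextDef APIs visible in the hunks above (the class name, identifier and attributes below are illustrative, not from this commit):

    from openpype.hosts.maya.api import plugin
    from openpype.lib import BoolDef, TextDef


    class CreateExample(plugin.MayaCreator):
        """Hypothetical creator showing the attribute-definition pattern."""

        identifier = "io.openpype.creators.maya.example"  # made-up identifier
        label = "Example"
        family = "example"

        def get_instance_attr_defs(self):
            # Each definition replaces a former `self.data[key] = value`:
            # the key becomes the definition name, the value its default.
            return [
                BoolDef("worldSpace",
                        label="World-Space Export",
                        default=True),
                TextDef("attr",
                        label="Custom Attributes",
                        default="",
                        placeholder="attr1, attr2"),
            ]
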
@@ -2,17 +2,20 @@ from openpype.hosts.maya.api import (
     lib,
     plugin
 )
-
-from maya import cmds
+from openpype.lib import (
+    NumberDef,
+    BoolDef
+)


-class CreateArnoldSceneSource(plugin.Creator):
+class CreateArnoldSceneSource(plugin.MayaCreator):
     """Arnold Scene Source"""

     name = "ass"
+    identifier = "io.openpype.creators.maya.ass"
     label = "Arnold Scene Source"
     family = "ass"
     icon = "cube"

     expandProcedurals = False
     motionBlur = True
     motionBlurKeys = 2
@@ -28,39 +31,71 @@ class CreateArnoldSceneSource(plugin.Creator):
     maskColor_manager = False
     maskOperator = False

-    def __init__(self, *args, **kwargs):
-        super(CreateArnoldSceneSource, self).__init__(*args, **kwargs)
+    def get_instance_attr_defs(self):

-        # Add animation data
-        self.data.update(lib.collect_animation_data())
+        defs = lib.collect_animation_defs()

-        self.data["expandProcedurals"] = self.expandProcedurals
-        self.data["motionBlur"] = self.motionBlur
-        self.data["motionBlurKeys"] = self.motionBlurKeys
-        self.data["motionBlurLength"] = self.motionBlurLength
+        defs.extend([
+            BoolDef("expandProcedural",
+                    label="Expand Procedural",
+                    default=self.expandProcedurals),
+            BoolDef("motionBlur",
+                    label="Motion Blur",
+                    default=self.motionBlur),
+            NumberDef("motionBlurKeys",
+                      label="Motion Blur Keys",
+                      decimals=0,
+                      default=self.motionBlurKeys),
+            NumberDef("motionBlurLength",
+                      label="Motion Blur Length",
+                      decimals=3,
+                      default=self.motionBlurLength),

-        # Masks
-        self.data["maskOptions"] = self.maskOptions
-        self.data["maskCamera"] = self.maskCamera
-        self.data["maskLight"] = self.maskLight
-        self.data["maskShape"] = self.maskShape
-        self.data["maskShader"] = self.maskShader
-        self.data["maskOverride"] = self.maskOverride
-        self.data["maskDriver"] = self.maskDriver
-        self.data["maskFilter"] = self.maskFilter
-        self.data["maskColor_manager"] = self.maskColor_manager
-        self.data["maskOperator"] = self.maskOperator
+            # Masks
+            BoolDef("maskOptions",
+                    label="Export Options",
+                    default=self.maskOptions),
+            BoolDef("maskCamera",
+                    label="Export Cameras",
+                    default=self.maskCamera),
+            BoolDef("maskLight",
+                    label="Export Lights",
+                    default=self.maskLight),
+            BoolDef("maskShape",
+                    label="Export Shapes",
+                    default=self.maskShape),
+            BoolDef("maskShader",
+                    label="Export Shaders",
+                    default=self.maskShader),
+            BoolDef("maskOverride",
+                    label="Export Override Nodes",
+                    default=self.maskOverride),
+            BoolDef("maskDriver",
+                    label="Export Drivers",
+                    default=self.maskDriver),
+            BoolDef("maskFilter",
+                    label="Export Filters",
+                    default=self.maskFilter),
+            BoolDef("maskOperator",
+                    label="Export Operators",
+                    default=self.maskOperator),
+            BoolDef("maskColor_manager",
+                    label="Export Color Managers",
+                    default=self.maskColor_manager),
+        ])

-    def process(self):
-        instance = super(CreateArnoldSceneSource, self).process()
+        return defs

-        nodes = []
+    def create(self, subset_name, instance_data, pre_create_data):

-        if (self.options or {}).get("useSelection"):
-            nodes = cmds.ls(selection=True)
+        from maya import cmds

-        cmds.sets(nodes, rm=instance)
+        instance = super(CreateArnoldSceneSource, self).create(
+            subset_name, instance_data, pre_create_data
+        )

-        assContent = cmds.sets(name=instance + "_content_SET")
-        assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
-        cmds.sets([assContent, assProxy], forceElement=instance)
+        instance_node = instance.get("instance_node")

+        content = cmds.sets(name=instance_node + "_content_SET", empty=True)
+        proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
+        cmds.sets([content, proxy], forceElement=instance_node)

@@ -1,10 +1,10 @@
 from openpype.hosts.maya.api import plugin


-class CreateAssembly(plugin.Creator):
+class CreateAssembly(plugin.MayaCreator):
     """A grouped package of loaded content"""

     name = "assembly"
+    identifier = "io.openpype.creators.maya.assembly"
     label = "Assembly"
     family = "assembly"
     icon = "cubes"

@@ -2,33 +2,35 @@ from openpype.hosts.maya.api import (
     lib,
     plugin
 )
+from openpype.lib import BoolDef


-class CreateCamera(plugin.Creator):
+class CreateCamera(plugin.MayaCreator):
     """Single baked camera"""

     name = "cameraMain"
+    identifier = "io.openpype.creators.maya.camera"
     label = "Camera"
     family = "camera"
     icon = "video-camera"

-    def __init__(self, *args, **kwargs):
-        super(CreateCamera, self).__init__(*args, **kwargs)
+    def get_instance_attr_defs(self):

         # get basic animation data : start / end / handles / steps
-        animation_data = lib.collect_animation_data()
-        for key, value in animation_data.items():
-            self.data[key] = value
+        defs = lib.collect_animation_defs()

-        # Bake to world space by default, when this is False it will also
-        # include the parent hierarchy in the baked results
-        self.data['bakeToWorldSpace'] = True
+        defs.extend([
+            BoolDef("bakeToWorldSpace",
+                    label="Bake to World-Space",
+                    tooltip="Bake to World-Space",
+                    default=True),
+        ])

+        return defs


-class CreateCameraRig(plugin.Creator):
+class CreateCameraRig(plugin.MayaCreator):
     """Complex hierarchy with camera."""

     name = "camerarigMain"
+    identifier = "io.openpype.creators.maya.camerarig"
     label = "Camera Rig"
     family = "camerarig"
     icon = "video-camera"

@@ -1,16 +1,21 @@
 from openpype.hosts.maya.api import plugin
+from openpype.lib import BoolDef


-class CreateLayout(plugin.Creator):
+class CreateLayout(plugin.MayaCreator):
     """A grouped package of loaded content"""

     name = "layoutMain"
+    identifier = "io.openpype.creators.maya.layout"
     label = "Layout"
     family = "layout"
     icon = "cubes"

-    def __init__(self, *args, **kwargs):
-        super(CreateLayout, self).__init__(*args, **kwargs)
-        # enable this when you want to
-        # publish group of loaded asset
-        self.data["groupLoadedAssets"] = False
+    def get_instance_attr_defs(self):

+        return [
+            BoolDef("groupLoadedAssets",
+                    label="Group Loaded Assets",
+                    tooltip="Enable this when you want to publish group of "
+                            "loaded asset",
+                    default=False)
+        ]

@@ -1,29 +1,53 @@
 from openpype.hosts.maya.api import (
-    lib,
-    plugin
+    plugin,
+    lib
 )
+from openpype.lib import (
+    BoolDef,
+    TextDef
+)


-class CreateLook(plugin.Creator):
+class CreateLook(plugin.MayaCreator):
     """Shader connections defining shape look"""

     name = "look"
+    identifier = "io.openpype.creators.maya.look"
     label = "Look"
     family = "look"
     icon = "paint-brush"

     make_tx = True
     rs_tex = False

-    def __init__(self, *args, **kwargs):
-        super(CreateLook, self).__init__(*args, **kwargs)
+    def get_instance_attr_defs(self):

-        self.data["renderlayer"] = lib.get_current_renderlayer()
+        return [
+            # TODO: This value should actually get set on create!
+            TextDef("renderLayer",
+                    # TODO: Bug: Hidden attribute's label is still shown in UI?
+                    hidden=True,
+                    default=lib.get_current_renderlayer(),
+                    label="Renderlayer",
+                    tooltip="Renderlayer to extract the look from"),
+            BoolDef("maketx",
+                    label="MakeTX",
+                    tooltip="Whether to generate .tx files for your textures",
+                    default=self.make_tx),
+            BoolDef("rstex",
+                    label="Convert textures to .rstex",
+                    tooltip="Whether to generate Redshift .rstex files for "
+                            "your textures",
+                    default=self.rs_tex),
+            BoolDef("forceCopy",
+                    label="Force Copy",
+                    tooltip="Enable users to force a copy instead of hardlink."
+                            "\nNote: On Windows copy is always forced due to "
+                            "bugs in windows' implementation of hardlinks.",
+                    default=False)
+        ]

-        # Whether to automatically convert the textures to .tx upon publish.
-        self.data["maketx"] = self.make_tx
-        # Whether to automatically convert the textures to .rstex upon publish.
-        self.data["rstex"] = self.rs_tex
-        # Enable users to force a copy.
-        # - on Windows is "forceCopy" always changed to `True` because of
-        #   windows implementation of hardlinks
-        self.data["forceCopy"] = False
+    def get_pre_create_attr_defs(self):
+        # Show same attributes on create but include use selection
+        defs = super(CreateLook, self).get_pre_create_attr_defs()
+        defs.extend(self.get_instance_attr_defs())
+        return defs

@@ -1,9 +1,10 @@
 from openpype.hosts.maya.api import plugin


-class CreateMayaScene(plugin.Creator):
+class CreateMayaScene(plugin.MayaCreator):
     """Raw Maya Scene file export"""

+    identifier = "io.openpype.creators.maya.mayascene"
     name = "mayaScene"
     label = "Maya Scene"
     family = "mayaScene"

@@ -1,26 +1,43 @@
 from openpype.hosts.maya.api import plugin
+from openpype.lib import (
+    BoolDef,
+    TextDef
+)


-class CreateModel(plugin.Creator):
+class CreateModel(plugin.MayaCreator):
     """Polygonal static geometry"""

     name = "modelMain"
+    identifier = "io.openpype.creators.maya.model"
     label = "Model"
     family = "model"
     icon = "cube"
     defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]

     write_color_sets = False
     write_face_sets = False

-    def __init__(self, *args, **kwargs):
-        super(CreateModel, self).__init__(*args, **kwargs)
-
-        # Vertex colors with the geometry
-        self.data["writeColorSets"] = self.write_color_sets
-        self.data["writeFaceSets"] = self.write_face_sets
+    def get_instance_attr_defs(self):

-        # Include attributes by attribute name or prefix
-        self.data["attr"] = ""
-        self.data["attrPrefix"] = ""
-
-        # Whether to include parent hierarchy of nodes in the instance
-        self.data["includeParentHierarchy"] = False
+        return [
+            BoolDef("writeColorSets",
+                    label="Write vertex colors",
+                    tooltip="Write vertex colors with the geometry",
+                    default=self.write_color_sets),
+            BoolDef("writeFaceSets",
+                    label="Write face sets",
+                    tooltip="Write face sets with the geometry",
+                    default=self.write_face_sets),
+            BoolDef("includeParentHierarchy",
+                    label="Include Parent Hierarchy",
+                    tooltip="Whether to include parent hierarchy of nodes in "
+                            "the publish instance",
+                    default=False),
+            TextDef("attr",
+                    label="Custom Attributes",
+                    default="",
+                    placeholder="attr1, attr2"),
+            TextDef("attrPrefix",
+                    label="Custom Attributes Prefix",
+                    placeholder="prefix1, prefix2")
+        ]

@@ -1,15 +1,27 @@
 from openpype.hosts.maya.api import plugin
+from openpype.lib import (
+    BoolDef,
+    EnumDef
+)


-class CreateMultiverseLook(plugin.Creator):
+class CreateMultiverseLook(plugin.MayaCreator):
     """Create Multiverse Look"""

     name = "mvLook"
+    identifier = "io.openpype.creators.maya.mvlook"
     label = "Multiverse Look"
     family = "mvLook"
     icon = "cubes"

-    def __init__(self, *args, **kwargs):
-        super(CreateMultiverseLook, self).__init__(*args, **kwargs)
-        self.data["fileFormat"] = ["usda", "usd"]
-        self.data["publishMipMap"] = True
+    def get_instance_attr_defs(self):

+        return [
+            EnumDef("fileFormat",
+                    label="File Format",
+                    tooltip="USD export file format",
+                    items=["usda", "usd"],
+                    default="usda"),
+            BoolDef("publishMipMap",
+                    label="Publish MipMap",
+                    default=True),
+        ]

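Note how list-valued options translate in these Multiverse hunks: the old creators stored lists such as self.data["fileFormat"] = ["usda", "usd"], where the first entry acted as the default, while the new code spells out the options and the default explicitly through EnumDef. A small sketch of that mapping, assuming only the EnumDef signature used in the hunks above:

    from openpype.lib import EnumDef

    # Old convention: self.data["fileFormat"] = ["usda", "usd"]
    # (list of options, first entry implicitly the default)
    # New convention: options and default are explicit.
    file_format = EnumDef("fileFormat",
                          label="File Format",
                          items=["usda", "usd"],
                          default="usda")
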
@@ -1,53 +1,135 @@
 from openpype.hosts.maya.api import plugin, lib
+from openpype.lib import (
+    BoolDef,
+    NumberDef,
+    TextDef,
+    EnumDef
+)


-class CreateMultiverseUsd(plugin.Creator):
+class CreateMultiverseUsd(plugin.MayaCreator):
     """Create Multiverse USD Asset"""

     name = "mvUsdMain"
+    identifier = "io.openpype.creators.maya.mvusdasset"
     label = "Multiverse USD Asset"
     family = "usd"
     icon = "cubes"

-    def __init__(self, *args, **kwargs):
-        super(CreateMultiverseUsd, self).__init__(*args, **kwargs)
+    def get_instance_attr_defs(self):

-        # Add animation data first, since it maintains order.
-        self.data.update(lib.collect_animation_data(True))
+        defs = lib.collect_animation_defs(fps=True)
+        defs.extend([
+            EnumDef("fileFormat",
+                    label="File format",
+                    items=["usd", "usda", "usdz"],
+                    default="usd"),
+            BoolDef("stripNamespaces",
+                    label="Strip Namespaces",
+                    default=True),
+            BoolDef("mergeTransformAndShape",
+                    label="Merge Transform and Shape",
+                    default=False),
+            BoolDef("writeAncestors",
+                    label="Write Ancestors",
+                    default=True),
+            BoolDef("flattenParentXforms",
+                    label="Flatten Parent Xforms",
+                    default=False),
+            BoolDef("writeSparseOverrides",
+                    label="Write Sparse Overrides",
+                    default=False),
+            BoolDef("useMetaPrimPath",
+                    label="Use Meta Prim Path",
+                    default=False),
+            TextDef("customRootPath",
+                    label="Custom Root Path",
+                    default=''),
+            TextDef("customAttributes",
+                    label="Custom Attributes",
+                    tooltip="Comma-separated list of attribute names",
+                    default=''),
+            TextDef("nodeTypesToIgnore",
+                    label="Node Types to Ignore",
+                    tooltip="Comma-separated list of node types to be ignored",
+                    default=''),
+            BoolDef("writeMeshes",
+                    label="Write Meshes",
+                    default=True),
+            BoolDef("writeCurves",
+                    label="Write Curves",
+                    default=True),
+            BoolDef("writeParticles",
+                    label="Write Particles",
+                    default=True),
+            BoolDef("writeCameras",
+                    label="Write Cameras",
+                    default=False),
+            BoolDef("writeLights",
+                    label="Write Lights",
+                    default=False),
+            BoolDef("writeJoints",
+                    label="Write Joints",
+                    default=False),
+            BoolDef("writeCollections",
+                    label="Write Collections",
+                    default=False),
+            BoolDef("writePositions",
+                    label="Write Positions",
+                    default=True),
+            BoolDef("writeNormals",
+                    label="Write Normals",
+                    default=True),
+            BoolDef("writeUVs",
+                    label="Write UVs",
+                    default=True),
+            BoolDef("writeColorSets",
+                    label="Write Color Sets",
+                    default=False),
+            BoolDef("writeTangents",
+                    label="Write Tangents",
+                    default=False),
+            BoolDef("writeRefPositions",
+                    label="Write Ref Positions",
+                    default=True),
+            BoolDef("writeBlendShapes",
+                    label="Write BlendShapes",
+                    default=False),
+            BoolDef("writeDisplayColor",
+                    label="Write Display Color",
+                    default=True),
+            BoolDef("writeSkinWeights",
+                    label="Write Skin Weights",
+                    default=False),
+            BoolDef("writeMaterialAssignment",
+                    label="Write Material Assignment",
+                    default=False),
+            BoolDef("writeHardwareShader",
+                    label="Write Hardware Shader",
+                    default=False),
+            BoolDef("writeShadingNetworks",
+                    label="Write Shading Networks",
+                    default=False),
+            BoolDef("writeTransformMatrix",
+                    label="Write Transform Matrix",
+                    default=True),
+            BoolDef("writeUsdAttributes",
+                    label="Write USD Attributes",
+                    default=True),
+            BoolDef("writeInstancesAsReferences",
+                    label="Write Instances as References",
+                    default=False),
+            BoolDef("timeVaryingTopology",
+                    label="Time Varying Topology",
+                    default=False),
+            TextDef("customMaterialNamespace",
+                    label="Custom Material Namespace",
+                    default=''),
+            NumberDef("numTimeSamples",
+                      label="Num Time Samples",
+                      default=1),
+            NumberDef("timeSamplesSpan",
+                      label="Time Samples Span",
+                      default=0.0),
+        ])

-        self.data["fileFormat"] = ["usd", "usda", "usdz"]
-        self.data["stripNamespaces"] = True
-        self.data["mergeTransformAndShape"] = False
-        self.data["writeAncestors"] = True
-        self.data["flattenParentXforms"] = False
-        self.data["writeSparseOverrides"] = False
-        self.data["useMetaPrimPath"] = False
-        self.data["customRootPath"] = ''
-        self.data["customAttributes"] = ''
-        self.data["nodeTypesToIgnore"] = ''
-        self.data["writeMeshes"] = True
-        self.data["writeCurves"] = True
-        self.data["writeParticles"] = True
-        self.data["writeCameras"] = False
-        self.data["writeLights"] = False
-        self.data["writeJoints"] = False
-        self.data["writeCollections"] = False
-        self.data["writePositions"] = True
-        self.data["writeNormals"] = True
-        self.data["writeUVs"] = True
-        self.data["writeColorSets"] = False
-        self.data["writeTangents"] = False
-        self.data["writeRefPositions"] = True
-        self.data["writeBlendShapes"] = False
-        self.data["writeDisplayColor"] = True
-        self.data["writeSkinWeights"] = False
-        self.data["writeMaterialAssignment"] = False
-        self.data["writeHardwareShader"] = False
-        self.data["writeShadingNetworks"] = False
-        self.data["writeTransformMatrix"] = True
-        self.data["writeUsdAttributes"] = True
-        self.data["writeInstancesAsReferences"] = False
-        self.data["timeVaryingTopology"] = False
-        self.data["customMaterialNamespace"] = ''
-        self.data["numTimeSamples"] = 1
-        self.data["timeSamplesSpan"] = 0.0
+        return defs

@@ -1,26 +1,48 @@
 from openpype.hosts.maya.api import plugin, lib
+from openpype.lib import (
+    BoolDef,
+    NumberDef,
+    EnumDef
+)


-class CreateMultiverseUsdComp(plugin.Creator):
+class CreateMultiverseUsdComp(plugin.MayaCreator):
     """Create Multiverse USD Composition"""

     name = "mvUsdCompositionMain"
+    identifier = "io.openpype.creators.maya.mvusdcomposition"
     label = "Multiverse USD Composition"
     family = "mvUsdComposition"
     icon = "cubes"

-    def __init__(self, *args, **kwargs):
-        super(CreateMultiverseUsdComp, self).__init__(*args, **kwargs)
+    def get_instance_attr_defs(self):

-        # Add animation data first, since it maintains order.
-        self.data.update(lib.collect_animation_data(True))
+        defs = lib.collect_animation_defs(fps=True)
+        defs.extend([
+            EnumDef("fileFormat",
+                    label="File format",
+                    items=["usd", "usda"],
+                    default="usd"),
+            BoolDef("stripNamespaces",
+                    label="Strip Namespaces",
+                    default=False),
+            BoolDef("mergeTransformAndShape",
+                    label="Merge Transform and Shape",
+                    default=False),
+            BoolDef("flattenContent",
+                    label="Flatten Content",
+                    default=False),
+            BoolDef("writeAsCompoundLayers",
+                    label="Write As Compound Layers",
+                    default=False),
+            BoolDef("writePendingOverrides",
+                    label="Write Pending Overrides",
+                    default=False),
+            NumberDef("numTimeSamples",
+                      label="Num Time Samples",
+                      default=1),
+            NumberDef("timeSamplesSpan",
+                      label="Time Samples Span",
+                      default=0.0),
+        ])

-        # Order of `fileFormat` must match extract_multiverse_usd_comp.py
-        self.data["fileFormat"] = ["usda", "usd"]
-        self.data["stripNamespaces"] = False
-        self.data["mergeTransformAndShape"] = False
-        self.data["flattenContent"] = False
-        self.data["writeAsCompoundLayers"] = False
-        self.data["writePendingOverrides"] = False
-        self.data["numTimeSamples"] = 1
-        self.data["timeSamplesSpan"] = 0.0
+        return defs

@@ -1,30 +1,59 @@
 from openpype.hosts.maya.api import plugin, lib
+from openpype.lib import (
+    BoolDef,
+    NumberDef,
+    EnumDef
+)


-class CreateMultiverseUsdOver(plugin.Creator):
+class CreateMultiverseUsdOver(plugin.MayaCreator):
     """Create Multiverse USD Override"""

     name = "mvUsdOverrideMain"
+    identifier = "io.openpype.creators.maya.mvusdoverride"
     label = "Multiverse USD Override"
     family = "mvUsdOverride"
     icon = "cubes"

-    def __init__(self, *args, **kwargs):
-        super(CreateMultiverseUsdOver, self).__init__(*args, **kwargs)
+    def get_instance_attr_defs(self):
+        defs = lib.collect_animation_defs(fps=True)
+        defs.extend([
+            EnumDef("fileFormat",
+                    label="File format",
+                    items=["usd", "usda"],
+                    default="usd"),
+            BoolDef("writeAll",
+                    label="Write All",
+                    default=False),
+            BoolDef("writeTransforms",
+                    label="Write Transforms",
+                    default=True),
+            BoolDef("writeVisibility",
+                    label="Write Visibility",
+                    default=True),
+            BoolDef("writeAttributes",
+                    label="Write Attributes",
+                    default=True),
+            BoolDef("writeMaterials",
+                    label="Write Materials",
+                    default=True),
+            BoolDef("writeVariants",
+                    label="Write Variants",
+                    default=True),
+            BoolDef("writeVariantsDefinition",
+                    label="Write Variants Definition",
+                    default=True),
+            BoolDef("writeActiveState",
+                    label="Write Active State",
+                    default=True),
+            BoolDef("writeNamespaces",
+                    label="Write Namespaces",
+                    default=False),
+            NumberDef("numTimeSamples",
+                      label="Num Time Samples",
+                      default=1),
+            NumberDef("timeSamplesSpan",
+                      label="Time Samples Span",
+                      default=0.0),
+        ])

-        # Add animation data first, since it maintains order.
-        self.data.update(lib.collect_animation_data(True))
-
-        # Order of `fileFormat` must match extract_multiverse_usd_over.py
-        self.data["fileFormat"] = ["usda", "usd"]
-        self.data["writeAll"] = False
-        self.data["writeTransforms"] = True
-        self.data["writeVisibility"] = True
-        self.data["writeAttributes"] = True
-        self.data["writeMaterials"] = True
-        self.data["writeVariants"] = True
-        self.data["writeVariantsDefinition"] = True
-        self.data["writeActiveState"] = True
-        self.data["writeNamespaces"] = False
-        self.data["numTimeSamples"] = 1
-        self.data["timeSamplesSpan"] = 0.0
+        return defs

@@ -4,47 +4,85 @@ from openpype.hosts.maya.api import (
     lib,
     plugin
 )
+from openpype.lib import (
+    BoolDef,
+    TextDef
+)


-class CreatePointCache(plugin.Creator):
+class CreatePointCache(plugin.MayaCreator):
     """Alembic pointcache for animated data"""

     name = "pointcache"
-    label = "Point Cache"
+    identifier = "io.openpype.creators.maya.pointcache"
+    label = "Pointcache"
     family = "pointcache"
     icon = "gears"
     write_color_sets = False
     write_face_sets = False
     include_user_defined_attributes = False

-    def __init__(self, *args, **kwargs):
-        super(CreatePointCache, self).__init__(*args, **kwargs)
+    def get_instance_attr_defs(self):

-        # Add animation data
-        self.data.update(lib.collect_animation_data())
+        defs = lib.collect_animation_defs()

-        # Vertex colors with the geometry.
-        self.data["writeColorSets"] = self.write_color_sets
-        # Vertex colors with the geometry.
-        self.data["writeFaceSets"] = self.write_face_sets
-        self.data["renderableOnly"] = False  # Only renderable visible shapes
-        self.data["visibleOnly"] = False  # only nodes that are visible
-        self.data["includeParentHierarchy"] = False  # Include parent groups
-        self.data["worldSpace"] = True  # Default to exporting world-space
-        self.data["refresh"] = False  # Default to suspend refresh.
-
-        # Add options for custom attributes
-        value = self.include_user_defined_attributes
-        self.data["includeUserDefinedAttributes"] = value
-        self.data["attr"] = ""
-        self.data["attrPrefix"] = ""
+        defs.extend([
+            BoolDef("writeColorSets",
+                    label="Write vertex colors",
+                    tooltip="Write vertex colors with the geometry",
+                    default=False),
+            BoolDef("writeFaceSets",
+                    label="Write face sets",
+                    tooltip="Write face sets with the geometry",
+                    default=False),
+            BoolDef("renderableOnly",
+                    label="Renderable Only",
+                    tooltip="Only export renderable visible shapes",
+                    default=False),
+            BoolDef("visibleOnly",
+                    label="Visible Only",
+                    tooltip="Only export dag objects visible during "
+                            "frame range",
+                    default=False),
+            BoolDef("includeParentHierarchy",
+                    label="Include Parent Hierarchy",
+                    tooltip="Whether to include parent hierarchy of nodes in "
+                            "the publish instance",
+                    default=False),
+            BoolDef("worldSpace",
+                    label="World-Space Export",
+                    default=True),
+            BoolDef("refresh",
+                    label="Refresh viewport during export",
+                    default=False),
+            BoolDef("includeUserDefinedAttributes",
+                    label="Include User Defined Attributes",
+                    default=self.include_user_defined_attributes),
+            TextDef("attr",
+                    label="Custom Attributes",
+                    default="",
+                    placeholder="attr1, attr2"),
+            TextDef("attrPrefix",
+                    label="Custom Attributes Prefix",
+                    default="",
+                    placeholder="prefix1, prefix2")
+        ])

         # TODO: Implement these on a Deadline plug-in instead?
         """
         # Default to not send to farm.
         self.data["farm"] = False
         self.data["priority"] = 50
         """

-    def process(self):
-        instance = super(CreatePointCache, self).process()
+        return defs

-        assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
-        cmds.sets(assProxy, forceElement=instance)
+    def create(self, subset_name, instance_data, pre_create_data):

+        instance = super(CreatePointCache, self).create(
+            subset_name, instance_data, pre_create_data
+        )
+        instance_node = instance.get("instance_node")

+        # For Arnold standin proxy
+        proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
+        cmds.sets(proxy_set, forceElement=instance_node)

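For context on where these attribute values end up: with the new creators the options are no longer written as top-level keys on the instance, so publish plug-ins read them from the instance's creator attributes instead. A hedged sketch of a consumer, assuming the "creator_attributes" key used by OpenPype's new publisher (the collector class itself is illustrative, not part of this commit):

    import pyblish.api


    class CollectPointcacheOptions(pyblish.api.InstancePlugin):
        """Illustrative collector reading creator attribute values."""

        order = pyblish.api.CollectorOrder
        families = ["pointcache"]

        def process(self, instance):
            # Values entered for the get_instance_attr_defs() definitions are
            # assumed to live under "creator_attributes" on the instance.
            attrs = instance.data.get("creator_attributes", {})
            instance.data["worldSpace"] = attrs.get("worldSpace", True)
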
Some files were not shown because too many files have changed in this diff.