Merge branch 'develop' into enhancement/OP-5265_Use-custom-staging-dir-function-for-Maya-renders

# Conflicts:
#	openpype/hosts/maya/plugins/publish/collect_render.py
#	openpype/hosts/maya/plugins/publish/validate_render_image_rule.py
Toke Stuart Jepsen 2023-07-13 12:15:31 +01:00
commit 7866c6f84e
271 changed files with 6768 additions and 2922 deletions

View file

@ -35,6 +35,14 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.16.0-nightly.1
- 3.15.12
- 3.15.12-nightly.4
- 3.15.12-nightly.3
- 3.15.12-nightly.2
- 3.15.12-nightly.1
- 3.15.11
- 3.15.11-nightly.5
- 3.15.11-nightly.4
- 3.15.11-nightly.3
- 3.15.11-nightly.2
@ -127,14 +135,6 @@ body:
- 3.14.5-nightly.3
- 3.14.5-nightly.2
- 3.14.5-nightly.1
- 3.14.4
- 3.14.4-nightly.4
- 3.14.4-nightly.3
- 3.14.4-nightly.2
- 3.14.4-nightly.1
- 3.14.3
- 3.14.3-nightly.7
- 3.14.3-nightly.6
validations:
required: true
- type: dropdown

File diff suppressed because it is too large

View file

@ -49,7 +49,7 @@ def deprecated(new_destination):
@deprecated("openpype.pipeline.publish.get_errored_instances_from_context")
def get_errored_instances_from_context(context):
def get_errored_instances_from_context(context, plugin=None):
"""
Deprecated:
Since 3.14.* will be removed in 3.16.* or later.
@ -57,7 +57,7 @@ def get_errored_instances_from_context(context):
from openpype.pipeline.publish import get_errored_instances_from_context
return get_errored_instances_from_context(context)
return get_errored_instances_from_context(context, plugin=plugin)
@deprecated("openpype.pipeline.publish.get_errored_plugins_from_context")
@ -97,11 +97,9 @@ class RepairAction(pyblish.api.Action):
# Get the errored instances
self.log.info("Finding failed instances..")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
for instance in instances:
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
for instance in errored_instances:
plugin.repair(instance)
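A minimal sketch of how a pyblish action can consume the updated helper, assuming the signature above; the action class itself is hypothetical:

import pyblish.api
from openpype.pipeline.publish import get_errored_instances_from_context


class LogErroredInstancesAction(pyblish.api.Action):
    """Hypothetical action: log instances that failed for this plug-in."""
    label = "Log errored instances"
    on = "failed"

    def process(self, context, plugin):
        # the plugin filter is now applied inside the helper, so no extra
        # pyblish.api.instances_by_plugin() call is needed here
        for instance in get_errored_instances_from_context(context, plugin=plugin):
            self.log.info("Errored instance: %s" % instance)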

View file

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.25"
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.26"
ExtensionBundleName="com.openpype.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="com.openpype.AE.panel" Version="1.0" />

View file

@ -104,6 +104,39 @@
});
</script>
<script type=text/javascript>
$(function() {
$("a#create-placeholder-button").bind("click", function() {
RPC.call('AfterEffects.create_placeholder_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#update-placeholder-button").bind("click", function() {
RPC.call('AfterEffects.update_placeholder_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#build-workfile-button").bind("click", function() {
RPC.call('AfterEffects.build_workfile_template_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#experimental-button").bind("click", function() {
@ -127,9 +160,15 @@
<div><a href=# id=loader-button><button class="hostFontSize">Load...</button></a></div>
<div><a href=# id=publish-button><button class="hostFontSize">Publish...</button></a></div>
<div><a href=# id=sceneinventory-button><button class="hostFontSize">Manage...</button></a></div>
<div><a href=# id=separator0><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=setresolution-button><button class="hostFontSize">Set Resolution</button></a></div>
<div><a href=# id=setframes-button><button class="hostFontSize">Set Frame Range</button></a></div>
<div><a href=# id=setall-button><button class="hostFontSize">Apply All Settings</button></a></div>
<div><a href=# id=separator1><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=create-placeholder-button><button class="hostFontSize">Create placeholder</button></a></div>
<div><a href=# id=update-placeholder-button><button class="hostFontSize">Update placeholder</button></a></div>
<div><a href=# id=build-workfile-button><button class="hostFontSize">Build Workfile from template</button></a></div>
<div><a href=# id=separator3><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=experimental-button><button class="hostFontSize">Experimental Tools...</button></a></div>
</div>

View file

@ -107,6 +107,17 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.add_item', function (data) {
log.warn('Server called client route "add_item":', data);
var escapedName = EscapeStringForJSX(data.name);
return runEvalScript("addItem('" + escapedName +"', " +
"'" + data.item_type + "')")
.then(function(result){
log.warn("get_items: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_items', function (data) {
log.warn('Server called client route "get_items":', data);
return runEvalScript("getItems(" + data.comps + "," +
@ -118,6 +129,15 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.select_items', function (data) {
log.warn('Server called client route "select_items":', data);
return runEvalScript("selectItems(" + JSON.stringify(data.items) + ")")
.then(function(result){
log.warn("select_items: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_selected_items', function (data) {
log.warn('Server called client route "get_selected_items":', data);
@ -280,7 +300,7 @@ function main(websocket_url){
RPC.addRoute('AfterEffects.add_item_as_layer', function (data) {
log.warn('Server called client route "add_item_as_layer":', data);
return runEvalScript("addItemAsLayerToComp(" + data.comp_id + ", " +
data.item_id + "," +
data.item_id + "," +
" null )")
.then(function(result){
log.warn("addItemAsLayerToComp: " + result);
@ -288,6 +308,16 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.add_item_instead_placeholder', function (data) {
log.warn('Server called client route "add_item_instead_placeholder":', data);
return runEvalScript("addItemInstead(" + data.placeholder_item_id + ", " +
data.item_id + ")")
.then(function(result){
log.warn("add_item_instead_placeholder: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.render', function (data) {
log.warn('Server called client route "render":', data);
var escapedPath = EscapeStringForJSX(data.folder_url);
@ -312,6 +342,20 @@ function main(websocket_url){
});
});
RPC.addRoute('AfterEffects.add_placeholder', function (data) {
log.warn('Server called client route "add_placeholder":', data);
var escapedName = EscapeStringForJSX(data.name);
return runEvalScript("addPlaceholder('" + escapedName +"',"+
data.width + ',' +
data.height + ',' +
data.fps + ',' +
data.duration + ")")
.then(function(result){
log.warn("add_placeholder: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.close', function (data) {
log.warn('Server called client route "close":', data);
return runEvalScript("close()");

View file

@ -112,6 +112,32 @@ function getActiveDocumentFullName(){
return _prepareError("No file open currently");
}
function addItem(name, item_type){
/**
* Adds comp or folder to project items.
*
* Could be called when creating publishable instance to prepare
* composition (and render queue).
*
* Args:
* name (str): composition name
* item_type (str): COMP|FOLDER
* Returns:
* SingleItemValue: eg {"result": VALUE}
*/
if (item_type == "COMP"){
// dummy values, will be rewritten later
item = app.project.items.addComp(name, 1920, 1060, 1, 10, 25);
}else if (item_type == "FOLDER"){
item = app.project.items.addFolder(name);
}else{
return _prepareError("Only 'COMP' or 'FOLDER' can be created");
}
return _prepareSingleValue(item.id);
}
function getItems(comps, folders, footages){
/**
* Returns JSON representation of compositions and
@ -139,6 +165,24 @@ function getItems(comps, folders, footages){
}
function selectItems(items){
/**
* Select all items from `items`, deselect other.
*
* Args:
* items (list)
*/
for (i = 1; i <= app.project.items.length; ++i){
item = app.project.items[i];
if (items.indexOf(item.id) > -1){
item.selected = true;
}else{
item.selected = false;
}
}
}
function getSelectedItems(comps, folders, footages){
/**
* Returns list of selected items from Project menu
@ -280,12 +324,12 @@ function setLabelColor(comp_id, color_idx){
}
}
function replaceItem(comp_id, path, item_name){
function replaceItem(item_id, path, item_name){
/**
* Replaces loaded file with new file and updates name
*
* Args:
* comp_id (int): id of composition, not a index!
* item_id (int): id of composition, not a index!
* path (string): absolute path to new file
* item_name (string): new composition name
*/
@ -295,7 +339,7 @@ function replaceItem(comp_id, path, item_name){
if (!fp.exists){
return _prepareError("File " + path + " not found.");
}
var item = app.project.itemByID(comp_id);
var item = app.project.itemByID(item_id);
if (item){
try{
if (isFileSequence(item)) {
@ -311,7 +355,7 @@ function replaceItem(comp_id, path, item_name){
fp.close();
}
}else{
return _prepareError("There is no composition with "+ comp_id);
return _prepareError("There is no item with "+ item_id);
}
app.endUndoGroup();
}
@ -821,6 +865,67 @@ function printMsg(msg){
alert(msg);
}
function addPlaceholder(name, width, height, fps, duration){
/** Add AE PlaceholderItem to Project list.
*
* PlaceholderItem chosen as it doesn't require existing file and
* might potentially allow nice functionality in the future.
*
*/
app.beginUndoGroup('change comp properties');
try{
item = app.project.importPlaceholder(name, width, height,
fps, duration);
return _prepareSingleValue(item.id);
}catch (error) {
writeLn(_prepareError("Cannot add placeholder " + error.toString()));
}
app.endUndoGroup();
}
function addItemInstead(placeholder_item_id, item_id){
/** Add new loaded item in place of load placeholder.
*
* Each placeholder could be placed multiple times into multiple
* compositions. This loops through all compositions and
* places loaded item under placeholder.
* Placeholder item gets deleted later separately according
* to configuration in Settings.
*
* Args:
* placeholder_item_id (int)
* item_id (int)
*/
var item = app.project.itemByID(item_id);
if (!item){
return _prepareError("There is no item with "+ item_id);
}
app.beginUndoGroup('Add loaded items');
for (i = 1; i <= app.project.items.length; ++i){
var comp = app.project.items[i];
if (!(comp instanceof CompItem)){
continue
}
// use a separate index so the outer project items loop is not affected
var layer_idx = 1;
while (layer_idx <= comp.numLayers) {
var layer = comp.layer(layer_idx);
var layer_source = layer.source;
if (layer_source && layer_source.id == placeholder_item_id){
var new_layer = comp.layers.add(item);
new_layer.moveAfter(layer);
// copy all(?) properties to new layer
layer.property("ADBE Transform Group").copyToComp(new_layer);
// skip over the layer that was just inserted
layer_idx = layer_idx + 1;
}
layer_idx = layer_idx + 1;
}
}
app.endUndoGroup();
}
function _prepareSingleValue(value){
return JSON.stringify({"result": value})
}

View file

@ -357,3 +357,33 @@ class AfterEffectsRoute(WebSocketRoute):
# Required return statement.
return "nothing"
def create_placeholder_route(self):
from openpype.hosts.aftereffects.api.workfile_template_builder import \
create_placeholder
partial_method = functools.partial(create_placeholder)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def update_placeholder_route(self):
from openpype.hosts.aftereffects.api.workfile_template_builder import \
update_placeholder
partial_method = functools.partial(update_placeholder)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def build_workfile_template_route(self):
from openpype.hosts.aftereffects.api.workfile_template_builder import \
build_workfile_template
partial_method = functools.partial(build_workfile_template)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"

View file

@ -10,6 +10,10 @@ from openpype.pipeline import (
register_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.aftereffects.api.workfile_template_builder import (
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
)
from openpype.pipeline.load import any_outdated_containers
import openpype.hosts.aftereffects
@ -116,6 +120,12 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
item["id"] = "publish_context"
self.stub.imprint(item["id"], item)
def get_workfile_build_placeholder_plugins(self):
return [
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
]
# created instances section
def list_instances(self):
"""List all created instances from current workfile which

View file

@ -1,7 +1,11 @@
import six
from abc import ABCMeta
from openpype.pipeline import LoaderPlugin
from .launch_logic import get_stub
@six.add_metaclass(ABCMeta)
class AfterEffectsLoader(LoaderPlugin):
@staticmethod
def get_stub():

View file

@ -0,0 +1,271 @@
import os.path
import uuid
import shutil
from openpype.pipeline import registered_host
from openpype.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from openpype.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
LoadPlaceholderItem,
CreatePlaceholderItem,
PlaceholderLoadMixin,
PlaceholderCreateMixin
)
from openpype.hosts.aftereffects.api import get_stub
from openpype.hosts.aftereffects.api.lib import set_settings
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
PLACEHOLDER_ID = "openpype.placeholder"
class AETemplateBuilder(AbstractTemplateBuilder):
"""Concrete implementation of AbstractTemplateBuilder for AE"""
def import_template(self, path):
"""Import template into current scene.
Block if a template is already loaded.
Args:
path (str): A path to current template (usually given by
get_template_preset implementation)
Returns:
bool: Whether the template was successfully imported or not
"""
stub = get_stub()
if not os.path.exists(path):
stub.print_msg(f"Template file on {path} doesn't exist.")
return
stub.save()
workfile_path = stub.get_active_document_full_name()
shutil.copy2(path, workfile_path)
stub.open(workfile_path)
return True
class AEPlaceholderPlugin(PlaceholderPlugin):
"""Contains generic methods for all PlaceholderPlugins."""
def collect_placeholders(self):
"""Collect info from file metadata about created placeholders.
Returns:
(list) of LoadPlaceholderItem
"""
output = []
scene_placeholders = self._collect_scene_placeholders()
for item in scene_placeholders:
if item.get("plugin_identifier") != self.identifier:
continue
if isinstance(self, AEPlaceholderLoadPlugin):
item = LoadPlaceholderItem(item["uuid"],
item["data"],
self)
elif isinstance(self, AEPlaceholderCreatePlugin):
item = CreatePlaceholderItem(item["uuid"],
item["data"],
self)
else:
raise NotImplementedError(f"Not implemented for {type(self)}")
output.append(item)
return output
def update_placeholder(self, placeholder_item, placeholder_data):
"""Resave changed properties for placeholders"""
item_id, metadata_item = self._get_item(placeholder_item)
stub = get_stub()
if not item_id:
stub.print_msg("Cannot find item for "
f"{placeholder_item.scene_identifier}")
return
metadata_item["data"] = placeholder_data
stub.imprint(item_id, metadata_item)
def _get_item(self, placeholder_item):
"""Returns item id and item metadata for placeholder from file meta"""
stub = get_stub()
placeholder_uuid = placeholder_item.scene_identifier
for metadata_item in stub.get_metadata():
if not metadata_item.get("is_placeholder"):
continue
if placeholder_uuid in metadata_item.get("uuid"):
return metadata_item["members"][0], metadata_item
return None, None
def _collect_scene_placeholders(self):
"""" Cache placeholder data to shared data.
Returns:
(list) of dicts
"""
placeholder_items = self.builder.get_shared_populate_data(
"placeholder_items"
)
if not placeholder_items:
placeholder_items = []
for item in get_stub().get_metadata():
if not item.get("is_placeholder"):
continue
placeholder_items.append(item)
self.builder.set_shared_populate_data(
"placeholder_items", placeholder_items
)
return placeholder_items
def _imprint_item(self, item_id, name, placeholder_data, stub):
if not item_id:
raise ValueError("Couldn't create a placeholder")
container_data = {
"id": "openpype.placeholder",
"name": name,
"is_placeholder": True,
"plugin_identifier": self.identifier,
"uuid": str(uuid.uuid4()), # scene_identifier
"data": placeholder_data,
"members": [item_id]
}
stub.imprint(item_id, container_data)
class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin):
"""Adds Create placeholder.
This adds composition and runs Create
"""
identifier = "aftereffects.create"
label = "AfterEffects create"
def create_placeholder(self, placeholder_data):
stub = get_stub()
name = "CREATEPLACEHOLDER"
item_id = stub.add_item(name, "COMP")
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Replace 'placeholder' with publishable instance.
Renames prepared composition name, creates publishable instance, sets
frame/duration settings according to DB.
"""
pre_create_data = {"use_selection": True}
item_id, item = self._get_item(placeholder)
get_stub().select_items([item_id])
self.populate_create_placeholder(placeholder, pre_create_data)
# apply settings for populated composition
item_id, metadata_item = self._get_item(placeholder)
set_settings(True, True, [item_id])
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)
class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "aftereffects.load"
label = "AfterEffects load"
def create_placeholder(self, placeholder_data):
"""Creates AE's Placeholder item in Project items list.
Sets dummy resolution/duration/fps settings, will be replaced when
populated.
"""
stub = get_stub()
name = "LOADERPLACEHOLDER"
item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Use Openpype Loader from `placeholder` to create new FootageItems
New FootageItems are created, files are imported.
"""
self.populate_load_placeholder(placeholder)
errors = placeholder.get_errors()
stub = get_stub()
if errors:
stub.print_msg("\n".join(errors))
else:
if not placeholder.data["keep_placeholder"]:
metadata = stub.get_metadata()
for item in metadata:
if not item.get("is_placeholder"):
continue
scene_identifier = item.get("uuid")
if (scene_identifier and
scene_identifier == placeholder.scene_identifier):
stub.delete_item(item["members"][0])
stub.remove_instance(placeholder.scene_identifier, metadata)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def load_succeed(self, placeholder, container):
placeholder_item_id, _ = self._get_item(placeholder)
item_id = container.id
get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
def build_workfile_template(*args, **kwargs):
builder = AETemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
def update_workfile_template(*args):
builder = AETemplateBuilder(registered_host())
builder.rebuild_template()
def create_placeholder(*args):
"""Called when new workile placeholder should be created."""
host = registered_host()
builder = AETemplateBuilder(host)
window = WorkfileBuildPlaceholderDialog(host, builder)
window.exec_()
def update_placeholder(*args):
"""Called after placeholder item is selected to modify it."""
host = registered_host()
builder = AETemplateBuilder(host)
stub = get_stub()
selected_items = stub.get_selected_items(True, True, True)
if len(selected_items) != 1:
stub.print_msg("Please select just 1 placeholder")
return
selected_id = selected_items[0].id
placeholder_item = None
placeholder_items_by_id = {
placeholder_item.scene_identifier: placeholder_item
for placeholder_item in builder.get_placeholders()
}
for metadata_item in stub.get_metadata():
if not metadata_item.get("is_placeholder"):
continue
if selected_id in metadata_item.get("members"):
placeholder_item = placeholder_items_by_id.get(
metadata_item["uuid"])
break
if not placeholder_item:
stub.print_msg("Didn't find placeholder metadata. "
"Remove and re-create placeholder.")
return
window = WorkfileBuildPlaceholderDialog(host, builder)
window.set_update_mode(placeholder_item)
window.exec_()
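A brief sketch of how these entry points are expected to be triggered; the websocket routes earlier in this diff call them from the extension panel, and the snippet assumes an AfterEffects host is already installed and registered:

from openpype.hosts.aftereffects.api.workfile_template_builder import (
    build_workfile_template,
    create_placeholder,
)

create_placeholder()       # opens WorkfileBuildPlaceholderDialog for a new placeholder
build_workfile_template()  # imports the template and populates its placeholders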

View file

@ -35,6 +35,8 @@ class AEItem(object):
instance_id = attr.ib(default=None) # New Publisher
width = attr.ib(default=None)
height = attr.ib(default=None)
is_placeholder = attr.ib(default=False)
uuid = attr.ib(default=False)
class AfterEffectsServerStub():
@ -220,6 +222,16 @@ class AfterEffectsServerStub():
)
return self._to_records(self._handle_return(res))
def select_items(self, items):
"""
Select items in Project list
Args:
items (list): of int item ids
"""
self.websocketserver.call(
self.client.call('AfterEffects.select_items', items=items))
def get_selected_items(self, comps, folders=False, footages=False):
"""
Same as get_items but using selected items only
@ -240,6 +252,21 @@ class AfterEffectsServerStub():
)
return self._to_records(self._handle_return(res))
def add_item(self, name, item_type):
"""
Adds either composition or folder to project item list.
Args:
name (str)
item_type (str): COMP|FOLDER
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item',
name=name,
item_type=item_type))
return self._handle_return(res)
def get_item(self, item_id):
"""
Returns metadata for particular 'item_id' or None
@ -316,7 +343,7 @@ class AfterEffectsServerStub():
return self._handle_return(res)
def remove_instance(self, instance_id):
def remove_instance(self, instance_id, metadata=None):
"""
Removes instance with 'instance_id' from file's metadata and
saves them.
@ -328,7 +355,10 @@ class AfterEffectsServerStub():
"""
cleaned_data = []
for instance in self.get_metadata():
if metadata is None:
metadata = self.get_metadata()
for instance in metadata:
inst_id = instance.get("instance_id") or instance.get("uuid")
if inst_id != instance_id:
cleaned_data.append(instance)
@ -534,6 +564,47 @@ class AfterEffectsServerStub():
if records:
return records.pop()
def add_item_instead_placeholder(self, placeholder_item_id, item_id):
"""
Adds item_id to layers where placeholder_item_id is present.
One placeholder could result in multiple loaded containers (e.g. items)
Args:
placeholder_item_id (int): id of placeholder item
item_id (int): loaded FootageItem id
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item_instead_placeholder', # noqa
placeholder_item_id=placeholder_item_id, # noqa
item_id=item_id))
return self._handle_return(res)
def add_placeholder(self, name, width, height, fps, duration):
"""
Adds new FootageItem as a placeholder for the workfile builder.
Placeholder requires width etc.; currently only hardcoded
values are used.
Args:
name (str)
width (int)
height (int)
fps (float)
duration (int)
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_placeholder',
name=name,
width=width,
height=height,
fps=fps,
duration=duration))
return self._handle_return(res)
def render(self, folder_url, comp_id):
"""
Render all renderqueueitem to 'folder_url'
@ -632,7 +703,8 @@ class AfterEffectsServerStub():
d.get('file_name'),
d.get("instance_id"),
d.get("width"),
d.get("height"))
d.get("height"),
d.get("is_placeholder"))
ret.append(item)
return ret
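A hedged sketch of the load-placeholder flow these new stub methods enable, mirroring AEPlaceholderLoadPlugin earlier in this diff; the footage item id is hypothetical:

from openpype.hosts.aftereffects.api import get_stub

stub = get_stub()

# create the AE PlaceholderItem with dummy resolution/fps/duration
placeholder_id = stub.add_placeholder("LOADERPLACEHOLDER", 1920, 1060, 25, 10)

# ...a loader imports the real footage, yielding a FootageItem id...
footage_item_id = 12345  # hypothetical id of the loaded FootageItem

# swap the loaded item into every composition that uses the placeholder
stub.add_item_instead_placeholder(placeholder_id, footage_item_id)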

View file

@ -1,17 +1,15 @@
import re
from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
)
from openpype.hosts.aftereffects import api
from openpype.hosts.aftereffects.api.lib import (
get_background_layers,
get_unique_layer_name,
)
class BackgroundLoader(AfterEffectsLoader):
class BackgroundLoader(api.AfterEffectsLoader):
"""
Load images from Background family
Creates for each background separate folder with all imported images
@ -21,6 +19,7 @@ class BackgroundLoader(AfterEffectsLoader):
For each load container is created and stored in project (.aep)
metadata
"""
label = "Load JSON Background"
families = ["background"]
representations = ["json"]
@ -48,7 +47,7 @@ class BackgroundLoader(AfterEffectsLoader):
self[:] = [comp]
namespace = namespace or comp_name
return containerise(
return api.containerise(
name,
namespace,
comp,

View file

@ -1,14 +1,11 @@
import re
from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
)
from openpype.hosts.aftereffects import api
from openpype.hosts.aftereffects.api.lib import get_unique_layer_name
class FileLoader(AfterEffectsLoader):
class FileLoader(api.AfterEffectsLoader):
"""Load images
Stores the imported asset in a container named after the asset.
@ -64,7 +61,7 @@ class FileLoader(AfterEffectsLoader):
self[:] = [comp]
namespace = namespace or comp_name
return containerise(
return api.containerise(
name,
namespace,
comp,

View file

@ -12,13 +12,13 @@ class SelectInvalidAction(pyblish.api.Action):
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):

View file

@ -411,6 +411,7 @@ def register():
pcoll.load("pyblish_menu_icon", str(pyblish_icon_file.absolute()), 'IMAGE')
PREVIEW_COLLECTIONS["avalon"] = pcoll
BlenderApplication.get_app()
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.TOPBAR_MT_editor_menus.append(draw_avalon_menu)

View file

@ -18,15 +18,13 @@ class SelectInvalidAction(pyblish.api.Action):
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):

View file

@ -21,8 +21,13 @@ from .pipeline import (
reset_selection
)
from .constants import (
OPENPYPE_TAG_NAME,
DEFAULT_SEQUENCE_NAME,
DEFAULT_BIN_NAME
)
from .lib import (
pype_tag_name,
flatten,
get_track_items,
get_current_project,
@ -82,8 +87,12 @@ __all__ = [
"file_extensions",
"work_root",
# Constants
"OPENPYPE_TAG_NAME",
"DEFAULT_SEQUENCE_NAME",
"DEFAULT_BIN_NAME",
# Lib functions
"pype_tag_name",
"flatten",
"get_track_items",
"get_current_project",

View file

@ -0,0 +1,3 @@
OPENPYPE_TAG_NAME = "openpypeData"
DEFAULT_SEQUENCE_NAME = "openpypeSequence"
DEFAULT_BIN_NAME = "openpypeBin"

View file

@ -5,7 +5,6 @@ Host specific functions where host api is connected
from copy import deepcopy
import os
import re
import sys
import platform
import functools
import warnings
@ -29,12 +28,22 @@ from openpype.pipeline import (
from openpype.pipeline.load import filter_containers
from openpype.lib import Logger
from . import tags
from .constants import (
OPENPYPE_TAG_NAME,
DEFAULT_SEQUENCE_NAME,
DEFAULT_BIN_NAME
)
from openpype.pipeline.colorspace import (
get_imageio_config
)
class _CTX:
has_been_setup = False
has_menu = False
parent_gui = None
class DeprecatedWarning(DeprecationWarning):
pass
@ -82,23 +91,14 @@ def deprecated(new_destination):
log = Logger.get_logger(__name__)
self = sys.modules[__name__]
self._has_been_setup = False
self._has_menu = False
self._registered_gui = None
self._parent = None
self.pype_tag_name = "openpypeData"
self.default_sequence_name = "openpypeSequence"
self.default_bin_name = "openpypeBin"
def flatten(_list):
for item in _list:
if isinstance(item, (list, tuple)):
for sub_item in flatten(item):
def flatten(list_):
for item_ in list_:
if isinstance(item_, (list, tuple)):
for sub_item in flatten(item_):
yield sub_item
else:
yield item
yield item_
def get_current_project(remove_untitled=False):
@ -131,7 +131,7 @@ def get_current_sequence(name=None, new=False):
if new:
# create new
name = name or self.default_sequence_name
name = name or DEFAULT_SEQUENCE_NAME
sequence = hiero.core.Sequence(name)
root_bin.addItem(hiero.core.BinItem(sequence))
elif name:
@ -345,7 +345,7 @@ def get_track_item_tags(track_item):
# collect all tags which are not openpype tag
returning_tag_data.extend(
tag for tag in _tags
if tag.name() != self.pype_tag_name
if tag.name() != OPENPYPE_TAG_NAME
)
return returning_tag_data
@ -385,7 +385,7 @@ def set_track_openpype_tag(track, data=None):
# if pype tag available then update with input data
tag = tags.create_tag(
"{}_{}".format(
self.pype_tag_name,
OPENPYPE_TAG_NAME,
_get_tag_unique_hash()
),
tag_data
@ -412,7 +412,7 @@ def get_track_openpype_tag(track):
return None
for tag in _tags:
# return only correct tag defined by global name
if self.pype_tag_name in tag.name():
if OPENPYPE_TAG_NAME in tag.name():
return tag
@ -484,7 +484,7 @@ def get_trackitem_openpype_tag(track_item):
return None
for tag in _tags:
# return only correct tag defined by global name
if self.pype_tag_name in tag.name():
if OPENPYPE_TAG_NAME in tag.name():
return tag
@ -516,7 +516,7 @@ def set_trackitem_openpype_tag(track_item, data=None):
# if pype tag available then update with input data
tag = tags.create_tag(
"{}_{}".format(
self.pype_tag_name,
OPENPYPE_TAG_NAME,
_get_tag_unique_hash()
),
tag_data
@ -698,29 +698,29 @@ def setup(console=False, port=None, menu=True):
menu (bool, optional): Display file menu in Hiero.
"""
if self._has_been_setup:
if _CTX.has_been_setup:
teardown()
add_submission()
if menu:
add_to_filemenu()
self._has_menu = True
_CTX.has_menu = True
self._has_been_setup = True
_CTX.has_been_setup = True
log.debug("pyblish: Loaded successfully.")
def teardown():
"""Remove integration"""
if not self._has_been_setup:
if not _CTX.has_been_setup:
return
if self._has_menu:
if _CTX.has_menu:
remove_from_filemenu()
self._has_menu = False
_CTX.has_menu = False
self._has_been_setup = False
_CTX.has_been_setup = False
log.debug("pyblish: Integration torn down successfully")
@ -928,7 +928,7 @@ def create_bin(path=None, project=None):
# get the first loaded project
project = project or get_current_project()
path = path or self.default_bin_name
path = path or DEFAULT_BIN_NAME
path = path.replace("\\", "/").split("/")
@ -1311,11 +1311,11 @@ def before_project_save(event):
def get_main_window():
"""Acquire Nuke's main window"""
if self._parent is None:
if _CTX.parent_gui is None:
top_widgets = QtWidgets.QApplication.topLevelWidgets()
name = "Foundry::UI::DockMainWindow"
main_window = next(widget for widget in top_widgets if
widget.inherits("QMainWindow") and
widget.metaObject().className() == name)
self._parent = main_window
return self._parent
_CTX.parent_gui = main_window
return _CTX.parent_gui

View file

@ -3,20 +3,18 @@
import os
import re
import sys
import ast
import opentimelineio as otio
from . import utils
import hiero.core
import hiero.ui
self = sys.modules[__name__]
self.track_types = {
TRACK_TYPE_MAP = {
hiero.core.VideoTrack: otio.schema.TrackKind.Video,
hiero.core.AudioTrack: otio.schema.TrackKind.Audio
}
self.project_fps = None
self.marker_color_map = {
MARKER_COLOR_MAP = {
"magenta": otio.schema.MarkerColor.MAGENTA,
"red": otio.schema.MarkerColor.RED,
"yellow": otio.schema.MarkerColor.YELLOW,
@ -24,30 +22,21 @@ self.marker_color_map = {
"cyan": otio.schema.MarkerColor.CYAN,
"blue": otio.schema.MarkerColor.BLUE,
}
self.timeline = None
self.include_tags = True
def flatten(_list):
for item in _list:
if isinstance(item, (list, tuple)):
for sub_item in flatten(item):
class CTX:
project_fps = None
timeline = None
include_tags = True
def flatten(list_):
for item_ in list_:
if isinstance(item_, (list, tuple)):
for sub_item in flatten(item_):
yield sub_item
else:
yield item
def get_current_hiero_project(remove_untitled=False):
projects = flatten(hiero.core.projects())
if not remove_untitled:
return next(iter(projects))
# if remove_untitled
for proj in projects:
if "Untitled" in proj.name():
proj.close()
else:
return proj
yield item_
def create_otio_rational_time(frame, fps):
@ -152,7 +141,7 @@ def create_otio_reference(clip):
file_head = media_source.filenameHead()
is_sequence = not media_source.singleFile()
frame_duration = media_source.duration()
fps = utils.get_rate(clip) or self.project_fps
fps = utils.get_rate(clip) or CTX.project_fps
extension = os.path.splitext(path)[-1]
if is_sequence:
@ -217,8 +206,8 @@ def get_marker_color(tag):
res = re.search(pat, icon)
if res:
color = res.groupdict().get('color')
if color.lower() in self.marker_color_map:
return self.marker_color_map[color.lower()]
if color.lower() in MARKER_COLOR_MAP:
return MARKER_COLOR_MAP[color.lower()]
return otio.schema.MarkerColor.RED
@ -232,7 +221,7 @@ def create_otio_markers(otio_item, item):
# Hiero adds this tag to a lot of clips
continue
frame_rate = utils.get_rate(item) or self.project_fps
frame_rate = utils.get_rate(item) or CTX.project_fps
marked_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(
@ -279,7 +268,7 @@ def create_otio_clip(track_item):
duration = int(track_item.duration())
fps = utils.get_rate(track_item) or self.project_fps
fps = utils.get_rate(track_item) or CTX.project_fps
name = track_item.name()
media_reference = create_otio_reference(clip)
@ -296,7 +285,7 @@ def create_otio_clip(track_item):
)
# Add tags as markers
if self.include_tags:
if CTX.include_tags:
create_otio_markers(otio_clip, track_item)
create_otio_markers(otio_clip, track_item.source())
@ -319,13 +308,13 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
def _create_otio_timeline():
project = get_current_hiero_project(remove_untitled=False)
metadata = _get_metadata(self.timeline)
project = CTX.timeline.project()
metadata = _get_metadata(CTX.timeline)
metadata.update({
"openpype.timeline.width": int(self.timeline.format().width()),
"openpype.timeline.height": int(self.timeline.format().height()),
"openpype.timeline.pixelAspect": int(self.timeline.format().pixelAspect()), # noqa
"openpype.timeline.width": int(CTX.timeline.format().width()),
"openpype.timeline.height": int(CTX.timeline.format().height()),
"openpype.timeline.pixelAspect": int(CTX.timeline.format().pixelAspect()), # noqa
"openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(), # noqa
"openpype.project.lutSetting16Bit": project.lutSetting16Bit(),
"openpype.project.lutSetting8Bit": project.lutSetting8Bit(),
@ -339,10 +328,10 @@ def _create_otio_timeline():
})
start_time = create_otio_rational_time(
self.timeline.timecodeStart(), self.project_fps)
CTX.timeline.timecodeStart(), CTX.project_fps)
return otio.schema.Timeline(
name=self.timeline.name(),
name=CTX.timeline.name(),
global_start_time=start_time,
metadata=metadata
)
@ -351,7 +340,7 @@ def _create_otio_timeline():
def create_otio_track(track_type, track_name):
return otio.schema.Track(
name=track_name,
kind=self.track_types[track_type]
kind=TRACK_TYPE_MAP[track_type]
)
@ -363,7 +352,7 @@ def add_otio_gap(track_item, otio_track, prev_out):
gap = otio.opentime.TimeRange(
duration=otio.opentime.RationalTime(
gap_length,
self.project_fps
CTX.project_fps
)
)
otio_gap = otio.schema.Gap(source_range=gap)
@ -396,14 +385,14 @@ def create_otio_timeline():
return track_item.parent().items()[itemindex - 1]
# get current timeline
self.timeline = hiero.ui.activeSequence()
self.project_fps = self.timeline.framerate().toFloat()
CTX.timeline = hiero.ui.activeSequence()
CTX.project_fps = CTX.timeline.framerate().toFloat()
# convert timeline to otio
otio_timeline = _create_otio_timeline()
# loop all defined track types
for track in self.timeline.items():
for track in CTX.timeline.items():
# skip if track is disabled
if not track.isEnabled():
continue
@ -441,7 +430,7 @@ def create_otio_timeline():
otio_track.append(otio_clip)
# Add tags as markers
if self.include_tags:
if CTX.include_tags:
create_otio_markers(otio_track, track)
# add track to otio timeline

View file

@ -310,7 +310,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
# add pypedata marker to otio_clip metadata
for marker in otio_clip.markers:
if phiero.pype_tag_name in marker.name:
if phiero.OPENPYPE_TAG_NAME in marker.name:
otio_clip.metadata.update(marker.metadata)
return {"otioClip": otio_clip}

View file

@ -8,7 +8,6 @@ from qtpy.QtGui import QPixmap
import hiero.ui
from openpype.pipeline import legacy_io
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.api.otio import hiero_export
@ -22,8 +21,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
asset = legacy_io.Session["AVALON_ASSET"]
subset = "workfile"
project = phiero.get_current_project()
active_timeline = hiero.ui.activeSequence()
project = active_timeline.project()
fps = active_timeline.framerate().toFloat()
# adding otio timeline to context

View file

@ -17,15 +17,13 @@ class SelectInvalidAction(pyblish.api.Action):
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
@ -44,3 +42,42 @@ class SelectInvalidAction(pyblish.api.Action):
node.setCurrent(True)
else:
self.log.info("No invalid nodes found.")
class SelectROPAction(pyblish.api.Action):
"""Select ROP.
It's used to select the associated ROPs with the errored instances.
"""
label = "Select ROP"
on = "failed" # This action is only available on a failed plug-in
icon = "mdi.cursor-default-click"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding ROP nodes..")
rop_nodes = list()
for instance in errored_instances:
node_path = instance.data.get("instance_node")
if not node_path:
continue
node = hou.node(node_path)
if not node:
continue
rop_nodes.append(node)
hou.clearAllSelected()
if rop_nodes:
self.log.info("Selecting ROP nodes: {}".format(
", ".join(node.path() for node in rop_nodes)
))
for node in rop_nodes:
node.setSelected(True)
node.setCurrent(True)
else:
self.log.info("No ROP nodes found.")

View file

@ -633,23 +633,8 @@ def evalParmNoFrame(node, parm, pad_character="#"):
def get_color_management_preferences():
"""Get default OCIO preferences"""
data = {
"config": hou.Color.ocio_configPath()
return {
"config": hou.Color.ocio_configPath(),
"display": hou.Color.ocio_defaultDisplay(),
"view": hou.Color.ocio_defaultView()
}
# Get default display and view from OCIO
display = hou.Color.ocio_defaultDisplay()
disp_regex = re.compile(r"^(?P<name>.+-)(?P<display>.+)$")
disp_match = disp_regex.match(display)
view = hou.Color.ocio_defaultView()
view_regex = re.compile(r"^(?P<name>.+- )(?P<view>.+)$")
view_match = view_regex.match(view)
data.update({
"display": disp_match.group("display"),
"view": view_match.group("view")
})
return data
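A minimal usage sketch of the refactored helper, assuming an OCIO config is active in the Houdini session; the commented values are illustrative only:

prefs = get_color_management_preferences()
# e.g. {"config": "/path/to/config.ocio", "display": "ACES", "view": "sRGB"}
print(prefs["config"], prefs["display"], prefs["view"])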

View file

@ -93,7 +93,7 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
import hdefereval # noqa, hdefereval is only available in ui mode
hdefereval.executeDeferred(creator_node_shelves.install)
def has_unsaved_changes(self):
def workfile_has_unsaved_changes(self):
return hou.hipFile.hasUnsavedChanges()
def get_workfile_extensions(self):

View file

@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
import hou
@ -14,15 +13,13 @@ class CreatePointCache(plugin.HoudiniCreator):
icon = "gears"
def create(self, subset_name, instance_data, pre_create_data):
import hou
instance_data.pop("active", None)
instance_data.update({"node_type": "alembic"})
instance = super(CreatePointCache, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
pre_create_data)
instance_node = hou.node(instance.get("instance_node"))
parms = {
@ -37,13 +34,44 @@ class CreatePointCache(plugin.HoudiniCreator):
}
if self.selected_nodes:
parms["sop_path"] = self.selected_nodes[0].path()
selected_node = self.selected_nodes[0]
# try to find output node
for child in self.selected_nodes[0].children():
if child.type().name() == "output":
parms["sop_path"] = child.path()
break
# Although Houdini allows an ObjNode path on `sop_path` for the
# ROP node, we prefer it set to the SopNode path explicitly
# Allow sop level paths (e.g. /obj/geo1/box1)
if isinstance(selected_node, hou.SopNode):
parms["sop_path"] = selected_node.path()
self.log.debug(
"Valid SopNode selection, 'SOP Path' in ROP will be set to '%s'."
% selected_node.path()
)
# Allow object level paths to Geometry nodes (e.g. /obj/geo1)
# but do not allow other object level nodes types like cameras, etc.
elif isinstance(selected_node, hou.ObjNode) and \
selected_node.type().name() in ["geo"]:
# get the output node with the minimum
# 'outputidx' or the node with display flag
sop_path = self.get_obj_output(selected_node)
if sop_path:
parms["sop_path"] = sop_path.path()
self.log.debug(
"Valid ObjNode selection, 'SOP Path' in ROP will be set to "
"the child path '%s'."
% sop_path.path()
)
if not parms.get("sop_path", None):
self.log.debug(
"Selection isn't valid. 'SOP Path' in ROP will be empty."
)
else:
self.log.debug(
"No Selection. 'SOP Path' in ROP will be empty."
)
instance_node.setParms(parms)
instance_node.parm("trange").set(1)
@ -57,3 +85,23 @@ class CreatePointCache(plugin.HoudiniCreator):
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]
def get_obj_output(self, obj_node):
"""Find output node with the smallest 'outputidx'."""
outputs = obj_node.subnetOutputs()
# if obj_node is empty
if not outputs:
return
# if obj_node has one output child, whether it's a
# sop output node or a node with the render flag
elif len(outputs) == 1:
return outputs[0]
# if there is more than one, the node has multiple output nodes
# return the one with the minimum 'outputidx'
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))

View file

@ -3,12 +3,12 @@
import hou # noqa
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import EnumDef
class CreateRedshiftROP(plugin.HoudiniCreator):
"""Redshift ROP"""
identifier = "io.openpype.creators.houdini.redshift_rop"
label = "Redshift ROP"
family = "redshift_rop"
@ -28,7 +28,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
instance = super(CreateRedshiftROP, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
pre_create_data)
instance_node = hou.node(instance.get("instance_node"))
@ -57,6 +57,8 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
fmt="${aov}.$F4.{ext}".format(aov="AOV", ext=ext)
)
ext_format_index = {"exr": 0, "tif": 1, "jpg": 2, "png": 3}
parms = {
# Render frame range
"trange": 1,
@ -64,6 +66,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
"RS_outputFileNamePrefix": filepath,
"RS_outputMultilayerMode": "1", # no multi-layered exr
"RS_outputBeautyAOVSuffix": "beauty",
"RS_outputFileFormat": ext_format_index[ext],
}
if self.selected_nodes:
@ -93,8 +96,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
def get_pre_create_attr_defs(self):
attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
"exr", "tif", "jpg", "png",
]
return attrs + [

View file

@ -50,7 +50,7 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
num_aovs = rop.evalParm("ar_aovs")
for index in range(1, num_aovs + 1):
# Skip disabled AOVs
if not rop.evalParm("ar_enable_aovP{}".format(index)):
if not rop.evalParm("ar_enable_aov{}".format(index)):
continue
if rop.evalParm("ar_aov_exr_enable_layer_name{}".format(index)):

View file

@ -19,7 +19,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
"Collected filename from current scene name."
)
if host.has_unsaved_changes():
if host.workfile_has_unsaved_changes():
self.log.info("Saving current file: {}".format(current_file))
host.save_workfile(current_file)
else:

View file

@ -73,6 +73,14 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
cls.log.debug("Checking Primitive to Detail pattern: %s" % pattern)
cls.log.debug("Checking with path attribute: %s" % path_attr)
if not hasattr(output_node, "geometry"):
# In case someone has explicitly set an Object
# node instead of a SOP node in the Geometry context,
# we ignore it for now - this allows us to also
# export object transforms.
cls.log.warning("No geometry output node found, skipping check..")
return
# Check if the primitive attribute exists
frame = instance.data.get("frameStart", 0)
geo = output_node.geometryAtFrame(frame)

View file

@ -60,6 +60,14 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):
cls.log.debug("Checking for attribute: %s" % path_attr)
if not hasattr(output_node, "geometry"):
# In case someone has explicitly set an Object
# node instead of a SOP node in the Geometry context,
# we ignore it for now - this allows us to also
# export object transforms.
cls.log.warning("No geometry output node found, skipping check..")
return
# Check if the primitive attribute exists
frame = instance.data.get("frameStart", 0)
geo = output_node.geometryAtFrame(frame)

View file

@ -1,6 +1,12 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.hosts.houdini.api.action import (
SelectInvalidAction,
SelectROPAction,
)
import hou
class ValidateSopOutputNode(pyblish.api.InstancePlugin):
@ -19,6 +25,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
families = ["pointcache", "vdbcache"]
hosts = ["houdini"]
label = "Validate Output Node"
actions = [SelectROPAction, SelectInvalidAction]
def process(self, instance):
@ -31,9 +38,6 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
@classmethod
def get_invalid(cls, instance):
import hou
output_node = instance.data.get("output_node")
if output_node is None:
@ -43,7 +47,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
"Ensure a valid SOP output path is set." % node.path()
)
return [node.path()]
return [node]
# Output node must be a Sop node.
if not isinstance(output_node, hou.SopNode):
@ -53,7 +57,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
"instead found category type: %s"
% (output_node.path(), output_node.type().category().name())
)
return [output_node.path()]
return [output_node]
# For the sake of completeness also assert the category type
# is Sop to avoid potential edge case scenarios even though
@ -73,11 +77,11 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
except hou.Error as exc:
cls.log.error("Cook failed: %s" % exc)
cls.log.error(output_node.errors()[0])
return [output_node.path()]
return [output_node]
# Ensure the output node has at least Geometry data
if not output_node.geometry():
cls.log.error(
"Output node `%s` has no geometry data." % output_node.path()
)
return [output_node.path()]
return [output_node]

View file

@ -53,6 +53,8 @@ def update_mode_context(mode):
def get_geometry_at_frame(sop_node, frame, force=True):
"""Return geometry at frame but force a cooked value."""
if not hasattr(sop_node, "geometry"):
return
with update_mode_context(hou.updateMode.AutoUpdate):
sop_node.cook(force=force, frame_range=(frame, frame))
return sop_node.geometryAtFrame(frame)

View file

@ -78,6 +78,14 @@ def read(container) -> dict:
value.startswith(JSON_PREFIX):
with contextlib.suppress(json.JSONDecodeError):
value = json.loads(value[len(JSON_PREFIX):])
# default value behavior:
# convert MAXScript boolean string values to Python booleans
if value == "true":
value = True
elif value == "false":
value = False
data[key.strip()] = value
data["instance_node"] = container.Name
@ -250,10 +258,7 @@ def reset_frame_range(fps: bool = True):
frame_range["handleStart"]
)
frame_end_handle = frame_range["frameEnd"] + int(frame_range["handleEnd"])
frange_cmd = (
f"animationRange = interval {frame_start_handle} {frame_end_handle}"
)
rt.Execute(frange_cmd)
set_timeline(frame_start_handle, frame_end_handle)
set_render_frame_range(frame_start_handle, frame_end_handle)
@ -285,3 +290,25 @@ def get_max_version():
"""
max_info = rt.MaxVersion()
return max_info[7]
@contextlib.contextmanager
def viewport_camera(camera):
original = rt.viewport.getCamera()
if not original:
# if there is no original camera
# use the current camera as original
original = rt.getNodeByName(camera)
review_camera = rt.getNodeByName(camera)
try:
rt.viewport.setCamera(review_camera)
yield
finally:
rt.viewport.setCamera(original)
def set_timeline(frameStart, frameEnd):
"""Set frame range for timeline editor in Max
"""
rt.animationRange = rt.interval(frameStart, frameEnd)
return rt.animationRange
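A hedged usage sketch of the two new helpers; the camera name, output path and frame range are hypothetical, and the CreatePreview call mirrors ExtractReviewAnimation later in this diff:

from pymxs import runtime as rt
from openpype.hosts.max.api.lib import set_timeline, viewport_camera

set_timeline(1001, 1100)            # frame range shown in the timeline editor

with viewport_camera("Camera001"):  # temporarily look through the review camera
    rt.execute(
        'CreatePreview filename:"C:/temp/review..png" '
        'outputAVI:false start:1001 end:1100 fps:25'
    )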

View file

@ -15,6 +15,7 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
parameters main rollout:OPparams
(
all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on
sel_list type:#stringTab tabSize:0 tabSizeVariable:on
)
rollout OPparams "OP Parameters"
@ -30,11 +31,42 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
handle_name = obj_name + "<" + handle as string + ">"
return handle_name
)
fn nodes_to_add node =
(
sceneObjs = #()
if classOf node == Container do return false
n = node as string
for obj in Objects do
(
tmp_obj = obj as string
append sceneObjs tmp_obj
)
if sel_list != undefined do
(
for obj in sel_list do
(
idx = findItem sceneObjs obj
if idx do
(
deleteItem sceneObjs idx
)
)
)
idx = findItem sceneObjs n
if idx then return true else false
)
fn nodes_to_rmv node =
(
n = node as string
idx = findItem sel_list n
if idx then return true else false
)
on button_add pressed do
(
current_selection = selectByName title:"Select Objects to add to
the Container" buttontext:"Add"
the Container" buttontext:"Add" filter:nodes_to_add
if current_selection == undefined then return False
temp_arr = #()
i_node_arr = #()
@ -42,8 +74,14 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
(
handle_name = node_to_name c
node_ref = NodeTransformMonitor node:c
idx = finditem list_node.items handle_name
if idx do (
continue
)
name = c as string
append temp_arr handle_name
append i_node_arr node_ref
append sel_list name
)
all_handles = join i_node_arr all_handles
list_node.items = join temp_arr list_node.items
@ -52,7 +90,7 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
on button_del pressed do
(
current_selection = selectByName title:"Select Objects to remove
from the Container" buttontext:"Remove"
from the Container" buttontext:"Remove" filter: nodes_to_rmv
if current_selection == undefined then return False
temp_arr = #()
i_node_arr = #()
@ -63,6 +101,7 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
(
node_ref = NodeTransformMonitor node:c as string
handle_name = node_to_name c
n = c as string
tmp_all_handles = #()
for i in all_handles do
(
@ -80,6 +119,11 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
(
new_temp_arr = DeleteItem list_node.items idx
)
idx = finditem sel_list n
if idx do
(
sel_list = DeleteItem sel_list idx
)
)
all_handles = join i_node_arr new_i_node_arr
list_node.items = join temp_arr new_temp_arr

View file

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating review in Max."""
from openpype.hosts.max.api import plugin
from openpype.lib import BoolDef, EnumDef, NumberDef
class CreateReview(plugin.MaxCreator):
"""Review in 3dsMax"""
identifier = "io.openpype.creators.max.review"
label = "Review"
family = "review"
icon = "video-camera"
def create(self, subset_name, instance_data, pre_create_data):
instance_data["imageFormat"] = pre_create_data.get("imageFormat")
instance_data["keepImages"] = pre_create_data.get("keepImages")
instance_data["percentSize"] = pre_create_data.get("percentSize")
instance_data["rndLevel"] = pre_create_data.get("rndLevel")
super(CreateReview, self).create(
subset_name,
instance_data,
pre_create_data)
def get_pre_create_attr_defs(self):
attrs = super(CreateReview, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "hdr", "rgb", "png",
"rla", "rpf", "dds", "sgi", "tga", "tif", "vrimg"
]
rndLevel_enum = [
"smoothhighlights", "smooth", "facethighlights",
"facet", "flat", "litwireframe", "wireframe", "box"
]
return attrs + [
BoolDef("keepImages",
label="Keep Image Sequences",
default=False),
EnumDef("imageFormat",
image_format_enum,
default="png",
label="Image Format Options"),
NumberDef("percentSize",
label="Percent of Output",
default=100,
minimum=1,
decimals=0),
EnumDef("rndLevel",
rndLevel_enum,
default="smoothhighlights",
label="Preference")
]

View file

@ -0,0 +1,92 @@
# don't forget to get the focal length for the burnin
"""Collect Review"""
import pyblish.api
from pymxs import runtime as rt
from openpype.lib import BoolDef
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
class CollectReview(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
"""Collect Review Data for Preview Animation"""
order = pyblish.api.CollectorOrder + 0.02
label = "Collect Review Data"
hosts = ['max']
families = ["review"]
def process(self, instance):
nodes = instance.data["members"]
focal_length = None
camera_name = None
for node in nodes:
if rt.classOf(node) in rt.Camera.classes:
camera_name = node.name
focal_length = node.fov
attr_values = self.get_attr_values_from_data(instance.data)
data = {
"review_camera": camera_name,
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"fps": instance.context.data["fps"],
"dspGeometry": attr_values.get("dspGeometry"),
"dspShapes": attr_values.get("dspShapes"),
"dspLights": attr_values.get("dspLights"),
"dspCameras": attr_values.get("dspCameras"),
"dspHelpers": attr_values.get("dspHelpers"),
"dspParticles": attr_values.get("dspParticles"),
"dspBones": attr_values.get("dspBones"),
"dspBkg": attr_values.get("dspBkg"),
"dspGrid": attr_values.get("dspGrid"),
"dspSafeFrame": attr_values.get("dspSafeFrame"),
"dspFrameNums": attr_values.get("dspFrameNums")
}
# Enable ftrack functionality
instance.data.setdefault("families", []).append('ftrack')
burnin_members = instance.data.setdefault("burninDataMembers", {})
burnin_members["focalLength"] = focal_length
self.log.debug(f"data:{data}")
instance.data.update(data)
@classmethod
def get_attribute_defs(cls):
return [
BoolDef("dspGeometry",
label="Geometry",
default=True),
BoolDef("dspShapes",
label="Shapes",
default=False),
BoolDef("dspLights",
label="Lights",
default=False),
BoolDef("dspCameras",
label="Cameras",
default=False),
BoolDef("dspHelpers",
label="Helpers",
default=False),
BoolDef("dspParticles",
label="Particle Systems",
default=True),
BoolDef("dspBones",
label="Bone Objects",
default=False),
BoolDef("dspBkg",
label="Background",
default=True),
BoolDef("dspGrid",
label="Active Grid",
default=False),
BoolDef("dspSafeFrame",
label="Safe Frames",
default=False),
BoolDef("dspFrameNums",
label="Frame Numbers",
default=False)
]

View file

@ -56,7 +56,7 @@ class ExtractAlembic(publish.Extractor):
container = instance.data["instance_node"]
self.log.info("Extracting pointcache ...")
self.log.debug("Extracting pointcache ...")
parent_dir = self.staging_dir(instance)
file_name = "{name}.abc".format(**instance.data)

View file

@ -0,0 +1,102 @@
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version
class ExtractReviewAnimation(publish.Extractor):
"""
Extract Review by Review Animation
"""
order = pyblish.api.ExtractorOrder + 0.001
label = "Extract Review Animation"
hosts = ["max"]
families = ["review"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
ext = instance.data.get("imageFormat")
filename = "{0}..{1}".format(instance.name, ext)
start = int(instance.data["frameStart"])
end = int(instance.data["frameEnd"])
fps = int(instance.data["fps"])
filepath = os.path.join(staging_dir, filename)
filepath = filepath.replace("\\", "/")
filenames = self.get_files(
instance.name, start, end, ext)
self.log.debug(
"Writing Review Animation to"
" '%s' to '%s'" % (filename, staging_dir))
review_camera = instance.data["review_camera"]
with viewport_camera(review_camera):
preview_arg = self.set_preview_arg(
instance, filepath, start, end, fps)
rt.execute(preview_arg)
tags = ["review"]
if not instance.data.get("keepImages"):
tags.append("delete")
self.log.debug("Performing Extraction ...")
representation = {
"name": instance.data["imageFormat"],
"ext": instance.data["imageFormat"],
"files": filenames,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"tags": tags,
"preview": True,
"camera_name": review_camera
}
self.log.debug(f"{representation}")
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)
def get_files(self, filename, start, end, ext):
file_list = []
for frame in range(int(start), int(end) + 1):
actual_name = "{}.{:04}.{}".format(
filename, frame, ext)
file_list.append(actual_name)
return file_list
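# A quick sketch of the naming get_files() produces (hypothetical values):
#   get_files("reviewMain", 1001, 1003, "png")
#   -> ["reviewMain.1001.png", "reviewMain.1002.png", "reviewMain.1003.png"]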
def set_preview_arg(self, instance, filepath,
start, end, fps):
job_args = list()
default_option = f'CreatePreview filename:"{filepath}"'
job_args.append(default_option)
frame_option = f"outputAVI:false start:{start} end:{end} fps:{fps}" # noqa
job_args.append(frame_option)
rndLevel = instance.data.get("rndLevel")
if rndLevel:
option = f"rndLevel:#{rndLevel}"
job_args.append(option)
options = [
"percentSize", "dspGeometry", "dspShapes",
"dspLights", "dspCameras", "dspHelpers", "dspParticles",
"dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
]
for key in options:
enabled = instance.data.get(key)
if enabled:
job_args.append(f"{key}:{enabled}")
if get_max_version() == 2024:
# hardcoded for current stage
auto_play_option = "autoPlay:false"
job_args.append(auto_play_option)
job_str = " ".join(job_args)
self.log.debug(job_str)
return job_str
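For reference, a sketch of the MAXScript command set_preview_arg composes with default creator attributes (instance name, frame range and staging path are illustrative; autoPlay is only appended on 3ds Max 2024):
CreatePreview filename:"<staging_dir>/reviewMain..png" outputAVI:false start:1001 end:1100 fps:25 rndLevel:#smoothhighlights percentSize:100 dspGeometry:True dspParticles:True dspBkg:True autoPlay:false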

View file

@ -0,0 +1,91 @@
import os
import tempfile
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version
class ExtractThumbnail(publish.Extractor):
"""
Extract Thumbnail for Review
"""
order = pyblish.api.ExtractorOrder
label = "Extract Thumbnail"
hosts = ["max"]
families = ["review"]
def process(self, instance):
# TODO: Create temp directory for thumbnail
# - this is to avoid "override" of source file
tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
self.log.debug(
f"Create temp directory {tmp_staging} for thumbnail"
)
fps = int(instance.data["fps"])
frame = int(instance.data["frameStart"])
instance.context.data["cleanupFullPaths"].append(tmp_staging)
filename = "{name}_thumbnail..png".format(**instance.data)
filepath = os.path.join(tmp_staging, filename)
filepath = filepath.replace("\\", "/")
thumbnail = self.get_filename(instance.name, frame)
self.log.debug(
"Writing Thumbnail to"
" '%s' to '%s'" % (filename, tmp_staging))
review_camera = instance.data["review_camera"]
with viewport_camera(review_camera):
preview_arg = self.set_preview_arg(
instance, filepath, fps, frame)
rt.execute(preview_arg)
representation = {
"name": "thumbnail",
"ext": "png",
"files": thumbnail,
"stagingDir": tmp_staging,
"thumbnail": True
}
self.log.debug(f"{representation}")
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)
def get_filename(self, filename, target_frame):
thumbnail_name = "{}_thumbnail.{:04}.png".format(
filename, target_frame
)
return thumbnail_name
def set_preview_arg(self, instance, filepath, fps, frame):
job_args = list()
default_option = f'CreatePreview filename:"{filepath}"'
job_args.append(default_option)
frame_option = f"outputAVI:false start:{frame} end:{frame} fps:{fps}" # noqa
job_args.append(frame_option)
rndLevel = instance.data.get("rndLevel")
if rndLevel:
option = f"rndLevel:#{rndLevel}"
job_args.append(option)
options = [
"percentSize", "dspGeometry", "dspShapes",
"dspLights", "dspCameras", "dspHelpers", "dspParticles",
"dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
]
for key in options:
enabled = instance.data.get(key)
if enabled:
job_args.append(f"{key}:{enabled}")
if get_max_version() == 2024:
# hardcoded for current stage
auto_play_option = "autoPlay:false"
job_args.append(auto_play_option)
job_str = " ".join(job_args)
self.log.debug(job_str)
return job_str

View file

@ -0,0 +1,48 @@
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError
)
from openpype.hosts.max.api.lib import get_frame_range, set_timeline
class ValidateAnimationTimeline(pyblish.api.InstancePlugin):
"""
Validates Animation Timeline for Preview Animation in Max
"""
label = "Animation Timeline for Review"
order = ValidateContentsOrder
families = ["review"]
hosts = ["max"]
actions = [RepairAction]
def process(self, instance):
frame_range = get_frame_range()
frame_start_handle = frame_range["frameStart"] - int(
frame_range["handleStart"]
)
frame_end_handle = frame_range["frameEnd"] + int(
frame_range["handleEnd"]
)
if rt.animationRange.start != frame_start_handle or (
rt.animationRange.end != frame_end_handle
):
raise PublishValidationError("Incorrect animation timeline "
"set for preview animation.\n"
"You can use the repair action to "
"set the correct animation timeline.")
@classmethod
def repair(cls, instance):
frame_range = get_frame_range()
frame_start_handle = frame_range["frameStart"] - int(
frame_range["handleStart"]
)
frame_end_handle = frame_range["frameEnd"] + int(
frame_range["handleEnd"]
)
set_timeline(frame_start_handle, frame_end_handle)

View file

@ -11,7 +11,7 @@ class ValidateCameraContent(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
families = ["camera"]
families = ["camera", "review"]
hosts = ["max"]
label = "Camera Contents"
camera_type = ["$Free_Camera", "$Target_Camera",

View file

@ -13,7 +13,8 @@ class ValidateMaxContents(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder
families = ["camera",
"maxScene",
"maxrender"]
"maxrender",
"review"]
hosts = ["max"]
label = "Max Scene Contents"

View file

@ -111,15 +111,13 @@ class SelectInvalidAction(pyblish.api.Action):
except ImportError:
raise ImportError("Current host is not Maya")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):

View file

@ -3,7 +3,6 @@
import os
from pprint import pformat
import sys
import platform
import uuid
import re
@ -33,13 +32,11 @@ from openpype.pipeline import (
load_container,
registered_host,
)
from openpype.pipeline.create import (
legacy_create,
get_legacy_creator_by_name,
)
from openpype.lib import NumberDef
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.pipeline.create import CreateContext
from openpype.pipeline.context_tools import (
get_current_asset_name,
get_current_project_asset,
get_current_project_name,
get_current_task_name
)
@ -123,16 +120,14 @@ FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
RENDERLIKE_INSTANCE_FAMILIES = ["rendering", "vrayscene"]
DISPLAY_LIGHTS_VALUES = [
"project_settings", "default", "all", "selected", "flat", "none"
]
DISPLAY_LIGHTS_LABELS = [
"Use Project Settings",
"Default Lighting",
"All Lights",
"Selected Lights",
"Flat Lighting",
"No Lights"
DISPLAY_LIGHTS_ENUM = [
{"label": "Use Project Settings", "value": "project_settings"},
{"label": "Default Lighting", "value": "default"},
{"label": "All Lights", "value": "all"},
{"label": "Selected Lights", "value": "selected"},
{"label": "Flat Lighting", "value": "flat"},
{"label": "No Lights", "value": "none"}
]
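A hedged usage sketch for the new enum structure (assuming openpype.lib.EnumDef accepts label/value item dicts; the attribute name is illustrative):
EnumDef("displayLights", items=DISPLAY_LIGHTS_ENUM, default="project_settings", label="Display Lights")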
@ -344,8 +339,8 @@ def pairwise(iterable):
return zip(a, a)
def collect_animation_data(fps=False):
"""Get the basic animation data
def collect_animation_defs(fps=False):
"""Get the basic animation attribute defintions for the publisher.
Returns:
OrderedDict
@ -364,17 +359,42 @@ def collect_animation_data(fps=False):
handle_end = frame_end_handle - frame_end
# build attributes
data = OrderedDict()
data["frameStart"] = frame_start
data["frameEnd"] = frame_end
data["handleStart"] = handle_start
data["handleEnd"] = handle_end
data["step"] = 1.0
defs = [
NumberDef("frameStart",
label="Frame Start",
default=frame_start,
decimals=0),
NumberDef("frameEnd",
label="Frame End",
default=frame_end,
decimals=0),
NumberDef("handleStart",
label="Handle Start",
default=handle_start,
decimals=0),
NumberDef("handleEnd",
label="Handle End",
default=handle_end,
decimals=0),
NumberDef("step",
label="Step size",
tooltip="A smaller step size means more samples and larger "
"output files.\n"
"A 1.0 step size is a single sample every frame.\n"
"A 0.5 step size is two samples per frame.\n"
"A 0.2 step size is five samples per frame.",
default=1.0,
decimals=3),
]
if fps:
data["fps"] = mel.eval('currentTimeUnitToFPS()')
current_fps = mel.eval('currentTimeUnitToFPS()')
fps_def = NumberDef(
"fps", label="FPS", default=current_fps, decimals=5
)
defs.append(fps_def)
return data
return defs
def imprint(node, data):
@ -460,10 +480,10 @@ def lsattrs(attrs):
attrs (dict): Name and value pairs of expected matches
Example:
>> # Return nodes with an `age` of five.
>> lsattr({"age": "five"})
>> # Return nodes with both `age` and `color` of five and blue.
>> lsattr({"age": "five", "color": "blue"})
>>> # Return nodes with an `age` of five.
>>> lsattrs({"age": "five"})
>>> # Return nodes with both `age` and `color` of five and blue.
>>> lsattrs({"age": "five", "color": "blue"})
Return:
list: matching nodes.
@ -1523,7 +1543,15 @@ def set_attribute(attribute, value, node):
cmds.addAttr(node, longName=attribute, **kwargs)
node_attr = "{}.{}".format(node, attribute)
if "dataType" in kwargs:
enum_type = cmds.attributeQuery(attribute, node=node, enum=True)
if enum_type and value_type == "str":
enum_string_values = cmds.attributeQuery(
attribute, node=node, listEnum=True
)[0].split(":")
cmds.setAttr(
"{}.{}".format(node, attribute), enum_string_values.index(value)
)
elif "dataType" in kwargs:
attr_type = kwargs["dataType"]
cmds.setAttr(node_attr, value, type=attr_type)
else:
@ -2297,8 +2325,8 @@ def reset_frame_range(playback=True, render=True, fps=True):
cmds.currentTime(frame_start)
if render:
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
cmds.setAttr("defaultRenderGlobals.startFrame", animation_start)
cmds.setAttr("defaultRenderGlobals.endFrame", animation_end)
def reset_scene_resolution():
@ -2811,19 +2839,22 @@ def get_attr_in_layer(attr, layer):
def fix_incompatible_containers():
"""Backwards compatibility: old containers to use new ReferenceLoader"""
old_loaders = {
"MayaAsciiLoader",
"AbcLoader",
"ModelLoader",
"CameraLoader",
"RigLoader",
"FBXLoader"
}
host = registered_host()
for container in host.ls():
loader = container['loader']
print(container['loader'])
if loader in ["MayaAsciiLoader",
"AbcLoader",
"ModelLoader",
"CameraLoader",
"RigLoader",
"FBXLoader"]:
if loader in old_loaders:
log.info(
"Converting legacy container loader {} to "
"ReferenceLoader: {}".format(loader, container["objectName"])
)
cmds.setAttr(container["objectName"] + ".loader",
"ReferenceLoader", type="string")
@ -2951,7 +2982,7 @@ def _get_render_instances():
list: list of instances
"""
objectset = cmds.ls("*.id", long=True, type="objectSet",
objectset = cmds.ls("*.id", long=True, exactType="objectSet",
recursive=True, objectsOnly=True)
instances = []
@ -3238,36 +3269,21 @@ def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None):
def set_colorspace():
"""Set Colorspace from project configuration
"""
"""Set Colorspace from project configuration"""
# set color spaces for rendering space and view transforms
def _colormanage(**kwargs):
"""Wrapper around `cmds.colorManagementPrefs`.
This logs errors instead of raising an error so color management
settings get applied as much as possible.
"""
assert len(kwargs) == 1, "Must receive one keyword argument"
try:
cmds.colorManagementPrefs(edit=True, **kwargs)
log.debug("Setting Color Management Preference: {}".format(kwargs))
except RuntimeError as exc:
log.error(exc)
project_name = os.getenv("AVALON_PROJECT")
project_name = get_current_project_name()
imageio = get_project_settings(project_name)["maya"]["imageio"]
# ocio compatibility variables
ocio_v2_maya_version = 2022
maya_version = int(cmds.about(version=True))
ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version
is_ocio_set = bool(os.environ.get("OCIO"))
root_dict = {}
use_workfile_settings = imageio.get("workfile", {}).get("enabled")
if use_workfile_settings:
root_dict = imageio["workfile"]
else:
# TODO: deprecated code from 3.15.5 - remove
# Maya 2022+ introduces new OCIO v2 color management settings that
# can override the old color management preferences. OpenPype has
@ -3290,40 +3306,63 @@ def set_colorspace():
if not isinstance(root_dict, dict):
msg = "set_colorspace(): argument should be dictionary"
log.error(msg)
return
else:
root_dict = imageio["workfile"]
# backward compatibility
# TODO: deprecated code from 3.15.5 - remove with deprecated code above
view_name = root_dict.get("viewTransform")
if view_name is None:
view_name = root_dict.get("viewName")
log.debug(">> root_dict: {}".format(pformat(root_dict)))
if not root_dict:
return
if root_dict:
# enable color management
cmds.colorManagementPrefs(e=True, cmEnabled=True)
cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True)
# set color spaces for rendering space and view transforms
def _colormanage(**kwargs):
"""Wrapper around `cmds.colorManagementPrefs`.
# backward compatibility
# TODO: deprecated code from 3.15.5 - refactor to use new settings
view_name = root_dict.get("viewTransform")
if view_name is None:
view_name = root_dict.get("viewName")
This logs errors instead of raising an error so color management
settings get applied as much as possible.
if use_ocio_v2:
# Use Maya 2022+ default OCIO v2 config
"""
assert len(kwargs) == 1, "Must receive one keyword argument"
try:
cmds.colorManagementPrefs(edit=True, **kwargs)
log.debug("Setting Color Management Preference: {}".format(kwargs))
except RuntimeError as exc:
log.error(exc)
# enable color management
cmds.colorManagementPrefs(edit=True, cmEnabled=True)
cmds.colorManagementPrefs(edit=True, ocioRulesEnabled=True)
if use_ocio_v2:
log.info("Using Maya OCIO v2")
if not is_ocio_set:
# Set the Maya 2022+ default OCIO v2 config file path
log.info("Setting default Maya OCIO v2 config")
cmds.colorManagementPrefs(edit=True, configFilePath="")
# Note: Setting "" as value also sets this default however
# introduces a bug where launching a file on startup will prompt
# to save the empty scene before it, so we set using the path.
# This value has been the same for 2022, 2023 and 2024
path = "<MAYA_RESOURCES>/OCIO-configs/Maya2022-default/config.ocio"
cmds.colorManagementPrefs(edit=True, configFilePath=path)
# set rendering space and view transform
_colormanage(renderingSpaceName=root_dict["renderSpace"])
_colormanage(viewName=view_name)
_colormanage(displayName=root_dict["displayName"])
else:
# set rendering space and view transform
_colormanage(renderingSpaceName=root_dict["renderSpace"])
_colormanage(viewName=view_name)
_colormanage(displayName=root_dict["displayName"])
else:
log.info("Using Maya OCIO v1 (legacy)")
if not is_ocio_set:
# Set the Maya default config file path
log.info("Setting default Maya OCIO v1 legacy config")
cmds.colorManagementPrefs(edit=True, configFilePath="legacy")
# set rendering space and view transform
_colormanage(renderingSpaceName=root_dict["renderSpace"])
_colormanage(viewTransformName=view_name)
# set rendering space and view transform
_colormanage(renderingSpaceName=root_dict["renderSpace"])
_colormanage(viewTransformName=view_name)
@contextlib.contextmanager
@ -3966,6 +4005,71 @@ def get_capture_preset(task_name, task_type, subset, project_settings, log):
return capture_preset or {}
def get_reference_node(members, log=None):
"""Get the reference node from the container members
Args:
members: list of node names
Returns:
str: Reference node name.
"""
# Collect the references without .placeHolderList[] attributes as
# unique entries (objects only) and skipping the sharedReferenceNode.
references = set()
for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
# Ignore any `:sharedReferenceNode`
if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
continue
# Ignore _UNKNOWN_REF_NODE_ (PLN-160)
if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
continue
references.add(ref)
assert references, "No reference node found in container"
# Get highest reference node (least parents)
highest = min(references,
key=lambda x: len(get_reference_node_parents(x)))
# Warn the user when we're taking the highest reference node
if len(references) > 1:
if not log:
log = logging.getLogger(__name__)
log.warning("More than one reference node found in "
"container, using highest reference node: "
"%s (in: %s)", highest, list(references))
return highest
def get_reference_node_parents(ref):
"""Return all parent reference nodes of reference node
Args:
ref (str): reference node.
Returns:
list: The upstream parent reference nodes.
"""
parent = cmds.referenceQuery(ref,
referenceNode=True,
parent=True)
parents = []
while parent:
parents.append(parent)
parent = cmds.referenceQuery(parent,
referenceNode=True,
parent=True)
return parents
def create_rig_animation_instance(
nodes, context, namespace, options=None, log=None
):
@ -4003,12 +4107,10 @@ def create_rig_animation_instance(
)
assert roots, "No root nodes in rig, this is a bug."
asset = legacy_io.Session["AVALON_ASSET"]
dependency = str(context["representation"]["_id"])
custom_subset = options.get("animationSubsetName")
if custom_subset:
formatting_data = {
# TODO remove 'asset_type' and replace 'asset_name' with 'asset'
"asset_name": context['asset']['name'],
"asset_type": context['asset']['type'],
"subset": context['subset']['name'],
@ -4026,14 +4128,17 @@ def create_rig_animation_instance(
if log:
log.info("Creating subset: {}".format(namespace))
# Fill creator identifier
creator_identifier = "io.openpype.creators.maya.animation"
host = registered_host()
create_context = CreateContext(host)
# Create the animation instance
creator_plugin = get_legacy_creator_by_name("CreateAnimation")
with maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
legacy_create(
creator_plugin,
name=namespace,
asset=asset,
options={"useSelection": True},
data={"dependencies": dependency}
create_context.create(
creator_identifier=creator_identifier,
variant=namespace,
pre_create_data={"use_selection": True}
)

View file

@ -177,7 +177,7 @@ def get(layer, render_instance=None):
}.get(renderer_name.lower(), None)
if renderer is None:
raise UnsupportedRendererException(
"unsupported {}".format(renderer_name)
"Unsupported renderer: {}".format(renderer_name)
)
return renderer(layer, render_instance)
@ -274,12 +274,14 @@ class ARenderProducts:
"Unsupported renderer {}".format(self.renderer)
)
# Note: When this attribute is never set (e.g. on maya launch) then
# this can return None even though it is a string attribute
prefix = self._get_attr(prefix_attr)
if not prefix:
# Fall back to scene name by default
log.debug("Image prefix not set, using <Scene>")
file_prefix = "<Scene>"
log.warning("Image prefix not set, using <Scene>")
prefix = "<Scene>"
return prefix

View file

@ -66,10 +66,12 @@ def install():
cmds.menuItem(divider=True)
# Create default items
cmds.menuItem(
"Create...",
command=lambda *args: host_tools.show_creator(parent=parent_widget)
command=lambda *args: host_tools.show_publisher(
parent=parent_widget,
tab="create"
)
)
cmds.menuItem(
@ -82,8 +84,9 @@ def install():
cmds.menuItem(
"Publish...",
command=lambda *args: host_tools.show_publish(
parent=parent_widget
command=lambda *args: host_tools.show_publisher(
parent=parent_widget,
tab="publish"
),
image=pyblish_icon
)

View file

@ -1,7 +1,10 @@
import json
import base64
import os
import errno
import logging
import contextlib
import shutil
from maya import utils, cmds, OpenMaya
import maya.api.OpenMaya as om
@ -13,6 +16,7 @@ from openpype.host import (
HostBase,
IWorkfileHost,
ILoadHost,
IPublishHost,
HostDirmap,
)
from openpype.tools.utils import host_tools
@ -63,7 +67,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
class MayaHost(HostBase, IWorkfileHost, ILoadHost):
class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "maya"
def __init__(self):
@ -113,7 +117,10 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
register_event_callback("taskChanged", on_task_changed)
register_event_callback("workfile.open.before", before_workfile_open)
register_event_callback("workfile.save.before", before_workfile_save)
register_event_callback("workfile.save.before", after_workfile_save)
register_event_callback(
"workfile.save.before", workfile_save_before_xgen
)
register_event_callback("workfile.save.after", after_workfile_save)
def open_workfile(self, filepath):
return open_file(filepath)
@ -146,6 +153,20 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
with lib.maintained_selection():
yield
def get_context_data(self):
data = cmds.fileInfo("OpenPypeContext", query=True)
if not data:
return {}
data = data[0] # Maya seems to return a list
decoded = base64.b64decode(data).decode("utf-8")
return json.loads(decoded)
def update_context_data(self, data, changes):
json_str = json.dumps(data)
encoded = base64.b64encode(json_str.encode("utf-8"))
return cmds.fileInfo("OpenPypeContext", encoded)
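# A minimal sketch of the round trip these two methods implement
# (plain Python, hypothetical payload):
#   encoded = base64.b64encode(json.dumps({"publish_attributes": {}}).encode("utf-8"))
#   json.loads(base64.b64decode(encoded).decode("utf-8"))  # -> {"publish_attributes": {}}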
def _register_callbacks(self):
for handler, event in self._op_events.copy().items():
if event is None:
@ -480,18 +501,16 @@ def on_init():
# Force load objExport plug-in (requested by artists)
cmds.loadPlugin("objExport", quiet=True)
from .customize import (
override_component_mask_commands,
override_toolbox_ui
)
safe_deferred(override_component_mask_commands)
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
if launch_workfiles:
safe_deferred(host_tools.show_workfiles)
if not lib.IS_HEADLESS:
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
if launch_workfiles:
safe_deferred(host_tools.show_workfiles)
from .customize import (
override_component_mask_commands,
override_toolbox_ui
)
safe_deferred(override_component_mask_commands)
safe_deferred(override_toolbox_ui)
@ -549,37 +568,29 @@ def on_save():
Any transform of a mesh, without an existing ID, is given one
automatically on file save.
"""
log.info("Running callback on save..")
# remove lockfile if users jumps over from one scene to another
_remove_workfile_lock()
# # Update current task for the current scene
# update_task_from_path(cmds.file(query=True, sceneName=True))
# Generate ids of the current context on nodes in the scene
nodes = lib.get_id_required_nodes(referenced_nodes=False)
for node, new_id in lib.generate_ids(nodes):
lib.set_id(node, new_id, overwrite=False)
def _update_render_layer_observers():
# Helper to trigger update for all renderlayer observer logic
lib.remove_render_layer_observer()
lib.add_render_layer_observer()
lib.add_render_layer_change_observer()
def on_open():
"""On scene open let's assume the containers have changed."""
from qtpy import QtWidgets
from openpype.widgets import popup
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.remove_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_change_observer()")
# # Update current task for the current scene
# update_task_from_path(cmds.file(query=True, sceneName=True))
utils.executeDeferred(_update_render_layer_observers)
# Validate FPS after update_task_from_path to
# ensure it is using correct FPS for the asset
@ -590,10 +601,7 @@ def on_open():
log.warning("Scene has outdated content.")
# Find maya main window
top_level_widgets = {w.objectName(): w for w in
QtWidgets.QApplication.topLevelWidgets()}
parent = top_level_widgets.get("MayaWindow", None)
parent = lib.get_main_window()
if parent is None:
log.info("Skipping outdated content pop-up "
"because Maya window can't be found.")
@ -618,16 +626,9 @@ def on_new():
"""Set project resolution and fps when create a new file"""
log.info("Running callback on new..")
with lib.suspended_refresh():
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.remove_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_change_observer()")
lib.set_context_settings()
utils.executeDeferred(_update_render_layer_observers)
_remove_workfile_lock()
@ -681,6 +682,91 @@ def before_workfile_save(event):
create_workspace_mel(workdir_path, project_name)
def workfile_save_before_xgen(event):
"""Manage Xgen external files when switching context.
Xgen has various external files that need to be unique and relative to the
workfile, so we need to copy and potentially overwrite these files when
switching context.
Args:
event (Event) - openpype/lib/events.py
"""
if not cmds.pluginInfo("xgenToolkit", query=True, loaded=True):
return
import xgenm
current_work_dir = legacy_io.Session["AVALON_WORKDIR"].replace("\\", "/")
expected_work_dir = event.data["workdir_path"].replace("\\", "/")
if current_work_dir == expected_work_dir:
return
palettes = cmds.ls(type="xgmPalette", long=True)
if not palettes:
return
transfers = []
overwrites = []
attribute_changes = {}
attrs = ["xgFileName", "xgBaseFile"]
for palette in palettes:
sanitized_palette = palette.replace("|", "")
project_path = xgenm.getAttr("xgProjectPath", sanitized_palette)
_, maya_extension = os.path.splitext(event.data["filename"])
for attr in attrs:
node_attr = "{}.{}".format(palette, attr)
attr_value = cmds.getAttr(node_attr)
if not attr_value:
continue
source = os.path.join(project_path, attr_value)
attr_value = event.data["filename"].replace(
maya_extension,
"__{}{}".format(
sanitized_palette.replace(":", "__"),
os.path.splitext(attr_value)[1]
)
)
target = os.path.join(expected_work_dir, attr_value)
transfers.append((source, target))
attribute_changes[node_attr] = attr_value
relative_path = xgenm.getAttr(
"xgDataPath", sanitized_palette
).split(os.pathsep)[0]
absolute_path = relative_path.replace("${PROJECT}", project_path)
for root, _, files in os.walk(absolute_path):
for f in files:
source = os.path.join(root, f).replace("\\", "/")
target = source.replace(project_path, expected_work_dir + "/")
transfers.append((source, target))
if os.path.exists(target):
overwrites.append(target)
# Ask user about overwriting files.
if overwrites:
log.warning(
"WARNING! Potential loss of data.\n\n"
"Found duplicate Xgen files in new context.\n{}".format(
"\n".join(overwrites)
)
)
return
for source, destination in transfers:
if not os.path.exists(os.path.dirname(destination)):
os.makedirs(os.path.dirname(destination))
shutil.copy(source, destination)
for attribute, value in attribute_changes.items():
cmds.setAttr(attribute, value, type="string")
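# For illustration (hypothetical names): saving "shot010_v001.ma" into a new
# context with a palette "hairPalette" whose xgFileName was "hair.xgen" copies
# that file into the new workdir and repoints the attribute to
# "shot010_v001__hairPalette.xgen".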
def after_workfile_save(event):
workfile_name = event["filename"]
if (

View file

@ -1,87 +1,57 @@
import json
import os
import re
from maya import cmds
from abc import ABCMeta
import qargparse
import six
from maya import cmds
from maya.app.renderSetup.model import renderSetup
from openpype.lib import Logger
from openpype.lib import BoolDef, Logger
from openpype.pipeline import AVALON_CONTAINER_ID, Anatomy, CreatedInstance
from openpype.pipeline import Creator as NewCreator
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
get_representation_path,
AVALON_CONTAINER_ID,
Anatomy,
)
CreatorError, LegacyCreator, LoaderPlugin, get_representation_path,
legacy_io)
from openpype.pipeline.load import LoadError
from openpype.settings import get_project_settings
from .pipeline import containerise
from . import lib
from .lib import imprint, read
from .pipeline import containerise
log = Logger.get_logger()
def get_reference_node(members, log=None):
def _get_attr(node, attr, default=None):
"""Helper to get attribute which allows attribute to not exist."""
if not cmds.attributeQuery(attr, node=node, exists=True):
return default
return cmds.getAttr("{}.{}".format(node, attr))
# Backwards compatibility: these functions has been moved to lib.
def get_reference_node(*args, **kwargs):
"""Get the reference node from the container members
Args:
members: list of node names
Returns:
str: Reference node name.
Deprecated:
This function was moved and will be removed in 3.16.x.
"""
# Collect the references without .placeHolderList[] attributes as
# unique entries (objects only) and skipping the sharedReferenceNode.
references = set()
for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
# Ignore any `:sharedReferenceNode`
if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
continue
# Ignore _UNKNOWN_REF_NODE_ (PLN-160)
if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
continue
references.add(ref)
assert references, "No reference node found in container"
# Get highest reference node (least parents)
highest = min(references,
key=lambda x: len(get_reference_node_parents(x)))
# Warn the user when we're taking the highest reference node
if len(references) > 1:
if not log:
log = Logger.get_logger(__name__)
log.warning("More than one reference node found in "
"container, using highest reference node: "
"%s (in: %s)", highest, list(references))
return highest
msg = "Function 'get_reference_node' has been moved."
log.warning(msg)
cmds.warning(msg)
return lib.get_reference_node(*args, **kwargs)
def get_reference_node_parents(ref):
"""Return all parent reference nodes of reference node
Args:
ref (str): reference node.
Returns:
list: The upstream parent reference nodes.
def get_reference_node_parents(*args, **kwargs):
"""
parent = cmds.referenceQuery(ref,
referenceNode=True,
parent=True)
parents = []
while parent:
parents.append(parent)
parent = cmds.referenceQuery(parent,
referenceNode=True,
parent=True)
return parents
Deprecated:
This function was moved and will be removed in 3.16.x.
"""
msg = "Function 'get_reference_node_parents' has been moved."
log.warning(msg)
cmds.warning(msg)
return lib.get_reference_node_parents(*args, **kwargs)
class Creator(LegacyCreator):
@ -100,6 +70,379 @@ class Creator(LegacyCreator):
return instance
@six.add_metaclass(ABCMeta)
class MayaCreatorBase(object):
@staticmethod
def cache_subsets(shared_data):
"""Cache instances for Creators to shared data.
Create `maya_cached_subsets` key when needed in shared data and
fill it with all collected instances from the scene under their
respective creator identifiers.
If legacy instances are detected in the scene, create
`maya_cached_legacy_subsets` there and fill it with
all legacy subsets keyed by their family.
Args:
shared_data (Dict[str, Any]): Shared data.
Return:
Dict[str, Any]: Shared data dictionary.
"""
if shared_data.get("maya_cached_subsets") is None:
cache = dict()
cache_legacy = dict()
for node in cmds.ls(type="objectSet"):
if _get_attr(node, attr="id") != "pyblish.avalon.instance":
continue
creator_id = _get_attr(node, attr="creator_identifier")
if creator_id is not None:
# creator instance
cache.setdefault(creator_id, []).append(node)
else:
# legacy instance
family = _get_attr(node, attr="family")
if family is None:
# must be a broken instance
continue
cache_legacy.setdefault(family, []).append(node)
shared_data["maya_cached_subsets"] = cache
shared_data["maya_cached_legacy_subsets"] = cache_legacy
return shared_data
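# For illustration, after this runs the shared data may look like
# (node names are hypothetical):
#   shared_data["maya_cached_subsets"] == {
#       "io.openpype.creators.maya.animation": ["animationMain"]}
#   shared_data["maya_cached_legacy_subsets"] == {"model": ["modelDefault"]}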
def imprint_instance_node(self, node, data):
# We never store the instance_node as value on the node since
# it's the node name itself
data.pop("instance_node", None)
# We store creator attributes at the root level and assume they
# will not clash in names with `subset`, `task`, etc. and other
# default names. This is just so these attributes in many cases
# are still editable in the maya UI by artists.
# pop to move to end of dict to sort attributes last on the node
creator_attributes = data.pop("creator_attributes", {})
data.update(creator_attributes)
# We know the "publish_attributes" will be complex data of
# settings per plugins, we'll store this as a flattened json structure
# pop to move to end of dict to sort attributes last on the node
data["publish_attributes"] = json.dumps(
data.pop("publish_attributes", {})
)
# Since we flattened the data structure for creator attributes we want
# to correctly detect which flattened attributes should end back in the
# creator attributes when reading the data from the node, so we store
# the relevant keys as a string
data["__creator_attributes_keys"] = ",".join(creator_attributes.keys())
# Kill any existing attributes just so we can imprint cleanly again
for attr in data.keys():
if cmds.attributeQuery(attr, node=node, exists=True):
cmds.deleteAttr("{}.{}".format(node, attr))
return imprint(node, data)
def read_instance_node(self, node):
node_data = read(node)
# Never care about a cbId attribute on the object set
# being read as 'data'
node_data.pop("cbId", None)
# Move the relevant attributes into "creator_attributes" that
# we flattened originally
node_data["creator_attributes"] = {}
creator_attribute_keys = node_data.pop("__creator_attributes_keys",
"").split(",")
for key in creator_attribute_keys:
if key in node_data:
node_data["creator_attributes"][key] = node_data.pop(key)
publish_attributes = node_data.get("publish_attributes")
if publish_attributes:
node_data["publish_attributes"] = json.loads(publish_attributes)
# Explicitly re-parse the node name
node_data["instance_node"] = node
return node_data
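# A minimal sketch of the flattening performed by imprint_instance_node and
# reversed by read_instance_node (values and plugin name are hypothetical):
#   data_to_store: {"subset": "modelMain",
#                   "creator_attributes": {"writeColorSets": False},
#                   "publish_attributes": {"ValidateNodeIds": {"active": True}}}
#   imprinted on the objectSet: subset="modelMain", writeColorSets=False,
#                               __creator_attributes_keys="writeColorSets",
#                               publish_attributes='{"ValidateNodeIds": {"active": true}}'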
@six.add_metaclass(ABCMeta)
class MayaCreator(NewCreator, MayaCreatorBase):
def create(self, subset_name, instance_data, pre_create_data):
members = list()
if pre_create_data.get("use_selection"):
members = cmds.ls(selection=True)
with lib.undo_chunk():
instance_node = cmds.sets(members, name=subset_name)
instance_data["instance_node"] = instance_node
instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self)
self._add_instance_to_context(instance)
self.imprint_instance_node(instance_node,
data=instance.data_to_store())
return instance
def collect_instances(self):
self.cache_subsets(self.collection_shared_data)
cached_subsets = self.collection_shared_data["maya_cached_subsets"]
for node in cached_subsets.get(self.identifier, []):
node_data = self.read_instance_node(node)
created_instance = CreatedInstance.from_existing(node_data, self)
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
data = created_inst.data_to_store()
node = data.get("instance_node")
self.imprint_instance_node(node, data)
def remove_instances(self, instances):
"""Remove specified instance from the scene.
This is only removing `id` parameter so instance is no longer
instance, because it might contain valuable data for artist.
"""
for instance in instances:
node = instance.data.get("instance_node")
if node:
cmds.delete(node)
self._remove_instance_from_context(instance)
def get_pre_create_attr_defs(self):
return [
BoolDef("use_selection",
label="Use selection",
default=True)
]
def ensure_namespace(namespace):
"""Make sure the namespace exists.
Args:
namespace (str): The preferred namespace name.
Returns:
str: The generated or existing namespace
"""
exists = cmds.namespace(exists=namespace)
if exists:
return namespace
else:
return cmds.namespace(add=namespace)
class RenderlayerCreator(NewCreator, MayaCreatorBase):
"""Creator which creates an instance per renderlayer in the workfile.
Creates and manages a renderlayer subset per renderLayer in the workfile.
This generates a singleton node in the scene which, if it exists, tells the
Creator to collect Maya rendersetup renderlayers as individual instances.
As such, triggering create doesn't actually create the instance node per
layer but only the node which tells the Creator it may now collect
an instance per renderlayer.
"""
# These are required to be overridden in subclass
singleton_node_name = ""
# These are optional to be overridden in subclass
layer_instance_prefix = None
def _get_singleton_node(self, return_all=False):
nodes = lib.lsattr("pre_creator_identifier", self.identifier)
if nodes:
return nodes if return_all else nodes[0]
def create(self, subset_name, instance_data, pre_create_data):
# A Renderlayer is never explicitly created using the create method.
# Instead, renderlayers from the scene are collected. Thus "create"
# would only ever be called to say, 'hey, please refresh collect'
self.create_singleton_node()
# if no render layers are present, create default one with
# asterisk selector
rs = renderSetup.instance()
if not rs.getRenderLayers():
render_layer = rs.createRenderLayer("Main")
collection = render_layer.createCollection("defaultCollection")
collection.getSelector().setPattern('*')
# By RenderLayerCreator.create we make it so that the renderlayer
# instances directly appear even though it just collects scene
# renderlayers. This doesn't actually 'create' any scene contents.
self.collect_instances()
def create_singleton_node(self):
if self._get_singleton_node():
raise CreatorError("A Render instance already exists - only "
"one can be configured.")
with lib.undo_chunk():
node = cmds.sets(empty=True, name=self.singleton_node_name)
lib.imprint(node, data={
"pre_creator_identifier": self.identifier
})
return node
def collect_instances(self):
# We only collect if the global render instance exists
if not self._get_singleton_node():
return
rs = renderSetup.instance()
layers = rs.getRenderLayers()
for layer in layers:
layer_instance_node = self.find_layer_instance_node(layer)
if layer_instance_node:
data = self.read_instance_node(layer_instance_node)
instance = CreatedInstance.from_existing(data, creator=self)
else:
# No existing scene instance node for this layer. Note that
# this instance will not have the `instance_node` data yet
# until it's been saved/persisted at least once.
# TODO: Correctly define the subset name using templates
prefix = self.layer_instance_prefix or self.family
subset_name = "{}{}".format(prefix, layer.name())
instance_data = {
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"],
"variant": layer.name(),
}
instance = CreatedInstance(
family=self.family,
subset_name=subset_name,
data=instance_data,
creator=self
)
instance.transient_data["layer"] = layer
self._add_instance_to_context(instance)
def find_layer_instance_node(self, layer):
connected_sets = cmds.listConnections(
"{}.message".format(layer.name()),
source=False,
destination=True,
type="objectSet"
) or []
for node in connected_sets:
if not cmds.attributeQuery("creator_identifier",
node=node,
exists=True):
continue
creator_identifier = cmds.getAttr(node + ".creator_identifier")
if creator_identifier == self.identifier:
self.log.info(f"Found node: {node}")
return node
def _create_layer_instance_node(self, layer):
# We only collect if a CreateRender instance exists
create_render_set = self._get_singleton_node()
if not create_render_set:
raise CreatorError("Creating a renderlayer instance node is not "
"allowed if no 'CreateRender' instance exists")
namespace = "_{}".format(self.singleton_node_name)
namespace = ensure_namespace(namespace)
name = "{}:{}".format(namespace, layer.name())
render_set = cmds.sets(name=name, empty=True)
# Keep an active link with the renderlayer so we can retrieve it
# later by a physical maya connection instead of relying on the layer
# name
cmds.addAttr(render_set, longName="renderlayer", at="message")
cmds.connectAttr("{}.message".format(layer.name()),
"{}.renderlayer".format(render_set), force=True)
# Add the set to the 'CreateRender' set.
cmds.sets(render_set, forceElement=create_render_set)
return render_set
def update_instances(self, update_list):
# We only generate the persisting layer data into the scene once
# we save with the UI on e.g. validate or publish
for instance, _changes in update_list:
instance_node = instance.data.get("instance_node")
# Ensure a node exists to persist the data to
if not instance_node:
layer = instance.transient_data["layer"]
instance_node = self._create_layer_instance_node(layer)
instance.data["instance_node"] = instance_node
self.imprint_instance_node(instance_node,
data=instance.data_to_store())
def imprint_instance_node(self, node, data):
# Do not ever try to update the `renderlayer` since it'll try
# to remove the attribute and recreate it but fail to keep it a
# message attribute link. We only ever imprint that on the initial
# node creation.
# TODO: Improve how this is handled
data.pop("renderlayer", None)
data.get("creator_attributes", {}).pop("renderlayer", None)
return super(RenderlayerCreator, self).imprint_instance_node(node,
data=data)
def remove_instances(self, instances):
"""Remove specified instances from the scene.
This only removes the `id` parameter so the instance is no longer
an instance, because it might contain valuable data for the artist.
"""
# Instead of removing the single instance or renderlayers, we
# remove the CreateRender node this creator relies on to decide whether
# it should collect anything at all.
nodes = self._get_singleton_node(return_all=True)
if nodes:
cmds.delete(nodes)
# Remove ALL the instances even if only one gets deleted
for instance in list(self.create_context.instances):
if instance.get("creator_identifier") == self.identifier:
self._remove_instance_from_context(instance)
# Remove the stored settings per renderlayer too
node = instance.data.get("instance_node")
if node and cmds.objExists(node):
cmds.delete(node)
class Loader(LoaderPlugin):
hosts = ["maya"]
@ -205,7 +548,7 @@ class ReferenceLoader(Loader):
if not nodes:
return
ref_node = get_reference_node(nodes, self.log)
ref_node = lib.get_reference_node(nodes, self.log)
container = containerise(
name=name,
namespace=namespace,
@ -226,6 +569,7 @@ class ReferenceLoader(Loader):
def update(self, container, representation):
from maya import cmds
from openpype.hosts.maya.api.lib import get_container_members
node = container["objectName"]
@ -234,7 +578,7 @@ class ReferenceLoader(Loader):
# Get reference node from container members
members = get_container_members(node)
reference_node = get_reference_node(members, self.log)
reference_node = lib.get_reference_node(members, self.log)
namespace = cmds.referenceQuery(reference_node, namespace=True)
file_type = {
@ -382,7 +726,7 @@ class ReferenceLoader(Loader):
# Assume asset has been referenced
members = cmds.sets(node, query=True)
reference_node = get_reference_node(members, self.log)
reference_node = lib.get_reference_node(members, self.log)
assert reference_node, ("Imported container not supported; "
"container must be referenced.")

View file

@ -15,7 +15,6 @@ import contextlib
from maya import cmds
from maya.app.renderSetup.model import renderSetup
# from colorbleed.maya import lib
from .lib import pairwise

View file

@ -14,7 +14,7 @@ from openpype.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import read, imprint
from .lib import read, imprint, get_reference_node, get_main_window
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@ -173,44 +173,37 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
def create_placeholder(self, placeholder_data):
selection = cmds.ls(selection=True)
if not selection:
raise ValueError("Nothing is selected")
if len(selection) > 1:
raise ValueError("More then one item are selected")
parent = selection[0] if selection else None
placeholder_data["plugin_identifier"] = self.identifier
placeholder_name = self._create_placeholder_name(placeholder_data)
placeholder = cmds.spaceLocator(name=placeholder_name)[0]
# TODO: this can crash if selection can't be used
cmds.parent(placeholder, selection[0])
if parent:
placeholder = cmds.parent(placeholder, selection[0])[0]
# get the long name of the placeholder (with the groups)
placeholder_full_name = (
cmds.ls(selection[0], long=True)[0]
+ "|"
+ placeholder.replace("|", "")
)
imprint(placeholder_full_name, placeholder_data)
imprint(placeholder, placeholder_data)
# Add helper attributes to keep placeholder info
cmds.addAttr(
placeholder_full_name,
placeholder,
longName="parent",
hidden=True,
dataType="string"
)
cmds.addAttr(
placeholder_full_name,
placeholder,
longName="index",
hidden=True,
attributeType="short",
defaultValue=-1
)
cmds.setAttr(placeholder_full_name + ".parent", "", type="string")
cmds.setAttr(placeholder + ".parent", "", type="string")
def update_placeholder(self, placeholder_item, placeholder_data):
node_name = placeholder_item.scene_identifier
@ -233,7 +226,7 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
if placeholder_data.get("plugin_identifier") != self.identifier:
continue
# TODO do data validations and maybe updgrades if are invalid
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
)
@ -250,15 +243,19 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def cleanup_placeholder(self, placeholder, failed):
def post_placeholder_process(self, placeholder, failed):
"""Hide placeholder, add them to placeholder set
"""
node = placeholder._scene_identifier
node = placeholder.scene_identifier
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr(node + ".hiddenInOutliner", True)
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful"""
cmds.delete(placeholder.scene_identifier)
def load_succeed(self, placeholder, container):
self._parent_in_hierarchy(placeholder, container)
@ -275,9 +272,24 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
return
roots = cmds.sets(container, q=True)
ref_node = None
try:
ref_node = get_reference_node(roots)
except AssertionError as e:
self.log.info(e.args[0])
nodes_to_parent = []
for root in roots:
if ref_node:
ref_root = cmds.referenceQuery(root, nodes=True)[0]
ref_root = (
cmds.listRelatives(ref_root, parent=True, path=True) or
[ref_root]
)
nodes_to_parent.extend(ref_root)
continue
if root.endswith("_RN"):
# Backwards compatibility for hardcoded reference names.
refRoot = cmds.referenceQuery(root, n=True)[0]
refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
nodes_to_parent.extend(refRoot)
@ -294,10 +306,17 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
matrix=True,
worldSpace=True
)
scene_parent = cmds.listRelatives(
placeholder.scene_identifier, parent=True, fullPath=True
)
for node in set(nodes_to_parent):
cmds.reorder(node, front=True)
cmds.reorder(node, relative=placeholder.data["index"])
cmds.xform(node, matrix=placeholder_form, ws=True)
if scene_parent:
cmds.parent(node, scene_parent)
else:
cmds.parent(node, world=True)
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
if not holding_sets:
@ -319,8 +338,9 @@ def update_workfile_template(*args):
def create_placeholder(*args):
host = registered_host()
builder = MayaTemplateBuilder(host)
window = WorkfileBuildPlaceholderDialog(host, builder)
window.exec_()
window = WorkfileBuildPlaceholderDialog(host, builder,
parent=get_main_window())
window.show()
def update_placeholder(*args):
@ -343,6 +363,7 @@ def update_placeholder(*args):
raise ValueError("Too many selected nodes")
placeholder_item = placeholder_items[0]
window = WorkfileBuildPlaceholderDialog(host, builder)
window = WorkfileBuildPlaceholderDialog(host, builder,
parent=get_main_window())
window.set_update_mode(placeholder_item)
window.exec_()

View file

@ -0,0 +1,165 @@
from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
from openpype.hosts.maya.api import plugin
from openpype.hosts.maya.api.lib import read
from maya import cmds
from maya.app.renderSetup.model import renderSetup
class MayaLegacyConvertor(SubsetConvertorPlugin,
plugin.MayaCreatorBase):
"""Find and convert any legacy subsets in the scene.
This Convertor will find all legacy subsets in the scene and will
transform them to the current system. Since the old subsets doesn't
retain any information about their original creators, the only mapping
we can do is based on their families.
Its limitation is that you can have multiple creators creating subset
of the same family and there is no way to handle it. This code should
nevertheless cover all creators that came with OpenPype.
"""
identifier = "io.openpype.creators.maya.legacy"
# Cases where the identifier or new family doesn't correspond to the
# original family on the legacy instances
special_family_conversions = {
"rendering": "io.openpype.creators.maya.renderlayer",
}
def find_instances(self):
self.cache_subsets(self.collection_shared_data)
legacy = self.collection_shared_data.get("maya_cached_legacy_subsets")
if not legacy:
return
self.add_convertor_item("Convert legacy instances")
def convert(self):
self.remove_convertor_item()
# We can't use the collected shared data cache here
# we re-query it here directly to convert all found.
cache = {}
self.cache_subsets(cache)
legacy = cache.get("maya_cached_legacy_subsets")
if not legacy:
return
# From all current new style manual creators find the mapping
# from family to identifier
family_to_id = {}
for identifier, creator in self.create_context.manual_creators.items():
family = getattr(creator, "family", None)
if not family:
continue
if family in family_to_id:
# We have a clash of family -> identifier. Multiple
# new style creators use the same family
self.log.warning("Clash on family->identifier: "
"{}".format(identifier))
family_to_id[family] = identifier
family_to_id.update(self.special_family_conversions)
# We also embed the current 'task' into the instance since legacy
# instances didn't store that data on the instances. The old style
# logic was thus to be live to the current task to begin with.
data = dict()
data["task"] = self.create_context.get_current_task_name()
for family, instance_nodes in legacy.items():
if family not in family_to_id:
self.log.warning(
"Unable to convert legacy instance with family '{}'"
" because there is no matching new creator's family"
"".format(family)
)
continue
creator_id = family_to_id[family]
creator = self.create_context.manual_creators[creator_id]
data["creator_identifier"] = creator_id
if isinstance(creator, plugin.RenderlayerCreator):
self._convert_per_renderlayer(instance_nodes, data, creator)
else:
self._convert_regular(instance_nodes, data)
def _convert_regular(self, instance_nodes, data):
# We only imprint the creator identifier for it to identify
# as the new style creator
for instance_node in instance_nodes:
self.imprint_instance_node(instance_node,
data=data.copy())
def _convert_per_renderlayer(self, instance_nodes, data, creator):
# Split the instance into an instance per layer
rs = renderSetup.instance()
layers = rs.getRenderLayers()
if not layers:
self.log.error(
"Can't convert legacy renderlayer instance because no existing"
" renderSetup layers exist in the scene."
)
return
creator_attribute_names = {
attr_def.key for attr_def in creator.get_instance_attr_defs()
}
for instance_node in instance_nodes:
# Ensure we have the new style singleton node generated
# TODO: Make function public
singleton_node = creator._get_singleton_node()
if singleton_node:
self.log.error(
"Can't convert legacy renderlayer instance '{}' because"
" new style instance '{}' already exists".format(
instance_node,
singleton_node
)
)
continue
creator.create_singleton_node()
# We are creating new nodes to replace the original instance
# Copy the attributes of the original instance to the new node
original_data = read(instance_node)
# The family gets converted to the new family (this is due to
# "rendering" family being converted to "renderlayer" family)
original_data["family"] = creator.family
# Convert to creator attributes when relevant
creator_attributes = {}
for key in list(original_data.keys()):
# Iterate in order of the original attributes to preserve order
# in the output creator attributes
if key in creator_attribute_names:
creator_attributes[key] = original_data.pop(key)
original_data["creator_attributes"] = creator_attributes
# For layer in maya layers
for layer in layers:
layer_instance_node = creator.find_layer_instance_node(layer)
if not layer_instance_node:
# TODO: Make function public
layer_instance_node = creator._create_layer_instance_node(
layer
)
# Transfer the main attributes of the original instance
layer_data = original_data.copy()
layer_data.update(data)
self.imprint_instance_node(layer_instance_node,
data=layer_data)
# Delete the legacy instance node
cmds.delete(instance_node)

View file

@ -2,9 +2,13 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.lib import (
BoolDef,
TextDef
)
class CreateAnimation(plugin.Creator):
class CreateAnimation(plugin.MayaCreator):
"""Animation output for character rigs"""
# We hide the animation creator from the UI since the creation of it
@ -13,48 +17,71 @@ class CreateAnimation(plugin.Creator):
# Note: This setting is actually applied from project settings
enabled = False
identifier = "io.openpype.creators.maya.animation"
name = "animationDefault"
label = "Animation"
family = "animation"
icon = "male"
write_color_sets = False
write_face_sets = False
include_parent_hierarchy = False
include_user_defined_attributes = False
def __init__(self, *args, **kwargs):
super(CreateAnimation, self).__init__(*args, **kwargs)
# TODO: Would be great if we could visually hide this from the creator
# by default but do allow to generate it through code.
# create an ordered dict with the existing data first
def get_instance_attr_defs(self):
# get basic animation data : start / end / handles / steps
for key, value in lib.collect_animation_data().items():
self.data[key] = value
defs = lib.collect_animation_defs()
# Write vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = self.write_face_sets
# Include only renderable visible shapes.
# Skips locators and empty transforms
self.data["renderableOnly"] = False
# Include only nodes that are visible at least once during the
# frame range.
self.data["visibleOnly"] = False
# Include the groups above the out_SET content
self.data["includeParentHierarchy"] = self.include_parent_hierarchy
# Default to exporting world-space
self.data["worldSpace"] = True
defs.extend([
BoolDef("writeColorSets",
label="Write vertex colors",
tooltip="Write vertex colors with the geometry",
default=self.write_color_sets),
BoolDef("writeFaceSets",
label="Write face sets",
tooltip="Write face sets with the geometry",
default=self.write_face_sets),
BoolDef("writeNormals",
label="Write normals",
tooltip="Write normals with the deforming geometry",
default=True),
BoolDef("renderableOnly",
label="Renderable Only",
tooltip="Only export renderable visible shapes",
default=False),
BoolDef("visibleOnly",
label="Visible Only",
tooltip="Only export dag objects visible during "
"frame range",
default=False),
BoolDef("includeParentHierarchy",
label="Include Parent Hierarchy",
tooltip="Whether to include parent hierarchy of nodes in "
"the publish instance",
default=self.include_parent_hierarchy),
BoolDef("worldSpace",
label="World-Space Export",
default=True),
BoolDef("includeUserDefinedAttributes",
label="Include User Defined Attributes",
default=self.include_user_defined_attributes),
TextDef("attr",
label="Custom Attributes",
default="",
placeholder="attr1, attr2"),
TextDef("attrPrefix",
label="Custom Attributes Prefix",
placeholder="prefix1, prefix2")
])
# TODO: Implement these on a Deadline plug-in instead?
"""
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
"""
# Default to write normals.
self.data["writeNormals"] = True
value = self.include_user_defined_attributes
self.data["includeUserDefinedAttributes"] = value
return defs
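For context only (not part of this commit's diff): the conversions above all follow the same pattern, replacing raw self.data[...] assignments with attribute definitions returned from get_instance_attr_defs(). A minimal, hypothetical sketch of that pattern, using only the BoolDef/TextDef signatures already shown in this changeset:

    from openpype.lib import BoolDef, TextDef

    def get_instance_attr_defs(self):
        # Each definition becomes an editable option on the publish
        # instance, keyed by its first argument.
        return [
            BoolDef("worldSpace",
                    label="World-Space Export",
                    default=True),
            TextDef("attr",
                    label="Custom Attributes",
                    default="",
                    placeholder="attr1, attr2"),
        ]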

View file

@ -2,17 +2,20 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from maya import cmds
from openpype.lib import (
NumberDef,
BoolDef
)
class CreateArnoldSceneSource(plugin.Creator):
class CreateArnoldSceneSource(plugin.MayaCreator):
"""Arnold Scene Source"""
name = "ass"
identifier = "io.openpype.creators.maya.ass"
label = "Arnold Scene Source"
family = "ass"
icon = "cube"
expandProcedurals = False
motionBlur = True
motionBlurKeys = 2
@ -28,39 +31,71 @@ class CreateArnoldSceneSource(plugin.Creator):
maskColor_manager = False
maskOperator = False
def __init__(self, *args, **kwargs):
super(CreateArnoldSceneSource, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
# Add animation data
self.data.update(lib.collect_animation_data())
defs = lib.collect_animation_defs()
self.data["expandProcedurals"] = self.expandProcedurals
self.data["motionBlur"] = self.motionBlur
self.data["motionBlurKeys"] = self.motionBlurKeys
self.data["motionBlurLength"] = self.motionBlurLength
defs.extend([
BoolDef("expandProcedural",
label="Expand Procedural",
default=self.expandProcedurals),
BoolDef("motionBlur",
label="Motion Blur",
default=self.motionBlur),
NumberDef("motionBlurKeys",
label="Motion Blur Keys",
decimals=0,
default=self.motionBlurKeys),
NumberDef("motionBlurLength",
label="Motion Blur Length",
decimals=3,
default=self.motionBlurLength),
# Masks
self.data["maskOptions"] = self.maskOptions
self.data["maskCamera"] = self.maskCamera
self.data["maskLight"] = self.maskLight
self.data["maskShape"] = self.maskShape
self.data["maskShader"] = self.maskShader
self.data["maskOverride"] = self.maskOverride
self.data["maskDriver"] = self.maskDriver
self.data["maskFilter"] = self.maskFilter
self.data["maskColor_manager"] = self.maskColor_manager
self.data["maskOperator"] = self.maskOperator
# Masks
BoolDef("maskOptions",
label="Export Options",
default=self.maskOptions),
BoolDef("maskCamera",
label="Export Cameras",
default=self.maskCamera),
BoolDef("maskLight",
label="Export Lights",
default=self.maskLight),
BoolDef("maskShape",
label="Export Shapes",
default=self.maskShape),
BoolDef("maskShader",
label="Export Shaders",
default=self.maskShader),
BoolDef("maskOverride",
label="Export Override Nodes",
default=self.maskOverride),
BoolDef("maskDriver",
label="Export Drivers",
default=self.maskDriver),
BoolDef("maskFilter",
label="Export Filters",
default=self.maskFilter),
BoolDef("maskOperator",
label="Export Operators",
default=self.maskOperator),
BoolDef("maskColor_manager",
label="Export Color Managers",
default=self.maskColor_manager),
])
def process(self):
instance = super(CreateArnoldSceneSource, self).process()
return defs
nodes = []
def create(self, subset_name, instance_data, pre_create_data):
if (self.options or {}).get("useSelection"):
nodes = cmds.ls(selection=True)
from maya import cmds
cmds.sets(nodes, rm=instance)
instance = super(CreateArnoldSceneSource, self).create(
subset_name, instance_data, pre_create_data
)
assContent = cmds.sets(name=instance + "_content_SET")
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)
instance_node = instance.get("instance_node")
content = cmds.sets(name=instance_node + "_content_SET", empty=True)
proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
cmds.sets([content, proxy], forceElement=instance_node)
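For illustration only (not part of the commit): once CreateArnoldSceneSource has created its two object sets, members can be assigned to them with a plain cmds.sets call. The set name below is hypothetical and assumes an instance node named "assMain":

    from maya import cmds

    # Add the current selection to the content set created by the creator.
    selection = cmds.ls(selection=True, long=True)
    if selection:
        cmds.sets(selection, forceElement="assMain_content_SET")  # hypothetical name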

View file

@ -1,10 +1,10 @@
from openpype.hosts.maya.api import plugin
class CreateAssembly(plugin.Creator):
class CreateAssembly(plugin.MayaCreator):
"""A grouped package of loaded content"""
name = "assembly"
identifier = "io.openpype.creators.maya.assembly"
label = "Assembly"
family = "assembly"
icon = "cubes"

View file

@ -2,33 +2,35 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.lib import BoolDef
class CreateCamera(plugin.Creator):
class CreateCamera(plugin.MayaCreator):
"""Single baked camera"""
name = "cameraMain"
identifier = "io.openpype.creators.maya.camera"
label = "Camera"
family = "camera"
icon = "video-camera"
def __init__(self, *args, **kwargs):
super(CreateCamera, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
# get basic animation data : start / end / handles / steps
animation_data = lib.collect_animation_data()
for key, value in animation_data.items():
self.data[key] = value
defs = lib.collect_animation_defs()
# Bake to world space by default, when this is False it will also
# include the parent hierarchy in the baked results
self.data['bakeToWorldSpace'] = True
defs.extend([
BoolDef("bakeToWorldSpace",
label="Bake to World-Space",
tooltip="Bake to World-Space",
default=True),
])
return defs
class CreateCameraRig(plugin.Creator):
class CreateCameraRig(plugin.MayaCreator):
"""Complex hierarchy with camera."""
name = "camerarigMain"
identifier = "io.openpype.creators.maya.camerarig"
label = "Camera Rig"
family = "camerarig"
icon = "video-camera"

View file

@ -1,16 +1,21 @@
from openpype.hosts.maya.api import plugin
from openpype.lib import BoolDef
class CreateLayout(plugin.Creator):
class CreateLayout(plugin.MayaCreator):
"""A grouped package of loaded content"""
name = "layoutMain"
identifier = "io.openpype.creators.maya.layout"
label = "Layout"
family = "layout"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateLayout, self).__init__(*args, **kwargs)
# enable this when you want to
# publish group of loaded asset
self.data["groupLoadedAssets"] = False
def get_instance_attr_defs(self):
return [
BoolDef("groupLoadedAssets",
label="Group Loaded Assets",
tooltip="Enable this when you want to publish group of "
"loaded asset",
default=False)
]

View file

@ -1,29 +1,53 @@
from openpype.hosts.maya.api import (
lib,
plugin
plugin,
lib
)
from openpype.lib import (
BoolDef,
TextDef
)
class CreateLook(plugin.Creator):
class CreateLook(plugin.MayaCreator):
"""Shader connections defining shape look"""
name = "look"
identifier = "io.openpype.creators.maya.look"
label = "Look"
family = "look"
icon = "paint-brush"
make_tx = True
rs_tex = False
def __init__(self, *args, **kwargs):
super(CreateLook, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
self.data["renderlayer"] = lib.get_current_renderlayer()
return [
# TODO: This value should actually get set on create!
TextDef("renderLayer",
# TODO: Bug: Hidden attribute's label is still shown in UI?
hidden=True,
default=lib.get_current_renderlayer(),
label="Renderlayer",
tooltip="Renderlayer to extract the look from"),
BoolDef("maketx",
label="MakeTX",
tooltip="Whether to generate .tx files for your textures",
default=self.make_tx),
BoolDef("rstex",
label="Convert textures to .rstex",
tooltip="Whether to generate Redshift .rstex files for "
"your textures",
default=self.rs_tex),
BoolDef("forceCopy",
label="Force Copy",
tooltip="Enable users to force a copy instead of hardlink."
"\nNote: On Windows copy is always forced due to "
"bugs in windows' implementation of hardlinks.",
default=False)
]
# Whether to automatically convert the textures to .tx upon publish.
self.data["maketx"] = self.make_tx
# Whether to automatically convert the textures to .rstex upon publish.
self.data["rstex"] = self.rs_tex
# Enable users to force a copy.
# - on Windows is "forceCopy" always changed to `True` because of
# windows implementation of hardlinks
self.data["forceCopy"] = False
def get_pre_create_attr_defs(self):
# Show same attributes on create but include use selection
defs = super(CreateLook, self).get_pre_create_attr_defs()
defs.extend(self.get_instance_attr_defs())
return defs

View file

@ -1,9 +1,10 @@
from openpype.hosts.maya.api import plugin
class CreateMayaScene(plugin.Creator):
class CreateMayaScene(plugin.MayaCreator):
"""Raw Maya Scene file export"""
identifier = "io.openpype.creators.maya.mayascene"
name = "mayaScene"
label = "Maya Scene"
family = "mayaScene"

View file

@ -1,26 +1,43 @@
from openpype.hosts.maya.api import plugin
from openpype.lib import (
BoolDef,
TextDef
)
class CreateModel(plugin.Creator):
class CreateModel(plugin.MayaCreator):
"""Polygonal static geometry"""
name = "modelMain"
identifier = "io.openpype.creators.maya.model"
label = "Model"
family = "model"
icon = "cube"
defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
# Vertex colors with the geometry
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = self.write_face_sets
def get_instance_attr_defs(self):
# Include attributes by attribute name or prefix
self.data["attr"] = ""
self.data["attrPrefix"] = ""
# Whether to include parent hierarchy of nodes in the instance
self.data["includeParentHierarchy"] = False
return [
BoolDef("writeColorSets",
label="Write vertex colors",
tooltip="Write vertex colors with the geometry",
default=self.write_color_sets),
BoolDef("writeFaceSets",
label="Write face sets",
tooltip="Write face sets with the geometry",
default=self.write_face_sets),
BoolDef("includeParentHierarchy",
label="Include Parent Hierarchy",
tooltip="Whether to include parent hierarchy of nodes in "
"the publish instance",
default=False),
TextDef("attr",
label="Custom Attributes",
default="",
placeholder="attr1, attr2"),
TextDef("attrPrefix",
label="Custom Attributes Prefix",
placeholder="prefix1, prefix2")
]

View file

@ -1,15 +1,27 @@
from openpype.hosts.maya.api import plugin
from openpype.lib import (
BoolDef,
EnumDef
)
class CreateMultiverseLook(plugin.Creator):
class CreateMultiverseLook(plugin.MayaCreator):
"""Create Multiverse Look"""
name = "mvLook"
identifier = "io.openpype.creators.maya.mvlook"
label = "Multiverse Look"
family = "mvLook"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateMultiverseLook, self).__init__(*args, **kwargs)
self.data["fileFormat"] = ["usda", "usd"]
self.data["publishMipMap"] = True
def get_instance_attr_defs(self):
return [
EnumDef("fileFormat",
label="File Format",
tooltip="USD export file format",
items=["usda", "usd"],
default="usda"),
BoolDef("publishMipMap",
label="Publish MipMap",
default=True),
]

View file

@ -1,53 +1,135 @@
from openpype.hosts.maya.api import plugin, lib
from openpype.lib import (
BoolDef,
NumberDef,
TextDef,
EnumDef
)
class CreateMultiverseUsd(plugin.Creator):
class CreateMultiverseUsd(plugin.MayaCreator):
"""Create Multiverse USD Asset"""
name = "mvUsdMain"
identifier = "io.openpype.creators.maya.mvusdasset"
label = "Multiverse USD Asset"
family = "usd"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateMultiverseUsd, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
# Add animation data first, since it maintains order.
self.data.update(lib.collect_animation_data(True))
defs = lib.collect_animation_defs(fps=True)
defs.extend([
EnumDef("fileFormat",
label="File format",
items=["usd", "usda", "usdz"],
default="usd"),
BoolDef("stripNamespaces",
label="Strip Namespaces",
default=True),
BoolDef("mergeTransformAndShape",
label="Merge Transform and Shape",
default=False),
BoolDef("writeAncestors",
label="Write Ancestors",
default=True),
BoolDef("flattenParentXforms",
label="Flatten Parent Xforms",
default=False),
BoolDef("writeSparseOverrides",
label="Write Sparse Overrides",
default=False),
BoolDef("useMetaPrimPath",
label="Use Meta Prim Path",
default=False),
TextDef("customRootPath",
label="Custom Root Path",
default=''),
TextDef("customAttributes",
label="Custom Attributes",
tooltip="Comma-separated list of attribute names",
default=''),
TextDef("nodeTypesToIgnore",
label="Node Types to Ignore",
tooltip="Comma-separated list of node types to be ignored",
default=''),
BoolDef("writeMeshes",
label="Write Meshes",
default=True),
BoolDef("writeCurves",
label="Write Curves",
default=True),
BoolDef("writeParticles",
label="Write Particles",
default=True),
BoolDef("writeCameras",
label="Write Cameras",
default=False),
BoolDef("writeLights",
label="Write Lights",
default=False),
BoolDef("writeJoints",
label="Write Joints",
default=False),
BoolDef("writeCollections",
label="Write Collections",
default=False),
BoolDef("writePositions",
label="Write Positions",
default=True),
BoolDef("writeNormals",
label="Write Normals",
default=True),
BoolDef("writeUVs",
label="Write UVs",
default=True),
BoolDef("writeColorSets",
label="Write Color Sets",
default=False),
BoolDef("writeTangents",
label="Write Tangents",
default=False),
BoolDef("writeRefPositions",
label="Write Ref Positions",
default=True),
BoolDef("writeBlendShapes",
label="Write BlendShapes",
default=False),
BoolDef("writeDisplayColor",
label="Write Display Color",
default=True),
BoolDef("writeSkinWeights",
label="Write Skin Weights",
default=False),
BoolDef("writeMaterialAssignment",
label="Write Material Assignment",
default=False),
BoolDef("writeHardwareShader",
label="Write Hardware Shader",
default=False),
BoolDef("writeShadingNetworks",
label="Write Shading Networks",
default=False),
BoolDef("writeTransformMatrix",
label="Write Transform Matrix",
default=True),
BoolDef("writeUsdAttributes",
label="Write USD Attributes",
default=True),
BoolDef("writeInstancesAsReferences",
label="Write Instances as References",
default=False),
BoolDef("timeVaryingTopology",
label="Time Varying Topology",
default=False),
TextDef("customMaterialNamespace",
label="Custom Material Namespace",
default=''),
NumberDef("numTimeSamples",
label="Num Time Samples",
default=1),
NumberDef("timeSamplesSpan",
label="Time Samples Span",
default=0.0),
])
self.data["fileFormat"] = ["usd", "usda", "usdz"]
self.data["stripNamespaces"] = True
self.data["mergeTransformAndShape"] = False
self.data["writeAncestors"] = True
self.data["flattenParentXforms"] = False
self.data["writeSparseOverrides"] = False
self.data["useMetaPrimPath"] = False
self.data["customRootPath"] = ''
self.data["customAttributes"] = ''
self.data["nodeTypesToIgnore"] = ''
self.data["writeMeshes"] = True
self.data["writeCurves"] = True
self.data["writeParticles"] = True
self.data["writeCameras"] = False
self.data["writeLights"] = False
self.data["writeJoints"] = False
self.data["writeCollections"] = False
self.data["writePositions"] = True
self.data["writeNormals"] = True
self.data["writeUVs"] = True
self.data["writeColorSets"] = False
self.data["writeTangents"] = False
self.data["writeRefPositions"] = True
self.data["writeBlendShapes"] = False
self.data["writeDisplayColor"] = True
self.data["writeSkinWeights"] = False
self.data["writeMaterialAssignment"] = False
self.data["writeHardwareShader"] = False
self.data["writeShadingNetworks"] = False
self.data["writeTransformMatrix"] = True
self.data["writeUsdAttributes"] = True
self.data["writeInstancesAsReferences"] = False
self.data["timeVaryingTopology"] = False
self.data["customMaterialNamespace"] = ''
self.data["numTimeSamples"] = 1
self.data["timeSamplesSpan"] = 0.0
return defs

View file

@ -1,26 +1,48 @@
from openpype.hosts.maya.api import plugin, lib
from openpype.lib import (
BoolDef,
NumberDef,
EnumDef
)
class CreateMultiverseUsdComp(plugin.Creator):
class CreateMultiverseUsdComp(plugin.MayaCreator):
"""Create Multiverse USD Composition"""
name = "mvUsdCompositionMain"
identifier = "io.openpype.creators.maya.mvusdcomposition"
label = "Multiverse USD Composition"
family = "mvUsdComposition"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateMultiverseUsdComp, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
# Add animation data first, since it maintains order.
self.data.update(lib.collect_animation_data(True))
defs = lib.collect_animation_defs(fps=True)
defs.extend([
EnumDef("fileFormat",
label="File format",
items=["usd", "usda"],
default="usd"),
BoolDef("stripNamespaces",
label="Strip Namespaces",
default=False),
BoolDef("mergeTransformAndShape",
label="Merge Transform and Shape",
default=False),
BoolDef("flattenContent",
label="Flatten Content",
default=False),
BoolDef("writeAsCompoundLayers",
label="Write As Compound Layers",
default=False),
BoolDef("writePendingOverrides",
label="Write Pending Overrides",
default=False),
NumberDef("numTimeSamples",
label="Num Time Samples",
default=1),
NumberDef("timeSamplesSpan",
label="Time Samples Span",
default=0.0),
])
# Order of `fileFormat` must match extract_multiverse_usd_comp.py
self.data["fileFormat"] = ["usda", "usd"]
self.data["stripNamespaces"] = False
self.data["mergeTransformAndShape"] = False
self.data["flattenContent"] = False
self.data["writeAsCompoundLayers"] = False
self.data["writePendingOverrides"] = False
self.data["numTimeSamples"] = 1
self.data["timeSamplesSpan"] = 0.0
return defs

View file

@ -1,30 +1,59 @@
from openpype.hosts.maya.api import plugin, lib
from openpype.lib import (
BoolDef,
NumberDef,
EnumDef
)
class CreateMultiverseUsdOver(plugin.Creator):
"""Create Multiverse USD Override"""
name = "mvUsdOverrideMain"
identifier = "io.openpype.creators.maya.mvusdoverride"
label = "Multiverse USD Override"
family = "mvUsdOverride"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateMultiverseUsdOver, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
defs = lib.collect_animation_defs(fps=True)
defs.extend([
EnumDef("fileFormat",
label="File format",
items=["usd", "usda"],
default="usd"),
BoolDef("writeAll",
label="Write All",
default=False),
BoolDef("writeTransforms",
label="Write Transforms",
default=True),
BoolDef("writeVisibility",
label="Write Visibility",
default=True),
BoolDef("writeAttributes",
label="Write Attributes",
default=True),
BoolDef("writeMaterials",
label="Write Materials",
default=True),
BoolDef("writeVariants",
label="Write Variants",
default=True),
BoolDef("writeVariantsDefinition",
label="Write Variants Definition",
default=True),
BoolDef("writeActiveState",
label="Write Active State",
default=True),
BoolDef("writeNamespaces",
label="Write Namespaces",
default=False),
NumberDef("numTimeSamples",
label="Num Time Samples",
default=1),
NumberDef("timeSamplesSpan",
label="Time Samples Span",
default=0.0),
])
# Add animation data first, since it maintains order.
self.data.update(lib.collect_animation_data(True))
# Order of `fileFormat` must match extract_multiverse_usd_over.py
self.data["fileFormat"] = ["usda", "usd"]
self.data["writeAll"] = False
self.data["writeTransforms"] = True
self.data["writeVisibility"] = True
self.data["writeAttributes"] = True
self.data["writeMaterials"] = True
self.data["writeVariants"] = True
self.data["writeVariantsDefinition"] = True
self.data["writeActiveState"] = True
self.data["writeNamespaces"] = False
self.data["numTimeSamples"] = 1
self.data["timeSamplesSpan"] = 0.0
return defs

View file

@ -4,47 +4,85 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.lib import (
BoolDef,
TextDef
)
class CreatePointCache(plugin.Creator):
class CreatePointCache(plugin.MayaCreator):
"""Alembic pointcache for animated data"""
name = "pointcache"
label = "Point Cache"
identifier = "io.openpype.creators.maya.pointcache"
label = "Pointcache"
family = "pointcache"
icon = "gears"
write_color_sets = False
write_face_sets = False
include_user_defined_attributes = False
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
# Add animation data
self.data.update(lib.collect_animation_data())
defs = lib.collect_animation_defs()
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
# Face sets with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
self.data["renderableOnly"] = False # Only renderable visible shapes
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups
self.data["worldSpace"] = True # Default to exporting world-space
self.data["refresh"] = False # Default to suspend refresh.
# Add options for custom attributes
value = self.include_user_defined_attributes
self.data["includeUserDefinedAttributes"] = value
self.data["attr"] = ""
self.data["attrPrefix"] = ""
defs.extend([
BoolDef("writeColorSets",
label="Write vertex colors",
tooltip="Write vertex colors with the geometry",
default=False),
BoolDef("writeFaceSets",
label="Write face sets",
tooltip="Write face sets with the geometry",
default=False),
BoolDef("renderableOnly",
label="Renderable Only",
tooltip="Only export renderable visible shapes",
default=False),
BoolDef("visibleOnly",
label="Visible Only",
tooltip="Only export dag objects visible during "
"frame range",
default=False),
BoolDef("includeParentHierarchy",
label="Include Parent Hierarchy",
tooltip="Whether to include parent hierarchy of nodes in "
"the publish instance",
default=False),
BoolDef("worldSpace",
label="World-Space Export",
default=True),
BoolDef("refresh",
label="Refresh viewport during export",
default=False),
BoolDef("includeUserDefinedAttributes",
label="Include User Defined Attributes",
default=self.include_user_defined_attributes),
TextDef("attr",
label="Custom Attributes",
default="",
placeholder="attr1, attr2"),
TextDef("attrPrefix",
label="Custom Attributes Prefix",
default="",
placeholder="prefix1, prefix2")
])
# TODO: Implement these on a Deadline plug-in instead?
"""
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
"""
def process(self):
instance = super(CreatePointCache, self).process()
return defs
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets(assProxy, forceElement=instance)
def create(self, subset_name, instance_data, pre_create_data):
instance = super(CreatePointCache, self).create(
subset_name, instance_data, pre_create_data
)
instance_node = instance.get("instance_node")
# For Arnold standin proxy
proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
cmds.sets(proxy_set, forceElement=instance_node)

View file

@ -2,34 +2,49 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.lib import (
BoolDef,
TextDef
)
class CreateProxyAlembic(plugin.Creator):
class CreateProxyAlembic(plugin.MayaCreator):
"""Proxy Alembic for animated data"""
name = "proxyAbcMain"
identifier = "io.openpype.creators.maya.proxyabc"
label = "Proxy Alembic"
family = "proxyAbc"
icon = "gears"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateProxyAlembic, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
# Add animation data
self.data.update(lib.collect_animation_data())
defs = lib.collect_animation_defs()
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
# Face sets with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
# Default to exporting world-space
self.data["worldSpace"] = True
defs.extend([
BoolDef("writeColorSets",
label="Write vertex colors",
tooltip="Write vertex colors with the geometry",
default=self.write_color_sets),
BoolDef("writeFaceSets",
label="Write face sets",
tooltip="Write face sets with the geometry",
default=self.write_face_sets),
BoolDef("worldSpace",
label="World-Space Export",
default=True),
TextDef("nameSuffix",
label="Name Suffix for Bounding Box",
default="_BBox",
placeholder="_BBox"),
TextDef("attr",
label="Custom Attributes",
default="",
placeholder="attr1, attr2"),
TextDef("attrPrefix",
label="Custom Attributes Prefix",
placeholder="prefix1, prefix2")
])
# name suffix for the bounding box
self.data["nameSuffix"] = "_BBox"
# Add options for custom attributes
self.data["attr"] = ""
self.data["attrPrefix"] = ""
return defs

View file

@ -2,22 +2,24 @@
"""Creator of Redshift proxy subset types."""
from openpype.hosts.maya.api import plugin, lib
from openpype.lib import BoolDef
class CreateRedshiftProxy(plugin.Creator):
class CreateRedshiftProxy(plugin.MayaCreator):
"""Create instance of Redshift Proxy subset."""
name = "redshiftproxy"
identifier = "io.openpype.creators.maya.redshiftproxy"
label = "Redshift Proxy"
family = "redshiftproxy"
icon = "gears"
def __init__(self, *args, **kwargs):
super(CreateRedshiftProxy, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
animation_data = lib.collect_animation_data()
defs = [
BoolDef("animation",
label="Export animation",
default=False)
]
self.data["animation"] = False
self.data["proxyFrameStart"] = animation_data["frameStart"]
self.data["proxyFrameEnd"] = animation_data["frameEnd"]
self.data["proxyFrameStep"] = animation_data["step"]
defs.extend(lib.collect_animation_defs())
return defs
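A note for context (not part of the diff): throughout this changeset, lib.collect_animation_defs() replaces the older lib.collect_animation_data() dictionary. Judging from how the returned definitions are filtered and overridden elsewhere in these files, it appears to yield defs keyed frameStart, frameEnd, handleStart, handleEnd and step (plus fps when called with fps=True); a hypothetical usage sketch:

    from openpype.hosts.maya.api import lib

    defs = lib.collect_animation_defs(fps=True)
    # Inspect the keys the publisher UI will expose; the exact list is an
    # assumption inferred from this changeset, not a documented contract.
    print([attr_def.key for attr_def in defs])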

View file

@ -1,425 +1,108 @@
# -*- coding: utf-8 -*-
"""Create ``Render`` instance in Maya."""
import json
import os
import appdirs
import requests
from maya import cmds
from maya.app.renderSetup.model import renderSetup
from openpype.settings import (
get_system_settings,
get_project_settings,
)
from openpype.lib import requests_get
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api import (
lib,
lib_rendersettings,
plugin
)
from openpype.pipeline import CreatorError
from openpype.lib import (
BoolDef,
NumberDef,
)
class CreateRender(plugin.Creator):
"""Create *render* instance.
class CreateRenderlayer(plugin.RenderlayerCreator):
"""Create and manages renderlayer subset per renderLayer in workfile.
Render instances are not actually published; they hold options for
collecting render data. If a render instance is present, it will trigger
collection of render layers, AOVs and cameras for either direct submission
to the render farm or export as various standalone formats (like V-Ray's
``vrscenes`` or Arnold's ``ass`` files) that are then submitted to the render
farm.
Instance has following attributes::
primaryPool (list of str): Primary list of slave machine pool to use.
secondaryPool (list of str): Optional secondary list of slave pools.
suspendPublishJob (bool): Suspend the job after it is submitted.
extendFrames (bool): Use already existing frames from previous version
to extend current render.
overrideExistingFrame (bool): Overwrite already existing frames.
priority (int): Submitted job priority
framesPerTask (int): How many frames per task to render. This is
basically job division on render farm.
whitelist (list of str): White list of slave machines
machineList (list of str): Specific list of slave machines to use
useMayaBatch (bool): Use Maya batch mode to render as opposed to
Maya interactive mode. This consumes different licenses.
vrscene (bool): Submit as ``vrscene`` file for standalone V-Ray
renderer.
ass (bool): Submit as ``ass`` file for standalone Arnold renderer.
tileRendering (bool): Instance is set to tile rendering mode. We
won't submit the actual render, but we'll make the publish job wait
for the Tile Assembly job to finish and then publish.
strict_error_checking (bool): Enable/disable error checking on DL
See Also:
https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup
This generates a single node in the scene which, if it exists, tells the
Creator to collect Maya rendersetup renderlayers as individual instances.
As such, triggering create doesn't actually create the instance node per
layer but only the node which tells the Creator it may now collect
the renderlayers.
"""
identifier = "io.openpype.creators.maya.renderlayer"
family = "renderlayer"
label = "Render"
family = "rendering"
icon = "eye"
_token = None
_user = None
_password = None
_project_settings = None
layer_instance_prefix = "render"
singleton_node_name = "renderingMain"
def __init__(self, *args, **kwargs):
"""Constructor."""
super(CreateRender, self).__init__(*args, **kwargs)
render_settings = {}
# Defaults
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"])
if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]: # noqa
@classmethod
def apply_settings(cls, project_settings, system_settings):
cls.render_settings = project_settings["maya"]["RenderSettings"]
def create(self, subset_name, instance_data, pre_create_data):
# Only allow a single render instance to exist
if self._get_singleton_node():
raise CreatorError("A Render instance already exists - only "
"one can be configured.")
# Apply default project render settings on create
if self.render_settings.get("apply_render_settings"):
lib_rendersettings.RenderSettings().set_default_renderer_settings()
# Deadline-only
manager = ModulesManager()
deadline_settings = get_system_settings()["modules"]["deadline"]
if not deadline_settings["enabled"]:
self.deadline_servers = {}
return
self.deadline_module = manager.modules_by_name["deadline"]
try:
default_servers = deadline_settings["deadline_urls"]
project_servers = (
self._project_settings["deadline"]["deadline_servers"]
)
self.deadline_servers = {
k: default_servers[k]
for k in project_servers
if k in default_servers
}
super(CreateRenderlayer, self).create(subset_name,
instance_data,
pre_create_data)
if not self.deadline_servers:
self.deadline_servers = default_servers
except AttributeError:
# Handle situation were we had only one url for deadline.
# get default deadline webservice url from deadline module
self.deadline_servers = self.deadline_module.deadline_urls
def process(self):
"""Entry point."""
exists = cmds.ls(self.name)
if exists:
cmds.warning("%s already exists." % exists[0])
return
use_selection = self.options.get("useSelection")
with lib.undo_chunk():
self._create_render_settings()
self.instance = super(CreateRender, self).process()
# create namespace with instance
index = 1
namespace_name = "_{}".format(str(self.instance))
try:
cmds.namespace(rm=namespace_name)
except RuntimeError:
# namespace is not empty, so we leave it untouched
pass
while cmds.namespace(exists=namespace_name):
namespace_name = "_{}{}".format(str(self.instance), index)
index += 1
namespace = cmds.namespace(add=namespace_name)
# add Deadline server selection list
if self.deadline_servers:
cmds.scriptJob(
attributeChange=[
"{}.deadlineServers".format(self.instance),
self._deadline_webservice_changed
])
cmds.setAttr("{}.machineList".format(self.instance), lock=True)
rs = renderSetup.instance()
layers = rs.getRenderLayers()
if use_selection:
self.log.info("Processing existing layers")
sets = []
for layer in layers:
self.log.info(" - creating set for {}:{}".format(
namespace, layer.name()))
render_set = cmds.sets(
n="{}:{}".format(namespace, layer.name()))
sets.append(render_set)
cmds.sets(sets, forceElement=self.instance)
# if no render layers are present, create default one with
# asterisk selector
if not layers:
render_layer = rs.createRenderLayer('Main')
collection = render_layer.createCollection("defaultCollection")
collection.getSelector().setPattern('*')
return self.instance
def _deadline_webservice_changed(self):
"""Refresh Deadline server dependent options."""
# get selected server
webservice = self.deadline_servers[
self.server_aliases[
cmds.getAttr("{}.deadlineServers".format(self.instance))
]
]
pools = self.deadline_module.get_deadline_pools(webservice, self.log)
cmds.deleteAttr("{}.primaryPool".format(self.instance))
cmds.deleteAttr("{}.secondaryPool".format(self.instance))
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
primary_pool = pool_setting["primary_pool"]
sorted_pools = self._set_default_pool(list(pools), primary_pool)
cmds.addAttr(
self.instance,
longName="primaryPool",
attributeType="enum",
enumName=":".join(sorted_pools)
)
cmds.setAttr(
"{}.primaryPool".format(self.instance),
0,
keyable=False,
channelBox=True
)
pools = ["-"] + pools
secondary_pool = pool_setting["secondary_pool"]
sorted_pools = self._set_default_pool(list(pools), secondary_pool)
cmds.addAttr(
self.instance,
longName="secondaryPool",
attributeType="enum",
enumName=":".join(sorted_pools)
)
cmds.setAttr(
"{}.secondaryPool".format(self.instance),
0,
keyable=False,
channelBox=True
)
def _create_render_settings(self):
def get_instance_attr_defs(self):
"""Create instance settings."""
# get pools (slave machines of the render farm)
pool_names = []
default_priority = 50
self.data["suspendPublishJob"] = False
self.data["review"] = True
self.data["extendFrames"] = False
self.data["overrideExistingFrame"] = True
# self.data["useLegacyRenderLayers"] = True
self.data["priority"] = default_priority
self.data["tile_priority"] = default_priority
self.data["framesPerTask"] = 1
self.data["whitelist"] = False
self.data["machineList"] = ""
self.data["useMayaBatch"] = False
self.data["tileRendering"] = False
self.data["tilesX"] = 2
self.data["tilesY"] = 2
self.data["convertToScanline"] = False
self.data["useReferencedAovs"] = False
self.data["renderSetupIncludeLights"] = (
self._project_settings.get(
"maya", {}).get(
"RenderSettings", {}).get(
"enable_all_lights", False)
)
# Disable for now as this feature is not working yet
# self.data["assScene"] = False
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
BoolDef("extendFrames",
label="Extend Frames",
tooltip="Extends the frames on top of the previous "
"publish.\nIf the previous was 1001-1050 and you "
"would now submit 1020-1070 only the new frames "
"1051-1070 would be rendered and published "
"together with the previously rendered frames.\n"
"If 'overrideExistingFrame' is enabled it *will* "
"render any existing frames.",
default=False),
BoolDef("overrideExistingFrame",
label="Override Existing Frame",
tooltip="Override existing rendered frames "
"(if they exist).",
default=True),
system_settings = get_system_settings()["modules"]
# TODO: Should these move to submit_maya_deadline plugin?
# Tile rendering
BoolDef("tileRendering",
label="Enable tiled rendering",
default=False),
NumberDef("tilesX",
label="Tiles X",
default=2,
minimum=1,
decimals=0),
NumberDef("tilesY",
label="Tiles Y",
default=2,
minimum=1,
decimals=0),
deadline_enabled = system_settings["deadline"]["enabled"]
muster_enabled = system_settings["muster"]["enabled"]
muster_url = system_settings["muster"]["MUSTER_REST_URL"]
# Additional settings
BoolDef("convertToScanline",
label="Convert to Scanline",
tooltip="Convert the output images to scanline images",
default=False),
BoolDef("useReferencedAovs",
label="Use Referenced AOVs",
tooltip="Consider the AOVs from referenced scenes as well",
default=False),
if deadline_enabled and muster_enabled:
self.log.error(
"Both Deadline and Muster are enabled. " "Cannot support both."
)
raise RuntimeError("Both Deadline and Muster are enabled")
if deadline_enabled:
self.server_aliases = list(self.deadline_servers.keys())
self.data["deadlineServers"] = self.server_aliases
try:
deadline_url = self.deadline_servers["default"]
except KeyError:
# if 'default' server is not between selected,
# use first one for initial list of pools.
deadline_url = next(iter(self.deadline_servers.values()))
# Uses function to get pool machines from the assigned deadline
# url in settings
pool_names = self.deadline_module.get_deadline_pools(deadline_url,
self.log)
maya_submit_dl = self._project_settings.get(
"deadline", {}).get(
"publish", {}).get(
"MayaSubmitDeadline", {})
priority = maya_submit_dl.get("priority", default_priority)
self.data["priority"] = priority
tile_priority = maya_submit_dl.get("tile_priority",
default_priority)
self.data["tile_priority"] = tile_priority
strict_error_checking = maya_submit_dl.get("strict_error_checking",
True)
self.data["strict_error_checking"] = strict_error_checking
# Pool attributes should be last since they will be recreated when
# the deadline server changes.
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
primary_pool = pool_setting["primary_pool"]
self.data["primaryPool"] = self._set_default_pool(pool_names,
primary_pool)
# We add a string "-" to allow the user to not
# set any secondary pools
pool_names = ["-"] + pool_names
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")
self._load_credentials()
self.log.info(">>> Getting pools ...")
pools = []
try:
pools = self._get_muster_pools()
except requests.exceptions.HTTPError as e:
if e.startswith("401"):
self.log.warning("access token expired")
self._show_login()
raise RuntimeError("Access token expired")
except requests.exceptions.ConnectionError:
self.log.error("Cannot connect to Muster API endpoint.")
raise RuntimeError("Cannot connect to {}".format(muster_url))
for pool in pools:
self.log.info(" - pool: {}".format(pool["name"]))
pool_names.append(pool["name"])
self.options = {"useSelection": False} # Force no content
def _set_default_pool(self, pool_names, pool_value):
"""Reorder pool names, default should come first"""
if pool_value and pool_value in pool_names:
pool_names.remove(pool_value)
pool_names = [pool_value] + pool_names
return pool_names
def _load_credentials(self):
"""Load Muster credentials.
Load Muster credentials from file and set ``MUSTER_USER`` and
``MUSTER_PASSWORD``; ``MUSTER_REST_URL`` is loaded from settings.
Raises:
RuntimeError: If loaded credentials are invalid.
AttributeError: If ``MUSTER_REST_URL`` is not set.
"""
app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
file_name = "muster_cred.json"
fpath = os.path.join(app_dir, file_name)
file = open(fpath, "r")
muster_json = json.load(file)
self._token = muster_json.get("token", None)
if not self._token:
self._show_login()
raise RuntimeError("Invalid access token for Muster")
file.close()
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if not self.MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
def _get_muster_pools(self):
"""Get render pools from Muster.
Raises:
Exception: If pool list cannot be obtained from Muster.
"""
params = {"authToken": self._token}
api_entry = "/api/pools/list"
response = requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
if response.status_code != 200:
if response.status_code == 401:
self.log.warning("Authentication token expired.")
self._show_login()
else:
self.log.error(
("Cannot get pools from "
"Muster: {}").format(response.status_code)
)
raise Exception("Cannot get pools from Muster")
try:
pools = response.json()["ResponseData"]["pools"]
except ValueError as e:
self.log.error("Invalid response from Muster server {}".format(e))
raise Exception("Invalid response from Muster server")
return pools
def _show_login(self):
# authentication token expired so we need to login to Muster
# again to get it. We use Pype API call to show login window.
api_url = "{}/muster/show_login".format(
os.environ["OPENPYPE_WEBSERVER_URL"])
self.log.debug(api_url)
login_response = requests_get(api_url, timeout=1)
if login_response.status_code != 200:
self.log.error("Cannot show login form to Muster")
raise Exception("Cannot show login form to Muster")
def _requests_post(self, *args, **kwargs):
"""Wrap request post method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True)
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
"""Wrap request get method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True)
return requests.get(*args, **kwargs)
BoolDef("renderSetupIncludeLights",
label="Render Setup Include Lights",
default=self.render_settings.get("enable_all_lights",
False))
]
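For illustration only (not part of the commit): the docstring above describes how a single "renderingMain" node makes the creator collect one instance per renderSetup layer. Listing those layers uses Maya's renderSetup API, just as the removed code did:

    from maya.app.renderSetup.model import renderSetup

    rs = renderSetup.instance()
    for layer in rs.getRenderLayers():
        # One renderlayer instance would be managed per layer found here.
        print(layer.name())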

View file

@ -1,55 +1,31 @@
from openpype.hosts.maya.api import (
lib,
plugin
)
from maya import cmds
from openpype.hosts.maya.api import plugin
from openpype.pipeline import CreatorError
class CreateRenderSetup(plugin.Creator):
class CreateRenderSetup(plugin.MayaCreator):
"""Create rendersetup template json data"""
name = "rendersetup"
identifier = "io.openpype.creators.maya.rendersetup"
label = "Render Setup Preset"
family = "rendersetup"
icon = "tablet"
def __init__(self, *args, **kwargs):
super(CreateRenderSetup, self).__init__(*args, **kwargs)
def get_pre_create_attr_defs(self):
# Do not show the "use_selection" setting from parent class
return []
# here we can pre-create renderSetup layers, possibly utilizing
# settings for it.
def create(self, subset_name, instance_data, pre_create_data):
# _____
# / __\__
# | / __\__
# | | / \
# | | | |
# \__| | |
# \__| |
# \_____/
existing_instance = None
for instance in self.create_context.instances:
if instance.family == self.family:
existing_instance = instance
break
# from pype.api import get_project_settings
# import maya.app.renderSetup.model.renderSetup as renderSetup
# settings = get_project_settings(os.environ['AVALON_PROJECT'])
# layer = settings['maya']['create']['renderSetup']["layer"]
if existing_instance:
raise CreatorError("A RenderSetup instance already exists - only "
"one can be configured.")
# rs = renderSetup.instance()
# rs.createRenderLayer(layer)
self.options = {"useSelection": False} # Force no content
def process(self):
exists = cmds.ls(self.name)
assert len(exists) <= 1, (
"More than one renderglobal exists, this is a bug"
)
if exists:
return cmds.warning("%s already exists." % exists[0])
with lib.undo_chunk():
instance = super(CreateRenderSetup, self).process()
self.data["renderSetup"] = "42"
null = cmds.sets(name="null_SET", empty=True)
cmds.sets([null], forceElement=instance)
super(CreateRenderSetup, self).create(subset_name,
instance_data,
pre_create_data)

View file

@ -1,76 +1,142 @@
import os
from collections import OrderedDict
import json
from maya import cmds
from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.settings import get_project_settings
from openpype.pipeline import get_current_project_name, get_current_task_name
from openpype.lib import (
BoolDef,
NumberDef,
EnumDef
)
from openpype.pipeline import CreatedInstance
from openpype.client import get_asset_by_name
TRANSPARENCIES = [
"preset",
"simple",
"object sorting",
"weighted average",
"depth peeling",
"alpha cut"
]
class CreateReview(plugin.Creator):
"""Single baked camera"""
name = "reviewDefault"
class CreateReview(plugin.MayaCreator):
"""Playblast reviewable"""
identifier = "io.openpype.creators.maya.review"
label = "Review"
family = "review"
icon = "video-camera"
keepImages = False
isolate = False
imagePlane = True
Width = 0
Height = 0
transparency = [
"preset",
"simple",
"object sorting",
"weighted average",
"depth peeling",
"alpha cut"
]
useMayaTimeline = True
panZoom = False
def __init__(self, *args, **kwargs):
super(CreateReview, self).__init__(*args, **kwargs)
data = OrderedDict(**self.data)
# Overriding "create" method to prefill values from settings.
def create(self, subset_name, instance_data, pre_create_data):
project_name = get_current_project_name()
asset_doc = get_asset_by_name(project_name, data["asset"])
task_name = get_current_task_name()
members = list()
if pre_create_data.get("use_selection"):
members = cmds.ls(selection=True)
project_name = self.project_name
asset_doc = get_asset_by_name(project_name, instance_data["asset"])
task_name = instance_data["task"]
preset = lib.get_capture_preset(
task_name,
asset_doc["data"]["tasks"][task_name]["type"],
data["subset"],
get_project_settings(project_name),
subset_name,
self.project_settings,
self.log
)
if os.environ.get("OPENPYPE_DEBUG") == "1":
self.log.debug(
"Using preset: {}".format(
json.dumps(preset, indent=4, sort_keys=True)
)
self.log.debug(
"Using preset: {}".format(
json.dumps(preset, indent=4, sort_keys=True)
)
)
with lib.undo_chunk():
instance_node = cmds.sets(members, name=subset_name)
instance_data["instance_node"] = instance_node
instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self)
creator_attribute_defs_by_key = {
x.key: x for x in instance.creator_attribute_defs
}
mapping = {
"review_width": preset["Resolution"]["width"],
"review_height": preset["Resolution"]["height"],
"isolate": preset["Generic"]["isolate_view"],
"imagePlane": preset["Viewport Options"]["imagePlane"],
"panZoom": preset["Generic"]["pan_zoom"]
}
for key, value in mapping.items():
creator_attribute_defs_by_key[key].default = value
self._add_instance_to_context(instance)
self.imprint_instance_node(instance_node,
data=instance.data_to_store())
return instance
def get_instance_attr_defs(self):
defs = lib.collect_animation_defs()
# Option for using Maya or asset frame range in settings.
frame_range = lib.get_frame_range()
if self.useMayaTimeline:
frame_range = lib.collect_animation_data(fps=True)
for key, value in frame_range.items():
data[key] = value
if not self.useMayaTimeline:
# Update the defaults to be the asset frame range
frame_range = lib.get_frame_range()
defs_by_key = {attr_def.key: attr_def for attr_def in defs}
for key, value in frame_range.items():
if key not in defs_by_key:
raise RuntimeError("Attribute definition not found to be "
"updated for key: {}".format(key))
attr_def = defs_by_key[key]
attr_def.default = value
data["fps"] = lib.collect_animation_data(fps=True)["fps"]
defs.extend([
NumberDef("review_width",
label="Review width",
tooltip="A value of zero will use the asset resolution.",
decimals=0,
minimum=0,
default=0),
NumberDef("review_height",
label="Review height",
tooltip="A value of zero will use the asset resolution.",
decimals=0,
minimum=0,
default=0),
BoolDef("keepImages",
label="Keep Images",
tooltip="Whether to also publish along the image sequence "
"next to the video reviewable.",
default=False),
BoolDef("isolate",
label="Isolate render members of instance",
tooltip="When enabled only the members of the instance "
"will be included in the playblast review.",
default=False),
BoolDef("imagePlane",
label="Show Image Plane",
default=True),
EnumDef("transparency",
label="Transparency",
items=TRANSPARENCIES),
BoolDef("panZoom",
label="Enable camera pan/zoom",
default=True),
EnumDef("displayLights",
label="Display Lights",
items=lib.DISPLAY_LIGHTS_ENUM),
])
data["keepImages"] = self.keepImages
data["transparency"] = self.transparency
data["review_width"] = preset["Resolution"]["width"]
data["review_height"] = preset["Resolution"]["height"]
data["isolate"] = preset["Generic"]["isolate_view"]
data["imagePlane"] = preset["Viewport Options"]["imagePlane"]
data["panZoom"] = preset["Generic"]["pan_zoom"]
data["displayLights"] = lib.DISPLAY_LIGHTS_LABELS
self.data = data
return defs

View file

@ -1,25 +1,25 @@
from maya import cmds
from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.hosts.maya.api import plugin
class CreateRig(plugin.Creator):
class CreateRig(plugin.MayaCreator):
"""Artist-friendly rig with controls to direct motion"""
name = "rigDefault"
identifier = "io.openpype.creators.maya.rig"
label = "Rig"
family = "rig"
icon = "wheelchair"
def process(self):
def create(self, subset_name, instance_data, pre_create_data):
with lib.undo_chunk():
instance = super(CreateRig, self).process()
instance = super(CreateRig, self).create(subset_name,
instance_data,
pre_create_data)
self.log.info("Creating Rig instance set up ...")
controls = cmds.sets(name="controls_SET", empty=True)
pointcache = cmds.sets(name="out_SET", empty=True)
cmds.sets([controls, pointcache], forceElement=instance)
instance_node = instance.get("instance_node")
self.log.info("Creating Rig instance set up ...")
controls = cmds.sets(name="controls_SET", empty=True)
pointcache = cmds.sets(name="out_SET", empty=True)
cmds.sets([controls, pointcache], forceElement=instance_node)
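For illustration only (not part of the commit): the rig creator above builds controls_SET and out_SET inside the instance set; their members can later be inspected with a plain set query:

    from maya import cmds

    controls = cmds.sets("controls_SET", query=True) or []
    out_geometry = cmds.sets("out_SET", query=True) or []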

View file

@ -1,16 +1,19 @@
from openpype.hosts.maya.api import plugin
from openpype.lib import BoolDef
class CreateSetDress(plugin.Creator):
class CreateSetDress(plugin.MayaCreator):
"""A grouped package of loaded content"""
name = "setdressMain"
identifier = "io.openpype.creators.maya.setdress"
label = "Set Dress"
family = "setdress"
icon = "cubes"
defaults = ["Main", "Anim"]
def __init__(self, *args, **kwargs):
super(CreateSetDress, self).__init__(*args, **kwargs)
self.data["exactSetMembersOnly"] = True
def get_instance_attr_defs(self):
return [
BoolDef("exactSetMembersOnly",
label="Exact Set Members Only",
default=True)
]

View file

@ -1,47 +1,63 @@
# -*- coding: utf-8 -*-
"""Creator for Unreal Skeletal Meshes."""
from openpype.hosts.maya.api import plugin, lib
from openpype.pipeline import legacy_io
from openpype.lib import (
BoolDef,
TextDef
)
from maya import cmds # noqa
class CreateUnrealSkeletalMesh(plugin.Creator):
class CreateUnrealSkeletalMesh(plugin.MayaCreator):
"""Unreal Static Meshes with collisions."""
name = "staticMeshMain"
identifier = "io.openpype.creators.maya.unrealskeletalmesh"
label = "Unreal - Skeletal Mesh"
family = "skeletalMesh"
icon = "thumbs-up"
dynamic_subset_keys = ["asset"]
joint_hints = []
# Defined in settings
joint_hints = set()
def __init__(self, *args, **kwargs):
"""Constructor."""
super(CreateUnrealSkeletalMesh, self).__init__(*args, **kwargs)
@classmethod
def get_dynamic_data(
cls, variant, task_name, asset_id, project_name, host_name
):
dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
def apply_settings(self, project_settings, system_settings):
"""Apply project settings to creator"""
settings = (
project_settings["maya"]["create"]["CreateUnrealSkeletalMesh"]
)
dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET")
self.joint_hints = set(settings.get("joint_hints", []))
def get_dynamic_data(
self, variant, task_name, asset_doc, project_name, host_name, instance
):
"""
The default subset name templates for Unreal include {asset} and thus
we should pass that along as dynamic data.
"""
dynamic_data = super(CreateUnrealSkeletalMesh, self).get_dynamic_data(
variant, task_name, asset_doc, project_name, host_name, instance
)
dynamic_data["asset"] = asset_doc["name"]
return dynamic_data
def process(self):
self.name = "{}_{}".format(self.family, self.name)
with lib.undo_chunk():
instance = super(CreateUnrealSkeletalMesh, self).process()
content = cmds.sets(instance, query=True)
def create(self, subset_name, instance_data, pre_create_data):
with lib.undo_chunk():
instance = super(CreateUnrealSkeletalMesh, self).create(
subset_name, instance_data, pre_create_data)
instance_node = instance.get("instance_node")
# We reorganize the geometry that was originally added into the
# set into either 'joints_SET' or 'geometry_SET' based on the
# joint_hints from project settings
members = cmds.sets(instance_node, query=True)
cmds.sets(clear=instance_node)
# empty set and process its former content
cmds.sets(content, rm=instance)
geometry_set = cmds.sets(name="geometry_SET", empty=True)
joints_set = cmds.sets(name="joints_SET", empty=True)
cmds.sets([geometry_set, joints_set], forceElement=instance)
members = cmds.ls(content) or []
cmds.sets([geometry_set, joints_set], forceElement=instance_node)
for node in members:
if node in self.joint_hints:
@ -49,20 +65,38 @@ class CreateUnrealSkeletalMesh(plugin.Creator):
else:
cmds.sets(node, forceElement=geometry_set)
# Add animation data
self.data.update(lib.collect_animation_data())
def get_instance_attr_defs(self):
# Only renderable visible shapes
self.data["renderableOnly"] = False
# only nodes that are visible
self.data["visibleOnly"] = False
# Include parent groups
self.data["includeParentHierarchy"] = False
# Default to exporting world-space
self.data["worldSpace"] = True
# Default to suspend refresh.
self.data["refresh"] = False
defs = lib.collect_animation_defs()
# Add options for custom attributes
self.data["attr"] = ""
self.data["attrPrefix"] = ""
defs.extend([
BoolDef("renderableOnly",
label="Renderable Only",
tooltip="Only export renderable visible shapes",
default=False),
BoolDef("visibleOnly",
label="Visible Only",
tooltip="Only export dag objects visible during "
"frame range",
default=False),
BoolDef("includeParentHierarchy",
label="Include Parent Hierarchy",
tooltip="Whether to include parent hierarchy of nodes in "
"the publish instance",
default=False),
BoolDef("worldSpace",
label="World-Space Export",
default=True),
BoolDef("refresh",
label="Refresh viewport during export",
default=False),
TextDef("attr",
label="Custom Attributes",
default="",
placeholder="attr1, attr2"),
TextDef("attrPrefix",
label="Custom Attributes Prefix",
placeholder="prefix1, prefix2")
])
return defs

View file

@ -1,58 +1,90 @@
# -*- coding: utf-8 -*-
"""Creator for Unreal Static Meshes."""
from openpype.hosts.maya.api import plugin, lib
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from maya import cmds # noqa
class CreateUnrealStaticMesh(plugin.Creator):
class CreateUnrealStaticMesh(plugin.MayaCreator):
"""Unreal Static Meshes with collisions."""
name = "staticMeshMain"
identifier = "io.openpype.creators.maya.unrealstaticmesh"
label = "Unreal - Static Mesh"
family = "staticMesh"
icon = "cube"
dynamic_subset_keys = ["asset"]
def __init__(self, *args, **kwargs):
"""Constructor."""
super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs)
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"])
# Defined in settings
collision_prefixes = []
def apply_settings(self, project_settings, system_settings):
"""Apply project settings to creator"""
settings = project_settings["maya"]["create"]["CreateUnrealStaticMesh"]
self.collision_prefixes = settings["collision_prefixes"]
@classmethod
def get_dynamic_data(
cls, variant, task_name, asset_id, project_name, host_name
self, variant, task_name, asset_doc, project_name, host_name, instance
):
dynamic_data = super(CreateUnrealStaticMesh, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
"""
The default subset name templates for Unreal include {asset} and thus
we should pass that along as dynamic data.
"""
dynamic_data = super(CreateUnrealStaticMesh, self).get_dynamic_data(
variant, task_name, asset_doc, project_name, host_name, instance
)
dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET")
dynamic_data["asset"] = asset_doc["name"]
return dynamic_data
def process(self):
self.name = "{}_{}".format(self.family, self.name)
with lib.undo_chunk():
instance = super(CreateUnrealStaticMesh, self).process()
content = cmds.sets(instance, query=True)
def create(self, subset_name, instance_data, pre_create_data):
with lib.undo_chunk():
instance = super(CreateUnrealStaticMesh, self).create(
subset_name, instance_data, pre_create_data)
instance_node = instance.get("instance_node")
# We reorganize the geometry that was originally added into the
# set into either 'collision_SET' or 'geometry_SET' based on the
# collision_prefixes from project settings
members = cmds.sets(instance_node, query=True)
cmds.sets(clear=instance_node)
# empty set and process its former content
cmds.sets(content, rm=instance)
geometry_set = cmds.sets(name="geometry_SET", empty=True)
collisions_set = cmds.sets(name="collisions_SET", empty=True)
cmds.sets([geometry_set, collisions_set], forceElement=instance)
cmds.sets([geometry_set, collisions_set],
forceElement=instance_node)
members = cmds.ls(content, long=True) or []
members = cmds.ls(members, long=True) or []
children = cmds.listRelatives(members, allDescendents=True,
fullPath=True) or []
children = cmds.ls(children, type="transform")
for node in children:
if cmds.listRelatives(node, type="shape"):
if [
n for n in self.collision_prefixes
if node.startswith(n)
]:
cmds.sets(node, forceElement=collisions_set)
else:
cmds.sets(node, forceElement=geometry_set)
transforms = cmds.ls(members + children, type="transform")
for transform in transforms:
if not cmds.listRelatives(transform,
type="shape",
noIntermediate=True):
# Exclude all transforms that have no direct shapes
continue
if self.has_collision_prefix(transform):
cmds.sets(transform, forceElement=collisions_set)
else:
cmds.sets(transform, forceElement=geometry_set)
def has_collision_prefix(self, node_path):
"""Return whether node name of path matches collision prefix.
If the node name matches the collision prefix we add it to the
`collisions_SET` instead of the `geometry_SET`.
Args:
node_path (str): Maya node path.
Returns:
bool: Whether the node should be considered a collision mesh.
"""
node_name = node_path.rsplit("|", 1)[-1]
for prefix in self.collision_prefixes:
if node_name.startswith(prefix):
return True
return False
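As an illustration only (not part of the commit), a minimal self-contained sketch of the prefix check above; the prefix values are assumed examples, not the project defaults:
collision_prefixes = ["UBX_", "UCX_", "UCP_", "USP_"]  # assumed example prefixes

def has_collision_prefix(node_path):
    # Only the short node name is compared, so full DAG paths classify correctly.
    node_name = node_path.rsplit("|", 1)[-1]
    return any(node_name.startswith(prefix) for prefix in collision_prefixes)

print(has_collision_prefix("|env|UBX_wall"))  # True  -> collisions_SET
print(has_collision_prefix("|env|wall_GEO"))  # False -> geometry_SET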

View file

@ -1,10 +1,14 @@
from openpype.hosts.maya.api import plugin
from openpype.hosts.maya.api import (
plugin,
lib
)
from openpype.lib import BoolDef
class CreateVrayProxy(plugin.Creator):
class CreateVrayProxy(plugin.MayaCreator):
"""Alembic pointcache for animated data"""
name = "vrayproxy"
identifier = "io.openpype.creators.maya.vrayproxy"
label = "VRay Proxy"
family = "vrayproxy"
icon = "gears"
@ -12,15 +16,35 @@ class CreateVrayProxy(plugin.Creator):
vrmesh = True
alembic = True
def __init__(self, *args, **kwargs):
super(CreateVrayProxy, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):
self.data["animation"] = False
self.data["frameStart"] = 1
self.data["frameEnd"] = 1
defs = [
BoolDef("animation",
label="Export Animation",
default=False)
]
# Write vertex colors
self.data["vertexColors"] = False
# Add time range attributes but remove some attributes
# which this instance actually doesn't use
defs.extend(lib.collect_animation_defs())
remove = {"handleStart", "handleEnd", "step"}
defs = [attr_def for attr_def in defs if attr_def.key not in remove]
self.data["vrmesh"] = self.vrmesh
self.data["alembic"] = self.alembic
defs.extend([
BoolDef("vertexColors",
label="Write vertex colors",
tooltip="Write vertex colors with the geometry",
default=False),
BoolDef("vrmesh",
label="Export VRayMesh",
tooltip="Publish a .vrmesh (VRayMesh) file for "
"this VRayProxy",
default=self.vrmesh),
BoolDef("alembic",
label="Export Alembic",
tooltip="Publish a .abc (Alembic) file for "
"this VRayProxy",
default=self.alembic),
])
return defs

View file

@ -1,266 +1,52 @@
# -*- coding: utf-8 -*-
"""Create instance of vrayscene."""
import os
import json
import appdirs
import requests
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
from openpype.hosts.maya.api import (
lib,
lib_rendersettings,
plugin
)
from openpype.settings import (
get_system_settings,
get_project_settings
)
from openpype.lib import requests_get
from openpype.pipeline import (
CreatorError,
legacy_io,
)
from openpype.modules import ModulesManager
from openpype.pipeline import CreatorError
from openpype.lib import BoolDef
class CreateVRayScene(plugin.Creator):
class CreateVRayScene(plugin.RenderlayerCreator):
"""Create Vray Scene."""
label = "VRay Scene"
identifier = "io.openpype.creators.maya.vrayscene"
family = "vrayscene"
label = "VRay Scene"
icon = "cubes"
_project_settings = None
render_settings = {}
singleton_node_name = "vraysceneMain"
def __init__(self, *args, **kwargs):
"""Entry."""
super(CreateVRayScene, self).__init__(*args, **kwargs)
self._rs = renderSetup.instance()
self.data["exportOnFarm"] = False
deadline_settings = get_system_settings()["modules"]["deadline"]
@classmethod
def apply_settings(cls, project_settings, system_settings):
cls.render_settings = project_settings["maya"]["RenderSettings"]
manager = ModulesManager()
self.deadline_module = manager.modules_by_name["deadline"]
def create(self, subset_name, instance_data, pre_create_data):
# Only allow a single render instance to exist
if self._get_singleton_node():
raise CreatorError("A Render instance already exists - only "
"one can be configured.")
if not deadline_settings["enabled"]:
self.deadline_servers = {}
return
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"])
super(CreateVRayScene, self).create(subset_name,
instance_data,
pre_create_data)
try:
default_servers = deadline_settings["deadline_urls"]
project_servers = (
self._project_settings["deadline"]["deadline_servers"]
)
self.deadline_servers = {
k: default_servers[k]
for k in project_servers
if k in default_servers
}
# Apply default project render settings on create
if self.render_settings.get("apply_render_settings"):
lib_rendersettings.RenderSettings().set_default_renderer_settings()
if not self.deadline_servers:
self.deadline_servers = default_servers
def get_instance_attr_defs(self):
"""Create instance settings."""
except AttributeError:
# Handle situation were we had only one url for deadline.
# get default deadline webservice url from deadline module
self.deadline_servers = self.deadline_module.deadline_urls
def process(self):
"""Entry point."""
exists = cmds.ls(self.name)
if exists:
return cmds.warning("%s already exists." % exists[0])
use_selection = self.options.get("useSelection")
with lib.undo_chunk():
self._create_vray_instance_settings()
self.instance = super(CreateVRayScene, self).process()
index = 1
namespace_name = "_{}".format(str(self.instance))
try:
cmds.namespace(rm=namespace_name)
except RuntimeError:
# namespace is not empty, so we leave it untouched
pass
while(cmds.namespace(exists=namespace_name)):
namespace_name = "_{}{}".format(str(self.instance), index)
index += 1
namespace = cmds.namespace(add=namespace_name)
# add Deadline server selection list
if self.deadline_servers:
cmds.scriptJob(
attributeChange=[
"{}.deadlineServers".format(self.instance),
self._deadline_webservice_changed
])
# create namespace with instance
layers = self._rs.getRenderLayers()
if use_selection:
print(">>> processing existing layers")
sets = []
for layer in layers:
print(" - creating set for {}".format(layer.name()))
render_set = cmds.sets(
n="{}:{}".format(namespace, layer.name()))
sets.append(render_set)
cmds.sets(sets, forceElement=self.instance)
# if no render layers are present, create default one with
# asterix selector
if not layers:
render_layer = self._rs.createRenderLayer('Main')
collection = render_layer.createCollection("defaultCollection")
collection.getSelector().setPattern('*')
def _deadline_webservice_changed(self):
"""Refresh Deadline server dependent options."""
# get selected server
from maya import cmds
webservice = self.deadline_servers[
self.server_aliases[
cmds.getAttr("{}.deadlineServers".format(self.instance))
]
return [
BoolDef("vraySceneMultipleFiles",
label="V-Ray Scene Multiple Files",
default=False),
BoolDef("exportOnFarm",
label="Export on farm",
default=False)
]
pools = self.deadline_module.get_deadline_pools(webservice)
cmds.deleteAttr("{}.primaryPool".format(self.instance))
cmds.deleteAttr("{}.secondaryPool".format(self.instance))
cmds.addAttr(self.instance, longName="primaryPool",
attributeType="enum",
enumName=":".join(pools))
cmds.addAttr(self.instance, longName="secondaryPool",
attributeType="enum",
enumName=":".join(["-"] + pools))
def _create_vray_instance_settings(self):
# get pools
pools = []
system_settings = get_system_settings()["modules"]
deadline_enabled = system_settings["deadline"]["enabled"]
muster_enabled = system_settings["muster"]["enabled"]
muster_url = system_settings["muster"]["MUSTER_REST_URL"]
if deadline_enabled and muster_enabled:
self.log.error(
"Both Deadline and Muster are enabled. " "Cannot support both."
)
raise CreatorError("Both Deadline and Muster are enabled")
self.server_aliases = self.deadline_servers.keys()
self.data["deadlineServers"] = self.server_aliases
if deadline_enabled:
# if default server is not between selected, use first one for
# initial list of pools.
try:
deadline_url = self.deadline_servers["default"]
except KeyError:
deadline_url = [
self.deadline_servers[k]
for k in self.deadline_servers.keys()
][0]
pool_names = self.deadline_module.get_deadline_pools(deadline_url)
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")
self._load_credentials()
self.log.info(">>> Getting pools ...")
try:
pools = self._get_muster_pools()
except requests.exceptions.HTTPError as e:
if e.startswith("401"):
self.log.warning("access token expired")
self._show_login()
raise CreatorError("Access token expired")
except requests.exceptions.ConnectionError:
self.log.error("Cannot connect to Muster API endpoint.")
raise CreatorError("Cannot connect to {}".format(muster_url))
pool_names = []
for pool in pools:
self.log.info(" - pool: {}".format(pool["name"]))
pool_names.append(pool["name"])
self.data["primaryPool"] = pool_names
self.data["suspendPublishJob"] = False
self.data["priority"] = 50
self.data["whitelist"] = False
self.data["machineList"] = ""
self.data["vraySceneMultipleFiles"] = False
self.options = {"useSelection": False} # Force no content
def _load_credentials(self):
"""Load Muster credentials.
Load Muster credentials from file and set ``MUSTER_USER``,
``MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from presets.
Raises:
CreatorError: If loaded credentials are invalid.
AttributeError: If ``MUSTER_REST_URL`` is not set.
"""
app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
file_name = "muster_cred.json"
fpath = os.path.join(app_dir, file_name)
file = open(fpath, "r")
muster_json = json.load(file)
self._token = muster_json.get("token", None)
if not self._token:
self._show_login()
raise CreatorError("Invalid access token for Muster")
file.close()
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if not self.MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
def _get_muster_pools(self):
"""Get render pools from Muster.
Raises:
CreatorError: If pool list cannot be obtained from Muster.
"""
params = {"authToken": self._token}
api_entry = "/api/pools/list"
response = requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
if response.status_code != 200:
if response.status_code == 401:
self.log.warning("Authentication token expired.")
self._show_login()
else:
self.log.error(
("Cannot get pools from "
"Muster: {}").format(response.status_code)
)
raise CreatorError("Cannot get pools from Muster")
try:
pools = response.json()["ResponseData"]["pools"]
except ValueError as e:
self.log.error("Invalid response from Muster server {}".format(e))
raise CreatorError("Invalid response from Muster server")
return pools
def _show_login(self):
# authentication token expired so we need to login to Muster
# again to get it. We use Pype API call to show login window.
api_url = "{}/muster/show_login".format(
os.environ["OPENPYPE_WEBSERVER_URL"])
self.log.debug(api_url)
login_response = requests_get(api_url, timeout=1)
if login_response.status_code != 200:
self.log.error("Cannot show login form to Muster")
raise CreatorError("Cannot show login form to Muster")

View file

@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating workfiles."""
from openpype.pipeline import CreatedInstance, AutoCreator
from openpype.client import get_asset_by_name
from openpype.hosts.maya.api import plugin
from maya import cmds
class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator):
"""Workfile auto-creator."""
identifier = "io.openpype.creators.maya.workfile"
label = "Workfile"
family = "workfile"
icon = "fa5.file"
default_variant = "Main"
def create(self):
variant = self.default_variant
current_instance = next(
(
instance for instance in self.create_context.instances
if instance.creator_identifier == self.identifier
), None)
project_name = self.project_name
asset_name = self.create_context.get_current_asset_name()
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
if current_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": variant
}
data.update(
self.get_dynamic_data(
variant, task_name, asset_doc,
project_name, host_name, current_instance)
)
self.log.info("Auto-creating workfile instance...")
current_instance = CreatedInstance(
self.family, subset_name, data, self
)
self._add_instance_to_context(current_instance)
elif (
current_instance["asset"] != asset_name
or current_instance["task"] != task_name
):
# Update instance context if is not the same
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
current_instance["asset"] = asset_name
current_instance["task"] = task_name
current_instance["subset"] = subset_name
def collect_instances(self):
self.cache_subsets(self.collection_shared_data)
cached_subsets = self.collection_shared_data["maya_cached_subsets"]
for node in cached_subsets.get(self.identifier, []):
node_data = self.read_instance_node(node)
created_instance = CreatedInstance.from_existing(node_data, self)
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
data = created_inst.data_to_store()
node = data.get("instance_node")
if not node:
node = self.create_node()
created_inst["instance_node"] = node
data = created_inst.data_to_store()
self.imprint_instance_node(node, data)
def create_node(self):
node = cmds.sets(empty=True, name="workfileMain")
cmds.setAttr(node + ".hiddenInOutliner", True)
return node

View file

@ -1,10 +1,10 @@
from openpype.hosts.maya.api import plugin
class CreateXgen(plugin.Creator):
class CreateXgen(plugin.MayaCreator):
"""Xgen"""
name = "xgen"
identifier = "io.openpype.creators.maya.xgen"
label = "Xgen"
family = "xgen"
icon = "pagelines"

View file

@ -1,15 +1,14 @@
from collections import OrderedDict
from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.lib import NumberDef
class CreateYetiCache(plugin.Creator):
class CreateYetiCache(plugin.MayaCreator):
"""Output for procedural plugin nodes of Yeti """
name = "yetiDefault"
identifier = "io.openpype.creators.maya.yeticache"
label = "Yeti Cache"
family = "yeticache"
icon = "pagelines"
@ -17,14 +16,23 @@ class CreateYetiCache(plugin.Creator):
def __init__(self, *args, **kwargs):
super(CreateYetiCache, self).__init__(*args, **kwargs)
self.data["preroll"] = 0
defs = [
NumberDef("preroll",
label="Preroll",
minimum=0,
default=0,
decimals=0)
]
# Add animation data without step and handles
anim_data = lib.collect_animation_data()
anim_data.pop("step")
anim_data.pop("handleStart")
anim_data.pop("handleEnd")
self.data.update(anim_data)
defs.extend(lib.collect_animation_defs())
remove = {"step", "handleStart", "handleEnd"}
defs = [attr_def for attr_def in defs if attr_def.key not in remove]
# Add samples
self.data["samples"] = 3
# Add samples after frame range
defs.append(
NumberDef("samples",
label="Samples",
default=3,
decimals=0)
)

View file

@ -6,18 +6,22 @@ from openpype.hosts.maya.api import (
)
class CreateYetiRig(plugin.Creator):
class CreateYetiRig(plugin.MayaCreator):
"""Output for procedural plugin nodes ( Yeti / XGen / etc)"""
identifier = "io.openpype.creators.maya.yetirig"
label = "Yeti Rig"
family = "yetiRig"
icon = "usb"
def process(self):
def create(self, subset_name, instance_data, pre_create_data):
with lib.undo_chunk():
instance = super(CreateYetiRig, self).process()
instance = super(CreateYetiRig, self).create(subset_name,
instance_data,
pre_create_data)
instance_node = instance.get("instance_node")
self.log.info("Creating Rig instance set up ...")
input_meshes = cmds.sets(name="input_SET", empty=True)
cmds.sets(input_meshes, forceElement=instance)
cmds.sets(input_meshes, forceElement=instance_node)

View file

@ -29,7 +29,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
color = "orange"
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
from maya import cmds
with lib.maintained_selection():
file_url = self.prepare_root_value(self.fname,
@ -113,8 +113,8 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
# region compute lookup
nodes_by_id = defaultdict(list)
for n in nodes:
nodes_by_id[lib.get_id(n)].append(n)
for node in nodes:
nodes_by_id[lib.get_id(node)].append(node)
lib.apply_attributes(attributes, nodes_by_id)
def _get_nodes_with_shader(self, shader_nodes):
@ -125,14 +125,16 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
Returns
<list> node names
"""
import maya.cmds as cmds
from maya import cmds
nodes_list = []
for shader in shader_nodes:
connections = cmds.listConnections(cmds.listHistory(shader, f=1),
future = cmds.listHistory(shader, future=True)
connections = cmds.listConnections(future,
type='mesh')
if connections:
for connection in connections:
nodes_list.extend(cmds.listRelatives(connection,
shapes=True))
return nodes_list
# Ensure unique entries only to optimize query and results
connections = list(set(connections))
return cmds.listRelatives(connections,
shapes=True,
fullPath=True) or []
return []

View file

@ -221,6 +221,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
self._lock_camera_transforms(members)
def _post_process_rig(self, name, namespace, context, options):
nodes = self[:]
create_rig_animation_instance(
nodes, context, namespace, options=options, log=self.log

View file

@ -0,0 +1,17 @@
import pyblish.api
from maya import cmds
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file."""
order = pyblish.api.CollectorOrder - 0.4
label = "Maya Current File"
hosts = ['maya']
families = ["workfile"]
def process(self, context):
"""Inject the current working file"""
context.data['currentFile'] = cmds.file(query=True, sceneName=True)

View file

@ -172,7 +172,7 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
"""Collects inputs from nodes in renderlayer, incl. shaders + camera"""
# Get the renderlayer
renderlayer = instance.data.get("setMembers")
renderlayer = instance.data.get("renderlayer")
if renderlayer == "defaultRenderLayer":
# Assume all loaded containers in the scene are inputs

View file

@ -1,12 +1,11 @@
from maya import cmds
import pyblish.api
import json
from openpype.hosts.maya.api.lib import get_all_children
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by objectSet and pre-defined attribute
class CollectNewInstances(pyblish.api.InstancePlugin):
"""Gather members for instances and pre-defined attribute
This collector takes into account assets that are associated with
an objectSet and marked with a unique identifier;
@ -25,119 +24,70 @@ class CollectInstances(pyblish.api.ContextPlugin):
"""
label = "Collect Instances"
label = "Collect New Instance Data"
order = pyblish.api.CollectorOrder
hosts = ["maya"]
def process(self, context):
def process(self, instance):
objectset = cmds.ls("*.id", long=True, type="objectSet",
recursive=True, objectsOnly=True)
objset = instance.data.get("instance_node")
if not objset:
self.log.debug("Instance has no `instance_node` data")
context.data['objectsets'] = objectset
for objset in objectset:
if not cmds.attributeQuery("id", node=objset, exists=True):
continue
id_attr = "{}.id".format(objset)
if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
continue
# The developer is responsible for specifying
# the family of each instance.
has_family = cmds.attributeQuery("family",
node=objset,
exists=True)
assert has_family, "\"%s\" was missing a family" % objset
members = cmds.sets(objset, query=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
self.log.info("Creating instance for {}".format(objset))
data = dict()
# Apply each user defined attribute as data
for attr in cmds.listAttr(objset, userDefined=True) or list():
try:
value = cmds.getAttr("%s.%s" % (objset, attr))
except Exception:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
# temporary translation of `active` to `publish` until the issue has
# been resolved, https://github.com/pyblish/pyblish-base/issues/307
if "active" in data:
data["publish"] = data["active"]
# TODO: We might not want to do this in the future
# Merge creator attributes into instance.data so that backwards compatible
# code still runs as expected
creator_attributes = instance.data.get("creator_attributes", {})
if creator_attributes:
instance.data.update(creator_attributes)
members = cmds.sets(objset, query=True) or []
if members:
# Collect members
members = cmds.ls(members, long=True) or []
dag_members = cmds.ls(members, type="dagNode", long=True)
children = get_all_children(dag_members)
children = cmds.ls(children, noIntermediate=True, long=True)
parents = []
if data.get("includeParentHierarchy", True):
# If `includeParentHierarchy` then include the parents
# so they will also be picked up in the instance by validators
parents = self.get_all_parents(members)
parents = (
self.get_all_parents(members)
if creator_attributes.get("includeParentHierarchy", True)
else []
)
members_hierarchy = list(set(members + children + parents))
if 'families' not in data:
data['families'] = [data.get('family')]
# Create the instance
instance = context.create_instance(objset)
instance[:] = members_hierarchy
instance.data["objset"] = objset
# Store the exact members of the object set
instance.data["setMembers"] = members
elif instance.data["family"] != "workfile":
self.log.warning("Empty instance: \"%s\" " % objset)
# Store the exact members of the object set
instance.data["setMembers"] = members
# Define nice label
name = cmds.ls(objset, long=False)[0] # use short name
label = "{0} ({1})".format(name,
data["asset"])
# TODO: This might make more sense as a separate collector
# Convert frame values to integers
for attr_name in (
"handleStart", "handleEnd", "frameStart", "frameEnd",
):
value = instance.data.get(attr_name)
if value is not None:
instance.data[attr_name] = int(value)
# Append start frame and end frame to label if present
if "frameStart" and "frameEnd" in data:
# Take handles from context if not set locally on the instance
for key in ["handleStart", "handleEnd"]:
if key not in data:
data[key] = context.data[key]
# Append start frame and end frame to label if present
if "frameStart" in instance.data and "frameEnd" in instance.data:
# Take handles from context if not set locally on the instance
for key in ["handleStart", "handleEnd"]:
if key not in instance.data:
value = instance.context.data[key]
if value is not None:
value = int(value)
instance.data[key] = value
data["frameStartHandle"] = data["frameStart"] - data["handleStart"] # noqa: E501
data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] # noqa: E501
label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
int(data["frameEndHandle"]))
instance.data["label"] = label
instance.data.update(data)
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
self.log.info("Found: \"%s\" " % instance.data["name"])
self.log.debug(
"DATA: {} ".format(json.dumps(instance.data, indent=4)))
def sort_by_family(instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))
# Sort/grouped by family (preserving local index)
context[:] = sorted(context, key=sort_by_family)
return context
instance.data["frameStartHandle"] = int(
instance.data["frameStart"] - instance.data["handleStart"]
)
instance.data["frameEndHandle"] = int(
instance.data["frameEnd"] + instance.data["handleEnd"]
)
def get_all_parents(self, nodes):
"""Get all parents by using string operations (optimization)

View file

@ -285,17 +285,17 @@ class CollectLook(pyblish.api.InstancePlugin):
instance: Instance to collect.
"""
self.log.info("Looking for look associations "
self.log.debug("Looking for look associations "
"for %s" % instance.data['name'])
# Discover related object sets
self.log.info("Gathering sets ...")
self.log.debug("Gathering sets ...")
sets = self.collect_sets(instance)
# Lookup set (optimization)
instance_lookup = set(cmds.ls(instance, long=True))
self.log.info("Gathering set relations ...")
self.log.debug("Gathering set relations ...")
# Ensure iteration happen in a list so we can remove keys from the
# dict within the loop
@ -308,7 +308,7 @@ class CollectLook(pyblish.api.InstancePlugin):
# if node is specified as renderer node type, it will be
# serialized with its attributes.
if cmds.nodeType(obj_set) in RENDERER_NODE_TYPES:
self.log.info("- {} is {}".format(
self.log.debug("- {} is {}".format(
obj_set, cmds.nodeType(obj_set)))
node_attrs = []
@ -354,13 +354,13 @@ class CollectLook(pyblish.api.InstancePlugin):
# Remove sets that didn't have any members assigned in the end
# Thus the data will be limited to only what we need.
self.log.info("obj_set {}".format(sets[obj_set]))
self.log.debug("obj_set {}".format(sets[obj_set]))
if not sets[obj_set]["members"]:
self.log.info(
"Removing redundant set information: {}".format(obj_set))
sets.pop(obj_set, None)
self.log.info("Gathering attribute changes to instance members..")
self.log.debug("Gathering attribute changes to instance members..")
attributes = self.collect_attributes_changed(instance)
# Store data on the instance
@ -433,14 +433,14 @@ class CollectLook(pyblish.api.InstancePlugin):
for node_type in all_supported_nodes:
files.extend(cmds.ls(history, type=node_type, long=True))
self.log.info("Collected file nodes:\n{}".format(files))
self.log.debug("Collected file nodes:\n{}".format(files))
# Collect textures if any file nodes are found
instance.data["resources"] = []
for n in files:
for res in self.collect_resources(n):
instance.data["resources"].append(res)
self.log.info("Collected resources: {}".format(
self.log.debug("Collected resources: {}".format(
instance.data["resources"]))
# Log warning when no relevant sets were retrieved for the look.
@ -536,7 +536,7 @@ class CollectLook(pyblish.api.InstancePlugin):
# Collect changes to "custom" attributes
node_attrs = get_look_attrs(node)
self.log.info(
self.log.debug(
"Node \"{0}\" attributes: {1}".format(node, node_attrs)
)

View file

@ -17,10 +17,12 @@ class CollectMayaSceneTime(pyblish.api.InstancePlugin):
def process(self, instance):
instance.data.update({
"frameStart": cmds.playbackOptions(query=True, minTime=True),
"frameEnd": cmds.playbackOptions(query=True, maxTime=True),
"frameStartHandle": cmds.playbackOptions(query=True,
animationStartTime=True),
"frameEndHandle": cmds.playbackOptions(query=True,
animationEndTime=True)
"frameStart": int(
cmds.playbackOptions(query=True, minTime=True)),
"frameEnd": int(
cmds.playbackOptions(query=True, maxTime=True)),
"frameStartHandle": int(
cmds.playbackOptions(query=True, animationStartTime=True)),
"frameEndHandle": int(
cmds.playbackOptions(query=True, animationEndTime=True))
})

View file

@ -16,14 +16,16 @@ class CollectPointcache(pyblish.api.InstancePlugin):
instance.data["families"].append("publish.farm")
proxy_set = None
for node in instance.data["setMembers"]:
if cmds.nodeType(node) != "objectSet":
continue
members = cmds.sets(node, query=True)
if members is None:
self.log.warning("Skipped empty objectset: \"%s\" " % node)
continue
for node in cmds.ls(instance.data["setMembers"],
exactType="objectSet"):
# Find proxy_SET objectSet in the instance for proxy meshes
if node.endswith("proxy_SET"):
members = cmds.sets(node, query=True)
if members is None:
self.log.debug("Skipped empty proxy_SET: \"%s\" " % node)
continue
self.log.debug("Found proxy set: {}".format(node))
proxy_set = node
instance.data["proxy"] = []
instance.data["proxyRoots"] = []
@ -36,8 +38,9 @@ class CollectPointcache(pyblish.api.InstancePlugin):
cmds.listRelatives(member, shapes=True, fullPath=True)
)
self.log.debug(
"proxy members: {}".format(instance.data["proxy"])
"Found proxy members: {}".format(instance.data["proxy"])
)
break
if proxy_set:
instance.remove(proxy_set)

View file

@ -39,27 +39,29 @@ Provides:
instance -> pixelAspect
"""
import re
import os
import platform
import json
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
import pyblish.api
from openpype.pipeline import KnownPublishError
from openpype.lib import get_formatted_current_time
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501
from openpype.hosts.maya.api.lib_renderproducts import (
get as get_layer_render_products,
UnsupportedRendererException
)
from openpype.hosts.maya.api import lib
class CollectMayaRender(pyblish.api.ContextPlugin):
class CollectMayaRender(pyblish.api.InstancePlugin):
"""Gather all publishable render layers from renderSetup."""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["maya"]
families = ["renderlayer"]
label = "Collect Render Layers"
sync_workfile_version = False
@ -69,387 +71,250 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"underscore": "_"
}
def process(self, context):
"""Entry point to collector."""
render_instance = None
def process(self, instance):
for instance in context:
if "rendering" in instance.data["families"]:
render_instance = instance
render_instance.data["remove"] = True
# TODO: Re-add force enable of workfile instance?
# TODO: Re-add legacy layer support with LAYER_ prefix but in Creator
# TODO: Set and collect active state of RenderLayer in Creator using
# renderlayer.isRenderable()
context = instance.context
# make sure workfile instance publishing is enabled
if "workfile" in instance.data["families"]:
instance.data["publish"] = True
if not render_instance:
self.log.info(
"No render instance found, skipping render "
"layer collection."
)
return
render_globals = render_instance
collected_render_layers = render_instance.data["setMembers"]
layer = instance.data["transientData"]["layer"]
objset = instance.data.get("instance_node")
filepath = context.data["currentFile"].replace("\\", "/")
asset = legacy_io.Session["AVALON_ASSET"]
workspace = context.data["workspaceDir"]
# Retrieve render setup layers
rs = renderSetup.instance()
maya_render_layers = {
layer.name(): layer for layer in rs.getRenderLayers()
# check if layer is renderable
if not layer.isRenderable():
msg = "Render layer [ {} ] is not " "renderable".format(
layer.name()
)
self.log.warning(msg)
# detect if there are sets (subsets) to attach render to
sets = cmds.sets(objset, query=True) or []
attach_to = []
for s in sets:
if not cmds.attributeQuery("family", node=s, exists=True):
continue
attach_to.append(
{
"version": None, # we need integrator for that
"subset": s,
"family": cmds.getAttr("{}.family".format(s)),
}
)
self.log.info(" -> attach render to: {}".format(s))
layer_name = layer.name()
# collect all frames we are expecting to be rendered
# return all expected files for all cameras and aovs in given
# frame range
try:
layer_render_products = get_layer_render_products(layer.name())
except UnsupportedRendererException as exc:
raise KnownPublishError(exc)
render_products = layer_render_products.layer_data.products
assert render_products, "no render products generated"
expected_files = []
multipart = False
for product in render_products:
if product.multipart:
multipart = True
product_name = product.productName
if product.camera and layer_render_products.has_camera_token():
product_name = "{}{}".format(
product.camera,
"_{}".format(product_name) if product_name else "")
expected_files.append(
{
product_name: layer_render_products.get_files(
product)
})
has_cameras = any(product.camera for product in render_products)
assert has_cameras, "No render cameras found."
self.log.info("multipart: {}".format(
multipart))
assert expected_files, "no file names were generated, this is a bug"
self.log.info(
"expected files: {}".format(
json.dumps(expected_files, indent=4, sort_keys=True)
)
)
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV
# (considered to be subset on its own) to another subset
if attach_to:
assert isinstance(expected_files, list), (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported"
)
# append full path
aov_dict = {}
image_directory = os.path.join(
cmds.workspace(query=True, rootDirectory=True),
cmds.workspace(fileRuleEntry="images")
)
# replace relative paths with absolute. Render products are
# returned as list of dictionaries.
publish_meta_path = None
for aov in expected_files:
full_paths = []
aov_first_key = list(aov.keys())[0]
for file in aov[aov_first_key]:
full_path = os.path.join(image_directory, file)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
publish_meta_path = os.path.dirname(full_path)
aov_dict[aov_first_key] = full_paths
full_exp_files = [aov_dict]
self.log.info(full_exp_files)
if publish_meta_path is None:
raise KnownPublishError("Unable to detect any expected output "
"images for: {}. Make sure you have a "
"renderable camera and a valid frame "
"range set for your renderlayer."
"".format(instance.name))
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
frame_end_render = int(self.get_render_attribute(
"endFrame", layer=layer_name))
if (int(context.data["frameStartHandle"]) == frame_start_render
and int(context.data["frameEndHandle"]) == frame_end_render): # noqa: W503, E501
handle_start = context.data["handleStart"]
handle_end = context.data["handleEnd"]
frame_start = context.data["frameStart"]
frame_end = context.data["frameEnd"]
frame_start_handle = context.data["frameStartHandle"]
frame_end_handle = context.data["frameEndHandle"]
else:
handle_start = 0
handle_end = 0
frame_start = frame_start_render
frame_end = frame_end_render
frame_start_handle = frame_start_render
frame_end_handle = frame_end_render
# find common path to store metadata
# so if image prefix is branching to many directories
# metadata file will be located in top-most common
# directory.
# TODO: use `os.path.commonpath()` after switch to Python 3
publish_meta_path = os.path.normpath(publish_meta_path)
common_publish_meta_path = os.path.splitdrive(
publish_meta_path)[0]
if common_publish_meta_path:
common_publish_meta_path += os.path.sep
for part in publish_meta_path.replace(
common_publish_meta_path, "").split(os.path.sep):
common_publish_meta_path = os.path.join(
common_publish_meta_path, part)
if part == layer_name:
break
# TODO: replace this terrible linux hotfix with real solution :)
if platform.system().lower() in ["linux", "darwin"]:
common_publish_meta_path = "/" + common_publish_meta_path
self.log.info(
"Publish meta path: {}".format(common_publish_meta_path))
# Get layer specific settings, might be overrides
colorspace_data = lib.get_color_management_preferences()
data = {
"farm": True,
"attachTo": attach_to,
"multipartExr": multipart,
"review": instance.data.get("review") or False,
# Frame range
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_start_handle,
"frameEndHandle": frame_end_handle,
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
# Renderlayer
"renderer": self.get_render_attribute(
"currentRenderer", layer=layer_name).lower(),
"setMembers": layer._getLegacyNodeName(), # legacy renderlayer
"renderlayer": layer_name,
# todo: is `time` and `author` still needed?
"time": get_formatted_current_time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath,
"expectedFiles": full_exp_files,
"publishRenderMetadataFolder": common_publish_meta_path,
"renderProducts": layer_render_products,
"resolutionWidth": lib.get_attr_in_layer(
"defaultResolution.width", layer=layer_name
),
"resolutionHeight": lib.get_attr_in_layer(
"defaultResolution.height", layer=layer_name
),
"pixelAspect": lib.get_attr_in_layer(
"defaultResolution.pixelAspect", layer=layer_name
),
# todo: Following are likely not needed due to collecting from the
# instance itself if they are attribute definitions
"tileRendering": instance.data.get("tileRendering") or False, # noqa: E501
"tilesX": instance.data.get("tilesX") or 2,
"tilesY": instance.data.get("tilesY") or 2,
"convertToScanline": instance.data.get(
"convertToScanline") or False,
"useReferencedAovs": instance.data.get(
"useReferencedAovs") or instance.data.get(
"vrayUseReferencedAovs") or False,
"aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501
"renderSetupIncludeLights": instance.data.get(
"renderSetupIncludeLights"
),
"colorspaceConfig": colorspace_data["config"],
"colorspaceDisplay": colorspace_data["display"],
"colorspaceView": colorspace_data["view"],
}
for layer in collected_render_layers:
if layer.startswith("LAYER_"):
# this is support for legacy mode where render layers
# started with `LAYER_` prefix.
layer_name_pattern = r"^LAYER_(.*)"
else:
# new way is to prefix render layer name with instance
# namespace.
layer_name_pattern = r"^.+:(.*)"
if self.sync_workfile_version:
data["version"] = context.data["version"]
for instance in context:
if instance.data['family'] == "workfile":
instance.data["version"] = context.data["version"]
# todo: We should have a more explicit way to link the renderlayer
match = re.match(layer_name_pattern, layer)
if not match:
msg = "Invalid layer name in set [ {} ]".format(layer)
self.log.warning(msg)
continue
expected_layer_name = match.group(1)
self.log.info("Processing '{}' as layer [ {} ]"
"".format(layer, expected_layer_name))
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = "Render layer [ {} ] is not in " "Render Setup".format(
expected_layer_name
)
self.log.warning(msg)
continue
# check if layer is renderable
if not maya_render_layers[expected_layer_name].isRenderable():
msg = "Render layer [ {} ] is not " "renderable".format(
expected_layer_name
)
self.log.warning(msg)
continue
# detect if there are sets (subsets) to attach render to
sets = cmds.sets(layer, query=True) or []
attach_to = []
for s in sets:
if not cmds.attributeQuery("family", node=s, exists=True):
continue
attach_to.append(
{
"version": None, # we need integrator for that
"subset": s,
"family": cmds.getAttr("{}.family".format(s)),
}
)
self.log.info(" -> attach render to: {}".format(s))
layer_name = "rs_{}".format(expected_layer_name)
# collect all frames we are expecting to be rendered
# return all expected files for all cameras and aovs in given
# frame range
layer_render_products = get_layer_render_products(layer_name)
render_products = layer_render_products.layer_data.products
assert render_products, "no render products generated"
exp_files = []
multipart = False
for product in render_products:
if product.multipart:
multipart = True
product_name = product.productName
if product.camera and layer_render_products.has_camera_token():
product_name = "{}{}".format(
product.camera,
"_" + product_name if product_name else "")
exp_files.append(
{
product_name: layer_render_products.get_files(
product)
})
has_cameras = any(product.camera for product in render_products)
assert has_cameras, "No render cameras found."
self.log.info("multipart: {}".format(
multipart))
assert exp_files, "no file names were generated, this is a bug"
self.log.info(
"expected files: {}".format(
json.dumps(exp_files, indent=4, sort_keys=True)
)
)
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV
# (considered to be subset on its own) to another subset
if attach_to:
assert isinstance(exp_files, list), (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported"
)
# append full path
aov_dict = {}
image_directory = os.path.join(
cmds.workspace(query=True, rootDirectory=True),
cmds.workspace(fileRuleEntry="images")
)
# replace relative paths with absolute. Render products are
# returned as list of dictionaries.
publish_meta_path = None
for aov in exp_files:
full_paths = []
aov_first_key = list(aov.keys())[0]
for file in aov[aov_first_key]:
full_path = os.path.join(image_directory, file)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
publish_meta_path = os.path.dirname(full_path)
aov_dict[aov_first_key] = full_paths
full_exp_files = [aov_dict]
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
frame_end_render = int(self.get_render_attribute(
"endFrame", layer=layer_name))
if (int(context.data['frameStartHandle']) == frame_start_render
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
handle_start = context.data['handleStart']
handle_end = context.data['handleEnd']
frame_start = context.data['frameStart']
frame_end = context.data['frameEnd']
frame_start_handle = context.data['frameStartHandle']
frame_end_handle = context.data['frameEndHandle']
else:
handle_start = 0
handle_end = 0
frame_start = frame_start_render
frame_end = frame_end_render
frame_start_handle = frame_start_render
frame_end_handle = frame_end_render
# find common path to store metadata
# so if image prefix is branching to many directories
# metadata file will be located in top-most common
# directory.
# TODO: use `os.path.commonpath()` after switch to Python 3
publish_meta_path = os.path.normpath(publish_meta_path)
common_publish_meta_path = os.path.splitdrive(
publish_meta_path)[0]
if common_publish_meta_path:
common_publish_meta_path += os.path.sep
for part in publish_meta_path.replace(
common_publish_meta_path, "").split(os.path.sep):
common_publish_meta_path = os.path.join(
common_publish_meta_path, part)
if part == expected_layer_name:
break
# TODO: replace this terrible linux hotfix with real solution :)
if platform.system().lower() in ["linux", "darwin"]:
common_publish_meta_path = "/" + common_publish_meta_path
self.log.info(
"Publish meta path: {}".format(common_publish_meta_path))
self.log.info(full_exp_files)
self.log.info("collecting layer: {}".format(layer_name))
# Get layer specific settings, might be overrides
colorspace_data = lib.get_color_management_preferences()
data = {
"subset": expected_layer_name,
"attachTo": attach_to,
"setMembers": layer_name,
"multipartExr": multipart,
"review": render_instance.data.get("review") or False,
"publish": True,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_start_handle,
"frameEndHandle": frame_end_handle,
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
"renderer": self.get_render_attribute(
"currentRenderer", layer=layer_name).lower(),
# instance subset
"family": "renderlayer",
"families": ["renderlayer"],
"asset": asset,
"time": get_formatted_current_time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath,
"expectedFiles": full_exp_files,
"publishRenderMetadataFolder": common_publish_meta_path,
"renderProducts": layer_render_products,
"resolutionWidth": lib.get_attr_in_layer(
"defaultResolution.width", layer=layer_name
),
"resolutionHeight": lib.get_attr_in_layer(
"defaultResolution.height", layer=layer_name
),
"pixelAspect": lib.get_attr_in_layer(
"defaultResolution.pixelAspect", layer=layer_name
),
"tileRendering": render_instance.data.get("tileRendering") or False, # noqa: E501
"tilesX": render_instance.data.get("tilesX") or 2,
"tilesY": render_instance.data.get("tilesY") or 2,
"priority": render_instance.data.get("priority"),
"convertToScanline": render_instance.data.get(
"convertToScanline") or False,
"useReferencedAovs": render_instance.data.get(
"useReferencedAovs") or render_instance.data.get(
"vrayUseReferencedAovs") or False,
"aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501
"renderSetupIncludeLights": render_instance.data.get(
"renderSetupIncludeLights"
),
"colorspaceConfig": colorspace_data["config"],
"colorspaceDisplay": colorspace_data["display"],
"colorspaceView": colorspace_data["view"],
"strict_error_checking": render_instance.data.get(
"strict_error_checking", True
)
}
# Collect Deadline url if Deadline module is enabled
deadline_settings = (
context.data["system_settings"]["modules"]["deadline"]
)
if deadline_settings["enabled"]:
data["deadlineUrl"] = render_instance.data["deadlineUrl"]
if self.sync_workfile_version:
data["version"] = context.data["version"]
for instance in context:
if instance.data['family'] == "workfile":
instance.data["version"] = context.data["version"]
# handle standalone renderers
if render_instance.data.get("vrayScene") is True:
data["families"].append("vrayscene_render")
if render_instance.data.get("assScene") is True:
data["families"].append("assscene_render")
# Include (optional) global settings
# Get global overrides and translate to Deadline values
overrides = self.parse_options(str(render_globals))
data.update(**overrides)
# get string values for pools
primary_pool = overrides["renderGlobals"]["Pool"]
secondary_pool = overrides["renderGlobals"].get("SecondaryPool")
data["primaryPool"] = primary_pool
data["secondaryPool"] = secondary_pool
# Define nice label
label = "{0} ({1})".format(expected_layer_name, data["asset"])
label += " [{0}-{1}]".format(
int(data["frameStartHandle"]), int(data["frameEndHandle"])
)
instance = context.create_instance(expected_layer_name)
instance.data["label"] = label
instance.data["farm"] = True
instance.data.update(data)
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without.
Here's the kicker. These globals override defaults in the submission
integrator, but an empty value means no overriding is made.
Otherwise, Frames would override the default frames set under globals.
Args:
render_globals (str): collection of render globals
Returns:
dict: only overrides with values
"""
attributes = lib.read(render_globals)
options = {"renderGlobals": {}}
options["renderGlobals"]["Priority"] = attributes["priority"]
# Check for specific pools
pool_a, pool_b = self._discover_pools(attributes)
options["renderGlobals"].update({"Pool": pool_a})
if pool_b:
options["renderGlobals"].update({"SecondaryPool": pool_b})
# Machine list
machine_list = attributes["machineList"]
if machine_list:
key = "Whitelist" if attributes["whitelist"] else "Blacklist"
options["renderGlobals"][key] = machine_list
# Suspend publish job
state = "Suspended" if attributes["suspendPublishJob"] else "Active"
options["publishJobState"] = state
chunksize = attributes.get("framesPerTask", 1)
options["renderGlobals"]["ChunkSize"] = chunksize
# Define nice label
label = "{0} ({1})".format(layer_name, instance.data["asset"])
label += " [{0}-{1}]".format(
int(data["frameStartHandle"]), int(data["frameEndHandle"])
)
data["label"] = label
# Override frames should be False if extendFrames is False. This is
# to ensure it doesn't go off doing crazy unpredictable things
override_frames = False
extend_frames = attributes.get("extendFrames", False)
if extend_frames:
override_frames = attributes.get("overrideExistingFrame", False)
extend_frames = instance.data.get("extendFrames", False)
if not extend_frames:
instance.data["overrideExistingFrame"] = False
options["extendFrames"] = extend_frames
options["overrideExistingFrame"] = override_frames
maya_render_plugin = "MayaBatch"
options["mayaRenderPlugin"] = maya_render_plugin
return options
def _discover_pools(self, attributes):
pool_a = None
pool_b = None
# Check for specific pools
pool_b = []
if "primaryPool" in attributes:
pool_a = attributes["primaryPool"]
if "secondaryPool" in attributes:
pool_b = attributes["secondaryPool"]
else:
# Backwards compatibility
pool_str = attributes.get("pools", None)
if pool_str:
pool_a, pool_b = pool_str.split(";")
# Ensure empty entry token is caught
if pool_b == "-":
pool_b = None
return pool_a, pool_b
# Update the instance
instance.data.update(data)
@staticmethod
def get_render_attribute(attr, layer):

View file

@ -50,7 +50,7 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
result = []
# Collect all AOVs / Render Elements
layer = instance.data["setMembers"]
layer = instance.data["renderlayer"]
node_type = rp_node_types[renderer]
render_elements = cmds.ls(type=node_type)

View file

@ -19,7 +19,7 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin):
if "vrayscene_layer" in instance.data.get("families", []):
layer = instance.data.get("layer")
else:
layer = instance.data["setMembers"]
layer = instance.data["renderlayer"]
self.log.info("layer: {}".format(layer))
cameras = cmds.ls(type="camera", long=True)

View file

@ -18,14 +18,10 @@ class CollectReview(pyblish.api.InstancePlugin):
def process(self, instance):
self.log.debug('instance: {}'.format(instance))
task = legacy_io.Session["AVALON_TASK"]
# Get panel.
instance.data["panel"] = cmds.playblast(
activeEditor=True
).split("|")[-1]
).rsplit("|", 1)[-1]
# get cameras
members = instance.data['setMembers']
@ -34,11 +30,12 @@ class CollectReview(pyblish.api.InstancePlugin):
camera = cameras[0] if cameras else None
context = instance.context
objectset = context.data['objectsets']
objectset = {
i.data.get("instance_node") for i in context
}
# Convert enum attribute index to string for Display Lights.
index = instance.data.get("displayLights", 0)
display_lights = lib.DISPLAY_LIGHTS_VALUES[index]
# Collect display lights.
display_lights = instance.data.get("displayLights", "default")
if display_lights == "project_settings":
settings = instance.context.data["project_settings"]
settings = settings["maya"]["publish"]["ExtractPlayblast"]
@ -60,7 +57,7 @@ class CollectReview(pyblish.api.InstancePlugin):
burninDataMembers["focalLength"] = focal_length
# Account for nested instances like model.
reviewable_subsets = list(set(members) & set(objectset))
reviewable_subsets = list(set(members) & objectset)
if reviewable_subsets:
if len(reviewable_subsets) > 1:
raise KnownPublishError(
@ -97,7 +94,11 @@ class CollectReview(pyblish.api.InstancePlugin):
data["frameStart"] = instance.data["frameStart"]
data["frameEnd"] = instance.data["frameEnd"]
data['step'] = instance.data['step']
data['fps'] = instance.data['fps']
# this (with other time related data) should be set on
# representations. Once plugins like Extract Review start
# using representations, this should be removed from here
# as Extract Playblast is already adding fps to representation.
data['fps'] = context.data['fps']
data['review_width'] = instance.data['review_width']
data['review_height'] = instance.data['review_height']
data["isolate"] = instance.data["isolate"]
@ -112,6 +113,7 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data['remove'] = True
else:
task = legacy_io.Session["AVALON_TASK"]
legacy_subset_name = task + 'Review'
asset_doc = instance.context.data['assetEntity']
project_name = legacy_io.active_project()
@ -133,6 +135,11 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data["frameEndHandle"]
instance.data["displayLights"] = display_lights
instance.data["burninDataMembers"] = burninDataMembers
# this (with other time related data) should be set on
# representations. Once plugins like Extract Review start
# using representations, this should be removed from here
# as Extract Playblast is already adding fps to representation.
instance.data["fps"] = instance.context.data["fps"]
# make ftrack publishable
instance.data.setdefault("families", []).append('ftrack')

View file

@ -24,129 +24,91 @@ class CollectVrayScene(pyblish.api.InstancePlugin):
def process(self, instance):
"""Collector entry point."""
collected_render_layers = instance.data["setMembers"]
instance.data["remove"] = True
context = instance.context
_rs = renderSetup.instance()
# current_layer = _rs.getVisibleRenderLayer()
layer = instance.data["transientData"]["layer"]
layer_name = layer.name()
renderer = self.get_render_attribute("currentRenderer",
layer=layer_name)
if renderer != "vray":
self.log.warning("Layer '{}' renderer is not set to V-Ray".format(
layer_name
))
# collect all frames we are expecting to be rendered
renderer = cmds.getAttr(
"defaultRenderGlobals.currentRenderer"
).lower()
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
frame_end_render = int(self.get_render_attribute(
"endFrame", layer=layer_name))
if renderer != "vray":
raise AssertionError("Vray is not enabled.")
if (int(context.data['frameStartHandle']) == frame_start_render
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
maya_render_layers = {
layer.name(): layer for layer in _rs.getRenderLayers()
handle_start = context.data['handleStart']
handle_end = context.data['handleEnd']
frame_start = context.data['frameStart']
frame_end = context.data['frameEnd']
frame_start_handle = context.data['frameStartHandle']
frame_end_handle = context.data['frameEndHandle']
else:
handle_start = 0
handle_end = 0
frame_start = frame_start_render
frame_end = frame_end_render
frame_start_handle = frame_start_render
frame_end_handle = frame_end_render
# Get layer specific settings, might be overrides
data = {
"subset": layer_name,
"layer": layer_name,
# TODO: This likely needs fixing now
# Before refactor: cmds.sets(layer, q=True) or ["*"]
"setMembers": ["*"],
"review": False,
"publish": True,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_start_handle,
"frameEndHandle": frame_end_handle,
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
"renderer": renderer,
# instance subset
"family": "vrayscene_layer",
"families": ["vrayscene_layer"],
"time": get_formatted_current_time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": context.data["currentFile"].replace("\\", "/"),
"resolutionWidth": lib.get_attr_in_layer(
"defaultResolution.height", layer=layer_name
),
"resolutionHeight": lib.get_attr_in_layer(
"defaultResolution.width", layer=layer_name
),
"pixelAspect": lib.get_attr_in_layer(
"defaultResolution.pixelAspect", layer=layer_name
),
"priority": instance.data.get("priority"),
"useMultipleSceneFiles": instance.data.get(
"vraySceneMultipleFiles")
}
layer_list = []
for layer in collected_render_layers:
# every layer in set should start with `LAYER_` prefix
try:
expected_layer_name = re.search(r"^.+:(.*)", layer).group(1)
except IndexError:
msg = "Invalid layer name in set [ {} ]".format(layer)
self.log.warning(msg)
continue
instance.data.update(data)
self.log.info("processing %s" % layer)
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = "Render layer [ {} ] is not in " "Render Setup".format(
expected_layer_name
)
self.log.warning(msg)
continue
# check if layer is renderable
if not maya_render_layers[expected_layer_name].isRenderable():
msg = "Render layer [ {} ] is not " "renderable".format(
expected_layer_name
)
self.log.warning(msg)
continue
layer_name = "rs_{}".format(expected_layer_name)
self.log.debug(expected_layer_name)
layer_list.append(expected_layer_name)
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
frame_end_render = int(self.get_render_attribute(
"endFrame", layer=layer_name))
if (int(context.data['frameStartHandle']) == frame_start_render
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
handle_start = context.data['handleStart']
handle_end = context.data['handleEnd']
frame_start = context.data['frameStart']
frame_end = context.data['frameEnd']
frame_start_handle = context.data['frameStartHandle']
frame_end_handle = context.data['frameEndHandle']
else:
handle_start = 0
handle_end = 0
frame_start = frame_start_render
frame_end = frame_end_render
frame_start_handle = frame_start_render
frame_end_handle = frame_end_render
# Get layer specific settings, might be overrides
data = {
"subset": expected_layer_name,
"layer": layer_name,
"setMembers": cmds.sets(layer, q=True) or ["*"],
"review": False,
"publish": True,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_start_handle,
"frameEndHandle": frame_end_handle,
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer_name),
# instance subset
"family": "vrayscene_layer",
"families": ["vrayscene_layer"],
"asset": legacy_io.Session["AVALON_ASSET"],
"time": get_formatted_current_time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": context.data["currentFile"].replace("\\", "/"),
"resolutionWidth": lib.get_attr_in_layer(
"defaultResolution.height", layer=layer_name
),
"resolutionHeight": lib.get_attr_in_layer(
"defaultResolution.width", layer=layer_name
),
"pixelAspect": lib.get_attr_in_layer(
"defaultResolution.pixelAspect", layer=layer_name
),
"priority": instance.data.get("priority"),
"useMultipleSceneFiles": instance.data.get(
"vraySceneMultipleFiles")
}
# Define nice label
label = "{0} ({1})".format(expected_layer_name, data["asset"])
label += " [{0}-{1}]".format(
int(data["frameStartHandle"]), int(data["frameEndHandle"])
)
instance = context.create_instance(expected_layer_name)
instance.data["label"] = label
instance.data.update(data)
# Define nice label
label = "{0} ({1})".format(layer_name, instance.data["asset"])
label += " [{0}-{1}]".format(
int(data["frameStartHandle"]), int(data["frameEndHandle"])
)
instance.data["label"] = label
def get_render_attribute(self, attr, layer):
"""Get attribute from render options.

View file

@ -1,46 +1,30 @@
import os
import pyblish.api
from maya import cmds
from openpype.pipeline import legacy_io
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
class CollectWorkfileData(pyblish.api.InstancePlugin):
"""Inject data into Workfile instance"""
order = pyblish.api.CollectorOrder - 0.01
label = "Maya Workfile"
hosts = ['maya']
families = ["workfile"]
def process(self, context):
def process(self, instance):
"""Inject the current working file"""
current_file = cmds.file(query=True, sceneName=True)
context.data['currentFile'] = current_file
context = instance.context
current_file = instance.context.data['currentFile']
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
task = legacy_io.Session["AVALON_TASK"]
data = {}
# create instance
instance = context.create_instance(name=filename)
subset = 'workfile' + task.capitalize()
data.update({
"subset": subset,
"asset": os.getenv("AVALON_ASSET", None),
"label": subset,
"publish": True,
"family": 'workfile',
"families": ['workfile'],
data = { # noqa
"setMembers": [current_file],
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
"handleStart": context.data['handleStart'],
"handleEnd": context.data['handleEnd']
})
}
data['representations'] = [{
'name': ext.lstrip("."),
@ -50,8 +34,3 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
}]
instance.data.update(data)
self.log.info('Collected instance: {}'.format(file))
self.log.info('Scene path: {}'.format(current_file))
self.log.info('staging Dir: {}'.format(folder))
self.log.info('subset: {}'.format(subset))

View file

@ -31,7 +31,7 @@ class ExtractAssembly(publish.Extractor):
with open(json_path, "w") as filepath:
json.dump(instance.data["scenedata"], filepath, ensure_ascii=False)
self.log.info("Extracting point cache ..")
self.log.debug("Extracting pointcache ..")
cmds.select(instance.data["nodesHierarchy"])
# Run basic alembic exporter

View file

@ -106,7 +106,7 @@ class ExtractCameraMayaScene(publish.Extractor):
instance.context.data["project_settings"]["maya"]["ext_mapping"]
)
if ext_mapping:
self.log.info("Looking in settings for scene type ...")
self.log.debug("Looking in settings for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:

View file

@ -8,10 +8,12 @@ import tempfile
from openpype.lib import run_subprocess
from openpype.pipeline import publish
from openpype.pipeline.publish import OptionalPyblishPluginMixin
from openpype.hosts.maya.api import lib
class ExtractImportReference(publish.Extractor):
class ExtractImportReference(publish.Extractor,
OptionalPyblishPluginMixin):
"""
Extract the scene with imported reference.
@ -32,11 +34,14 @@ class ExtractImportReference(publish.Extractor):
cls.active = project_setting["deadline"]["publish"]["MayaSubmitDeadline"]["import_reference"] # noqa
def process(self, instance):
if not self.is_active(instance.data):
return
ext_mapping = (
instance.context.data["project_settings"]["maya"]["ext_mapping"]
)
if ext_mapping:
self.log.info("Looking in settings for scene type ...")
self.log.debug("Looking in settings for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:

Some files were not shown because too many files have changed in this diff.