Mirror of https://github.com/ynput/ayon-core.git, synced 2026-01-01 16:34:53 +01:00

Merge branch 'develop' into enhancement/OP-5600_Houdini-manage-colorspaces-in-review-ROP

Commit c92541f5d4: 195 changed files with 2433 additions and 1668 deletions

@@ -45,6 +45,9 @@ class OCIOEnvHook(PreLaunchHook):
         if config_data:
             ocio_path = config_data["path"]

+            if self.host_name in ["nuke", "hiero"]:
+                ocio_path = ocio_path.replace("\\", "/")
+
             self.log.info(
                 f"Setting OCIO environment to config path: {ocio_path}")

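Note on the Nuke/Hiero branch added above: both applications treat backslashes in OCIO paths as escape sequences, so the hook normalizes Windows separators before exporting the variable. A standalone illustration (the host name and path are made-up examples, not from the diff):

    # Illustration only; host_name and the path are invented for the demo.
    host_name = "nuke"
    ocio_path = "C:\\pipeline\\configs\\aces\\config.ocio"
    if host_name in ["nuke", "hiero"]:
        ocio_path = ocio_path.replace("\\", "/")
    print(ocio_path)  # C:/pipeline/configs/aces/config.ocio
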
@@ -164,7 +164,7 @@ class RenderCreator(Creator):
         api.get_stub().rename_item(comp_id,
                                    new_comp_name)

-    def apply_settings(self, project_settings, system_settings):
+    def apply_settings(self, project_settings):
         plugin_settings = (
             project_settings["aftereffects"]["create"]["RenderCreator"]
         )

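This is the first of several hunks in this commit that drop the `system_settings` argument from `apply_settings`; creator plugins now receive project settings only. A minimal sketch of a creator written against the new convention (the import path and settings keys mirror the hunk above; the class body is hypothetical, not this repo's plugin):

    from openpype.pipeline.create import Creator  # assumed import path


    class RenderCreatorSketch(Creator):
        """Hypothetical creator using the new one-argument signature."""

        def apply_settings(self, project_settings):
            plugin_settings = (
                project_settings["aftereffects"]["create"]["RenderCreator"]
            )
            # Copy whichever options the plugin exposes; key is illustrative.
            self.default_variants = plugin_settings.get(
                "default_variants", ["Main"])
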
@@ -138,7 +138,6 @@ class CollectAERender(publish.AbstractCollectRender):
             fam = "render.farm"
             if fam not in instance.families:
                 instance.families.append(fam)
-            instance.toBeRenderedOn = "deadline"
             instance.renderer = "aerender"
             instance.farm = True  # to skip integrate
             if "review" in instance.families:

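The removed `toBeRenderedOn` attribute was the old way to flag farm submission; the `farm` boolean kept above now tells downstream plugins to skip local integration. A hypothetical, self-contained collector showing the same flags through the generic `instance.data` interface (names are illustrative, not this repo's plugin):

    import pyblish.api


    class CollectFarmFlagSketch(pyblish.api.InstancePlugin):
        """Hypothetical collector marking an instance for farm rendering."""

        order = pyblish.api.CollectorOrder + 0.2
        label = "Collect Farm Flag (sketch)"

        def process(self, instance):
            families = instance.data.setdefault("families", [])
            if "render.farm" not in families:
                families.append("render.farm")
            # Equivalent of `instance.farm = True` in the hunk above:
            # integrators skip local extraction when this is set.
            instance.data["farm"] = True
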
@@ -119,7 +119,7 @@ class BlendLoader(plugin.AssetLoader):
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """
-        libpath = self.fname
+        libpath = self.filepath_from_context(context)
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]

@@ -1,16 +0,0 @@
-from openpype.hosts.fusion.api import (
-    comp_lock_and_undo_chunk,
-    get_current_comp
-)
-
-
-def main():
-    comp = get_current_comp()
-    """Set all selected backgrounds to 32 bit"""
-    with comp_lock_and_undo_chunk(comp, 'Selected Backgrounds to 32bit'):
-        tools = comp.GetToolList(True, "Background").values()
-        for tool in tools:
-            tool.Depth = 5
-
-
-main()

@@ -1,16 +0,0 @@
-from openpype.hosts.fusion.api import (
-    comp_lock_and_undo_chunk,
-    get_current_comp
-)
-
-
-def main():
-    comp = get_current_comp()
-    """Set all backgrounds to 32 bit"""
-    with comp_lock_and_undo_chunk(comp, 'Backgrounds to 32bit'):
-        tools = comp.GetToolList(False, "Background").values()
-        for tool in tools:
-            tool.Depth = 5
-
-
-main()

@@ -1,16 +0,0 @@
-from openpype.hosts.fusion.api import (
-    comp_lock_and_undo_chunk,
-    get_current_comp
-)
-
-
-def main():
-    comp = get_current_comp()
-    """Set all selected loaders to 32 bit"""
-    with comp_lock_and_undo_chunk(comp, 'Selected Loaders to 32bit'):
-        tools = comp.GetToolList(True, "Loader").values()
-        for tool in tools:
-            tool.Depth = 5
-
-
-main()

@@ -1,16 +0,0 @@
-from openpype.hosts.fusion.api import (
-    comp_lock_and_undo_chunk,
-    get_current_comp
-)
-
-
-def main():
-    comp = get_current_comp()
-    """Set all loaders to 32 bit"""
-    with comp_lock_and_undo_chunk(comp, 'Loaders to 32bit'):
-        tools = comp.GetToolList(False, "Loader").values()
-        for tool in tools:
-            tool.Depth = 5
-
-
-main()

@@ -1,200 +0,0 @@
-import os
-import sys
-import glob
-import logging
-
-from qtpy import QtWidgets, QtCore
-
-import qtawesome as qta
-
-from openpype.client import get_assets
-from openpype import style
-from openpype.pipeline import (
-    install_host,
-    get_current_project_name,
-)
-from openpype.hosts.fusion import api
-from openpype.pipeline.context_tools import get_workdir_from_session
-
-log = logging.getLogger("Fusion Switch Shot")
-
-
-class App(QtWidgets.QWidget):
-
-    def __init__(self, parent=None):
-
-        ################################################
-        # |---------------------| |------------------| #
-        # |Comp                 | |Asset             | #
-        # |[..][              v]| |[               v]| #
-        # |---------------------| |------------------| #
-        # |        Update existing comp [ ]           | #
-        # |------------------------------------------| #
-        # |                  Switch                   | #
-        # |------------------------------------------| #
-        ################################################
-
-        QtWidgets.QWidget.__init__(self, parent)
-
-        layout = QtWidgets.QVBoxLayout()
-
-        # Comp related input
-        comp_hlayout = QtWidgets.QHBoxLayout()
-        comp_label = QtWidgets.QLabel("Comp file")
-        comp_label.setFixedWidth(50)
-        comp_box = QtWidgets.QComboBox()
-
-        button_icon = qta.icon("fa.folder", color="white")
-        open_from_dir = QtWidgets.QPushButton()
-        open_from_dir.setIcon(button_icon)
-
-        comp_box.setFixedHeight(25)
-        open_from_dir.setFixedWidth(25)
-        open_from_dir.setFixedHeight(25)
-
-        comp_hlayout.addWidget(comp_label)
-        comp_hlayout.addWidget(comp_box)
-        comp_hlayout.addWidget(open_from_dir)
-
-        # Asset related input
-        asset_hlayout = QtWidgets.QHBoxLayout()
-        asset_label = QtWidgets.QLabel("Shot")
-        asset_label.setFixedWidth(50)
-
-        asset_box = QtWidgets.QComboBox()
-        asset_box.setLineEdit(QtWidgets.QLineEdit())
-        asset_box.setFixedHeight(25)
-
-        refresh_icon = qta.icon("fa.refresh", color="white")
-        refresh_btn = QtWidgets.QPushButton()
-        refresh_btn.setIcon(refresh_icon)
-
-        asset_box.setFixedHeight(25)
-        refresh_btn.setFixedWidth(25)
-        refresh_btn.setFixedHeight(25)
-
-        asset_hlayout.addWidget(asset_label)
-        asset_hlayout.addWidget(asset_box)
-        asset_hlayout.addWidget(refresh_btn)
-
-        # Options
-        options = QtWidgets.QHBoxLayout()
-        options.setAlignment(QtCore.Qt.AlignLeft)
-
-        current_comp_check = QtWidgets.QCheckBox()
-        current_comp_check.setChecked(True)
-        current_comp_label = QtWidgets.QLabel("Use current comp")
-
-        options.addWidget(current_comp_label)
-        options.addWidget(current_comp_check)
-
-        accept_btn = QtWidgets.QPushButton("Switch")
-
-        layout.addLayout(options)
-        layout.addLayout(comp_hlayout)
-        layout.addLayout(asset_hlayout)
-        layout.addWidget(accept_btn)
-
-        self._open_from_dir = open_from_dir
-        self._comps = comp_box
-        self._assets = asset_box
-        self._use_current = current_comp_check
-        self._accept_btn = accept_btn
-        self._refresh_btn = refresh_btn
-
-        self.setWindowTitle("Fusion Switch Shot")
-        self.setLayout(layout)
-
-        self.resize(260, 140)
-        self.setMinimumWidth(260)
-        self.setFixedHeight(140)
-
-        self.connections()
-
-        # Update ui to correct state
-        self._on_use_current_comp()
-        self._refresh()
-
-    def connections(self):
-        self._use_current.clicked.connect(self._on_use_current_comp)
-        self._open_from_dir.clicked.connect(self._on_open_from_dir)
-        self._refresh_btn.clicked.connect(self._refresh)
-        self._accept_btn.clicked.connect(self._on_switch)
-
-    def _on_use_current_comp(self):
-        state = self._use_current.isChecked()
-        self._open_from_dir.setEnabled(not state)
-        self._comps.setEnabled(not state)
-
-    def _on_open_from_dir(self):
-
-        start_dir = get_workdir_from_session()
-        comp_file, _ = QtWidgets.QFileDialog.getOpenFileName(
-            self, "Choose comp", start_dir)
-
-        if not comp_file:
-            return
-
-        # Create completer
-        self.populate_comp_box([comp_file])
-        self._refresh()
-
-    def _refresh(self):
-        # Clear any existing items
-        self._assets.clear()
-
-        asset_names = self.collect_asset_names()
-        completer = QtWidgets.QCompleter(asset_names)
-
-        self._assets.setCompleter(completer)
-        self._assets.addItems(asset_names)
-
-    def _on_switch(self):
-
-        if not self._use_current.isChecked():
-            file_name = self._comps.itemData(self._comps.currentIndex())
-        else:
-            comp = api.get_current_comp()
-            file_name = comp.GetAttrs("COMPS_FileName")
-
-        asset = self._assets.currentText()
-
-        import colorbleed.scripts.fusion_switch_shot as switch_shot
-        switch_shot.switch(asset_name=asset, filepath=file_name, new=True)
-
-    def collect_slap_comps(self, directory):
-        items = glob.glob("{}/*.comp".format(directory))
-        return items
-
-    def collect_asset_names(self):
-        project_name = get_current_project_name()
-        asset_docs = get_assets(project_name, fields=["name"])
-        asset_names = {
-            asset_doc["name"]
-            for asset_doc in asset_docs
-        }
-        return list(asset_names)
-
-    def populate_comp_box(self, files):
-        """Ensure we display the filename only but the path is stored as well
-
-        Args:
-            files (list): list of full file path [path/to/item/item.ext,]
-
-        Returns:
-            None
-        """
-
-        for f in files:
-            filename = os.path.basename(f)
-            self._comps.addItem(filename, userData=f)
-
-
-if __name__ == '__main__':
-    install_host(api)
-
-    app = QtWidgets.QApplication(sys.argv)
-    window = App()
-    window.setStyleSheet(style.load_stylesheet())
-    window.show()
-    sys.exit(app.exec_())

@@ -1,40 +0,0 @@
-"""Forces Fusion to 'retrigger' the Loader to update.
-
-Warning:
-    This might change settings like 'Reverse', 'Loop', trims and other
-    settings of the Loader. So use this at your own risk.
-
-"""
-from openpype.hosts.fusion.api.pipeline import (
-    get_current_comp,
-    comp_lock_and_undo_chunk
-)
-
-
-def update_loader_ranges():
-    comp = get_current_comp()
-    with comp_lock_and_undo_chunk(comp, "Reload clip time ranges"):
-        tools = comp.GetToolList(True, "Loader").values()
-        for tool in tools:
-
-            # Get tool attributes
-            tool_a = tool.GetAttrs()
-            clipTable = tool_a['TOOLST_Clip_Name']
-            altclipTable = tool_a['TOOLST_AltClip_Name']
-            startTime = tool_a['TOOLNT_Clip_Start']
-            old_global_in = tool.GlobalIn[comp.CurrentTime]
-
-            # Reapply
-            for index, _ in clipTable.items():
-                time = startTime[index]
-                tool.Clip[time] = tool.Clip[time]
-
-            for index, _ in altclipTable.items():
-                time = startTime[index]
-                tool.ProxyFilename[time] = tool.ProxyFilename[time]
-
-            tool.GlobalIn[comp.CurrentTime] = old_global_in
-
-
-if __name__ == '__main__':
-    update_loader_ranges()

@@ -5,7 +5,7 @@ Global = {
     Map = {
       ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
      ["Config:"] = "UserPaths:Config;OpenPype:Config",
-      ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts",
+      ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
    },
  },
  Script = {

@@ -30,10 +30,6 @@ class CreateSaver(NewCreator):
     instance_attributes = [
         "reviewable"
     ]
-    default_variants = [
-        "Main",
-        "Mask"
-    ]

     # TODO: This should be renamed together with Nuke so it is aligned
     temp_rendering_path_template = (

@@ -250,11 +246,7 @@ class CreateSaver(NewCreator):
             label="Review",
         )

-    def apply_settings(
-        self,
-        project_settings,
-        system_settings
-    ):
+    def apply_settings(self, project_settings):
        """Method called on initialization of plugin to apply settings."""

        # plugin settings

@@ -85,5 +85,5 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
         # Add review family if the instance is marked as 'review'
         # This could be done through a 'review' Creator attribute.
         if instance.data.get("review", False):
-            self.log.info("Adding review family..")
+            self.log.debug("Adding review family..")
             instance.data["families"].append("review")

@@ -108,7 +108,6 @@ class CollectFusionRender(
             fam = "render.farm"
             if fam not in instance.families:
                 instance.families.append(fam)
-            instance.toBeRenderedOn = "deadline"
             instance.farm = True  # to skip integrate
             if "review" in instance.families:
                 # to skip ExtractReview locally

@@ -147,13 +147,13 @@ class CollectFarmRender(publish.AbstractCollectRender):
                 attachTo=False,
                 setMembers=[node],
                 publish=info[4],
                 review=False,
                 renderer=None,
                 priority=50,
                 name=node.split("/")[1],

                 family="render.farm",
                 families=["render.farm"],
+                farm=True,

                 resolutionWidth=context.data["resolutionWidth"],
                 resolutionHeight=context.data["resolutionHeight"],

@@ -174,7 +174,6 @@ class CollectFarmRender(publish.AbstractCollectRender):
                 outputFormat=info[1],
                 outputStartFrame=info[3],
                 leadingZeros=info[2],
-                toBeRenderedOn='deadline',
                 ignoreFrameHandleCheck=True

             )

@@ -57,28 +57,31 @@ def create_interactive(creator_identifier, **kwargs):
         list: The created instances.

     """

-    # TODO Use Qt instead
-    result, variant = hou.ui.readInput('Define variant name',
-                                       buttons=("Ok", "Cancel"),
-                                       initial_contents='Main',
-                                       title="Define variant",
-                                       help="Set the variant for the "
-                                            "publish instance",
-                                       close_choice=1)
-    if result == 1:
-        # User interrupted
-        return
-    variant = variant.strip()
-    if not variant:
-        raise RuntimeError("Empty variant value entered.")
-
     host = registered_host()
     context = CreateContext(host)
     creator = context.manual_creators.get(creator_identifier)
     if not creator:
-        raise RuntimeError("Invalid creator identifier: "
-                           "{}".format(creator_identifier))
+        raise RuntimeError("Invalid creator identifier: {}".format(
+            creator_identifier)
+        )
+
+    # TODO Use Qt instead
+    result, variant = hou.ui.readInput(
+        "Define variant name",
+        buttons=("Ok", "Cancel"),
+        initial_contents=creator.get_default_variant(),
+        title="Define variant",
+        help="Set the variant for the publish instance",
+        close_choice=1
+    )
+
+    if result == 1:
+        # User interrupted
+        return
+
+    variant = variant.strip()
+    if not variant:
+        raise RuntimeError("Empty variant value entered.")

     # TODO: Once more elaborate unique create behavior should exist per Creator
     # instead of per network editor area then we should move this from here

@@ -303,6 +303,28 @@ def on_save():
         lib.set_id(node, new_id, overwrite=False)


+def _show_outdated_content_popup():
+    # Get main window
+    parent = lib.get_main_window()
+    if parent is None:
+        log.info("Skipping outdated content pop-up "
+                 "because Houdini window can't be found.")
+    else:
+        from openpype.widgets import popup
+
+        # Show outdated pop-up
+        def _on_show_inventory():
+            from openpype.tools.utils import host_tools
+            host_tools.show_scene_inventory(parent=parent)
+
+        dialog = popup.Popup(parent=parent)
+        dialog.setWindowTitle("Houdini scene has outdated content")
+        dialog.setMessage("There are outdated containers in "
+                          "your Houdini scene.")
+        dialog.on_clicked.connect(_on_show_inventory)
+        dialog.show()
+
+
 def on_open():

     if not hou.isUIAvailable():

@@ -316,28 +338,18 @@ def on_open():
     lib.validate_fps()

     if any_outdated_containers():
-        from openpype.widgets import popup
-
-        log.warning("Scene has outdated content.")
-
         # Get main window
         parent = lib.get_main_window()
         if parent is None:
-            log.info("Skipping outdated content pop-up "
-                     "because Houdini window can't be found.")
+            # When opening Houdini with last workfile on launch the UI hasn't
+            # initialized yet completely when the `on_open` callback triggers.
+            # We defer the dialog popup to wait for the UI to become available.
+            # We assume it will open because `hou.isUIAvailable()` returns True
+            import hdefereval
+            hdefereval.executeDeferred(_show_outdated_content_popup)
         else:
+            _show_outdated_content_popup()

-            # Show outdated pop-up
-            def _on_show_inventory():
-                from openpype.tools.utils import host_tools
-                host_tools.show_scene_inventory(parent=parent)
-
-            dialog = popup.Popup(parent=parent)
-            dialog.setWindowTitle("Houdini scene has outdated content")
-            dialog.setMessage("There are outdated containers in "
-                              "your Houdini scene.")
-            dialog.on_clicked.connect(_on_show_inventory)
-            dialog.show()
+        log.warning("Scene has outdated content.")


 def on_new():

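The deferral above relies on Houdini's hdefereval module, which queues a callable until the main event loop is idle, so the popup opens only once the UI exists. A minimal sketch of the pattern, assuming it runs inside a Houdini session (the callback body is a placeholder):

    import hou


    def _notify():
        # Placeholder for UI work such as showing a dialog.
        print("UI is ready")


    if hou.isUIAvailable():
        # hdefereval is importable only when Houdini runs with a UI.
        import hdefereval
        hdefereval.executeDeferred(_notify)
    else:
        _notify()
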
@@ -296,7 +296,7 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
         """
         return [hou.ropNodeTypeCategory()]

-    def apply_settings(self, project_settings, system_settings):
+    def apply_settings(self, project_settings):
         """Method called on initialization of plugin to apply settings."""

         settings_name = self.settings_name

@@ -59,6 +59,9 @@ class HdaLoader(load.LoaderPlugin):
         def_paths = [d.libraryFilePath() for d in defs]
         new = def_paths.index(file_path)
         defs[new].setIsPreferred(True)
+        hda_node.setParms({
+            "representation": str(representation["_id"])
+        })

     def remove(self, container):
         node = container["node"]

@@ -1,5 +1,7 @@
 import pyblish.api

+from openpype.pipeline.publish import KnownPublishError
+

 class CollectOutputSOPPath(pyblish.api.InstancePlugin):
     """Collect the out node's SOP/COP Path value."""

@@ -58,8 +60,8 @@ class CollectOutputSOPPath(pyblish.api.InstancePlugin):
         elif node_type == "Redshift_Proxy_Output":
             out_node = node.parm("RS_archive_sopPath").evalAsNode()
         else:
-            raise ValueError(
-                "ROP node type '%s' is" " not supported." % node_type
+            raise KnownPublishError(
+                "ROP node type '{}' is not supported.".format(node_type)
             )

         if not out_node:

@@ -2,7 +2,7 @@ import pyblish.api

 from openpype.lib import version_up
 from openpype.pipeline import registered_host
-from openpype.action import get_errored_plugins_from_data
+from openpype.pipeline.publish import get_errored_plugins_from_context
 from openpype.hosts.houdini.api import HoudiniHost
 from openpype.pipeline.publish import KnownPublishError

@@ -27,7 +27,7 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):

     def process(self, context):

-        errored_plugins = get_errored_plugins_from_data(context)
+        errored_plugins = get_errored_plugins_from_context(context)
         if any(
             plugin.__name__ == "HoudiniSubmitPublishDeadline"
             for plugin in errored_plugins

@@ -40,9 +40,10 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
         # Filename must not have changed since collecting
         host = registered_host()  # type: HoudiniHost
         current_file = host.current_file()
-        assert (
-            context.data["currentFile"] == current_file
-        ), "Collected filename mismatches from current scene name."
+        if context.data["currentFile"] != current_file:
+            raise KnownPublishError(
+                "Collected filename mismatches from current scene name."
+            )

         new_filepath = version_up(current_file)
         host.save_workfile(new_filepath)

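The assert-to-exception swap in the last hunk matters because asserts disappear under python -O and raise a bare AssertionError that publish report UIs render poorly. A condensed sketch of the replacement pattern (the helper function is illustrative; the import is the one used in the hunk):

    from openpype.pipeline.publish import KnownPublishError


    def ensure_unchanged_workfile(collected_path, current_path):
        """Illustrative helper mirroring the check in the hunk above."""
        if collected_path != current_path:
            raise KnownPublishError(
                "Collected filename mismatches from current scene name."
            )
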
@@ -1,5 +1,6 @@
 import pyblish.api

+from openpype.pipeline.publish import PublishValidationError
 from openpype.hosts.houdini.api import lib
 import hou

@@ -30,7 +31,7 @@ class ValidateAnimationSettings(pyblish.api.InstancePlugin):

         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError(
+            raise PublishValidationError(
                 "Output settings do no match for '%s'" % instance
             )

@@ -36,11 +36,11 @@ class ValidateRemotePublishOutNode(pyblish.api.ContextPlugin):
         if node.parm("shellexec").eval():
             self.raise_error("Must not execute in shell")
         if node.parm("prerender").eval() != cmd:
-            self.raise_error(("REMOTE_PUBLISH node does not have "
-                              "correct prerender script."))
+            self.raise_error("REMOTE_PUBLISH node does not have "
+                             "correct prerender script.")
         if node.parm("lprerender").eval() != "python":
-            self.raise_error(("REMOTE_PUBLISH node prerender script "
-                              "type not set to 'python'"))
+            self.raise_error("REMOTE_PUBLISH node prerender script "
+                             "type not set to 'python'")

     @classmethod
     def repair(cls, context):

@@ -48,5 +48,4 @@ class ValidateRemotePublishOutNode(pyblish.api.ContextPlugin):
         lib.create_remote_publish_node(force=True)

     def raise_error(self, message):
-        self.log.error(message)
-        raise PublishValidationError(message, title=self.label)
+        raise PublishValidationError(message)

@@ -24,7 +24,7 @@ class ValidateUSDRenderProductNames(pyblish.api.InstancePlugin):

         if not os.path.isabs(filepath):
             invalid.append(
-                "Output file path is not " "absolute path: %s" % filepath
+                "Output file path is not absolute path: %s" % filepath
             )

         if invalid:

@@ -6,7 +6,7 @@ from typing import Any, Dict, Union

 import six
 from openpype.pipeline.context_tools import (
-    get_current_project, get_current_project_asset,)
+    get_current_project, get_current_project_asset)
 from pymxs import runtime as rt

 JSON_PREFIX = "JSON::"

@@ -312,3 +312,98 @@ def set_timeline(frameStart, frameEnd):
     """
     rt.animationRange = rt.interval(frameStart, frameEnd)
     return rt.animationRange
+
+
+def unique_namespace(namespace, format="%02d",
+                     prefix="", suffix="", con_suffix="CON"):
+    """Return unique namespace
+
+    Arguments:
+        namespace (str): Name of namespace to consider
+        format (str, optional): Formatting of the given iteration number
+        suffix (str, optional): Only consider namespaces with this suffix.
+        con_suffix: max only, for finding the name of the master container
+
+    >>> unique_namespace("bar")
+    # bar01
+    >>> unique_namespace(":hello")
+    # :hello01
+    >>> unique_namespace("bar:", suffix="_NS")
+    # bar01_NS:
+
+    """
+
+    def current_namespace():
+        current = namespace
+        # When inside a namespace Max adds no trailing :
+        if not current.endswith(":"):
+            current += ":"
+        return current
+
+    # Always check against the absolute namespace root
+    # There's no clash with :x if we're defining namespace :a:x
+    ROOT = ":" if namespace.startswith(":") else current_namespace()
+
+    # Strip trailing `:` tokens since we might want to add a suffix
+    start = ":" if namespace.startswith(":") else ""
+    end = ":" if namespace.endswith(":") else ""
+    namespace = namespace.strip(":")
+    if ":" in namespace:
+        # Split off any nesting that we don't uniqify anyway.
+        parents, namespace = namespace.rsplit(":", 1)
+        start += parents + ":"
+        ROOT += start
+
+    iteration = 1
+    increment_version = True
+    while increment_version:
+        nr_namespace = namespace + format % iteration
+        unique = prefix + nr_namespace + suffix
+        container_name = f"{unique}:{namespace}{con_suffix}"
+        if not rt.getNodeByName(container_name):
+            name_space = start + unique + end
+            increment_version = False
+            return name_space
+        else:
+            increment_version = True
+            iteration += 1
+
+
+def get_namespace(container_name):
+    """Get the namespace and name of the sub-container
+
+    Args:
+        container_name (str): the name of master container
+
+    Raises:
+        RuntimeError: when there is no master container found
+
+    Returns:
+        namespace (str): namespace of the sub-container
+        name (str): name of the sub-container
+    """
+    node = rt.getNodeByName(container_name)
+    if not node:
+        raise RuntimeError("Master Container Not Found..")
+    name = rt.getUserProp(node, "name")
+    namespace = rt.getUserProp(node, "namespace")
+    return namespace, name
+
+
+def object_transform_set(container_children):
+    """A function which allows to store the transform of
+    previous loaded object(s)
+    Args:
+        container_children(list): A list of nodes
+
+    Returns:
+        transform_set (dict): A dict with all transform data of
+            the previous loaded object(s)
+    """
+    transform_set = {}
+    for node in container_children:
+        name = f"{node.name}.transform"
+        transform_set[name] = node.pos
+        name = f"{node.name}.scale"
+        transform_set[name] = node.scale
+    return transform_set

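A sketch of how the three helpers added above are meant to compose inside a 3ds Max session (node names are made up; rt.Container and rt.getNodeByName are pymxs runtime calls used throughout the loaders below):

    from pymxs import runtime as rt

    from openpype.hosts.max.api.lib import (
        unique_namespace,
        object_transform_set,
    )

    # First call in an empty scene would typically yield "model_01_".
    namespace = unique_namespace("model_", suffix="_")
    container = rt.Container(name=f"{namespace}:model_param")

    # Snapshot positions/scales before a reimport replaces the children.
    transforms = object_transform_set(container.Children)
    # Keys look like "<node name>.transform" and "<node name>.scale".
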
@@ -43,7 +43,7 @@ class RenderSettings(object):
                 rt.viewport.setCamera(sel)
                 break
         if not found:
-            raise RuntimeError("Camera not found")
+            raise RuntimeError("Active Camera not found")

     def render_output(self, container):
         folder = rt.maxFilePath

@@ -113,7 +113,8 @@ class RenderSettings(object):
         # for setting up renderable camera
         arv = rt.MAXToAOps.ArnoldRenderView()
         render_camera = rt.viewport.GetCamera()
-        arv.setOption("Camera", str(render_camera))
+        if render_camera:
+            arv.setOption("Camera", str(render_camera))

         # TODO: add AOVs and extension
         img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa

@@ -15,8 +15,10 @@ from openpype.pipeline import (
 )
 from openpype.hosts.max.api.menu import OpenPypeMenu
 from openpype.hosts.max.api import lib
+from openpype.hosts.max.api.plugin import MS_CUSTOM_ATTRIB
 from openpype.hosts.max import MAX_HOST_DIR

+
 from pymxs import runtime as rt  # noqa

 log = logging.getLogger("openpype.hosts.max")

@@ -152,17 +154,18 @@ def ls() -> list:
         yield lib.read(container)


-def containerise(name: str, nodes: list, context, loader=None, suffix="_CON"):
+def containerise(name: str, nodes: list, context,
+                 namespace=None, loader=None, suffix="_CON"):
     data = {
         "schema": "openpype:container-2.0",
         "id": AVALON_CONTAINER_ID,
         "name": name,
-        "namespace": "",
+        "namespace": namespace or "",
         "loader": loader,
         "representation": context["representation"]["_id"],
     }

-    container_name = f"{name}{suffix}"
+    container_name = f"{namespace}:{name}{suffix}"
     container = rt.container(name=container_name)
     for node in nodes:
         node.Parent = container

@@ -170,3 +173,53 @@ def containerise(name: str, nodes: list, context,
     if not lib.imprint(container_name, data):
         print(f"imprinting of {container_name} failed.")
     return container
+
+
+def load_custom_attribute_data():
+    """Re-loading the Openpype/AYON custom parameter built by the creator
+
+    Returns:
+        attribute: re-loading the custom OP attributes set in Maxscript
+    """
+    return rt.Execute(MS_CUSTOM_ATTRIB)
+
+
+def import_custom_attribute_data(container: str, selections: list):
+    """Importing the Openpype/AYON custom parameter built by the creator
+
+    Args:
+        container (str): target container which adds custom attributes
+        selections (list): nodes to be added into
+            group in custom attributes
+    """
+    attrs = load_custom_attribute_data()
+    modifier = rt.EmptyModifier()
+    rt.addModifier(container, modifier)
+    container.modifiers[0].name = "OP Data"
+    rt.custAttributes.add(container.modifiers[0], attrs)
+    node_list = []
+    sel_list = []
+    for i in selections:
+        node_ref = rt.NodeTransformMonitor(node=i)
+        node_list.append(node_ref)
+        sel_list.append(str(i))
+
+    # Setting the property
+    rt.setProperty(
+        container.modifiers[0].openPypeData,
+        "all_handles", node_list)
+    rt.setProperty(
+        container.modifiers[0].openPypeData,
+        "sel_list", sel_list)
+
+
+def update_custom_attribute_data(container: str, selections: list):
+    """Updating the Openpype/AYON custom parameter built by the creator
+
+    Args:
+        container (str): target container which adds custom attributes
+        selections (list): nodes to be added into
+            group in custom attributes
+    """
+    if container.modifiers[0].name == "OP Data":
+        rt.deleteModifier(container, container.modifiers[0])
+    import_custom_attribute_data(container, selections)

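How a loader calls the extended containerise signature after this change, sketched with placeholder variables (nodes and context come from a loader's load() as in the hunks below; the loader name string is illustrative):

    from openpype.hosts.max.api.pipeline import containerise


    def containerise_with_namespace(name, nodes, context, namespace):
        # namespace lands both in the imprinted data ("namespace" key) and
        # in the container node name, e.g. "myAsset_01_:myAsset_CON".
        return containerise(
            name, nodes, context,
            namespace, loader="FbxLoader", suffix="_CON")
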
@@ -1,7 +1,16 @@
 import os

 from openpype.hosts.max.api import lib, maintained_selection
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.lib import (
+    unique_namespace,
+    get_namespace,
+    object_transform_set
+)
+from openpype.hosts.max.api.pipeline import (
+    containerise,
+    import_custom_attribute_data,
+    update_custom_attribute_data
+)
 from openpype.pipeline import get_representation_path, load

@@ -13,50 +22,76 @@ class FbxLoader(load.LoaderPlugin):
     order = -9
     icon = "code-fork"
     color = "white"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         from pymxs import runtime as rt

         filepath = self.filepath_from_context(context)
         filepath = os.path.normpath(filepath)
         rt.FBXImporterSetParam("Animation", True)
         rt.FBXImporterSetParam("Camera", True)
         rt.FBXImporterSetParam("AxisConversionMethod", True)
+        rt.FBXImporterSetParam("Mode", rt.Name("create"))
+        rt.FBXImporterSetParam("Preserveinstances", True)
+        rt.ImportFile(
+            filepath,
+            rt.name("noPrompt"),
+            using=rt.FBXIMP)

-        container = rt.GetNodeByName(f"{name}")
-        if not container:
-            container = rt.Container()
-            container.name = f"{name}"
+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
+        container = rt.container(
+            name=f"{namespace}:{name}_{self.postfix}")
+        selections = rt.GetCurrentSelection()
+        import_custom_attribute_data(container, selections)

-        for selection in rt.GetCurrentSelection():
+        for selection in selections:
             selection.Parent = container
+            selection.name = f"{namespace}:{selection.name}"

         return containerise(
-            name, [container], context, loader=self.__class__.__name__)
+            name, [container], context,
+            namespace, loader=self.__class__.__name__)

     def update(self, container, representation):
         from pymxs import runtime as rt

         path = get_representation_path(representation)
-        node = rt.GetNodeByName(container["instance_node"])
-        rt.Select(node.Children)
-        fbx_reimport_cmd = (
-            f"""
+        node_name = container["instance_node"]
+        node = rt.getNodeByName(node_name)
+        namespace, name = get_namespace(node_name)
+        sub_node_name = f"{namespace}:{name}_{self.postfix}"
+        inst_container = rt.getNodeByName(sub_node_name)
+        rt.Select(inst_container.Children)
+        transform_data = object_transform_set(inst_container.Children)
+        for prev_fbx_obj in rt.selection:
+            if rt.isValidNode(prev_fbx_obj):
+                rt.Delete(prev_fbx_obj)

-FBXImporterSetParam "Animation" true
-FBXImporterSetParam "Cameras" true
-FBXImporterSetParam "AxisConversionMethod" true
-FbxExporterSetParam "UpAxis" "Y"
-FbxExporterSetParam "Preserveinstances" true
+        rt.FBXImporterSetParam("Animation", True)
+        rt.FBXImporterSetParam("Camera", True)
+        rt.FBXImporterSetParam("Mode", rt.Name("merge"))
+        rt.FBXImporterSetParam("AxisConversionMethod", True)
+        rt.FBXImporterSetParam("Preserveinstances", True)
+        rt.ImportFile(
+            path, rt.name("noPrompt"), using=rt.FBXIMP)
+        current_fbx_objects = rt.GetCurrentSelection()
+        for fbx_object in current_fbx_objects:
+            if fbx_object.Parent != inst_container:
+                fbx_object.Parent = inst_container
+                fbx_object.name = f"{namespace}:{fbx_object.name}"
+            fbx_object.pos = transform_data[
+                f"{fbx_object.name}.transform"]
+            fbx_object.scale = transform_data[
+                f"{fbx_object.name}.scale"]

-importFile @"{path}" #noPrompt using:FBXIMP
-""")
-        rt.Execute(fbx_reimport_cmd)
+        for children in node.Children:
+            if rt.classOf(children) == rt.Container:
+                if children.name == sub_node_name:
+                    update_custom_attribute_data(
+                        children, current_fbx_objects)

         with maintained_selection():
             rt.Select(node)

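Worth noting in the update() rewrite above: the old code assembled a MaxScript source string and evaluated it with rt.Execute, while the new code calls the same operations directly on the pymxs runtime, keeping the logic in Python. The two styles side by side (sketch):

    from pymxs import runtime as rt

    # Old style: evaluate MaxScript source text.
    rt.Execute('FBXImporterSetParam "Animation" true')

    # New style: call the wrapped runtime function directly; arguments are
    # real Python values and errors surface as Python exceptions.
    rt.FBXImporterSetParam("Animation", True)
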
@@ -1,7 +1,15 @@
 import os

 from openpype.hosts.max.api import lib
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.lib import (
+    unique_namespace,
+    get_namespace,
+    object_transform_set
+)
+from openpype.hosts.max.api.pipeline import (
+    containerise, import_custom_attribute_data,
+    update_custom_attribute_data
+)
 from openpype.pipeline import get_representation_path, load

@@ -16,22 +24,34 @@ class MaxSceneLoader(load.LoaderPlugin):
     order = -8
     icon = "code-fork"
     color = "green"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         from pymxs import runtime as rt

         path = self.filepath_from_context(context)
         path = os.path.normpath(path)
         # import the max scene by using "merge file"
         path = path.replace('\\', '/')
-        rt.MergeMaxFile(path)
+        rt.MergeMaxFile(path, quiet=True, includeFullGroup=True)
         max_objects = rt.getLastMergedNodes()
-        max_container = rt.Container(name=f"{name}")
-        for max_object in max_objects:
-            max_object.Parent = max_container
+        max_object_names = [obj.name for obj in max_objects]
+        # implement the OP/AYON custom attributes before load
+        max_container = []
+
+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
+        container_name = f"{namespace}:{name}_{self.postfix}"
+        container = rt.Container(name=container_name)
+        import_custom_attribute_data(container, max_objects)
+        max_container.append(container)
+        max_container.extend(max_objects)
+        for max_obj, obj_name in zip(max_objects, max_object_names):
+            max_obj.name = f"{namespace}:{obj_name}"
         return containerise(
-            name, [max_container], context, loader=self.__class__.__name__)
+            name, max_container, context,
+            namespace, loader=self.__class__.__name__)

     def update(self, container, representation):
         from pymxs import runtime as rt

@@ -39,15 +59,32 @@ class MaxSceneLoader(load.LoaderPlugin):
         path = get_representation_path(representation)
         node_name = container["instance_node"]

-        rt.MergeMaxFile(path,
-                        rt.Name("noRedraw"),
-                        rt.Name("deleteOldDups"),
-                        rt.Name("useSceneMtlDups"))
+        node = rt.getNodeByName(node_name)
+        namespace, name = get_namespace(node_name)
+        sub_container_name = f"{namespace}:{name}_{self.postfix}"
+        # delete the old container with attribute
+        # delete old duplicate
+        rt.Select(node.Children)
+        transform_data = object_transform_set(node.Children)
+        for prev_max_obj in rt.GetCurrentSelection():
+            if rt.isValidNode(prev_max_obj) and prev_max_obj.name != sub_container_name:  # noqa
+                rt.Delete(prev_max_obj)
+        rt.MergeMaxFile(path, rt.Name("deleteOldDups"))

-        max_objects = rt.getLastMergedNodes()
-        container_node = rt.GetNodeByName(node_name)
-        for max_object in max_objects:
-            max_object.Parent = container_node
+        current_max_objects = rt.getLastMergedNodes()
+        current_max_object_names = [obj.name for obj
+                                    in current_max_objects]
+        sub_container = rt.getNodeByName(sub_container_name)
+        update_custom_attribute_data(sub_container, current_max_objects)
+        for max_object in current_max_objects:
+            max_object.Parent = node
+        for max_obj, obj_name in zip(current_max_objects,
+                                     current_max_object_names):
+            max_obj.name = f"{namespace}:{obj_name}"
+            max_obj.pos = transform_data[
+                f"{max_obj.name}.transform"]
+            max_obj.scale = transform_data[
+                f"{max_obj.name}.scale"]

         lib.imprint(container["instance_node"], {
             "representation": str(representation["_id"])

@@ -1,8 +1,14 @@
 import os
 from openpype.pipeline import load, get_representation_path
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.pipeline import (
+    containerise,
+    import_custom_attribute_data,
+    update_custom_attribute_data
+)
 from openpype.hosts.max.api import lib
-from openpype.hosts.max.api.lib import maintained_selection
+from openpype.hosts.max.api.lib import (
+    maintained_selection, unique_namespace
+)


 class ModelAbcLoader(plugin := load.LoaderPlugin):

@@ -14,6 +20,7 @@ class ModelAbcLoader(load.LoaderPlugin):
     order = -10
     icon = "code-fork"
     color = "orange"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         from pymxs import runtime as rt

@@ -30,7 +37,7 @@ class ModelAbcLoader(load.LoaderPlugin):
         rt.AlembicImport.CustomAttributes = True
         rt.AlembicImport.UVs = True
         rt.AlembicImport.VertexColors = True
-        rt.importFile(file_path, rt.name("noPrompt"))
+        rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport)

         abc_after = {
             c

@@ -45,9 +52,22 @@ class ModelAbcLoader(load.LoaderPlugin):
             self.log.error("Something failed when loading.")

         abc_container = abc_containers.pop()
+        import_custom_attribute_data(
+            abc_container, abc_container.Children)
+
+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
+        for abc_object in abc_container.Children:
+            abc_object.name = f"{namespace}:{abc_object.name}"
+        # rename the abc container with namespace
+        abc_container_name = f"{namespace}:{name}_{self.postfix}"
+        abc_container.name = abc_container_name

         return containerise(
-            name, [abc_container], context, loader=self.__class__.__name__
+            name, [abc_container], context,
+            namespace, loader=self.__class__.__name__
         )

@@ -55,21 +75,19 @@ class ModelAbcLoader(load.LoaderPlugin):

         path = get_representation_path(representation)
         node = rt.GetNodeByName(container["instance_node"])
-        rt.Select(node.Children)
-
-        for alembic in rt.Selection:
-            abc = rt.GetNodeByName(alembic.name)
-            rt.Select(abc.Children)
-            for abc_con in rt.Selection:
-                container = rt.GetNodeByName(abc_con.name)
-                container.source = path
-                rt.Select(container.Children)
-                for abc_obj in rt.Selection:
-                    alembic_obj = rt.GetNodeByName(abc_obj.name)
-                    alembic_obj.source = path

         with maintained_selection():
-            rt.Select(node)
+            rt.Select(node.Children)
+
+            for alembic in rt.Selection:
+                abc = rt.GetNodeByName(alembic.name)
+                update_custom_attribute_data(abc, abc.Children)
+                rt.Select(abc.Children)
+                for abc_con in abc.Children:
+                    abc_con.source = path
+                    rt.Select(abc_con.Children)
+                    for abc_obj in abc_con.Children:
+                        abc_obj.source = path

         lib.imprint(
             container["instance_node"],

@@ -1,7 +1,15 @@
 import os
 from openpype.pipeline import load, get_representation_path
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.pipeline import (
+    containerise, import_custom_attribute_data,
+    update_custom_attribute_data
+)
 from openpype.hosts.max.api import lib
+from openpype.hosts.max.api.lib import (
+    unique_namespace,
+    get_namespace,
+    object_transform_set
+)
 from openpype.hosts.max.api.lib import maintained_selection

@@ -13,6 +21,7 @@ class FbxModelLoader(load.LoaderPlugin):
     order = -9
     icon = "code-fork"
     color = "white"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         from pymxs import runtime as rt

@@ -20,39 +29,69 @@ class FbxModelLoader(load.LoaderPlugin):
         filepath = os.path.normpath(self.filepath_from_context(context))
         rt.FBXImporterSetParam("Animation", False)
         rt.FBXImporterSetParam("Cameras", False)
+        rt.FBXImporterSetParam("Mode", rt.Name("create"))
+        rt.FBXImporterSetParam("Preserveinstances", True)
         rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP)

-        container = rt.GetNodeByName(name)
-        if not container:
-            container = rt.Container()
-            container.name = name
+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
+        container = rt.container(
+            name=f"{namespace}:{name}_{self.postfix}")
+        selections = rt.GetCurrentSelection()
+        import_custom_attribute_data(container, selections)

-        for selection in rt.GetCurrentSelection():
+        for selection in selections:
             selection.Parent = container
+            selection.name = f"{namespace}:{selection.name}"

         return containerise(
-            name, [container], context, loader=self.__class__.__name__
+            name, [container], context,
+            namespace, loader=self.__class__.__name__
         )

     def update(self, container, representation):
         from pymxs import runtime as rt
         path = get_representation_path(representation)
-        node = rt.getNodeByName(container["instance_node"])
-        rt.select(node.Children)
+        node_name = container["instance_node"]
+        node = rt.getNodeByName(node_name)
+        namespace, name = get_namespace(node_name)
+        sub_node_name = f"{namespace}:{name}_{self.postfix}"
+        inst_container = rt.getNodeByName(sub_node_name)
+        rt.Select(inst_container.Children)
+        transform_data = object_transform_set(inst_container.Children)
+        for prev_fbx_obj in rt.selection:
+            if rt.isValidNode(prev_fbx_obj):
+                rt.Delete(prev_fbx_obj)

         rt.FBXImporterSetParam("Animation", False)
         rt.FBXImporterSetParam("Cameras", False)
+        rt.FBXImporterSetParam("Mode", rt.Name("merge"))
+        rt.FBXImporterSetParam("AxisConversionMethod", True)
+        rt.FBXImporterSetParam("UpAxis", "Y")
+        rt.FBXImporterSetParam("Preserveinstances", True)
         rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP)
+        current_fbx_objects = rt.GetCurrentSelection()
+        for fbx_object in current_fbx_objects:
+            if fbx_object.Parent != inst_container:
+                fbx_object.Parent = inst_container
+                fbx_object.name = f"{namespace}:{fbx_object.name}"
+            fbx_object.pos = transform_data[
+                f"{fbx_object.name}.transform"]
+            fbx_object.scale = transform_data[
+                f"{fbx_object.name}.scale"]

+        for children in node.Children:
+            if rt.classOf(children) == rt.Container:
+                if children.name == sub_node_name:
+                    update_custom_attribute_data(
+                        children, current_fbx_objects)

         with maintained_selection():
             rt.Select(node)

         lib.imprint(
-            container["instance_node"],
+            node_name,
             {"representation": str(representation["_id"])},
         )

@@ -1,8 +1,18 @@
 import os

 from openpype.hosts.max.api import lib
+from openpype.hosts.max.api.lib import (
+    unique_namespace,
+    get_namespace,
+    maintained_selection,
+    object_transform_set
+)
-from openpype.hosts.max.api.lib import maintained_selection
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.pipeline import (
+    containerise,
+    import_custom_attribute_data,
+    update_custom_attribute_data
+)
 from openpype.pipeline import get_representation_path, load

@@ -14,6 +24,7 @@ class ObjLoader(load.LoaderPlugin):
     order = -9
     icon = "code-fork"
     color = "white"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         from pymxs import runtime as rt

@@ -22,36 +33,49 @@ class ObjLoader(load.LoaderPlugin):
         self.log.debug("Executing command to import..")

         rt.Execute(f'importFile @"{filepath}" #noPrompt using:ObjImp')

+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
         # create "missing" container for obj import
-        container = rt.Container()
-        container.name = name
-
+        container = rt.Container(name=f"{namespace}:{name}_{self.postfix}")
+        selections = rt.GetCurrentSelection()
+        import_custom_attribute_data(container, selections)
         # get current selection
-        for selection in rt.GetCurrentSelection():
+        for selection in selections:
             selection.Parent = container
-
-        asset = rt.GetNodeByName(name)
-
+            selection.name = f"{namespace}:{selection.name}"
         return containerise(
-            name, [asset], context, loader=self.__class__.__name__)
+            name, [container], context,
+            namespace, loader=self.__class__.__name__)

     def update(self, container, representation):
         from pymxs import runtime as rt

         path = get_representation_path(representation)
         node_name = container["instance_node"]
-        node = rt.GetNodeByName(node_name)
-
-        instance_name, _ = node_name.split("_")
-        container = rt.GetNodeByName(instance_name)
-        for child in container.Children:
-            rt.Delete(child)
+        node = rt.getNodeByName(node_name)
+        namespace, name = get_namespace(node_name)
+        sub_node_name = f"{namespace}:{name}_{self.postfix}"
+        inst_container = rt.getNodeByName(sub_node_name)
+        rt.Select(inst_container.Children)
+        transform_data = object_transform_set(inst_container.Children)
+        for prev_obj in rt.selection:
+            if rt.isValidNode(prev_obj):
+                rt.Delete(prev_obj)

         rt.Execute(f'importFile @"{path}" #noPrompt using:ObjImp')
         # get current selection
-        for selection in rt.GetCurrentSelection():
-            selection.Parent = container

+        selections = rt.GetCurrentSelection()
+        update_custom_attribute_data(inst_container, selections)
+        for selection in selections:
+            selection.Parent = inst_container
+            selection.name = f"{namespace}:{selection.name}"
+            selection.pos = transform_data[
+                f"{selection.name}.transform"]
+            selection.scale = transform_data[
+                f"{selection.name}.scale"]
+        with maintained_selection():
+            rt.Select(node)

@@ -1,8 +1,16 @@
 import os

 from openpype.hosts.max.api import lib
+from openpype.hosts.max.api.lib import (
+    unique_namespace,
+    get_namespace,
+    object_transform_set
+)
 from openpype.hosts.max.api.lib import maintained_selection
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.pipeline import (
+    containerise,
+    import_custom_attribute_data
+)
 from openpype.pipeline import get_representation_path, load

@@ -15,6 +23,7 @@ class ModelUSDLoader(load.LoaderPlugin):
     order = -10
     icon = "code-fork"
     color = "orange"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         from pymxs import runtime as rt

@@ -30,11 +39,24 @@ class ModelUSDLoader(load.LoaderPlugin):
         rt.LogLevel = rt.Name("info")
         rt.USDImporter.importFile(filepath,
                                   importOptions=import_options)

+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
         asset = rt.GetNodeByName(name)
+        import_custom_attribute_data(asset, asset.Children)
+        for usd_asset in asset.Children:
+            usd_asset.name = f"{namespace}:{usd_asset.name}"
+
+        asset_name = f"{namespace}:{name}_{self.postfix}"
+        asset.name = asset_name
+        # need to get the correct container after renamed
+        asset = rt.GetNodeByName(asset_name)

         return containerise(
-            name, [asset], context, loader=self.__class__.__name__)
+            name, [asset], context,
+            namespace, loader=self.__class__.__name__)

     def update(self, container, representation):
         from pymxs import runtime as rt

@@ -42,11 +64,16 @@ class ModelUSDLoader(load.LoaderPlugin):
         path = get_representation_path(representation)
         node_name = container["instance_node"]
         node = rt.GetNodeByName(node_name)
+        namespace, name = get_namespace(node_name)
+        sub_node_name = f"{namespace}:{name}_{self.postfix}"
+        transform_data = None
         for n in node.Children:
-            for r in n.Children:
-                rt.Delete(r)
+            rt.Select(n.Children)
+            transform_data = object_transform_set(n.Children)
+            for prev_usd_asset in rt.selection:
+                if rt.isValidNode(prev_usd_asset):
+                    rt.Delete(prev_usd_asset)
             rt.Delete(n)
-        instance_name, _ = node_name.split("_")

         import_options = rt.USDImporter.CreateOptions()
         base_filename = os.path.basename(path)

@@ -55,11 +82,20 @@ class ModelUSDLoader(load.LoaderPlugin):

         rt.LogPath = log_filepath
         rt.LogLevel = rt.Name("info")
-        rt.USDImporter.importFile(path,
-                                  importOptions=import_options)
+        rt.USDImporter.importFile(
+            path, importOptions=import_options)

-        asset = rt.GetNodeByName(instance_name)
+        asset = rt.GetNodeByName(name)
         asset.Parent = node
+        import_custom_attribute_data(asset, asset.Children)
+        for children in asset.Children:
+            children.name = f"{namespace}:{children.name}"
+            children.pos = transform_data[
+                f"{children.name}.transform"]
+            children.scale = transform_data[
+                f"{children.name}.scale"]
+
+        asset.name = sub_node_name

         with maintained_selection():
             rt.Select(node)

@@ -7,7 +7,12 @@ Because of limited api, alembics can be only loaded, but not easily updated.
 import os
 from openpype.pipeline import load, get_representation_path
 from openpype.hosts.max.api import lib, maintained_selection
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.lib import unique_namespace
+from openpype.hosts.max.api.pipeline import (
+    containerise,
+    import_custom_attribute_data,
+    update_custom_attribute_data
+)


 class AbcLoader(load.LoaderPlugin):

@@ -19,6 +24,7 @@ class AbcLoader(load.LoaderPlugin):
     order = -10
     icon = "code-fork"
     color = "orange"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         from pymxs import runtime as rt

@@ -33,7 +39,7 @@ class AbcLoader(load.LoaderPlugin):
         }

         rt.AlembicImport.ImportToRoot = False
-        rt.importFile(file_path, rt.name("noPrompt"))
+        rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport)

         abc_after = {
             c

@@ -48,13 +54,27 @@ class AbcLoader(load.LoaderPlugin):
             self.log.error("Something failed when loading.")

         abc_container = abc_containers.pop()

-        for abc in rt.GetCurrentSelection():
+        selections = rt.GetCurrentSelection()
+        import_custom_attribute_data(
+            abc_container, abc_container.Children)
+        for abc in selections:
             for cam_shape in abc.Children:
                 cam_shape.playbackType = 2

+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
+
+        for abc_object in abc_container.Children:
+            abc_object.name = f"{namespace}:{abc_object.name}"
+        # rename the abc container with namespace
+        abc_container_name = f"{namespace}:{name}_{self.postfix}"
+        abc_container.name = abc_container_name
+
         return containerise(
-            name, [abc_container], context, loader=self.__class__.__name__
+            name, [abc_container], context,
+            namespace, loader=self.__class__.__name__
         )

@@ -63,28 +83,23 @@ class AbcLoader(load.LoaderPlugin):

         path = get_representation_path(representation)
         node = rt.GetNodeByName(container["instance_node"])

-        alembic_objects = self.get_container_children(node, "AlembicObject")
-        for alembic_object in alembic_objects:
-            alembic_object.source = path
-
-        lib.imprint(
-            container["instance_node"],
-            {"representation": str(representation["_id"])},
-        )
-
         with maintained_selection():
             rt.Select(node.Children)

             for alembic in rt.Selection:
                 abc = rt.GetNodeByName(alembic.name)
+                update_custom_attribute_data(abc, abc.Children)
                 rt.Select(abc.Children)
-                for abc_con in rt.Selection:
-                    container = rt.GetNodeByName(abc_con.name)
-                    container.source = path
-                    rt.Select(container.Children)
-                    for abc_obj in rt.Selection:
-                        alembic_obj = rt.GetNodeByName(abc_obj.name)
-                        alembic_obj.source = path
+                for abc_con in abc.Children:
+                    abc_con.source = path
+                    rt.Select(abc_con.Children)
+                    for abc_obj in abc_con.Children:
+                        abc_obj.source = path

+        lib.imprint(
+            container["instance_node"],
+            {"representation": str(representation["_id"])},
+        )

     def switch(self, container, representation):
         self.update(container, representation)

@@ -1,7 +1,14 @@
 import os

 from openpype.hosts.max.api import lib, maintained_selection
-from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api.lib import (
+    unique_namespace, get_namespace
+)
+from openpype.hosts.max.api.pipeline import (
+    containerise,
+    import_custom_attribute_data,
+    update_custom_attribute_data
+)
 from openpype.pipeline import get_representation_path, load

@@ -13,6 +20,7 @@ class PointCloudLoader(load.LoaderPlugin):
     order = -8
     icon = "code-fork"
     color = "green"
+    postfix = "param"

     def load(self, context, name=None, namespace=None, data=None):
         """load point cloud by tyCache"""

@@ -22,10 +30,19 @@ class PointCloudLoader(load.LoaderPlugin):
         obj = rt.tyCache()
         obj.filename = filepath

-        prt_container = rt.GetNodeByName(obj.name)
+        namespace = unique_namespace(
+            name + "_",
+            suffix="_",
+        )
+        prt_container = rt.Container(
+            name=f"{namespace}:{name}_{self.postfix}")
+        import_custom_attribute_data(prt_container, [obj])
+        obj.Parent = prt_container
+        obj.name = f"{namespace}:{obj.name}"

         return containerise(
-            name, [prt_container], context, loader=self.__class__.__name__)
+            name, [prt_container], context,
+            namespace, loader=self.__class__.__name__)

     def update(self, container, representation):
         """update the container"""

@@ -33,15 +50,18 @@ class PointCloudLoader(load.LoaderPlugin):

         path = get_representation_path(representation)
-        node = rt.GetNodeByName(container["instance_node"])
+        namespace, name = get_namespace(container["instance_node"])
+        sub_node_name = f"{namespace}:{name}_{self.postfix}"
+        inst_container = rt.getNodeByName(sub_node_name)
+        update_custom_attribute_data(
+            inst_container, inst_container.Children)
         with maintained_selection():
-            rt.Select(node.Children)
-            for prt in rt.Selection:
-                prt_object = rt.GetNodeByName(prt.name)
-                prt_object.filename = path
-
-        lib.imprint(container["instance_node"], {
-            "representation": str(representation["_id"])
-        })
+            for prt in inst_container.Children:
+                prt.filename = path
+            lib.imprint(container["instance_node"], {
+                "representation": str(representation["_id"])
+            })

     def switch(self, container, representation):
         self.update(container, representation)

@@ -5,8 +5,15 @@ from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api.pipeline import (
containerise,
import_custom_attribute_data,
update_custom_attribute_data
)
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import (
unique_namespace, get_namespace
)


class RedshiftProxyLoader(load.LoaderPlugin):
@@ -18,6 +25,7 @@ class RedshiftProxyLoader(load.LoaderPlugin):
order = -9
icon = "code-fork"
color = "white"
postfix = "param"

def load(self, context, name=None, namespace=None, data=None):
from pymxs import runtime as rt
@@ -30,24 +38,32 @@ class RedshiftProxyLoader(load.LoaderPlugin):
if collections:
rs_proxy.is_sequence = True

container = rt.container()
container.name = name
namespace = unique_namespace(
name + "_",
suffix="_",
)
container = rt.Container(
name=f"{namespace}:{name}_{self.postfix}")
rs_proxy.Parent = container

asset = rt.getNodeByName(name)
rs_proxy.name = f"{namespace}:{rs_proxy.name}"
import_custom_attribute_data(container, [rs_proxy])

return containerise(
name, [asset], context, loader=self.__class__.__name__)
name, [container], context,
namespace, loader=self.__class__.__name__)

def update(self, container, representation):
from pymxs import runtime as rt

path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
for children in node.Children:
children_node = rt.getNodeByName(children.name)
for proxy in children_node.Children:
proxy.file = path
namespace, name = get_namespace(container["instance_node"])
sub_node_name = f"{namespace}:{name}_{self.postfix}"
inst_container = rt.getNodeByName(sub_node_name)

update_custom_attribute_data(
inst_container, inst_container.Children)
for proxy in inst_container.Children:
proxy.file = path

lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
@@ -34,6 +34,9 @@ class CollectRender(pyblish.api.InstancePlugin):
aovs = RenderProducts().get_aovs(instance.name)
files_by_aov.update(aovs)

camera = rt.viewport.GetCamera()
instance.data["cameras"] = [camera.name] if camera else None  # noqa

if "expectedFiles" not in instance.data:
instance.data["expectedFiles"] = list()
instance.data["files"] = list()

@@ -13,7 +13,6 @@ class ValidateMaxContents(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder
families = ["camera",
"maxScene",
"maxrender",
"review"]
hosts = ["max"]
label = "Max Scene Contents"
@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin)
from openpype.pipeline.publish import RepairAction
from openpype.hosts.max.api.lib import get_current_renderer

from pymxs import runtime as rt


class ValidateRenderableCamera(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validates Renderable Camera

Check if the renderable camera used for rendering
"""

order = pyblish.api.ValidatorOrder
families = ["maxrender"]
hosts = ["max"]
label = "Renderable Camera"
optional = True
actions = [RepairAction]

def process(self, instance):
if not self.is_active(instance.data):
return
if not instance.data["cameras"]:
raise PublishValidationError(
"No renderable Camera found in scene."
)

@classmethod
def repair(cls, instance):

rt.viewport.setType(rt.Name("view_camera"))
camera = rt.viewport.GetCamera()
cls.log.info(f"Camera {camera} set as renderable camera")
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer == "Arnold":
arv = rt.MAXToAOps.ArnoldRenderView()
arv.setOption("Camera", str(camera))
arv.close()
instance.data["cameras"] = [camera.name]
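Note: the new validator above reads the `cameras` list collected by `CollectRender`, and its `RepairAction` invokes the `repair()` classmethod on failure. A stripped-down sketch of that validate/repair contract (hypothetical plugin, not part of the commit):

# Hypothetical minimal validator showing the process()/repair() pairing
# that RepairAction relies on in the plugin above.
import pyblish.api


class ValidateHasCameras(pyblish.api.InstancePlugin):
    order = pyblish.api.ValidatorOrder
    label = "Has Cameras (sketch)"

    def process(self, instance):
        if not instance.data.get("cameras"):
            # The real plugin raises PublishValidationError here.
            raise ValueError("No renderable camera collected.")

    @classmethod
    def repair(cls, instance):
        # The real plugin switches the viewport to a camera view instead.
        instance.data["cameras"] = ["renderCamShape"]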
@@ -260,7 +260,7 @@ class MayaCreator(NewCreator, MayaCreatorBase):
default=True)
]

def apply_settings(self, project_settings, system_settings):
def apply_settings(self, project_settings):
"""Method called on initialization of plugin to apply settings."""

settings_name = self.settings_name
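Note: the same signature change (dropping the now-unused `system_settings` argument from `apply_settings`) repeats across the creator plugins below. A minimal sketch of the before/after shape, using a hypothetical creator that is not part of the commit:

# Hypothetical creator illustrating the apply_settings signature change.
class ExampleCreator:
    enabled = True

    # old: def apply_settings(self, project_settings, system_settings):
    def apply_settings(self, project_settings):
        # Settings now come solely from the project settings payload.
        plugin_settings = project_settings.get("maya", {}).get(
            "create", {}).get("ExampleCreator", {})
        self.enabled = plugin_settings.get("enabled", True)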
@@ -81,10 +81,8 @@ class CreateAnimation(plugin.MayaHiddenCreator):

return defs

def apply_settings(self, project_settings, system_settings):
super(CreateAnimation, self).apply_settings(
project_settings, system_settings
)
def apply_settings(self, project_settings):
super(CreateAnimation, self).apply_settings(project_settings)
# Hardcoding creator to be enabled due to existing settings would
# disable the creator causing the creator plugin to not be
# discoverable.

@@ -34,7 +34,7 @@ class CreateRenderlayer(plugin.RenderlayerCreator):
render_settings = {}

@classmethod
def apply_settings(cls, project_settings, system_settings):
def apply_settings(cls, project_settings):
cls.render_settings = project_settings["maya"]["RenderSettings"]

def create(self, subset_name, instance_data, pre_create_data):

@@ -21,7 +21,7 @@ class CreateUnrealSkeletalMesh(plugin.MayaCreator):
# Defined in settings
joint_hints = set()

def apply_settings(self, project_settings, system_settings):
def apply_settings(self, project_settings):
"""Apply project settings to creator"""
settings = (
project_settings["maya"]["create"]["CreateUnrealSkeletalMesh"]

@@ -16,7 +16,7 @@ class CreateUnrealStaticMesh(plugin.MayaCreator):
# Defined in settings
collision_prefixes = []

def apply_settings(self, project_settings, system_settings):
def apply_settings(self, project_settings):
"""Apply project settings to creator"""
settings = project_settings["maya"]["create"]["CreateUnrealStaticMesh"]
self.collision_prefixes = settings["collision_prefixes"]

@@ -22,7 +22,7 @@ class CreateVRayScene(plugin.RenderlayerCreator):
singleton_node_name = "vraysceneMain"

@classmethod
def apply_settings(cls, project_settings, system_settings):
def apply_settings(cls, project_settings):
cls.render_settings = project_settings["maya"]["RenderSettings"]

def create(self, subset_name, instance_data, pre_create_data):

@@ -13,8 +13,7 @@ class CreateYetiCache(plugin.MayaCreator):
family = "yeticache"
icon = "pagelines"

def __init__(self, *args, **kwargs):
super(CreateYetiCache, self).__init__(*args, **kwargs)
def get_instance_attr_defs(self):

defs = [
NumberDef("preroll",
@@ -36,3 +35,5 @@ class CreateYetiCache(plugin.MayaCreator):
default=3,
decimals=0)
)

return defs
@@ -15,6 +15,16 @@ from openpype.hosts.maya.api import lib
from openpype.hosts.maya.api.pipeline import containerise


# Do not reset these values on update but only apply on first load
# to preserve any potential local overrides
SKIP_UPDATE_ATTRS = {
"displayOutput",
"viewportDensity",
"viewportWidth",
"viewportLength",
}


def set_attribute(node, attr, value):
"""Wrapper of set attribute which ignores None values"""
if value is None:
@@ -205,6 +215,8 @@ class YetiCacheLoader(load.LoaderPlugin):
yeti_node = yeti_nodes[0]

for attr, value in node_settings["attrs"].items():
if attr in SKIP_UPDATE_ATTRS:
continue
set_attribute(attr, value, yeti_node)

cmds.setAttr("{}.representation".format(container_node),
@@ -311,7 +323,6 @@ class YetiCacheLoader(load.LoaderPlugin):
# Update attributes with defaults
attributes = node_settings["attrs"]
attributes.update({
"viewportDensity": 0.1,
"verbosity": 2,
"fileMode": 1,

@@ -321,6 +332,9 @@ class YetiCacheLoader(load.LoaderPlugin):
"visibleInRefractions": True
})

if "viewportDensity" not in attributes:
attributes["viewportDensity"] = 0.1

# Apply attributes to pgYetiMaya node
for attr, value in attributes.items():
set_attribute(attr, value, yeti_node)
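Note: the intent of `SKIP_UPDATE_ATTRS` above is that preview-related values a user tweaked locally survive a version update, while everything else is re-applied from the published data. A minimal, standalone sketch of that filtering (the setter is stubbed out):

# Sketch: re-apply published attributes on update, but keep local
# preview overrides (the SKIP_UPDATE_ATTRS set) untouched.
SKIP_UPDATE_ATTRS = {
    "displayOutput",
    "viewportDensity",
    "viewportWidth",
    "viewportLength",
}


def update_attrs(node_attrs, apply_fn):
    for attr, value in node_attrs.items():
        if attr in SKIP_UPDATE_ATTRS:
            continue  # preserve the user's local value
        apply_fn(attr, value)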
@@ -35,14 +35,11 @@ class CollectAssembly(pyblish.api.InstancePlugin):
# Get all content from the instance
instance_lookup = set(cmds.ls(instance, type="transform", long=True))
data = defaultdict(list)
self.log.info(instance_lookup)

hierarchy_nodes = []
for container in containers:

self.log.info(container)
root = lib.get_container_transforms(container, root=True)
self.log.info(root)
if not root or root not in instance_lookup:
continue

@@ -18,7 +18,6 @@ class CollectMayaHistory(pyblish.api.InstancePlugin):
hosts = ["maya"]
label = "Maya History"
families = ["rig"]
verbose = False

def process(self, instance):

@@ -28,6 +28,8 @@ class CollectNewInstances(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder
hosts = ["maya"]

valid_empty_families = {"workfile", "renderlayer"}

def process(self, instance):

objset = instance.data.get("instance_node")
@@ -58,7 +60,7 @@ class CollectNewInstances(pyblish.api.InstancePlugin):

instance[:] = members_hierarchy

elif instance.data["family"] != "workfile":
elif instance.data["family"] not in self.valid_empty_families:
self.log.warning("Empty instance: \"%s\" " % objset)
# Store the exact members of the object set
instance.data["setMembers"] = members
@@ -356,8 +356,9 @@ class CollectLook(pyblish.api.InstancePlugin):
# Thus the data will be limited to only what we need.
self.log.debug("obj_set {}".format(sets[obj_set]))
if not sets[obj_set]["members"]:
self.log.info(
"Removing redundant set information: {}".format(obj_set))
self.log.debug(
"Removing redundant set information: {}".format(obj_set)
)
sets.pop(obj_set, None)

self.log.debug("Gathering attribute changes to instance members..")
@@ -396,9 +397,9 @@ class CollectLook(pyblish.api.InstancePlugin):
if con:
materials.extend(con)

self.log.info("Found materials:\n{}".format(materials))
self.log.debug("Found materials:\n{}".format(materials))

self.log.info("Found the following sets:\n{}".format(look_sets))
self.log.debug("Found the following sets:\n{}".format(look_sets))
# Get the entire node chain of the look sets
# history = cmds.listHistory(look_sets)
history = []
@@ -456,7 +457,7 @@ class CollectLook(pyblish.api.InstancePlugin):
instance.extend(shader for shader in look_sets if shader
not in instance_lookup)

self.log.info("Collected look for %s" % instance)
self.log.debug("Collected look for %s" % instance)

def collect_sets(self, instance):
"""Collect all objectSets which are of importance for publishing

@@ -593,7 +594,7 @@ class CollectLook(pyblish.api.InstancePlugin):
if attribute == "fileTextureName":
computed_attribute = node + ".computedFileTextureNamePattern"

self.log.info(" - file source: {}".format(source))
self.log.debug(" - file source: {}".format(source))
color_space_attr = "{}.colorSpace".format(node)
try:
color_space = cmds.getAttr(color_space_attr)
@@ -621,7 +622,7 @@ class CollectLook(pyblish.api.InstancePlugin):
dependNode=True)
)
if not source and cmds.nodeType(node) in pxr_nodes:
self.log.info("Renderman: source is empty, skipping...")
self.log.debug("Renderman: source is empty, skipping...")
continue
# We replace backslashes with forward slashes because V-Ray
# can't handle the UDIM files with the backslashes in the
@@ -630,14 +631,14 @@ class CollectLook(pyblish.api.InstancePlugin):

files = get_file_node_files(node)
if len(files) == 0:
self.log.error("No valid files found from node `%s`" % node)
self.log.debug("No valid files found from node `%s`" % node)

self.log.info("collection of resource done:")
self.log.info(" - node: {}".format(node))
self.log.info(" - attribute: {}".format(attribute))
self.log.info(" - source: {}".format(source))
self.log.info(" - file: {}".format(files))
self.log.info(" - color space: {}".format(color_space))
self.log.debug("collection of resource done:")
self.log.debug(" - node: {}".format(node))
self.log.debug(" - attribute: {}".format(attribute))
self.log.debug(" - source: {}".format(source))
self.log.debug(" - file: {}".format(files))
self.log.debug(" - color space: {}".format(color_space))

# Define the resource
yield {
@@ -268,7 +268,7 @@ class CollectMultiverseLookData(pyblish.api.InstancePlugin):
cmds.loadPlugin("MultiverseForMaya", quiet=True)
import multiverse

self.log.info("Processing mvLook for '{}'".format(instance))
self.log.debug("Processing mvLook for '{}'".format(instance))

nodes = set()
for node in instance:
@@ -287,7 +287,7 @@ class CollectMultiverseLookData(pyblish.api.InstancePlugin):
publishMipMap = instance.data["publishMipMap"]

for node in nodes:
self.log.info("Getting resources for '{}'".format(node))
self.log.debug("Getting resources for '{}'".format(node))

# We know what nodes need to be collected, now we need to
# extract the materials overrides.
@@ -380,12 +380,12 @@ class CollectMultiverseLookData(pyblish.api.InstancePlugin):
if len(files) == 0:
self.log.error("No valid files found from node `%s`" % node)

self.log.info("collection of resource done:")
self.log.info(" - node: {}".format(node))
self.log.info(" - attribute: {}".format(fname_attrib))
self.log.info(" - source: {}".format(source))
self.log.info(" - file: {}".format(files))
self.log.info(" - color space: {}".format(color_space))
self.log.debug("collection of resource done:")
self.log.debug(" - node: {}".format(node))
self.log.debug(" - attribute: {}".format(fname_attrib))
self.log.debug(" - source: {}".format(source))
self.log.debug(" - file: {}".format(files))
self.log.debug(" - color space: {}".format(color_space))

# Define the resource
resource = {"node": node,
@@ -406,14 +406,14 @@ class CollectMultiverseLookData(pyblish.api.InstancePlugin):
extra_files = []
self.log.debug("Expecting MipMaps, going to look for them.")
for fname in files:
self.log.info("Checking '{}' for mipmaps".format(fname))
self.log.debug("Checking '{}' for mipmaps".format(fname))
if is_mipmap(fname):
self.log.debug(" - file is already MipMap, skipping.")
continue

mipmap = get_mipmap(fname)
if mipmap:
self.log.info(" mipmap found for '{}'".format(fname))
self.log.debug(" mipmap found for '{}'".format(fname))
extra_files.append(mipmap)
else:
self.log.warning(" no mipmap found for '{}'".format(fname))
@@ -105,7 +105,7 @@ class CollectMayaRender(pyblish.api.InstancePlugin):
"family": cmds.getAttr("{}.family".format(s)),
}
)
self.log.info(" -> attach render to: {}".format(s))
self.log.debug(" -> attach render to: {}".format(s))

layer_name = layer.name()

@@ -137,10 +137,10 @@ class CollectMayaRender(pyblish.api.InstancePlugin):
has_cameras = any(product.camera for product in render_products)
assert has_cameras, "No render cameras found."

self.log.info("multipart: {}".format(
self.log.debug("multipart: {}".format(
multipart))
assert expected_files, "no file names were generated, this is a bug"
self.log.info(
self.log.debug(
"expected files: {}".format(
json.dumps(expected_files, indent=4, sort_keys=True)
)
@@ -175,7 +175,7 @@ class CollectMayaRender(pyblish.api.InstancePlugin):
publish_meta_path = os.path.dirname(full_path)
aov_dict[aov_first_key] = full_paths
full_exp_files = [aov_dict]
self.log.info(full_exp_files)
self.log.debug(full_exp_files)

if publish_meta_path is None:
raise KnownPublishError("Unable to detect any expected output "
@@ -227,7 +227,7 @@ class CollectMayaRender(pyblish.api.InstancePlugin):
if platform.system().lower() in ["linux", "darwin"]:
common_publish_meta_path = "/" + common_publish_meta_path

self.log.info(
self.log.debug(
"Publish meta path: {}".format(common_publish_meta_path))

# Get layer specific settings, might be overrides
@@ -300,7 +300,7 @@ class CollectMayaRender(pyblish.api.InstancePlugin):
)
if rr_settings["enabled"]:
data["rrPathName"] = instance.data.get("rrPathName")
self.log.info(data["rrPathName"])
self.log.debug(data["rrPathName"])

if self.sync_workfile_version:
data["version"] = context.data["version"]
@@ -37,7 +37,7 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):

# Get renderer
renderer = instance.data["renderer"]
self.log.info("Renderer found: {}".format(renderer))
self.log.debug("Renderer found: {}".format(renderer))

rp_node_types = {"vray": ["VRayRenderElement", "VRayRenderElementSet"],
"arnold": ["aiAOV"],
@@ -66,8 +66,8 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):

result.append(render_pass)

self.log.info("Found {} render elements / AOVs for "
"'{}'".format(len(result), instance.data["subset"]))
self.log.debug("Found {} render elements / AOVs for "
"'{}'".format(len(result), instance.data["subset"]))

instance.data["renderPasses"] = result

@@ -21,11 +21,12 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin):
else:
layer = instance.data["renderlayer"]

self.log.info("layer: {}".format(layer))
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if
get_attr_in_layer("%s.renderable" % c, layer)]
renderable = [cam for cam in cameras if
get_attr_in_layer("{}.renderable".format(cam), layer)]

self.log.info("Found cameras %s: %s" % (len(renderable), renderable))
self.log.debug(
"Found renderable cameras %s: %s", len(renderable), renderable
)

instance.data["cameras"] = renderable
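Note: the hunk above also switches from eager `%`-interpolation to passing format args straight to the logger, so the message is only rendered when the debug level is actually enabled. A quick illustration with the standard `logging` module:

import logging

log = logging.getLogger("collect_renderable_camera")
renderable = ["|persp", "|renderCam"]

# Eager: the string is built even if DEBUG records are filtered out.
log.debug("Found renderable cameras %s: %s" % (len(renderable), renderable))

# Lazy: formatting is deferred until the record is actually emitted.
log.debug("Found renderable cameras %s: %s", len(renderable), renderable)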
@@ -19,7 +19,7 @@ class CollectUnrealStaticMesh(pyblish.api.InstancePlugin):
instance.data["geometryMembers"] = cmds.sets(
geometry_set, query=True)

self.log.info("geometry: {}".format(
self.log.debug("geometry: {}".format(
pformat(instance.data.get("geometryMembers"))))

collision_set = [
@@ -29,7 +29,7 @@ class CollectUnrealStaticMesh(pyblish.api.InstancePlugin):
instance.data["collisionMembers"] = cmds.sets(
collision_set, query=True)

self.log.info("collisions: {}".format(
self.log.debug("collisions: {}".format(
pformat(instance.data.get("collisionMembers"))))

frame = cmds.currentTime(query=True)
@@ -67,5 +67,5 @@ class CollectXgen(pyblish.api.InstancePlugin):

data["transfers"] = transfers

self.log.info(data)
self.log.debug(data)
instance.data.update(data)
@@ -4,12 +4,23 @@ import pyblish.api

from openpype.hosts.maya.api import lib

SETTINGS = {"renderDensity",
"renderWidth",
"renderLength",
"increaseRenderBounds",
"imageSearchPath",
"cbId"}

SETTINGS = {
# Preview
"displayOutput",
"colorR", "colorG", "colorB",
"viewportDensity",
"viewportWidth",
"viewportLength",
# Render attributes
"renderDensity",
"renderWidth",
"renderLength",
"increaseRenderBounds",
"imageSearchPath",
# Pipeline specific
"cbId"
}


class CollectYetiCache(pyblish.api.InstancePlugin):
@@ -39,10 +50,6 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
# Get yeti nodes and their transforms
yeti_shapes = cmds.ls(instance, type="pgYetiMaya")
for shape in yeti_shapes:
shape_data = {"transform": None,
"name": shape,
"cbId": lib.get_id(shape),
"attrs": None}

# Get specific node attributes
attr_data = {}
@@ -58,9 +65,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
parent = cmds.listRelatives(shape, parent=True)[0]
transform_data = {"name": parent, "cbId": lib.get_id(parent)}

# Store collected data
shape_data["attrs"] = attr_data
shape_data["transform"] = transform_data
shape_data = {
"transform": transform_data,
"name": shape,
"cbId": lib.get_id(shape),
"attrs": attr_data,
}

settings["nodes"].append(shape_data)

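Note: for reference, the per-shape payload collected above ends up shaped roughly like this (all values here are illustrative, not taken from the commit):

# Illustrative example of one collected pgYetiMaya entry (values made up).
shape_data = {
    "transform": {"name": "fur_GRP", "cbId": "5f2a...:ab12..."},
    "name": "furShape",
    "cbId": "5f2a...:cd34...",
    "attrs": {
        "renderDensity": 1.0,
        "viewportDensity": 0.1,
        "imageSearchPath": "",
    },
}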
@@ -119,7 +119,6 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
texture_filenames = []
if image_search_paths:

# TODO: Somehow this uses OS environment path separator, `:` vs `;`
# Later on check whether this is pipeline OS cross-compatible.
image_search_paths = [p for p in
@@ -130,13 +129,13 @@ class CollectYetiRig(pyblish.api.InstancePlugin):

# List all related textures
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
self.log.info("Found %i texture(s)" % len(texture_filenames))
self.log.debug("Found %i texture(s)" % len(texture_filenames))

# Get all reference nodes
reference_nodes = cmds.pgYetiGraph(node,
listNodes=True,
type="reference")
self.log.info("Found %i reference node(s)" % len(reference_nodes))
self.log.debug("Found %i reference node(s)" % len(reference_nodes))

if texture_filenames and not image_search_paths:
raise ValueError("pgYetiMaya node '%s' is missing the path to the "
@@ -100,7 +100,7 @@ class ExtractArnoldSceneSource(publish.Extractor):

instance.data["representations"].append(representation)

self.log.info(
self.log.debug(
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)

@@ -126,7 +126,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
instance.data["representations"].append(representation)

def _extract(self, nodes, attribute_data, kwargs):
self.log.info(
self.log.debug(
"Writing {} with:\n{}".format(kwargs["filename"], kwargs)
)
filenames = []
@@ -180,12 +180,12 @@ class ExtractArnoldSceneSource(publish.Extractor):

with lib.attribute_values(attribute_data):
with lib.maintained_selection():
self.log.info(
self.log.debug(
"Writing: {}".format(duplicate_nodes)
)
cmds.select(duplicate_nodes, noExpand=True)

self.log.info(
self.log.debug(
"Extracting ass sequence with: {}".format(kwargs)
)

@@ -194,6 +194,6 @@ class ExtractArnoldSceneSource(publish.Extractor):
for file in exported_files:
filenames.append(os.path.split(file)[1])

self.log.info("Exported: {}".format(filenames))
self.log.debug("Exported: {}".format(filenames))

return filenames, nodes_by_id
@@ -27,7 +27,7 @@ class ExtractAssembly(publish.Extractor):
json_filename = "{}.json".format(instance.name)
json_path = os.path.join(staging_dir, json_filename)

self.log.info("Dumping scene data for debugging ..")
self.log.debug("Dumping scene data for debugging ..")
with open(json_path, "w") as filepath:
json.dump(instance.data["scenedata"], filepath, ensure_ascii=False)

@@ -94,7 +94,7 @@ class ExtractCameraAlembic(publish.Extractor):
"Attributes to bake must be specified as a list"
)
for attr in self.bake_attributes:
self.log.info("Adding {} attribute".format(attr))
self.log.debug("Adding {} attribute".format(attr))
job_str += " -attr {0}".format(attr)

with lib.evaluation("off"):
@@ -112,5 +112,5 @@ class ExtractCameraAlembic(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance '{0}' to: {1}".format(
self.log.debug("Extracted instance '{0}' to: {1}".format(
instance.name, path))
@@ -111,7 +111,7 @@ class ExtractCameraMayaScene(publish.Extractor):
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
self.log.debug(
"Using {} as scene type".format(self.scene_type))
break
except KeyError:
@@ -151,7 +151,7 @@ class ExtractCameraMayaScene(publish.Extractor):
with lib.evaluation("off"):
with lib.suspended_refresh():
if bake_to_worldspace:
self.log.info(
self.log.debug(
"Performing camera bakes: {}".format(transform))
baked = lib.bake_to_world_space(
transform,
@@ -186,7 +186,7 @@ class ExtractCameraMayaScene(publish.Extractor):
unlock(plug)
cmds.setAttr(plug, value)

self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
cmds.select(cmds.ls(members, dag=True,
shapes=True, long=True), noExpand=True)
cmds.file(path,
@@ -217,5 +217,5 @@ class ExtractCameraMayaScene(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance '{0}' to: {1}".format(
self.log.debug("Extracted instance '{0}' to: {1}".format(
instance.name, path))
@@ -33,11 +33,11 @@ class ExtractFBX(publish.Extractor):
# to format it into a string in a mel expression
path = path.replace('\\', '/')

self.log.info("Extracting FBX to: {0}".format(path))
self.log.debug("Extracting FBX to: {0}".format(path))

members = instance.data["setMembers"]
self.log.info("Members: {0}".format(members))
self.log.info("Instance: {0}".format(instance[:]))
self.log.debug("Members: {0}".format(members))
self.log.debug("Instance: {0}".format(instance[:]))

fbx_exporter.set_options_from_instance(instance)

@@ -58,4 +58,4 @@ class ExtractFBX(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extract FBX successful to: {0}".format(path))
self.log.debug("Extract FBX successful to: {0}".format(path))
@@ -20,14 +20,10 @@ class ExtractGLB(publish.Extractor):
filename = "{0}.glb".format(instance.name)
path = os.path.join(staging_dir, filename)

self.log.info("Extracting GLB to: {}".format(path))

cmds.loadPlugin("maya2glTF", quiet=True)

nodes = instance[:]

self.log.info("Instance: {0}".format(nodes))

start_frame = instance.data('frameStart') or \
int(cmds.playbackOptions(query=True,
animationStartTime=True))# noqa
@@ -48,6 +44,7 @@ class ExtractGLB(publish.Extractor):
"vno": True # visibleNodeOnly
}

self.log.debug("Extracting GLB to: {}".format(path))
with lib.maintained_selection():
cmds.select(nodes, hi=True, noExpand=True)
extract_gltf(staging_dir,
@@ -65,4 +62,4 @@ class ExtractGLB(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extract GLB successful to: {0}".format(path))
self.log.debug("Extract GLB successful to: {0}".format(path))
@@ -60,6 +60,6 @@ class ExtractGPUCache(publish.Extractor):

instance.data["representations"].append(representation)

self.log.info(
self.log.debug(
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
@@ -46,7 +46,7 @@ class ExtractImportReference(publish.Extractor,
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
self.log.debug(
"Using {} as scene type".format(self.scene_type))
break

@@ -69,7 +69,7 @@ class ExtractImportReference(publish.Extractor,
reference_path = os.path.join(dir_path, ref_scene_name)
tmp_path = os.path.dirname(current_name) + "/" + ref_scene_name

self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")

# This generates script for mayapy to take care of reference
# importing outside current session. It is passing current scene
@@ -111,7 +111,7 @@ print("*** Done")
# process until handles are closed by context manager.
with tempfile.TemporaryDirectory() as tmp_dir_name:
tmp_script_path = os.path.join(tmp_dir_name, "import_ref.py")
self.log.info("Using script file: {}".format(tmp_script_path))
self.log.debug("Using script file: {}".format(tmp_script_path))
with open(tmp_script_path, "wt") as tmp:
tmp.write(script)

@@ -149,9 +149,9 @@ print("*** Done")
"stagingDir": os.path.dirname(current_name),
"outputName": "imported"
}
self.log.info("%s" % ref_representation)
self.log.debug(ref_representation)

instance.data["representations"].append(ref_representation)

self.log.info("Extracted instance '%s' to : '%s'" % (ref_scene_name,
reference_path))
self.log.debug("Extracted instance '%s' to : '%s'" % (ref_scene_name,
reference_path))
@@ -23,7 +23,7 @@ class ExtractLayout(publish.Extractor):
stagingdir = self.staging_dir(instance)

# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")

if "representations" not in instance.data:
instance.data["representations"] = []

@@ -64,7 +64,7 @@ class ExtractLayout(publish.Extractor):
fields=["parent", "context.family"]
)

self.log.info(representation)
self.log.debug(representation)

version_id = representation.get("parent")
family = representation.get("context").get("family")
@@ -159,5 +159,5 @@ class ExtractLayout(publish.Extractor):
}
instance.data["representations"].append(json_representation)

self.log.info("Extracted instance '%s' to: %s",
instance.name, json_representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, json_representation)
@@ -307,7 +307,7 @@ class MakeTX(TextureProcessor):

render_colorspace = color_management["rendering_space"]

self.log.info("tx: converting colorspace {0} "
self.log.debug("tx: converting colorspace {0} "
"-> {1}".format(colorspace,
render_colorspace))
args.extend(["--colorconvert", colorspace, render_colorspace])

@@ -331,7 +331,7 @@ class MakeTX(TextureProcessor):
if not os.path.exists(resources_dir):
os.makedirs(resources_dir)

self.log.info("Generating .tx file for %s .." % source)
self.log.debug("Generating .tx file for %s .." % source)

subprocess_args = maketx_args + [
"-v", # verbose

@@ -421,7 +421,7 @@ class ExtractLook(publish.Extractor):
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
self.log.debug(
"Using {} as scene type".format(self.scene_type))
break
except KeyError:
@@ -453,7 +453,7 @@ class ExtractLook(publish.Extractor):
relationships = lookdata["relationships"]
sets = list(relationships.keys())
if not sets:
self.log.info("No sets found for the look")
self.log.debug("No sets found for the look")
return

# Specify texture processing executables to activate
@@ -485,7 +485,7 @@ class ExtractLook(publish.Extractor):
remap = results["attrRemap"]

# Extract in correct render layer
self.log.info("Extracting look maya scene file: {}".format(maya_path))
self.log.debug("Extracting look maya scene file: {}".format(maya_path))
layer = instance.data.get("renderlayer", "defaultRenderLayer")
with lib.renderlayer(layer):
# TODO: Ensure membership edits don't become renderlayer overrides
@@ -511,12 +511,12 @@ class ExtractLook(publish.Extractor):
)

# Write the JSON data
self.log.info("Extract json..")
data = {
"attributes": lookdata["attributes"],
"relationships": relationships
}

self.log.debug("Extracting json file: {}".format(json_path))
with open(json_path, "w") as f:
json.dump(data, f)

@@ -557,8 +557,8 @@ class ExtractLook(publish.Extractor):
# Source hash for the textures
instance.data["sourceHashes"] = hashes

self.log.info("Extracted instance '%s' to: %s" % (instance.name,
maya_path))
self.log.debug("Extracted instance '%s' to: %s" % (instance.name,
maya_path))

def _set_resource_result_colorspace(self, resource, colorspace):
"""Update resource resulting colorspace after texture processing"""
@@ -589,14 +589,13 @@ class ExtractLook(publish.Extractor):
resources = instance.data["resources"]
color_management = lib.get_color_management_preferences()

# Temporary fix to NOT create hardlinks on windows machines
if platform.system().lower() == "windows":
self.log.info(
force_copy = instance.data.get("forceCopy", False)
if not force_copy and platform.system().lower() == "windows":
# Temporary fix to NOT create hardlinks on windows machines
self.log.warning(
"Forcing copy instead of hardlink due to issues on Windows..."
)
force_copy = True
else:
force_copy = instance.data.get("forceCopy", False)

destinations_cache = {}

@@ -671,11 +670,11 @@ class ExtractLook(publish.Extractor):
destination = get_resource_destination_cached(source)
if force_copy or texture_result.transfer_mode == COPY:
transfers.append((source, destination))
self.log.info('file will be copied {} -> {}'.format(
self.log.debug('file will be copied {} -> {}'.format(
source, destination))
elif texture_result.transfer_mode == HARDLINK:
hardlinks.append((source, destination))
self.log.info('file will be hardlinked {} -> {}'.format(
self.log.debug('file will be hardlinked {} -> {}'.format(
source, destination))

# Store the hashes from hash to destination to include in the
@@ -707,7 +706,7 @@ class ExtractLook(publish.Extractor):
color_space_attr = "{}.colorSpace".format(node)
remap[color_space_attr] = resource["result_color_space"]

self.log.info("Finished remapping destinations ...")
self.log.debug("Finished remapping destinations ...")

return {
"fileTransfers": transfers,
@@ -815,8 +814,8 @@ class ExtractLook(publish.Extractor):
if not processed_result:
raise RuntimeError("Texture Processor {} returned "
"no result.".format(processor))
self.log.info("Generated processed "
"texture: {}".format(processed_result.path))
self.log.debug("Generated processed "
"texture: {}".format(processed_result.path))

# TODO: Currently all processors force copy instead of allowing
# hardlinks using source hashes. This should be refactored
@@ -827,7 +826,7 @@ class ExtractLook(publish.Extractor):
if not force_copy:
existing = self._get_existing_hashed_texture(filepath)
if existing:
self.log.info("Found hash in database, preparing hardlink..")
self.log.debug("Found hash in database, preparing hardlink..")
return TextureResult(
path=filepath,
file_hash=texture_hash,
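Note: the reworked `force_copy` logic in ExtractLook above only falls back to copying on Windows when the user did not already request copies. A condensed sketch of that decision, standalone and using only the stdlib `platform` module:

import platform

def resolve_force_copy(instance_data, log):
    """Sketch of the transfer-mode decision used by ExtractLook."""
    force_copy = instance_data.get("forceCopy", False)
    if not force_copy and platform.system().lower() == "windows":
        # Temporary fix: hardlinks are problematic on Windows.
        log.warning(
            "Forcing copy instead of hardlink due to issues on Windows..."
        )
        force_copy = True
    return force_copy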
@@ -34,7 +34,7 @@ class ExtractMayaSceneRaw(publish.Extractor):
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
self.log.debug(
"Using {} as scene type".format(self.scene_type))
break
except KeyError:
@@ -63,7 +63,7 @@ class ExtractMayaSceneRaw(publish.Extractor):
selection += self._get_loaded_containers(members)

# Perform extraction
self.log.info("Performing extraction ...")
self.log.debug("Performing extraction ...")
with maintained_selection():
cmds.select(selection, noExpand=True)
cmds.file(path,
@@ -87,7 +87,8 @@ class ExtractMayaSceneRaw(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
self.log.debug("Extracted instance '%s' to: %s" % (instance.name,
path))

@staticmethod
def _get_loaded_containers(members):
@@ -44,7 +44,7 @@ class ExtractModel(publish.Extractor,
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
self.log.debug(
"Using {} as scene type".format(self.scene_type))
break
except KeyError:
@@ -56,7 +56,7 @@ class ExtractModel(publish.Extractor,
path = os.path.join(stagingdir, filename)

# Perform extraction
self.log.info("Performing extraction ...")
self.log.debug("Performing extraction ...")

# Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects
@@ -102,4 +102,5 @@ class ExtractModel(publish.Extractor,
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
self.log.debug("Extracted instance '%s' to: %s" % (instance.name,
path))
@@ -101,10 +101,10 @@ class ExtractMultiverseLook(publish.Extractor):

# Parse export options
options = self.default_options
self.log.info("Export options: {0}".format(options))
self.log.debug("Export options: {0}".format(options))

# Perform extraction
self.log.info("Performing extraction ...")
self.log.debug("Performing extraction ...")

with maintained_selection():
members = instance.data("setMembers")
@@ -114,7 +114,7 @@ class ExtractMultiverseLook(publish.Extractor):
type="mvUsdCompoundShape",
noIntermediate=True,
long=True)
self.log.info('Collected object {}'.format(members))
self.log.debug('Collected object {}'.format(members))
if len(members) > 1:
self.log.error('More than one member: {}'.format(members))

@@ -153,5 +153,5 @@ class ExtractMultiverseLook(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance {} to {}".format(
self.log.debug("Extracted instance {} to {}".format(
instance.name, file_path))
@@ -150,7 +150,6 @@ class ExtractMultiverseUsd(publish.Extractor):
return options

def get_default_options(self):
self.log.info("ExtractMultiverseUsd get_default_options")
return self.default_options

def filter_members(self, members):
@@ -173,19 +172,19 @@ class ExtractMultiverseUsd(publish.Extractor):
# Parse export options
options = self.get_default_options()
options = self.parse_overrides(instance, options)
self.log.info("Export options: {0}".format(options))
self.log.debug("Export options: {0}".format(options))

# Perform extraction
self.log.info("Performing extraction ...")
self.log.debug("Performing extraction ...")

with maintained_selection():
members = instance.data("setMembers")
self.log.info('Collected objects: {}'.format(members))
self.log.debug('Collected objects: {}'.format(members))
members = self.filter_members(members)
if not members:
self.log.error('No members!')
return
self.log.info(' - filtered: {}'.format(members))
self.log.debug(' - filtered: {}'.format(members))

import multiverse

@@ -229,7 +228,7 @@ class ExtractMultiverseUsd(publish.Extractor):
self.log.debug(" - {}={}".format(key, value))
setattr(asset_write_opts, key, value)

self.log.info('WriteAsset: {} / {}'.format(file_path, members))
self.log.debug('WriteAsset: {} / {}'.format(file_path, members))
multiverse.WriteAsset(file_path, members, asset_write_opts)

if "representations" not in instance.data:
@@ -243,7 +242,7 @@ class ExtractMultiverseUsd(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance {} to {}".format(
self.log.debug("Extracted instance {} to {}".format(
instance.name, file_path))

@@ -105,14 +105,14 @@ class ExtractMultiverseUsdComposition(publish.Extractor):
# Parse export options
options = self.default_options
options = self.parse_overrides(instance, options)
self.log.info("Export options: {0}".format(options))
self.log.debug("Export options: {0}".format(options))

# Perform extraction
self.log.info("Performing extraction ...")
self.log.debug("Performing extraction ...")

with maintained_selection():
members = instance.data("setMembers")
self.log.info('Collected object {}'.format(members))
self.log.debug('Collected object {}'.format(members))

import multiverse

@@ -175,5 +175,5 @@ class ExtractMultiverseUsdComposition(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance {} to {}".format(
instance.name, file_path))
self.log.debug("Extracted instance {} to {}".format(instance.name,
file_path))
@@ -87,10 +87,10 @@ class ExtractMultiverseUsdOverride(publish.Extractor):

# Parse export options
options = self.default_options
self.log.info("Export options: {0}".format(options))
self.log.debug("Export options: {0}".format(options))

# Perform extraction
self.log.info("Performing extraction ...")
self.log.debug("Performing extraction ...")

with maintained_selection():
members = instance.data("setMembers")
@@ -100,7 +100,7 @@ class ExtractMultiverseUsdOverride(publish.Extractor):
type="mvUsdCompoundShape",
noIntermediate=True,
long=True)
self.log.info("Collected object {}".format(members))
self.log.debug("Collected object {}".format(members))

# TODO: Deal with asset, composition, override with options.
import multiverse
@@ -153,5 +153,5 @@ class ExtractMultiverseUsdOverride(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance {} to {}".format(
self.log.debug("Extracted instance {} to {}".format(
instance.name, file_path))
@@ -30,7 +30,7 @@ class ExtractObj(publish.Extractor):
# The export requires forward slashes because we need to
# format it into a string in a mel expression

self.log.info("Extracting OBJ to: {0}".format(path))
self.log.debug("Extracting OBJ to: {0}".format(path))

members = instance.data("setMembers")
members = cmds.ls(members,
@@ -39,8 +39,8 @@ class ExtractObj(publish.Extractor):
type=("mesh", "nurbsCurve"),
noIntermediate=True,
long=True)
self.log.info("Members: {0}".format(members))
self.log.info("Instance: {0}".format(instance[:]))
self.log.debug("Members: {0}".format(members))
self.log.debug("Instance: {0}".format(instance[:]))

if not cmds.pluginInfo('objExport', query=True, loaded=True):
cmds.loadPlugin('objExport')
@@ -74,4 +74,4 @@ class ExtractObj(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extract OBJ successful to: {0}".format(path))
self.log.debug("Extract OBJ successful to: {0}".format(path))
@@ -48,7 +48,7 @@ class ExtractPlayblast(publish.Extractor):
self.log.debug("playblast path {}".format(path))

def process(self, instance):
self.log.info("Extracting capture..")
self.log.debug("Extracting capture..")

# get scene fps
fps = instance.data.get("fps") or instance.context.data.get("fps")
@@ -62,7 +62,7 @@ class ExtractPlayblast(publish.Extractor):
if end is None:
end = cmds.playbackOptions(query=True, animationEndTime=True)

self.log.info("start: {}, end: {}".format(start, end))
self.log.debug("start: {}, end: {}".format(start, end))

# get cameras
camera = instance.data["review_camera"]
@@ -119,7 +119,7 @@ class ExtractPlayblast(publish.Extractor):
filename = "{0}".format(instance.name)
path = os.path.join(stagingdir, filename)

self.log.info("Outputting images to %s" % path)
self.log.debug("Outputting images to %s" % path)

preset["filename"] = path
preset["overwrite"] = True
@@ -237,7 +237,7 @@ class ExtractPlayblast(publish.Extractor):
self.log.debug("collection head {}".format(filebase))
if filebase in filename:
frame_collection = collection
self.log.info(
self.log.debug(
"we found collection of interest {}".format(
str(frame_collection)))

@@ -109,11 +109,11 @@ class ExtractAlembic(publish.Extractor):

instance.context.data["cleanupFullPaths"].append(path)

self.log.info("Extracted {} to {}".format(instance, dirname))
self.log.debug("Extracted {} to {}".format(instance, dirname))

# Extract proxy.
if not instance.data.get("proxy"):
self.log.info("No proxy nodes found. Skipping proxy extraction.")
self.log.debug("No proxy nodes found. Skipping proxy extraction.")
return

path = path.replace(".abc", "_proxy.abc")
@@ -32,7 +32,7 @@ class ExtractProxyAlembic(publish.Extractor):
attr_prefixes = instance.data.get("attrPrefix", "").split(";")
attr_prefixes = [value for value in attr_prefixes if value.strip()]

self.log.info("Extracting Proxy Alembic..")
self.log.debug("Extracting Proxy Alembic..")
dirname = self.staging_dir(instance)

filename = "{name}.abc".format(**instance.data)
@@ -82,7 +82,7 @@ class ExtractProxyAlembic(publish.Extractor):

instance.context.data["cleanupFullPaths"].append(path)

self.log.info("Extracted {} to {}".format(instance, dirname))
self.log.debug("Extracted {} to {}".format(instance, dirname))
# remove the bounding box
bbox_master = cmds.ls("bbox_grp")
cmds.delete(bbox_master)
@@ -59,7 +59,7 @@ class ExtractRedshiftProxy(publish.Extractor):
# vertex_colors = instance.data.get("vertexColors", False)

# Write out rs file
self.log.info("Writing: '%s'" % file_path)
self.log.debug("Writing: '%s'" % file_path)
with maintained_selection():
cmds.select(instance.data["setMembers"], noExpand=True)
cmds.file(file_path,
@@ -82,5 +82,5 @@ class ExtractRedshiftProxy(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance '%s' to: %s"
% (instance.name, staging_dir))
self.log.debug("Extracted instance '%s' to: %s"
% (instance.name, staging_dir))
@@ -37,5 +37,5 @@ class ExtractRenderSetup(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info(
self.log.debug(
"Extracted instance '%s' to: %s" % (instance.name, json_path))
@@ -27,7 +27,7 @@ class ExtractRig(publish.Extractor):
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
self.log.debug(
"Using '.{}' as scene type".format(self.scene_type))
break
except AttributeError:
@@ -39,7 +39,7 @@ class ExtractRig(publish.Extractor):
path = os.path.join(dir_path, filename)

# Perform extraction
self.log.info("Performing extraction ...")
self.log.debug("Performing extraction ...")
with maintained_selection():
cmds.select(instance, noExpand=True)
cmds.file(path,
@@ -63,4 +63,4 @@ class ExtractRig(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
self.log.debug("Extracted instance '%s' to: %s", instance.name, path)
@@ -24,7 +24,7 @@ class ExtractThumbnail(publish.Extractor):
families = ["review"]

def process(self, instance):
self.log.info("Extracting capture..")
self.log.debug("Extracting capture..")

camera = instance.data["review_camera"]

@@ -96,7 +96,7 @@ class ExtractThumbnail(publish.Extractor):
filename = "{0}".format(instance.name)
path = os.path.join(dst_staging, filename)

self.log.info("Outputting images to %s" % path)
self.log.debug("Outputting images to %s" % path)

preset["filename"] = path
preset["overwrite"] = True
@@ -159,7 +159,7 @@ class ExtractThumbnail(publish.Extractor):

_, thumbnail = os.path.split(playblast)

self.log.info("file list {}".format(thumbnail))
self.log.debug("file list {}".format(thumbnail))

if "representations" not in instance.data:
instance.data["representations"] = []
@@ -57,9 +57,9 @@ class ExtractUnrealSkeletalMeshAbc(publish.Extractor):
# to format it into a string in a mel expression
path = path.replace('\\', '/')

self.log.info("Extracting ABC to: {0}".format(path))
self.log.info("Members: {0}".format(nodes))
self.log.info("Instance: {0}".format(instance[:]))
self.log.debug("Extracting ABC to: {0}".format(path))
self.log.debug("Members: {0}".format(nodes))
self.log.debug("Instance: {0}".format(instance[:]))

options = {
"step": instance.data.get("step", 1.0),
@@ -74,7 +74,7 @@ class ExtractUnrealSkeletalMeshAbc(publish.Extractor):
"worldSpace": instance.data.get("worldSpace", True)
}

self.log.info("Options: {}".format(options))
self.log.debug("Options: {}".format(options))

if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
@@ -105,4 +105,4 @@ class ExtractUnrealSkeletalMeshAbc(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extract ABC successful to: {0}".format(path))
self.log.debug("Extract ABC successful to: {0}".format(path))
@@ -46,9 +46,9 @@ class ExtractUnrealSkeletalMeshFbx(publish.Extractor):
# to format it into a string in a mel expression
path = path.replace('\\', '/')

self.log.info("Extracting FBX to: {0}".format(path))
self.log.info("Members: {0}".format(to_extract))
self.log.info("Instance: {0}".format(instance[:]))
self.log.debug("Extracting FBX to: {0}".format(path))
self.log.debug("Members: {0}".format(to_extract))
self.log.debug("Instance: {0}".format(instance[:]))

fbx_exporter.set_options_from_instance(instance)

@@ -70,7 +70,7 @@ class ExtractUnrealSkeletalMeshFbx(publish.Extractor):
renamed_to_extract.append("|".join(node_path))

with renamed(original_parent, parent_node):
self.log.info("Extracting: {}".format(renamed_to_extract, path))
self.log.debug("Extracting: {}".format(renamed_to_extract, path))
fbx_exporter.export(renamed_to_extract, path)

if "representations" not in instance.data:
@@ -84,4 +84,4 @@ class ExtractUnrealSkeletalMeshFbx(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extract FBX successful to: {0}".format(path))
self.log.debug("Extract FBX successful to: {0}".format(path))
@@ -37,15 +37,15 @@ class ExtractUnrealStaticMesh(publish.Extractor):
         # to format it into a string in a mel expression
         path = path.replace('\\', '/')
 
-        self.log.info("Extracting FBX to: {0}".format(path))
-        self.log.info("Members: {0}".format(members))
-        self.log.info("Instance: {0}".format(instance[:]))
+        self.log.debug("Extracting FBX to: {0}".format(path))
+        self.log.debug("Members: {0}".format(members))
+        self.log.debug("Instance: {0}".format(instance[:]))
 
         fbx_exporter.set_options_from_instance(instance)
 
         with maintained_selection():
             with parent_nodes(members):
-                self.log.info("Un-parenting: {}".format(members))
+                self.log.debug("Un-parenting: {}".format(members))
                 fbx_exporter.export(members, path)
 
         if "representations" not in instance.data:
@@ -59,4 +59,4 @@ class ExtractUnrealStaticMesh(publish.Extractor):
         }
         instance.data["representations"].append(representation)
 
-        self.log.info("Extract FBX successful to: {0}".format(path))
+        self.log.debug("Extract FBX successful to: {0}".format(path))

@@ -43,7 +43,7 @@ class ExtractVRayProxy(publish.Extractor):
         vertex_colors = instance.data.get("vertexColors", False)
 
         # Write out vrmesh file
-        self.log.info("Writing: '%s'" % file_path)
+        self.log.debug("Writing: '%s'" % file_path)
         with maintained_selection():
             cmds.select(instance.data["setMembers"], noExpand=True)
             cmds.vrayCreateProxy(exportType=1,
@@ -68,5 +68,5 @@ class ExtractVRayProxy(publish.Extractor):
         }
         instance.data["representations"].append(representation)
 
-        self.log.info("Extracted instance '%s' to: %s"
-                      % (instance.name, staging_dir))
+        self.log.debug("Extracted instance '%s' to: %s"
+                       % (instance.name, staging_dir))

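Many of the extractor hunks in this diff wrap their `cmds.select(...)` calls in `maintained_selection()` so publishing does not clobber the artist's current selection. A rough sketch of how such a context manager can work (an illustrative simplification, not the exact implementation from `openpype.hosts.maya.api.lib`; runnable only inside Maya):

from contextlib import contextmanager

from maya import cmds


@contextmanager
def maintained_selection():
    """Preserve and restore the current Maya selection (sketch)."""
    previous = cmds.ls(selection=True, long=True)
    try:
        yield
    finally:
        if previous:
            # Re-select exactly what the user had selected before.
            cmds.select(previous, replace=True, noExpand=True)
        else:
            cmds.select(clear=True)
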
@@ -20,13 +20,13 @@ class ExtractVrayscene(publish.Extractor):
     def process(self, instance):
         """Plugin entry point."""
         if instance.data.get("exportOnFarm"):
-            self.log.info("vrayscenes will be exported on farm.")
+            self.log.debug("vrayscenes will be exported on farm.")
             raise NotImplementedError(
                 "exporting vrayscenes is not implemented")
 
         # handle sequence
         if instance.data.get("vraySceneMultipleFiles"):
-            self.log.info("vrayscenes will be exported on farm.")
+            self.log.debug("vrayscenes will be exported on farm.")
             raise NotImplementedError(
                 "exporting vrayscene sequences not implemented yet")
 
@@ -40,7 +40,6 @@ class ExtractVrayscene(publish.Extractor):
         layer_name = instance.data.get("layer")
 
         staging_dir = self.staging_dir(instance)
-        self.log.info("staging: {}".format(staging_dir))
         template = cmds.getAttr("{}.vrscene_filename".format(node))
         start_frame = instance.data.get(
             "frameStartHandle") if instance.data.get(
@@ -56,21 +55,21 @@ class ExtractVrayscene(publish.Extractor):
             staging_dir, "vrayscene", *formatted_name.split("/"))
 
         # Write out vrscene file
-        self.log.info("Writing: '%s'" % file_path)
+        self.log.debug("Writing: '%s'" % file_path)
         with maintained_selection():
             if "*" not in instance.data["setMembers"]:
-                self.log.info(
+                self.log.debug(
                     "Exporting: {}".format(instance.data["setMembers"]))
                 set_members = instance.data["setMembers"]
                 cmds.select(set_members, noExpand=True)
             else:
-                self.log.info("Exporting all ...")
+                self.log.debug("Exporting all ...")
                 set_members = cmds.ls(
                     long=True, objectsOnly=True,
                     geometry=True, lights=True, cameras=True)
                 cmds.select(set_members, noExpand=True)
 
-            self.log.info("Appending layer name {}".format(layer_name))
+            self.log.debug("Appending layer name {}".format(layer_name))
             set_members.append(layer_name)
 
             export_in_rs_layer(
@@ -93,8 +92,8 @@ class ExtractVrayscene(publish.Extractor):
         }
         instance.data["representations"].append(representation)
 
-        self.log.info("Extracted instance '%s' to: %s"
-                      % (instance.name, staging_dir))
+        self.log.debug("Extracted instance '%s' to: %s"
+                       % (instance.name, staging_dir))
 
     @staticmethod
     def format_vray_output_filename(

@@ -241,7 +241,7 @@ class ExtractWorkfileXgen(publish.Extractor):
            data[palette] = {attr: old_value}
 
            cmds.setAttr(node_attr, value, type="string")
-           self.log.info(
+           self.log.debug(
                "Setting \"{}\" on \"{}\"".format(value, node_attr)
            )

@@ -77,7 +77,7 @@ class ExtractXgen(publish.Extractor):
         xgenm.exportPalette(
             instance.data["xgmPalette"].replace("|", ""), temp_xgen_path
         )
-        self.log.info("Extracted to {}".format(temp_xgen_path))
+        self.log.debug("Extracted to {}".format(temp_xgen_path))
 
         # Import xgen onto the duplicate.
         with maintained_selection():
@@ -118,7 +118,7 @@ class ExtractXgen(publish.Extractor):
             expressions=True
         )
 
-        self.log.info("Extracted to {}".format(maya_filepath))
+        self.log.debug("Extracted to {}".format(maya_filepath))
 
         if os.path.exists(temp_xgen_path):
             os.remove(temp_xgen_path)

@@ -39,7 +39,7 @@ class ExtractYetiCache(publish.Extractor):
         else:
             kwargs.update({"samples": samples})
 
-        self.log.info(
+        self.log.debug(
            "Writing out cache {} - {}".format(start_frame, end_frame))
        # Start writing the files for snap shot
        # <NAME> will be replace by the Yeti node name
@@ -53,7 +53,7 @@ class ExtractYetiCache(publish.Extractor):
 
         cache_files = [x for x in os.listdir(dirname) if x.endswith(".fur")]
 
-        self.log.info("Writing metadata file")
+        self.log.debug("Writing metadata file")
         settings = instance.data["fursettings"]
         fursettings_path = os.path.join(dirname, "yeti.fursettings")
         with open(fursettings_path, "w") as fp:
@@ -63,7 +63,7 @@ class ExtractYetiCache(publish.Extractor):
         if "representations" not in instance.data:
             instance.data["representations"] = []
 
-        self.log.info("cache files: {}".format(cache_files[0]))
+        self.log.debug("cache files: {}".format(cache_files[0]))
 
         # Workaround: We do not explicitly register these files with the
         # representation solely so that we can write multiple sequences
@@ -87,4 +87,4 @@ class ExtractYetiCache(publish.Extractor):
             }
         )
 
-        self.log.info("Extracted {} to {}".format(instance, dirname))
+        self.log.debug("Extracted {} to {}".format(instance, dirname))

@@ -109,7 +109,7 @@ class ExtractYetiRig(publish.Extractor):
         for family in self.families:
             try:
                 self.scene_type = ext_mapping[family]
-                self.log.info(
+                self.log.debug(
                     "Using {} as scene type".format(self.scene_type))
                 break
             except KeyError:
@@ -127,7 +127,7 @@ class ExtractYetiRig(publish.Extractor):
         maya_path = os.path.join(dirname,
                                  "yeti_rig.{}".format(self.scene_type))
 
-        self.log.info("Writing metadata file")
+        self.log.debug("Writing metadata file: {}".format(settings_path))
 
         image_search_path = resources_dir = instance.data["resourcesDir"]
 
@@ -147,7 +147,7 @@ class ExtractYetiRig(publish.Extractor):
             dst = os.path.join(image_search_path, os.path.basename(file))
             instance.data['transfers'].append([src, dst])
 
-            self.log.info("adding transfer {} -> {}". format(src, dst))
+            self.log.debug("adding transfer {} -> {}". format(src, dst))
 
         # Ensure the imageSearchPath is being remapped to the publish folder
         attr_value = {"%s.imageSearchPath" % n: str(image_search_path) for
@@ -182,7 +182,7 @@ class ExtractYetiRig(publish.Extractor):
         if "representations" not in instance.data:
             instance.data["representations"] = []
 
-        self.log.info("rig file: {}".format(maya_path))
+        self.log.debug("rig file: {}".format(maya_path))
         instance.data["representations"].append(
             {
                 'name': self.scene_type,
@@ -191,7 +191,7 @@ class ExtractYetiRig(publish.Extractor):
                 'stagingDir': dirname
             }
         )
-        self.log.info("settings file: {}".format(settings_path))
+        self.log.debug("settings file: {}".format(settings_path))
         instance.data["representations"].append(
             {
                 'name': 'rigsettings',
@@ -201,6 +201,6 @@ class ExtractYetiRig(publish.Extractor):
             }
         )
 
-        self.log.info("Extracted {} to {}".format(instance, dirname))
+        self.log.debug("Extracted {} to {}".format(instance, dirname))
 
         cmds.select(clear=True)

@@ -23,7 +23,7 @@ class ResetXgenAttributes(pyblish.api.InstancePlugin):
         for palette, data in xgen_attributes.items():
             for attr, value in data.items():
                 node_attr = "{}.{}".format(palette, attr)
-                self.log.info(
+                self.log.debug(
                     "Setting \"{}\" on \"{}\"".format(value, node_attr)
                 )
                 cmds.setAttr(node_attr, value, type="string")
@@ -32,5 +32,5 @@ class ResetXgenAttributes(pyblish.api.InstancePlugin):
         # Need to save the scene, cause the attribute changes above does not
         # mark the scene as modified so user can exit without committing the
         # changes.
-        self.log.info("Saving changes.")
+        self.log.debug("Saving changes.")
         cmds.file(save=True)

@@ -215,9 +215,9 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
         :rtype: int
         :raises: Exception if template ID isn't found
         """
-        self.log.info("Trying to find template for [{}]".format(renderer))
+        self.log.debug("Trying to find template for [{}]".format(renderer))
         mapped = _get_template_id(renderer)
-        self.log.info("got id [{}]".format(mapped))
+        self.log.debug("got id [{}]".format(mapped))
         return self._templates.get(mapped)
 
     def _submit(self, payload):
@@ -249,7 +249,6 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
         Authenticate with Muster, collect all data, prepare path for post
         render publish job and submit job to farm.
         """
-        instance.data["toBeRenderedOn"] = "muster"
         # setup muster environment
         self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
 
@@ -454,8 +453,8 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
 
         self.preflight_check(instance)
 
-        self.log.info("Submitting ...")
-        self.log.info(json.dumps(payload, indent=4, sort_keys=True))
+        self.log.debug("Submitting ...")
+        self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
 
         response = self._submit(payload)
         # response = requests.post(url, json=payload)

@@ -20,7 +20,7 @@ class ValidateAssemblyName(pyblish.api.InstancePlugin):
 
     @classmethod
     def get_invalid(cls, instance):
-        cls.log.info("Checking name of {}".format(instance.name))
+        cls.log.debug("Checking name of {}".format(instance.name))
 
         content_instance = instance.data.get("setMembers", None)
         if not content_instance:

@@ -23,7 +23,7 @@ class ValidateAssemblyNamespaces(pyblish.api.InstancePlugin):
 
     def process(self, instance):
 
-        self.log.info("Checking namespace for %s" % instance.name)
+        self.log.debug("Checking namespace for %s" % instance.name)
         if self.get_invalid(instance):
             raise PublishValidationError("Nested namespaces found")
 

@@ -47,10 +47,10 @@ class ValidateFrameRange(pyblish.api.InstancePlugin,
 
         context = instance.context
         if instance.data.get("tileRendering"):
-            self.log.info((
+            self.log.debug(
                 "Skipping frame range validation because "
                 "tile rendering is enabled."
-            ))
+            )
             return
 
         frame_start_handle = int(context.data.get("frameStartHandle"))

@@ -75,7 +75,7 @@ class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
         """
 
         meshes = cmds.ls(instance, type="mesh", long=True)
-        cls.log.info("meshes: {}".format(meshes))
+        cls.log.debug("meshes: {}".format(meshes))
         # load the glsl shader plugin
         cmds.loadPlugin("glslShader", quiet=True)
 
@@ -96,8 +96,8 @@ class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
             cls.log.warning("ogsfx shader file "
                             "not found in {}".format(ogsfx_path))
 
-            cls.log.info("Find the ogsfx shader file in "
-                         "default maya directory...")
+            cls.log.debug("Searching the ogsfx shader file in "
+                          "default maya directory...")
             # re-direct to search the ogsfx path in maya_dir
             ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path
             if not os.path.exists(ogsfx_path):
@@ -130,8 +130,8 @@ class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
     @classmethod
     def pbs_shader_conversion(cls, main_shader, glsl):
 
-        cls.log.info("StringrayPBS detected "
-                     "-> Can do texture conversion")
+        cls.log.debug("StringrayPBS detected "
+                      "-> Can do texture conversion")
 
         for shader in main_shader:
             # get the file textures related to the PBS Shader
@@ -168,8 +168,8 @@ class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
 
     @classmethod
     def arnold_shader_conversion(cls, main_shader, glsl):
-        cls.log.info("aiStandardSurface detected "
-                     "-> Can do texture conversion")
+        cls.log.debug("aiStandardSurface detected "
+                      "-> Can do texture conversion")
 
         for shader in main_shader:
             # get the file textures related to the PBS Shader

@@ -1,60 +0,0 @@
-from maya import cmds
-
-import pyblish.api
-from openpype.pipeline.publish import (
-    ValidateContentsOrder, PublishValidationError, RepairAction
-)
-from openpype.pipeline import discover_legacy_creator_plugins
-from openpype.hosts.maya.api.lib import imprint
-
-
-class ValidateInstanceAttributes(pyblish.api.InstancePlugin):
-    """Validate Instance Attributes.
-
-    New attributes can be introduced as new features come in. Old instances
-    will need to be updated with these attributes for the documentation to make
-    sense, and users do not have to recreate the instances.
-    """
-
-    order = ValidateContentsOrder
-    hosts = ["maya"]
-    families = ["*"]
-    label = "Instance Attributes"
-    plugins_by_family = {
-        p.family: p for p in discover_legacy_creator_plugins()
-    }
-    actions = [RepairAction]
-
-    @classmethod
-    def get_missing_attributes(self, instance):
-        plugin = self.plugins_by_family[instance.data["family"]]
-        subset = instance.data["subset"]
-        asset = instance.data["asset"]
-        objset = instance.data["objset"]
-
-        missing_attributes = {}
-        for key, value in plugin(subset, asset).data.items():
-            if not cmds.objExists("{}.{}".format(objset, key)):
-                missing_attributes[key] = value
-
-        return missing_attributes
-
-    def process(self, instance):
-        objset = instance.data.get("objset")
-        if objset is None:
-            self.log.debug(
-                "Skipping {} because no objectset found.".format(instance)
-            )
-            return
-
-        missing_attributes = self.get_missing_attributes(instance)
-        if missing_attributes:
-            raise PublishValidationError(
-                "Missing attributes on {}:\n{}".format(
-                    objset, missing_attributes
-                )
-            )
-
-    @classmethod
-    def repair(cls, instance):
-        imprint(instance.data["objset"], cls.get_missing_attributes(instance))

@@ -3,94 +3,19 @@
 from __future__ import absolute_import
 
 import pyblish.api
+import openpype.hosts.maya.api.action
 from openpype.pipeline.publish import (
-    ValidateContentsOrder, PublishValidationError
+    RepairAction,
+    ValidateContentsOrder,
+    PublishValidationError,
+    OptionalPyblishPluginMixin
 )
 
 from maya import cmds
 
 
-class SelectInvalidInstances(pyblish.api.Action):
-    """Select invalid instances in Outliner."""
-
-    label = "Select Instances"
-    icon = "briefcase"
-    on = "failed"
-
-    def process(self, context, plugin):
-        """Process invalid validators and select invalid instances."""
-        # Get the errored instances
-        failed = []
-        for result in context.data["results"]:
-            if (
-                result["error"] is None
-                or result["instance"] is None
-                or result["instance"] in failed
-                or result["plugin"] != plugin
-            ):
-                continue
-
-            failed.append(result["instance"])
-
-        # Apply pyblish.logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(failed, plugin)
-
-        if instances:
-            self.log.info(
-                "Selecting invalid nodes: %s" % ", ".join(
-                    [str(x) for x in instances]
-                )
-            )
-            self.select(instances)
-        else:
-            self.log.info("No invalid nodes found.")
-            self.deselect()
-
-    def select(self, instances):
-        cmds.select(instances, replace=True, noExpand=True)
-
-    def deselect(self):
-        cmds.select(deselect=True)
-
-
-class RepairSelectInvalidInstances(pyblish.api.Action):
-    """Repair the instance asset."""
-
-    label = "Repair"
-    icon = "wrench"
-    on = "failed"
-
-    def process(self, context, plugin):
-        # Get the errored instances
-        failed = []
-        for result in context.data["results"]:
-            if result["error"] is None:
-                continue
-            if result["instance"] is None:
-                continue
-            if result["instance"] in failed:
-                continue
-            if result["plugin"] != plugin:
-                continue
-
-            failed.append(result["instance"])
-
-        # Apply pyblish.logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(failed, plugin)
-
-        context_asset = context.data["assetEntity"]["name"]
-        for instance in instances:
-            self.set_attribute(instance, context_asset)
-
-    def set_attribute(self, instance, context_asset):
-        cmds.setAttr(
-            instance.data.get("name") + ".asset",
-            context_asset,
-            type="string"
-        )
-
-
-class ValidateInstanceInContext(pyblish.api.InstancePlugin):
+class ValidateInstanceInContext(pyblish.api.InstancePlugin,
+                                OptionalPyblishPluginMixin):
     """Validator to check if instance asset match context asset.
 
     When working in per-shot style you always publish data in context of
@@ -104,11 +29,49 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin):
     label = "Instance in same Context"
     optional = True
     hosts = ["maya"]
-    actions = [SelectInvalidInstances, RepairSelectInvalidInstances]
+    actions = [
+        openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction
+    ]
 
     def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
         asset = instance.data.get("asset")
-        context_asset = instance.context.data["assetEntity"]["name"]
-        msg = "{} has asset {}".format(instance.name, asset)
+        context_asset = self.get_context_asset(instance)
         if asset != context_asset:
-            raise PublishValidationError(msg)
+            raise PublishValidationError(
+                message=(
+                    "Instance '{}' publishes to different asset than current "
+                    "context: {}. Current context: {}".format(
+                        instance.name, asset, context_asset
+                    )
+                ),
+                description=(
+                    "## Publishing to a different asset\n"
+                    "There are publish instances present which are publishing "
+                    "into a different asset than your current context.\n\n"
+                    "Usually this is not what you want but there can be cases "
+                    "where you might want to publish into another asset or "
+                    "shot. If that's the case you can disable the validation "
+                    "on the instance to ignore it."
+                )
+            )
+
+    @classmethod
+    def get_invalid(cls, instance):
+        return [instance.data["instance_node"]]
+
+    @classmethod
+    def repair(cls, instance):
+        context_asset = cls.get_context_asset(instance)
+        instance_node = instance.data["instance_node"]
+        cmds.setAttr(
+            "{}.asset".format(instance_node),
+            context_asset,
+            type="string"
+        )
+
+    @staticmethod
+    def get_context_asset(instance):
+        return instance.context.data["assetEntity"]["name"]

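The ValidateInstanceInContext rewrite above is the one behavioural change in this batch: the validator becomes optional via `OptionalPyblishPluginMixin` and gains `get_invalid`/`repair` classmethods so the generic `SelectInvalidAction` and `RepairAction` can replace the two hand-rolled actions that were deleted. The core of the optional-plugin pattern is the `is_active()` early-out at the top of `process()`. A simplified stand-in for the mixin follows; the real one lives in `openpype.pipeline.publish` and reads the instance's collected publish attributes, so the `"active"` lookup below is an assumption for illustration only:

class OptionalPluginSketch(object):
    """Illustrative stand-in for OptionalPyblishPluginMixin (assumption)."""

    # Plugins opt in by declaring themselves optional; enabled is the default.
    optional = True
    enabled = True

    @classmethod
    def is_active(cls, data):
        if not cls.optional:
            # Non-optional plugins always run.
            return True
        # Optional plugins can be toggled per instance by the artist
        # (hypothetical "active" key, standing in for publish attributes).
        return data.get("active", cls.enabled)


# Usage mirrors the diff: bail out early before doing any validation work.
if not OptionalPluginSketch.is_active({"active": False}):
    pass  # validation skipped for this instance
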
Some files were not shown because too many files have changed in this diff.