[Automated] Merged develop into main

This commit is contained in:
pypebot 2022-09-17 06:03:21 +02:00 committed by GitHub
commit 120a7ebc18
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
79 changed files with 1465 additions and 406 deletions

View file

@ -6,6 +6,8 @@ labels: bug
assignees: '' assignees: ''
--- ---
**Running version**
[ex. 3.14.1-nightly.2]
**Describe the bug** **Describe the bug**
A clear and concise description of what the bug is. A clear and concise description of what the bug is.

View file

@ -0,0 +1,114 @@
from bson.objectid import ObjectId
import pyblish.api
from openpype.pipeline import registered_host
def collect_input_containers(tools):
    """Collect loaded containers that hold any of the given tools.

    Any Avalon container whose tool is one of `tools` is considered an
    input for the current publish.

    Args:
        tools (list): Fusion tools to match containers against.

    Returns:
        list: Input avalon containers
    """
    # Fast membership tests by (unique) tool name.
    tool_names = frozenset(tool.Name for tool in tools)

    host = registered_host()
    # We currently assume no "groups" as containers but just single tools
    # like a single "Loader" operator. As such we just check whether the
    # Loader is part of the processing queue.
    return [
        container for container in host.ls()
        if container["_tool"].Name in tool_names
    ]
def iter_upstream(tool):
    """Yield every tool found upstream of `tool`.

    Traversal starts at the tool's direct inputs and walks the dependency
    graph from there.

    Yields:
        tool: The input tools.
    """

    def _inputs_of(node):
        """Return the tools connected to `node`'s relevant inputs."""
        # Filter only to actual types that will have sensible upstream
        # connections. So we ignore just "Number" inputs as they can be
        # many to iterate, slowing things down quite a bit - and in practice
        # they don't have upstream connections.
        relevant_types = ('Image', 'Particles', 'Mask', 'DataType3D')
        connected = []
        for input_type in relevant_types:
            for node_input in node.GetInputList(input_type).values():
                connected_output = node_input.GetConnectedOutput()
                if connected_output:
                    connected.append(connected_output.GetTool())
        return connected

    # Seed the traversal with the tool's own inputs.
    pending = _inputs_of(tool)

    # Track processed tool names rather than tool objects: comparing the
    # tool wrappers directly does not reliably recognize the same tool,
    # while tool names are unique within a Fusion comp.
    seen = set(node.Name for node in pending)

    # Walk the graph, yielding each upstream tool as it is processed.
    while pending:
        current = pending.pop()
        yield current

        # Queue only inputs that have not been traversed yet.
        fresh = [node for node in _inputs_of(current)
                 if node.Name not in seen]
        seen.update(node.Name for node in fresh)
        pending.extend(fresh)
class CollectUpstreamInputs(pyblish.api.InstancePlugin):
    """Collect source input containers used for this publish.

    This will include `inputs` data of which loaded publishes were used in the
    generation of this publish. This leaves an upstream trace to what was used
    as input.
    """

    label = "Collect Inputs"
    order = pyblish.api.CollectorOrder + 0.2
    hosts = ["fusion"]

    def process(self, instance):
        # The published tool itself plus everything upstream of it.
        tool = instance[0]
        nodes = list(iter_upstream(tool)) + [tool]

        # Map the nodes to the loaded containers they belong to and keep
        # the representation ids those containers originate from.
        containers = collect_input_containers(nodes)

        inputs = []
        for container in containers:
            inputs.append(ObjectId(container["representation"]))

        instance.data["inputRepresentations"] = inputs
        self.log.info("Collected inputs: %s" % inputs)

View file

@ -2,10 +2,11 @@
import os import os
import json import json
import pyblish.api import pyblish.api
import openpype
from openpype.pipeline import publish
class ExtractClipEffects(openpype.api.Extractor): class ExtractClipEffects(publish.Extractor):
"""Extract clip effects instances.""" """Extract clip effects instances."""
order = pyblish.api.ExtractorOrder order = pyblish.api.ExtractorOrder

View file

@ -1,9 +1,14 @@
import os import os
import pyblish.api import pyblish.api
import openpype
from openpype.lib import (
get_oiio_tools_path,
run_subprocess,
)
from openpype.pipeline import publish
class ExtractFrames(openpype.api.Extractor): class ExtractFrames(publish.Extractor):
"""Extracts frames""" """Extracts frames"""
order = pyblish.api.ExtractorOrder order = pyblish.api.ExtractorOrder
@ -13,7 +18,7 @@ class ExtractFrames(openpype.api.Extractor):
movie_extensions = ["mov", "mp4"] movie_extensions = ["mov", "mp4"]
def process(self, instance): def process(self, instance):
oiio_tool_path = openpype.lib.get_oiio_tools_path() oiio_tool_path = get_oiio_tools_path()
staging_dir = self.staging_dir(instance) staging_dir = self.staging_dir(instance)
output_template = os.path.join(staging_dir, instance.data["name"]) output_template = os.path.join(staging_dir, instance.data["name"])
sequence = instance.context.data["activeTimeline"] sequence = instance.context.data["activeTimeline"]
@ -43,7 +48,7 @@ class ExtractFrames(openpype.api.Extractor):
args.extend(["--powc", "0.45,0.45,0.45,1.0"]) args.extend(["--powc", "0.45,0.45,0.45,1.0"])
args.extend([input_path, "-o", output_path]) args.extend([input_path, "-o", output_path])
output = openpype.api.run_subprocess(args) output = run_subprocess(args)
failed_output = "oiiotool produced no output." failed_output = "oiiotool produced no output."
if failed_output in output: if failed_output in output:

View file

@ -1,9 +1,10 @@
import os import os
import pyblish.api import pyblish.api
import openpype.api
from openpype.pipeline import publish
class ExtractThumnail(openpype.api.Extractor): class ExtractThumnail(publish.Extractor):
""" """
Extractor for track item's tumnails Extractor for track item's tumnails
""" """

View file

@ -14,7 +14,7 @@ from openpype.pipeline import (
) )
from openpype.pipeline.load import any_outdated_containers from openpype.pipeline.load import any_outdated_containers
from openpype.hosts.houdini import HOUDINI_HOST_DIR from openpype.hosts.houdini import HOUDINI_HOST_DIR
from openpype.hosts.houdini.api import lib from openpype.hosts.houdini.api import lib, shelves
from openpype.lib import ( from openpype.lib import (
register_event_callback, register_event_callback,
@ -73,6 +73,7 @@ def install():
# so it initializes into the correct scene FPS, Frame Range, etc. # so it initializes into the correct scene FPS, Frame Range, etc.
# todo: make sure this doesn't trigger when opening with last workfile # todo: make sure this doesn't trigger when opening with last workfile
_set_context_settings() _set_context_settings()
shelves.generate_shelves()
def uninstall(): def uninstall():

View file

@ -0,0 +1,204 @@
import os
import logging
import platform
import six
from openpype.settings import get_project_settings
import hou
log = logging.getLogger("openpype.hosts.houdini.shelves")
if six.PY2:
FileNotFoundError = IOError
def generate_shelves():
    """Generate shelves, from shelf set down to tools, in Houdini.

    Definitions come from the OpenPype project settings
    (``houdini/shelves``). Entries with missing or incomplete data are
    skipped with a log message instead of aborting the remaining
    definitions (the original per-item ``return`` statements silently
    dropped every definition after the first bad one).

    Raises:
        FileNotFoundError: Raised when a configured shelf set source
            filepath does not exist.
    """
    current_os = platform.system().lower()

    # Load configuration of houdini shelves from the project settings.
    project_settings = get_project_settings(os.getenv("AVALON_PROJECT"))
    shelves_set_config = project_settings["houdini"]["shelves"]

    if not shelves_set_config:
        log.debug("No custom shelves found in project settings.")
        return

    for shelf_set_config in shelves_set_config:
        shelf_set_filepath = shelf_set_config.get('shelf_set_source_path')
        if shelf_set_filepath[current_os]:
            # A source file is provided for this platform: load the whole
            # shelf set from it and skip the inline definition below.
            if not os.path.isfile(shelf_set_filepath[current_os]):
                raise FileNotFoundError(
                    "This path doesn't exist - {}".format(
                        shelf_set_filepath[current_os]
                    )
                )
            hou.shelves.newShelfSet(file_path=shelf_set_filepath[current_os])
            continue

        shelf_set_name = shelf_set_config.get('shelf_set_name')
        if not shelf_set_name:
            log.warning("No name found in shelf set definition.")
            # Skip only this entry; keep processing the other shelf sets.
            continue

        shelf_set = get_or_create_shelf_set(shelf_set_name)

        shelves_definition = shelf_set_config.get('shelf_definition')
        if not shelves_definition:
            log.debug(
                "No shelf definition found for shelf set named '{}'".format(
                    shelf_set_name
                )
            )
            continue

        for shelf_definition in shelves_definition:
            shelf_name = shelf_definition.get('shelf_name')
            if not shelf_name:
                log.warning("No name found in shelf definition.")
                continue

            shelf = get_or_create_shelf(shelf_name)

            tools_list = shelf_definition.get('tools_list')
            if not tools_list:
                log.debug(
                    "No tool definition found for shelf named {}".format(
                        shelf_name
                    )
                )
                continue

            mandatory_attributes = {'name', 'script'}
            for tool_definition in tools_list:
                # We verify that the name and script attributes of the
                # tool are set. Use .get() so a missing key is treated
                # like an empty value instead of raising a KeyError.
                if not all(
                    tool_definition.get(key) for key in mandatory_attributes
                ):
                    log.warning(
                        "You need to specify at least the name and "
                        "the script path of the tool."
                    )
                    continue

                tool = get_or_create_tool(tool_definition, shelf)
                if not tool:
                    continue

                # Add the tool to the shelf if not already in it.
                if tool not in shelf.tools():
                    shelf.setTools(list(shelf.tools()) + [tool])

            # Add the shelf in the shelf set if not already in it.
            if shelf not in shelf_set.shelves():
                shelf_set.setShelves(shelf_set.shelves() + (shelf,))
def get_or_create_shelf_set(shelf_set_label):
    """Return the shelf set with the given label, creating it if missing.

    Arguments:
        shelf_set_label (str): The label of the shelf set

    Returns:
        hou.ShelfSet: The shelf set existing or the new one
    """
    # Reuse an existing shelf set with a matching label when available.
    for candidate in hou.shelves.shelfSets().values():
        if candidate.label() == shelf_set_label:
            return candidate

    # None found: derive an internal name from the label and create one.
    return hou.shelves.newShelfSet(
        name=shelf_set_label.replace(' ', '_').lower(),
        label=shelf_set_label
    )
def get_or_create_shelf(shelf_label):
    """Return the shelf with the given label, creating it if missing.

    Arguments:
        shelf_label (str): The label of the shelf

    Returns:
        hou.Shelf: The shelf existing or the new one
    """
    # Reuse an existing shelf with a matching label when available.
    for candidate in hou.shelves.shelves().values():
        if candidate.label() == shelf_label:
            return candidate

    # None found: derive an internal name from the label and create one.
    return hou.shelves.newShelf(
        name=shelf_label.replace(' ', '_').lower(),
        label=shelf_label
    )
def get_or_create_tool(tool_definition, shelf):
    """Return the tool matching the definition, updating or creating it.

    If a tool with the same label already exists on the shelf, it is
    updated in place; otherwise a new tool is created from the
    definition's script file.

    Arguments:
        tool_definition (dict): Dict with name, label, script, icon and
            help. The dict is NOT modified (the previous implementation
            popped keys and replaced 'script' in the caller's dict).
        shelf (hou.Shelf): The parent shelf of the tool

    Returns:
        hou.Tool: The tool updated or the new one, or None when the
            script path does not exist.
    """
    # Work on a copy so the caller's definition stays untouched.
    tool_definition = dict(tool_definition)

    existing_tools = shelf.tools()
    tool_label = tool_definition.get('label')

    existing_tool = [
        tool for tool in existing_tools if tool.label() == tool_label
    ]
    if existing_tool:
        # 'name' and 'label' are not valid setData() keyword arguments.
        tool_definition.pop('name', None)
        tool_definition.pop('label', None)
        existing_tool[0].setData(**tool_definition)
        return existing_tool[0]

    tool_name = tool_label.replace(' ', '_').lower()

    if not os.path.exists(tool_definition['script']):
        log.warning(
            "This path doesn't exist - {}".format(
                tool_definition['script']
            )
        )
        return

    # NOTE(review): only this new-tool path reads the script file's
    # contents; the update path above passes the raw path to setData().
    # Confirm that asymmetry is intended.
    with open(tool_definition['script']) as f:
        script = f.read()
    tool_definition.update({'script': script})

    return hou.shelves.newTool(name=tool_name, **tool_definition)

View file

@ -1,3 +1,5 @@
from bson.objectid import ObjectId
import pyblish.api import pyblish.api
from openpype.pipeline import registered_host from openpype.pipeline import registered_host
@ -115,7 +117,7 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
# Collect containers for the given set of nodes # Collect containers for the given set of nodes
containers = collect_input_containers(nodes) containers = collect_input_containers(nodes)
inputs = [c["representation"] for c in containers] inputs = [ObjectId(c["representation"]) for c in containers]
instance.data["inputs"] = inputs instance.data["inputRepresentations"] = inputs
self.log.info("Collected inputs: %s" % inputs) self.log.info("Collected inputs: %s" % inputs)

View file

@ -2,10 +2,9 @@ import pyblish.api
from openpype.lib import version_up from openpype.lib import version_up
from openpype.pipeline import registered_host from openpype.pipeline import registered_host
from openpype.pipeline.publish import get_errored_plugins_from_context
class IncrementCurrentFile(pyblish.api.InstancePlugin): class IncrementCurrentFile(pyblish.api.ContextPlugin):
"""Increment the current file. """Increment the current file.
Saves the current scene with an increased version number. Saves the current scene with an increased version number.
@ -15,30 +14,10 @@ class IncrementCurrentFile(pyblish.api.InstancePlugin):
label = "Increment current file" label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0 order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"] hosts = ["houdini"]
families = ["colorbleed.usdrender", "redshift_rop"] families = ["workfile"]
targets = ["local"] optional = True
def process(self, instance): def process(self, context):
# This should be a ContextPlugin, but this is a workaround
# for a bug in pyblish to run once for a family: issue #250
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
context = instance.context
errored_plugins = get_errored_plugins_from_context(context)
if any(
plugin.__name__ == "HoudiniSubmitPublishDeadline"
for plugin in errored_plugins
):
raise RuntimeError(
"Skipping incrementing current file because "
"submission to deadline failed."
)
# Filename must not have changed since collecting # Filename must not have changed since collecting
host = registered_host() host = registered_host()

View file

@ -1,35 +0,0 @@
import pyblish.api
import hou
from openpype.lib import version_up
from openpype.pipeline.publish import get_errored_plugins_from_context
class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin):
    """Increment the current file.

    Saves the current scene with an increased version number.

    """

    label = "Increment current file"
    order = pyblish.api.IntegratorOrder + 9.0
    hosts = ["houdini"]
    targets = ["deadline"]

    def process(self, context):
        # Do not bump the version when the Deadline submission plug-in
        # errored; publishing effectively failed in that case.
        errored_plugins = get_errored_plugins_from_context(context)
        if any(
            plugin.__name__ == "HoudiniSubmitPublishDeadline"
            for plugin in errored_plugins
        ):
            raise RuntimeError(
                "Skipping incrementing current file because "
                "submission to deadline failed."
            )

        # Save the scene under the next version number, keeping the new
        # file in Houdini's recent-files list.
        current_filepath = context.data["currentFile"]
        new_filepath = version_up(current_filepath)

        hou.hipFile.save(file_name=new_filepath, save_to_recent_files=True)

View file

@ -2483,7 +2483,7 @@ def load_capture_preset(data=None):
# DISPLAY OPTIONS # DISPLAY OPTIONS
id = 'Display Options' id = 'Display Options'
disp_options = {} disp_options = {}
for key in preset['Display Options']: for key in preset[id]:
if key.startswith('background'): if key.startswith('background'):
disp_options[key] = preset['Display Options'][key] disp_options[key] = preset['Display Options'][key]
if len(disp_options[key]) == 4: if len(disp_options[key]) == 4:

View file

@ -348,3 +348,71 @@ def get_attr_overrides(node_attr, layer,
break break
return reversed(plug_overrides) return reversed(plug_overrides)
def get_shader_in_layer(node, layer):
    """Return the assigned shader in a renderlayer without switching layers.

    This has been developed and tested for Legacy Renderlayers and *not* for
    Render Setup.

    Note: This will also return the shader for any face assignments, however
        it will *not* return the components they are assigned to. This could
        be implemented, but since Maya's renderlayers are famous for breaking
        with face assignments there has been no need for this function to
        support that.

    Returns:
        list: The list of assigned shaders in the given layer.

    """

    def _get_connected_shader(plug):
        """Return current shader"""
        return cmds.listConnections(plug,
                                    source=False,
                                    destination=True,
                                    plugs=False,
                                    connections=False,
                                    type="shadingEngine") or []

    # We check the instObjGroups (shader connection) for layer overrides.
    plug = node + ".instObjGroups"

    # Ignore complex query if we're in the layer anyway (optimization)
    current_layer = cmds.editRenderLayerGlobals(query=True,
                                                currentRenderLayer=True)
    if layer == current_layer:
        return _get_connected_shader(plug)

    connections = cmds.listConnections(plug,
                                       plugs=True,
                                       source=False,
                                       destination=True,
                                       type="renderLayer") or []
    # Materialize the filtered result: on Python 3, `filter()` returns a
    # lazy iterator that is always truthy, so the emptiness check below
    # would never trigger (and the result could only be iterated once).
    connections = [c for c in connections if c.endswith(".outPlug")]
    if not connections:
        # If no overrides anywhere on the shader, just get the current shader
        return _get_connected_shader(plug)

    def _get_override(connections, layer):
        """Return the overridden connection for that layer in connections"""
        # If there's an override on that layer, return that.
        for connection in connections:
            if (connection.startswith(layer + ".outAdjustments") and
                    connection.endswith(".outPlug")):
                # This is a shader override on that layer so get the shader
                # connected to .outValue of the .outAdjustment[i]
                out_adjustment = connection.rsplit(".", 1)[0]
                connection_attr = out_adjustment + ".outValue"
                override = cmds.listConnections(connection_attr) or []
                return override

    override_shader = _get_override(connections, layer)
    if override_shader is not None:
        return override_shader
    else:
        # Get the override for "defaultRenderLayer" (=masterLayer)
        return _get_override(connections, layer="defaultRenderLayer")

View file

@ -16,6 +16,7 @@ from openpype.host import (
HostDirmap, HostDirmap,
) )
from openpype.tools.utils import host_tools from openpype.tools.utils import host_tools
from openpype.tools.workfiles.lock_dialog import WorkfileLockDialog
from openpype.lib import ( from openpype.lib import (
register_event_callback, register_event_callback,
emit_event emit_event
@ -31,6 +32,12 @@ from openpype.pipeline import (
AVALON_CONTAINER_ID, AVALON_CONTAINER_ID,
) )
from openpype.pipeline.load import any_outdated_containers from openpype.pipeline.load import any_outdated_containers
from openpype.pipeline.workfile.lock_workfile import (
create_workfile_lock,
remove_workfile_lock,
is_workfile_locked,
is_workfile_lock_enabled
)
from openpype.hosts.maya import MAYA_ROOT_DIR from openpype.hosts.maya import MAYA_ROOT_DIR
from openpype.hosts.maya.lib import create_workspace_mel from openpype.hosts.maya.lib import create_workspace_mel
@ -99,8 +106,13 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
register_event_callback("open", on_open) register_event_callback("open", on_open)
register_event_callback("new", on_new) register_event_callback("new", on_new)
register_event_callback("before.save", on_before_save) register_event_callback("before.save", on_before_save)
register_event_callback("after.save", on_after_save)
register_event_callback("before.close", on_before_close)
register_event_callback("before.file.open", before_file_open)
register_event_callback("taskChanged", on_task_changed) register_event_callback("taskChanged", on_task_changed)
register_event_callback("workfile.open.before", before_workfile_open)
register_event_callback("workfile.save.before", before_workfile_save) register_event_callback("workfile.save.before", before_workfile_save)
register_event_callback("workfile.save.before", after_workfile_save)
def open_workfile(self, filepath): def open_workfile(self, filepath):
return open_file(filepath) return open_file(filepath)
@ -143,6 +155,13 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save
) )
self._op_events[_after_scene_save] = (
OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kAfterSave,
_after_scene_save
)
)
self._op_events[_before_scene_save] = ( self._op_events[_before_scene_save] = (
OpenMaya.MSceneMessage.addCheckCallback( OpenMaya.MSceneMessage.addCheckCallback(
OpenMaya.MSceneMessage.kBeforeSaveCheck, OpenMaya.MSceneMessage.kBeforeSaveCheck,
@ -161,15 +180,35 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
) )
) )
self._op_events[_on_scene_open] = OpenMaya.MSceneMessage.addCallback( self._op_events[_on_scene_open] = (
OpenMaya.MSceneMessage.kAfterOpen, _on_scene_open OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kAfterOpen,
_on_scene_open
)
)
self._op_events[_before_scene_open] = (
OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kBeforeOpen,
_before_scene_open
)
)
self._op_events[_before_close_maya] = (
OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kMayaExiting,
_before_close_maya
)
) )
self.log.info("Installed event handler _on_scene_save..") self.log.info("Installed event handler _on_scene_save..")
self.log.info("Installed event handler _before_scene_save..") self.log.info("Installed event handler _before_scene_save..")
self.log.info("Installed event handler _on_after_save..")
self.log.info("Installed event handler _on_scene_new..") self.log.info("Installed event handler _on_scene_new..")
self.log.info("Installed event handler _on_maya_initialized..") self.log.info("Installed event handler _on_maya_initialized..")
self.log.info("Installed event handler _on_scene_open..") self.log.info("Installed event handler _on_scene_open..")
self.log.info("Installed event handler _check_lock_file..")
self.log.info("Installed event handler _before_close_maya..")
def _set_project(): def _set_project():
@ -208,6 +247,10 @@ def _on_scene_new(*args):
emit_event("new") emit_event("new")
def _after_scene_save(*arg):
emit_event("after.save")
def _on_scene_save(*args): def _on_scene_save(*args):
emit_event("save") emit_event("save")
@ -216,6 +259,14 @@ def _on_scene_open(*args):
emit_event("open") emit_event("open")
def _before_close_maya(*args):
emit_event("before.close")
def _before_scene_open(*args):
emit_event("before.file.open")
def _before_scene_save(return_code, client_data): def _before_scene_save(return_code, client_data):
# Default to allowing the action. Registered # Default to allowing the action. Registered
@ -229,6 +280,23 @@ def _before_scene_save(return_code, client_data):
) )
def _remove_workfile_lock():
    """Remove workfile lock on current file"""
    # No-op when workfile locking is disabled for this session.
    if not handle_workfile_locks():
        return

    current_filepath = current_file()
    log.info("Removing lock on current file {}...".format(current_filepath))
    if current_filepath:
        remove_workfile_lock(current_filepath)
def handle_workfile_locks():
    """Return whether workfile locking should be handled in this session."""
    # Never handle lock files in headless/batch sessions.
    if lib.IS_HEADLESS:
        return False
    return is_workfile_lock_enabled(MayaHost.name, legacy_io.active_project())
def uninstall(): def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH) pyblish.api.deregister_plugin_path(PUBLISH_PATH)
pyblish.api.deregister_host("mayabatch") pyblish.api.deregister_host("mayabatch")
@ -426,6 +494,46 @@ def on_before_save():
return lib.validate_fps() return lib.validate_fps()
def on_after_save():
    """Check if there is a lockfile after save"""
    # Re-validate / create the lock for the file that was just saved.
    check_lock_on_current_file()
def check_lock_on_current_file():
    """Check if there is a user opening the file"""
    # No-op when workfile locking is disabled for this session.
    if not handle_workfile_locks():
        return
    log.info("Running callback on checking the lock file...")

    # add the lock file when opening the file
    filepath = current_file()
    if is_workfile_locked(filepath):
        # Someone else holds the lock: ask the user how to proceed.
        # add lockfile dialog
        workfile_dialog = WorkfileLockDialog(filepath)
        if not workfile_dialog.exec_():
            # User backed out of the dialog: open an empty scene instead
            # and do not take over the lock.
            cmds.file(new=True)
            return
    create_workfile_lock(filepath)
def on_before_close():
    """Delete the lock file after user quitting the Maya Scene"""
    log.info("Closing Maya...")

    # Release the lock held on the scene that is being closed.
    filepath = current_file()
    if not handle_workfile_locks():
        return
    remove_workfile_lock(filepath)
def before_file_open():
    """check lock file when the file changed"""
    # delete the lock file
    # Release the lock of the scene we are navigating away from.
    _remove_workfile_lock()
def on_save(): def on_save():
"""Automatically add IDs to new nodes """Automatically add IDs to new nodes
@ -434,6 +542,8 @@ def on_save():
""" """
log.info("Running callback on save..") log.info("Running callback on save..")
# remove lockfile if users jumps over from one scene to another
_remove_workfile_lock()
# # Update current task for the current scene # # Update current task for the current scene
# update_task_from_path(cmds.file(query=True, sceneName=True)) # update_task_from_path(cmds.file(query=True, sceneName=True))
@ -491,6 +601,9 @@ def on_open():
dialog.on_clicked.connect(_on_show_inventory) dialog.on_clicked.connect(_on_show_inventory)
dialog.show() dialog.show()
# create lock file for the maya scene
check_lock_on_current_file()
def on_new(): def on_new():
"""Set project resolution and fps when create a new file""" """Set project resolution and fps when create a new file"""
@ -506,6 +619,7 @@ def on_new():
"from openpype.hosts.maya.api import lib;" "from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_change_observer()") "lib.add_render_layer_change_observer()")
lib.set_context_settings() lib.set_context_settings()
_remove_workfile_lock()
def on_task_changed(): def on_task_changed():
@ -544,13 +658,28 @@ def on_task_changed():
) )
def before_workfile_open():
    # Release the lock on the current scene before another workfile is
    # opened through the Workfiles tool.
    if handle_workfile_locks():
        _remove_workfile_lock()
def before_workfile_save(event): def before_workfile_save(event):
project_name = legacy_io.active_project() project_name = legacy_io.active_project()
if handle_workfile_locks():
_remove_workfile_lock()
workdir_path = event["workdir_path"] workdir_path = event["workdir_path"]
if workdir_path: if workdir_path:
create_workspace_mel(workdir_path, project_name) create_workspace_mel(workdir_path, project_name)
def after_workfile_save(event):
    """Lock the freshly saved workfile unless it is already locked."""
    saved_filename = event["filename"]
    if (handle_workfile_locks()
            and saved_filename
            and not is_workfile_locked(saved_filename)):
        create_workfile_lock(saved_filename)
class MayaDirmap(HostDirmap): class MayaDirmap(HostDirmap):
def on_enable_dirmap(self): def on_enable_dirmap(self):
cmds.dirmap(en=True) cmds.dirmap(en=True)

View file

@ -0,0 +1,215 @@
import copy
from bson.objectid import ObjectId
from maya import cmds
import maya.api.OpenMaya as om
import pyblish.api
from openpype.pipeline import registered_host
from openpype.hosts.maya.api.lib import get_container_members
from openpype.hosts.maya.api.lib_rendersetup import get_shader_in_layer
def iter_history(nodes,
                 filter=om.MFn.kInvalid,
                 direction=om.MItDependencyGraph.kUpstream):
    """Iterate unique upstream history for list of nodes.

    This acts as a replacement to maya.cmds.listHistory.
    It's faster by about 2x-3x. It returns less than
    maya.cmds.listHistory as it excludes the input nodes
    from the output (unless an input node was history
    for another input node). It also excludes duplicates.

    Args:
        nodes (list): Maya node names to start search from.
        filter (om.MFn.Type): Filter to only specific types.
            e.g. to dag nodes using om.MFn.kDagNode
        direction (om.MItDependencyGraph.Direction): Direction to traverse in.
            Defaults to upstream.

    Yields:
        str: Node names in upstream history.

    """
    if not nodes:
        return

    sel = om.MSelectionList()
    for node in nodes:
        sel.add(node)

    it = om.MItDependencyGraph(sel.getDependNode(0))  # init iterator

    handle = om.MObjectHandle
    # Hash codes of MObjects already visited across all start nodes.
    traversed = set()

    # Reused function sets to avoid re-allocating one per visited node.
    fn_dep = om.MFnDependencyNode()
    fn_dag = om.MFnDagNode()

    for i in range(sel.length()):
        start_node = sel.getDependNode(i)
        start_node_hash = handle(start_node).hashCode()
        if start_node_hash in traversed:
            # Already seen as history of an earlier start node.
            continue

        # Re-aim the single iterator at the next start node.
        it.resetTo(start_node,
                   filter=filter,
                   direction=direction)
        while not it.isDone():
            node = it.currentNode()
            node_hash = handle(node).hashCode()

            if node_hash in traversed:
                # This subtree was fully walked before; skip the branch.
                it.prune()
                it.next()  # noqa: B305
                continue

            traversed.add(node_hash)

            # DAG nodes are yielded with their full (unambiguous) path.
            if node.hasFn(om.MFn.kDagNode):
                fn_dag.setObject(node)
                yield fn_dag.fullPathName()
            else:
                fn_dep.setObject(node)
                yield fn_dep.name()

            it.next()  # noqa: B305
def collect_input_containers(containers, nodes):
    """Collect containers that contain any of the nodes in `nodes`.

    Any loaded Avalon container holding at least one of the given nodes
    is an input for the publish: there are member nodes of that container.

    Returns:
        list: Input avalon containers

    """
    # Assume the containers have collected their cached '_members' data
    # in the collector.
    node_set = set(nodes)
    matched = []
    for container in containers:
        if node_set.intersection(container["_members"]):
            matched.append(container)
    return matched
class CollectUpstreamInputs(pyblish.api.InstancePlugin):
    """Collect input source inputs for this publish.

    This will include `inputs` data of which loaded publishes were used in the
    generation of this publish. This leaves an upstream trace to what was used
    as input.

    """

    label = "Collect Inputs"
    order = pyblish.api.CollectorOrder + 0.34
    hosts = ["maya"]

    def process(self, instance):
        # For large scenes the querying of "host.ls()" can be relatively slow
        # e.g. up to a second. Many instances calling it easily slows this
        # down. As such, we cache it so we trigger it only once.
        # todo: Instead of hidden cache make "CollectContainers" plug-in
        cache_key = "__cache_containers"
        scene_containers = instance.context.data.get(cache_key, None)
        if scene_containers is None:
            # Query the scenes' containers if there's no cache yet
            host = registered_host()
            scene_containers = list(host.ls())
            for container in scene_containers:
                # Embed the members into the container dictionary
                container_members = set(get_container_members(container))
                container["_members"] = container_members
            # Store under the same key variable we queried with above so
            # cache reads and writes cannot drift apart.
            instance.context.data[cache_key] = scene_containers

        # Collect the relevant input containers for this instance
        if "renderlayer" in set(instance.data.get("families", [])):
            # Special behavior for renderlayers
            self.log.debug("Collecting renderlayer inputs....")
            containers = self._collect_renderlayer_inputs(scene_containers,
                                                          instance)
        else:
            # Basic behavior
            nodes = instance[:]

            # Include any input connections of history with long names
            # For optimization purposes only trace upstream from shape nodes
            # looking for used dag nodes. This way having just a constraint
            # on a transform is also ignored which tended to give irrelevant
            # inputs for the majority of our use cases. We tend to care more
            # about geometry inputs.
            shapes = cmds.ls(nodes,
                             type=("mesh", "nurbsSurface", "nurbsCurve"),
                             noIntermediate=True)
            if shapes:
                history = list(iter_history(shapes, filter=om.MFn.kShape))
                history = cmds.ls(history, long=True)

                # Include the transforms in the collected history as shapes
                # are excluded from containers
                transforms = cmds.listRelatives(cmds.ls(history, shapes=True),
                                                parent=True,
                                                fullPath=True,
                                                type="transform")
                if transforms:
                    history.extend(transforms)

                if history:
                    nodes = list(set(nodes + history))

            # Collect containers for the given set of nodes
            containers = collect_input_containers(scene_containers,
                                                  nodes)

        inputs = [ObjectId(c["representation"]) for c in containers]
        instance.data["inputRepresentations"] = inputs
        self.log.info("Collected inputs: %s" % inputs)

    def _collect_renderlayer_inputs(self, scene_containers, instance):
        """Collects inputs from nodes in renderlayer, incl. shaders + camera"""

        # Get the renderlayer
        renderlayer = instance.data.get("setMembers")

        if renderlayer == "defaultRenderLayer":
            # Assume all loaded containers in the scene are inputs
            # for the masterlayer
            return copy.deepcopy(scene_containers)
        else:
            # Get the members of the layer
            members = cmds.editRenderLayerMembers(renderlayer,
                                                  query=True,
                                                  fullNames=True) or []

            # In some cases invalid objects are returned from
            # `editRenderLayerMembers` so we filter them out
            members = cmds.ls(members, long=True)

            # Include all children
            children = cmds.listRelatives(members,
                                          allDescendents=True,
                                          fullPath=True) or []
            members.extend(children)

            # Include assigned shaders in renderlayer
            shapes = cmds.ls(members, shapes=True, long=True)
            shaders = set()
            for shape in shapes:
                shape_shaders = get_shader_in_layer(shape, layer=renderlayer)
                if not shape_shaders:
                    continue
                shaders.update(shape_shaders)
            members.extend(shaders)

            # Explicitly include the camera being rendered in renderlayer.
            # Guard against a missing "cameras" entry (None) which would
            # make `extend()` raise a TypeError.
            cameras = instance.data.get("cameras") or []
            members.extend(cameras)

            containers = collect_input_containers(scene_containers, members)
            return containers

View file

@ -1,25 +0,0 @@
from maya import cmds
import pyblish.api
class CollectMayaScene(pyblish.api.InstancePlugin):
"""Collect Maya Scene Data
"""
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Model Data'
families = ["mayaScene"]
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
instance.data["frameStart"] = frame
instance.data["frameEnd"] = frame
# make ftrack publishable
if instance.data.get('families'):
instance.data['families'].append('ftrack')
else:
instance.data['families'] = ['ftrack']

View file

@ -0,0 +1,26 @@
from maya import cmds
import pyblish.api
class CollectMayaSceneTime(pyblish.api.InstancePlugin):
"""Collect Maya Scene playback range
This allows to reproduce the playback range for the content to be loaded.
It does *not* limit the extracted data to only data inside that time range.
"""
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Maya Scene Time'
families = ["mayaScene"]
def process(self, instance):
instance.data.update({
"frameStart": cmds.playbackOptions(query=True, minTime=True),
"frameEnd": cmds.playbackOptions(query=True, maxTime=True),
"frameStartHandle": cmds.playbackOptions(query=True,
animationStartTime=True),
"frameEndHandle": cmds.playbackOptions(query=True,
animationEndTime=True)
})

View file

@ -1,22 +0,0 @@
from maya import cmds
import pyblish.api
class CollectRigData(pyblish.api.InstancePlugin):
"""Collect rig data
Ensures rigs are published to Ftrack.
"""
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Rig Data'
families = ["rig"]
def process(self, instance):
# make ftrack publishable
if instance.data.get('families'):
instance.data['families'].append('ftrack')
else:
instance.data['families'] = ['ftrack']

View file

@ -1,12 +1,12 @@
import os import os
import openpype.api
from maya import cmds from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractAssStandin(openpype.api.Extractor): class ExtractAssStandin(publish.Extractor):
"""Extract the content of the instance to a ass file """Extract the content of the instance to a ass file
Things to pay attention to: Things to pay attention to:

View file

@ -1,14 +1,13 @@
import os
import json import json
import os from openpype.pipeline import publish
import openpype.api
from openpype.hosts.maya.api.lib import extract_alembic from openpype.hosts.maya.api.lib import extract_alembic
from maya import cmds from maya import cmds
class ExtractAssembly(openpype.api.Extractor): class ExtractAssembly(publish.Extractor):
"""Produce an alembic of just point positions and normals. """Produce an alembic of just point positions and normals.
Positions and normals are preserved, but nothing more, Positions and normals are preserved, but nothing more,

View file

@ -3,17 +3,17 @@ import contextlib
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractAssProxy(openpype.api.Extractor): class ExtractAssProxy(publish.Extractor):
"""Extract proxy model as Maya Ascii to use as arnold standin """Extract proxy model as Maya Ascii to use as arnold standin
""" """
order = openpype.api.Extractor.order + 0.2 order = publish.Extractor.order + 0.2
label = "Ass Proxy (Maya ASCII)" label = "Ass Proxy (Maya ASCII)"
hosts = ["maya"] hosts = ["maya"]
families = ["ass"] families = ["ass"]

View file

@ -2,11 +2,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api import lib from openpype.hosts.maya.api import lib
class ExtractCameraAlembic(openpype.api.Extractor): class ExtractCameraAlembic(publish.Extractor):
"""Extract a Camera as Alembic. """Extract a Camera as Alembic.
The cameras gets baked to world space by default. Only when the instance's The cameras gets baked to world space by default. Only when the instance's

View file

@ -5,7 +5,7 @@ import itertools
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api import lib from openpype.hosts.maya.api import lib
@ -78,7 +78,7 @@ def unlock(plug):
cmds.disconnectAttr(source, destination) cmds.disconnectAttr(source, destination)
class ExtractCameraMayaScene(openpype.api.Extractor): class ExtractCameraMayaScene(publish.Extractor):
"""Extract a Camera as Maya Scene. """Extract a Camera as Maya Scene.
This will create a duplicate of the camera that will be baked *with* This will create a duplicate of the camera that will be baked *with*

View file

@ -4,13 +4,13 @@ import os
from maya import cmds # noqa from maya import cmds # noqa
import maya.mel as mel # noqa import maya.mel as mel # noqa
import pyblish.api import pyblish.api
import openpype.api
from openpype.hosts.maya.api.lib import maintained_selection
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection
from openpype.hosts.maya.api import fbx from openpype.hosts.maya.api import fbx
class ExtractFBX(openpype.api.Extractor): class ExtractFBX(publish.Extractor):
"""Extract FBX from Maya. """Extract FBX from Maya.
This extracts reproducible FBX exports ignoring any of the This extracts reproducible FBX exports ignoring any of the

View file

@ -5,13 +5,11 @@ import json
from maya import cmds from maya import cmds
from maya.api import OpenMaya as om from maya.api import OpenMaya as om
from bson.objectid import ObjectId from openpype.client import get_representation_by_id
from openpype.pipeline import legacy_io, publish
from openpype.pipeline import legacy_io
import openpype.api
class ExtractLayout(openpype.api.Extractor): class ExtractLayout(publish.Extractor):
"""Extract a layout.""" """Extract a layout."""
label = "Extract Layout" label = "Extract Layout"
@ -30,6 +28,8 @@ class ExtractLayout(openpype.api.Extractor):
instance.data["representations"] = [] instance.data["representations"] = []
json_data = [] json_data = []
# TODO representation queries can be refactored to be faster
project_name = legacy_io.active_project()
for asset in cmds.sets(str(instance), query=True): for asset in cmds.sets(str(instance), query=True):
# Find the container # Find the container
@ -43,11 +43,11 @@ class ExtractLayout(openpype.api.Extractor):
representation_id = cmds.getAttr(f"{container}.representation") representation_id = cmds.getAttr(f"{container}.representation")
representation = legacy_io.find_one( representation = get_representation_by_id(
{ project_name,
"type": "representation", representation_id,
"_id": ObjectId(representation_id) fields=["parent", "context.family"]
}, projection={"parent": True, "context.family": True}) )
self.log.info(representation) self.log.info(representation)
@ -102,9 +102,10 @@ class ExtractLayout(openpype.api.Extractor):
for i in range(0, len(t_matrix_list), row_length): for i in range(0, len(t_matrix_list), row_length):
t_matrix.append(t_matrix_list[i:i + row_length]) t_matrix.append(t_matrix_list[i:i + row_length])
json_element["transform_matrix"] = [] json_element["transform_matrix"] = [
for row in t_matrix: list(row)
json_element["transform_matrix"].append(list(row)) for row in t_matrix
]
basis_list = [ basis_list = [
1, 0, 0, 0, 1, 0, 0, 0,

View file

@ -13,8 +13,8 @@ from maya import cmds # noqa
import pyblish.api import pyblish.api
import openpype.api from openpype.lib import source_hash
from openpype.pipeline import legacy_io from openpype.pipeline import legacy_io, publish
from openpype.hosts.maya.api import lib from openpype.hosts.maya.api import lib
# Modes for transfer # Modes for transfer
@ -161,7 +161,7 @@ def no_workspace_dir():
os.rmdir(fake_workspace_dir) os.rmdir(fake_workspace_dir)
class ExtractLook(openpype.api.Extractor): class ExtractLook(publish.Extractor):
"""Extract Look (Maya Scene + JSON) """Extract Look (Maya Scene + JSON)
Only extracts the sets (shadingEngines and alike) alongside a .json file Only extracts the sets (shadingEngines and alike) alongside a .json file
@ -505,7 +505,7 @@ class ExtractLook(openpype.api.Extractor):
args = [] args = []
if do_maketx: if do_maketx:
args.append("maketx") args.append("maketx")
texture_hash = openpype.api.source_hash(filepath, *args) texture_hash = source_hash(filepath, *args)
# If source has been published before with the same settings, # If source has been published before with the same settings,
# then don't reprocess but hardlink from the original # then don't reprocess but hardlink from the original

View file

@ -4,12 +4,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
from openpype.pipeline import AVALON_CONTAINER_ID from openpype.pipeline import AVALON_CONTAINER_ID, publish
class ExtractMayaSceneRaw(openpype.api.Extractor): class ExtractMayaSceneRaw(publish.Extractor):
"""Extract as Maya Scene (raw). """Extract as Maya Scene (raw).
This will preserve all references, construction history, etc. This will preserve all references, construction history, etc.

View file

@ -4,11 +4,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api import lib from openpype.hosts.maya.api import lib
class ExtractModel(openpype.api.Extractor): class ExtractModel(publish.Extractor):
"""Extract as Model (Maya Scene). """Extract as Model (Maya Scene).
Only extracts contents based on the original "setMembers" data to ensure Only extracts contents based on the original "setMembers" data to ensure

View file

@ -2,11 +2,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractMultiverseLook(openpype.api.Extractor): class ExtractMultiverseLook(publish.Extractor):
"""Extractor for Multiverse USD look data. """Extractor for Multiverse USD look data.
This will extract: This will extract:

View file

@ -3,11 +3,11 @@ import six
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractMultiverseUsd(openpype.api.Extractor): class ExtractMultiverseUsd(publish.Extractor):
"""Extractor for Multiverse USD Asset data. """Extractor for Multiverse USD Asset data.
This will extract settings for a Multiverse Write Asset operation: This will extract settings for a Multiverse Write Asset operation:

View file

@ -2,11 +2,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractMultiverseUsdComposition(openpype.api.Extractor): class ExtractMultiverseUsdComposition(publish.Extractor):
"""Extractor of Multiverse USD Composition data. """Extractor of Multiverse USD Composition data.
This will extract settings for a Multiverse Write Composition operation: This will extract settings for a Multiverse Write Composition operation:

View file

@ -1,12 +1,12 @@
import os import os
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
from maya import cmds from maya import cmds
class ExtractMultiverseUsdOverride(openpype.api.Extractor): class ExtractMultiverseUsdOverride(publish.Extractor):
"""Extractor for Multiverse USD Override data. """Extractor for Multiverse USD Override data.
This will extract settings for a Multiverse Write Override operation: This will extract settings for a Multiverse Write Override operation:

View file

@ -1,18 +1,16 @@
import os import os
import glob
import contextlib
import clique import clique
import capture import capture
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib from openpype.hosts.maya.api import lib
import openpype.api
from maya import cmds from maya import cmds
import pymel.core as pm import pymel.core as pm
class ExtractPlayblast(openpype.api.Extractor): class ExtractPlayblast(publish.Extractor):
"""Extract viewport playblast. """Extract viewport playblast.
Takes review camera and creates review Quicktime video based on viewport Takes review camera and creates review Quicktime video based on viewport

View file

@ -2,7 +2,7 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.lib import (
extract_alembic, extract_alembic,
suspended_refresh, suspended_refresh,
@ -11,7 +11,7 @@ from openpype.hosts.maya.api.lib import (
) )
class ExtractAlembic(openpype.api.Extractor): class ExtractAlembic(publish.Extractor):
"""Produce an alembic of just point positions and normals. """Produce an alembic of just point positions and normals.
Positions and normals, uvs, creases are preserved, but nothing more, Positions and normals, uvs, creases are preserved, but nothing more,

View file

@ -4,11 +4,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractRedshiftProxy(openpype.api.Extractor): class ExtractRedshiftProxy(publish.Extractor):
"""Extract the content of the instance to a redshift proxy file.""" """Extract the content of the instance to a redshift proxy file."""
label = "Redshift Proxy (.rs)" label = "Redshift Proxy (.rs)"

View file

@ -1,10 +1,11 @@
import json
import os import os
import openpype.api import json
import maya.app.renderSetup.model.renderSetup as renderSetup import maya.app.renderSetup.model.renderSetup as renderSetup
from openpype.pipeline import publish
class ExtractRenderSetup(openpype.api.Extractor): class ExtractRenderSetup(publish.Extractor):
""" """
Produce renderSetup template file Produce renderSetup template file

View file

@ -4,11 +4,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractRig(openpype.api.Extractor): class ExtractRig(publish.Extractor):
"""Extract rig as Maya Scene.""" """Extract rig as Maya Scene."""
label = "Extract Rig (Maya Scene)" label = "Extract Rig (Maya Scene)"

View file

@ -3,14 +3,14 @@ import glob
import capture import capture
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib from openpype.hosts.maya.api import lib
import openpype.api
from maya import cmds from maya import cmds
import pymel.core as pm import pymel.core as pm
class ExtractThumbnail(openpype.api.Extractor): class ExtractThumbnail(publish.Extractor):
"""Extract viewport thumbnail. """Extract viewport thumbnail.
Takes review camera and creates a thumbnail based on viewport Takes review camera and creates a thumbnail based on viewport

View file

@ -6,7 +6,8 @@ from contextlib import contextmanager
from maya import cmds # noqa from maya import cmds # noqa
import pyblish.api import pyblish.api
import openpype.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import fbx from openpype.hosts.maya.api import fbx
@ -20,7 +21,7 @@ def renamed(original_name, renamed_name):
cmds.rename(renamed_name, original_name) cmds.rename(renamed_name, original_name)
class ExtractUnrealSkeletalMesh(openpype.api.Extractor): class ExtractUnrealSkeletalMesh(publish.Extractor):
"""Extract Unreal Skeletal Mesh as FBX from Maya. """ """Extract Unreal Skeletal Mesh as FBX from Maya. """
order = pyblish.api.ExtractorOrder - 0.1 order = pyblish.api.ExtractorOrder - 0.1

View file

@ -5,7 +5,8 @@ import os
from maya import cmds # noqa from maya import cmds # noqa
import pyblish.api import pyblish.api
import openpype.api
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.lib import (
parent_nodes, parent_nodes,
maintained_selection maintained_selection
@ -13,7 +14,7 @@ from openpype.hosts.maya.api.lib import (
from openpype.hosts.maya.api import fbx from openpype.hosts.maya.api import fbx
class ExtractUnrealStaticMesh(openpype.api.Extractor): class ExtractUnrealStaticMesh(publish.Extractor):
"""Extract Unreal Static Mesh as FBX from Maya. """ """Extract Unreal Static Mesh as FBX from Maya. """
order = pyblish.api.ExtractorOrder - 0.1 order = pyblish.api.ExtractorOrder - 0.1

View file

@ -2,11 +2,11 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
class ExtractVRayProxy(openpype.api.Extractor): class ExtractVRayProxy(publish.Extractor):
"""Extract the content of the instance to a vrmesh file """Extract the content of the instance to a vrmesh file
Things to pay attention to: Things to pay attention to:

View file

@ -3,14 +3,14 @@
import os import os
import re import re
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.render_setup_tools import export_in_rs_layer from openpype.hosts.maya.api.render_setup_tools import export_in_rs_layer
from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api.lib import maintained_selection
from maya import cmds from maya import cmds
class ExtractVrayscene(openpype.api.Extractor): class ExtractVrayscene(publish.Extractor):
"""Extractor for vrscene.""" """Extractor for vrscene."""
label = "VRay Scene (.vrscene)" label = "VRay Scene (.vrscene)"

View file

@ -2,14 +2,14 @@ import os
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.lib import (
suspended_refresh, suspended_refresh,
maintained_selection maintained_selection
) )
class ExtractXgenCache(openpype.api.Extractor): class ExtractXgenCache(publish.Extractor):
"""Produce an alembic of just xgen interactive groom """Produce an alembic of just xgen interactive groom
""" """

View file

@ -3,10 +3,10 @@ import json
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
class ExtractYetiCache(openpype.api.Extractor): class ExtractYetiCache(publish.Extractor):
"""Producing Yeti cache files using scene time range. """Producing Yeti cache files using scene time range.
This will extract Yeti cache file sequence and fur settings. This will extract Yeti cache file sequence and fur settings.

View file

@ -7,7 +7,7 @@ import contextlib
from maya import cmds from maya import cmds
import openpype.api from openpype.pipeline import publish
from openpype.hosts.maya.api import lib from openpype.hosts.maya.api import lib
@ -90,7 +90,7 @@ def yetigraph_attribute_values(assumed_destination, resources):
pass pass
class ExtractYetiRig(openpype.api.Extractor): class ExtractYetiRig(publish.Extractor):
"""Extract the Yeti rig to a Maya Scene and write the Yeti rig data.""" """Extract the Yeti rig to a Maya Scene and write the Yeti rig data."""
label = "Extract Yeti Rig" label = "Extract Yeti Rig"

View file

@ -1,4 +1,8 @@
import pyblish.api import pyblish.api
from openpype.pipeline.workfile.lock_workfile import (
is_workfile_lock_enabled,
remove_workfile_lock
)
class SaveCurrentScene(pyblish.api.ContextPlugin): class SaveCurrentScene(pyblish.api.ContextPlugin):
@ -22,6 +26,10 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
self.log.debug("Skipping file save as there " self.log.debug("Skipping file save as there "
"are no modifications..") "are no modifications..")
return return
project_name = context.data["projectName"]
project_settings = context.data["project_settings"]
# remove lockfile before saving
if is_workfile_lock_enabled("maya", project_name, project_settings):
remove_workfile_lock(current)
self.log.info("Saving current file..") self.log.info("Saving current file..")
cmds.file(save=True, force=True) cmds.file(save=True, force=True)

View file

@ -4,7 +4,7 @@ import nuke
import pyblish.api import pyblish.api
import openpype from openpype.pipeline import publish
from openpype.hosts.nuke.api.lib import ( from openpype.hosts.nuke.api.lib import (
maintained_selection, maintained_selection,
reset_selection, reset_selection,
@ -12,7 +12,7 @@ from openpype.hosts.nuke.api.lib import (
) )
class ExtractBackdropNode(openpype.api.Extractor): class ExtractBackdropNode(publish.Extractor):
"""Extracting content of backdrop nodes """Extracting content of backdrop nodes
Will create nuke script only with containing nodes. Will create nuke script only with containing nodes.

View file

@ -5,11 +5,12 @@ from pprint import pformat
import nuke import nuke
import pyblish.api import pyblish.api
import openpype.api
from openpype.pipeline import publish
from openpype.hosts.nuke.api.lib import maintained_selection from openpype.hosts.nuke.api.lib import maintained_selection
class ExtractCamera(openpype.api.Extractor): class ExtractCamera(publish.Extractor):
""" 3D camera exctractor """ 3D camera exctractor
""" """
label = 'Exctract Camera' label = 'Exctract Camera'

View file

@ -3,7 +3,7 @@ import nuke
import pyblish.api import pyblish.api
import openpype from openpype.pipeline import publish
from openpype.hosts.nuke.api import utils as pnutils from openpype.hosts.nuke.api import utils as pnutils
from openpype.hosts.nuke.api.lib import ( from openpype.hosts.nuke.api.lib import (
maintained_selection, maintained_selection,
@ -12,7 +12,7 @@ from openpype.hosts.nuke.api.lib import (
) )
class ExtractGizmo(openpype.api.Extractor): class ExtractGizmo(publish.Extractor):
"""Extracting Gizmo (Group) node """Extracting Gizmo (Group) node
Will create nuke script only with the Gizmo node. Will create nuke script only with the Gizmo node.

View file

@ -2,14 +2,15 @@ import os
from pprint import pformat from pprint import pformat
import nuke import nuke
import pyblish.api import pyblish.api
import openpype.api
from openpype.pipeline import publish
from openpype.hosts.nuke.api.lib import ( from openpype.hosts.nuke.api.lib import (
maintained_selection, maintained_selection,
select_nodes select_nodes
) )
class ExtractModel(openpype.api.Extractor): class ExtractModel(publish.Extractor):
""" 3D model exctractor """ 3D model exctractor
""" """
label = 'Exctract Model' label = 'Exctract Model'

View file

@ -1,11 +1,13 @@
import pyblish.api
import nuke
import os import os
import openpype
import pyblish.api
import clique import clique
import nuke
from openpype.pipeline import publish
class NukeRenderLocal(openpype.api.Extractor): class NukeRenderLocal(publish.Extractor):
# TODO: rewrite docstring to nuke # TODO: rewrite docstring to nuke
"""Render the current Nuke composition locally. """Render the current Nuke composition locally.

View file

@ -1,10 +1,11 @@
import os import os
import pyblish.api
import openpype
from pprint import pformat from pprint import pformat
import pyblish.api
from openpype.pipeline import publish
class ExtractReviewData(openpype.api.Extractor): class ExtractReviewData(publish.Extractor):
"""Extracts review tag into available representation """Extracts review tag into available representation
""" """

View file

@ -1,11 +1,12 @@
import os import os
import pyblish.api import pyblish.api
import openpype
from openpype.pipeline import publish
from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import maintained_selection from openpype.hosts.nuke.api.lib import maintained_selection
class ExtractReviewDataLut(openpype.api.Extractor): class ExtractReviewDataLut(publish.Extractor):
"""Extracts movie and thumbnail with baked in luts """Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py must be run after extract_render_local.py

View file

@ -1,13 +1,14 @@
import os import os
from pprint import pformat
import re import re
from pprint import pformat
import pyblish.api import pyblish.api
import openpype
from openpype.pipeline import publish
from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import maintained_selection from openpype.hosts.nuke.api.lib import maintained_selection
class ExtractReviewDataMov(openpype.api.Extractor): class ExtractReviewDataMov(publish.Extractor):
"""Extracts movie and thumbnail with baked in luts """Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py must be run after extract_render_local.py

View file

@ -6,7 +6,7 @@ import copy
import pyblish.api import pyblish.api
import six import six
import openpype from openpype.pipeline import publish
from openpype.hosts.nuke.api import ( from openpype.hosts.nuke.api import (
maintained_selection, maintained_selection,
duplicate_node, duplicate_node,
@ -14,7 +14,7 @@ from openpype.hosts.nuke.api import (
) )
class ExtractSlateFrame(openpype.api.Extractor): class ExtractSlateFrame(publish.Extractor):
"""Extracts movie and thumbnail with baked in luts """Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py must be run after extract_render_local.py

View file

@ -2,7 +2,8 @@ import sys
import os import os
import nuke import nuke
import pyblish.api import pyblish.api
import openpype
from openpype.pipeline import publish
from openpype.hosts.nuke.api import ( from openpype.hosts.nuke.api import (
maintained_selection, maintained_selection,
get_view_process_node get_view_process_node
@ -13,7 +14,7 @@ if sys.version_info[0] >= 3:
unicode = str unicode = str
class ExtractThumbnail(openpype.api.Extractor): class ExtractThumbnail(publish.Extractor):
"""Extracts movie and thumbnail with baked in luts """Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py must be run after extract_render_local.py

View file

@ -154,7 +154,7 @@ def convert_value_by_type_name(value_type, value, logger=None):
elif parts_len == 4: elif parts_len == 4:
divisor = 2 divisor = 2
elif parts_len == 9: elif parts_len == 9:
divisor == 3 divisor = 3
elif parts_len == 16: elif parts_len == 16:
divisor = 4 divisor = 4
else: else:

View file

@ -734,7 +734,7 @@ def _format_tiles(
Result for tile 0 for 4x4 will be: Result for tile 0 for 4x4 will be:
`maya/<Scene>/<RenderLayer>/_tile_1x1_4x4_<RenderLayer>_<RenderPass>` `maya/<Scene>/<RenderLayer>/_tile_1x1_4x4_<RenderLayer>_<RenderPass>`
Calculating coordinates is tricky as in Job they are defined as top, Calculating coordinates is tricky as in Job they are defined as top,
left, bottom, right with zero being in top-left corner. But Assembler left, bottom, right with zero being in top-left corner. But Assembler
configuration file takes tile coordinates as X, Y, Width and Height and configuration file takes tile coordinates as X, Y, Width and Height and
zero is bottom left corner. zero is bottom left corner.
@ -743,13 +743,13 @@ def _format_tiles(
filename (str): Filename to process as tiles. filename (str): Filename to process as tiles.
index (int): Index of that file if it is sequence. index (int): Index of that file if it is sequence.
tiles_x (int): Number of tiles in X. tiles_x (int): Number of tiles in X.
tiles_y (int): Number if tikes in Y. tiles_y (int): Number of tiles in Y.
width (int): Width resolution of final image. width (int): Width resolution of final image.
height (int): Height resolution of final image. height (int): Height resolution of final image.
prefix (str): Image prefix. prefix (str): Image prefix.
Returns: Returns:
(dict, dict): Tuple of two dictionaires - first can be used to (dict, dict): Tuple of two dictionaries - first can be used to
extend JobInfo, second has tiles x, y, width and height extend JobInfo, second has tiles x, y, width and height
used for assembler configuration. used for assembler configuration.
@ -776,21 +776,24 @@ def _format_tiles(
tiles_x, tiles_x,
tiles_y tiles_y
) )
top = height - (tile_y * h_space)
bottom = height - ((tile_y - 1) * h_space) - 1
left = (tile_x - 1) * w_space
right = (tile_x * w_space) - 1
# Job Info
new_filename = "{}/{}{}".format( new_filename = "{}/{}{}".format(
os.path.dirname(filename), os.path.dirname(filename),
tile_prefix, tile_prefix,
os.path.basename(filename) os.path.basename(filename)
) )
out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename # noqa
top = height - (tile_y * h_space)
bottom = height - ((tile_y - 1) * h_space) - 1
left = (tile_x - 1) * w_space
right = (tile_x * w_space) - 1
# Job info
out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename # noqa: E501
# Plugin Info # Plugin Info
out["PluginInfo"]["RegionPrefix{}".format(tile)] = "/{}".format(tile_prefix).join(prefix.rsplit("/", 1)) # noqa: E501 out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \
"/{}".format(tile_prefix).join(prefix.rsplit("/", 1))
out["PluginInfo"]["RegionTop{}".format(tile)] = top out["PluginInfo"]["RegionTop{}".format(tile)] = top
out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom
out["PluginInfo"]["RegionLeft{}".format(tile)] = left out["PluginInfo"]["RegionLeft{}".format(tile)] = left

View file

@ -778,7 +778,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"resolutionHeight": data.get("resolutionHeight", 1080), "resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False), "multipartExr": data.get("multipartExr", False),
"jobBatchName": data.get("jobBatchName", ""), "jobBatchName": data.get("jobBatchName", ""),
"useSequenceForReview": data.get("useSequenceForReview", True) "useSequenceForReview": data.get("useSequenceForReview", True),
# map inputVersions `ObjectId` -> `str` so json supports it
"inputVersions": list(map(str, data.get("inputVersions", [])))
} }
# skip locking version if we are creating v01 # skip locking version if we are creating v01

View file

@ -71,7 +71,7 @@ def convert_value_by_type_name(value_type, value):
elif parts_len == 4: elif parts_len == 4:
divisor = 2 divisor = 2
elif parts_len == 9: elif parts_len == 9:
divisor == 3 divisor = 3
elif parts_len == 16: elif parts_len == 16:
divisor = 4 divisor = 4
else: else:
@ -453,7 +453,7 @@ class OpenPypeTileAssembler(DeadlinePlugin):
# Swap to have input as foreground # Swap to have input as foreground
args.append("--swap") args.append("--swap")
# Paste foreground to background # Paste foreground to background
args.append("--paste +{}+{}".format(pos_x, pos_y)) args.append("--paste {x:+d}{y:+d}".format(x=pos_x, y=pos_y))
args.append("-o") args.append("-o")
args.append(output_path) args.append(output_path)

View file

@ -0,0 +1,82 @@
import os
import json
from uuid import uuid4
from openpype.lib import Logger, filter_profiles
from openpype.lib.pype_info import get_workstation_info
from openpype.settings import get_project_settings
def _read_lock_file(lock_filepath):
if not os.path.exists(lock_filepath):
log = Logger.get_logger("_read_lock_file")
log.debug("lock file is not created or readable as expected!")
with open(lock_filepath, "r") as stream:
data = json.load(stream)
return data
def _get_lock_file(filepath):
return filepath + ".oplock"
def is_workfile_locked(filepath):
    """Tell whether a lock file exists for the given workfile path."""
    return os.path.exists(_get_lock_file(filepath))
def get_workfile_lock_data(filepath):
    """Return the information stored in the workfile's lock file."""
    return _read_lock_file(_get_lock_file(filepath))
def is_workfile_locked_for_current_process(filepath):
    """Tell whether the lock on the workfile belongs to this process."""
    if not is_workfile_locked(filepath):
        return False

    lock_info = _read_lock_file(_get_lock_file(filepath))
    return _get_process_id() == lock_info["process_id"]
def delete_workfile_lock(filepath):
    """Remove the lock file of a workfile if it exists."""
    lock_filepath = _get_lock_file(filepath)
    if not os.path.exists(lock_filepath):
        return
    os.remove(lock_filepath)
def create_workfile_lock(filepath):
    """Write a lock file next to the workfile.

    The lock file stores workstation information plus the id of the
    current process so lock ownership can be checked later.
    """
    data = get_workstation_info()
    data["process_id"] = _get_process_id()
    with open(_get_lock_file(filepath), "w") as stream:
        json.dump(data, stream)
def remove_workfile_lock(filepath):
    """Remove the workfile lock, but only when owned by this process."""
    if not is_workfile_locked_for_current_process(filepath):
        return
    delete_workfile_lock(filepath)
def _get_process_id():
process_id = os.environ.get("OPENPYPE_PROCESS_ID")
if not process_id:
process_id = str(uuid4())
os.environ["OPENPYPE_PROCESS_ID"] = process_id
return process_id
def is_workfile_lock_enabled(host_name, project_name, project_setting=None):
    """Tell whether workfile locking is enabled for a host in a project.

    Args:
        host_name (str): Host name matched against lock profiles.
        project_name (str): Project whose settings are queried when
            'project_setting' is not passed in.
        project_setting (dict): Optional pre-fetched project settings.

    Returns:
        bool: Value of the matching profile's "enabled" flag, or False
            when no profile matches the host.
    """
    if project_setting is None:
        project_setting = get_project_settings(project_name)

    profiles = (
        project_setting
        ["global"]
        ["tools"]
        ["Workfiles"]
        ["workfile_lock_profiles"]
    )
    matching_profile = filter_profiles(profiles, {"host_name": host_name})
    if not matching_profile:
        return False
    return matching_profile["enabled"]

View file

@ -0,0 +1,47 @@
import pyblish.api
from bson.objectid import ObjectId
from openpype.client import get_representations
class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin):
    """Converts collected input representations to input versions.

    Any data in `instance.data["inputRepresentations"]` gets converted into
    `instance.data["inputVersions"]` as supported in OpenPype v3.
    """
    # This is a ContextPlugin because then we can query the database only once
    # for the conversion of representation ids to version ids (optimization)
    label = "Input Representations to Versions"
    order = pyblish.api.CollectorOrder + 0.499
    hosts = ["*"]

    def process(self, context):
        # Gather representation ids from all instances first so the
        # database is queried only once for the whole context
        representations = set()
        for instance in context:
            inst_repre = instance.data.get("inputRepresentations", [])
            representations.update(inst_repre)

        if not representations:
            # Nothing to convert - avoid a pointless database query
            return

        representations_docs = get_representations(
            project_name=context.data["projectEntity"]["name"],
            representation_ids=representations,
            fields=["_id", "parent"])

        representation_id_to_version_id = {
            repre["_id"]: repre["parent"]
            for repre in representations_docs
        }

        for instance in context:
            inst_repre = instance.data.get("inputRepresentations", [])
            if not inst_repre:
                continue

            input_versions = instance.data.get("inputVersions", [])
            for repre_id in inst_repre:
                version_id = representation_id_to_version_id.get(
                    ObjectId(repre_id))
                if version_id is None:
                    # Fix: a representation missing from the database used
                    # to raise KeyError and abort the whole plug-in; skip
                    # it with a warning instead.
                    self.log.warning(
                        "Representation id not found in database: "
                        "{}".format(repre_id))
                    continue
                input_versions.append(version_id)

            instance.data["inputVersions"] = input_versions

View file

@ -16,6 +16,8 @@ class ExtractOTIOFile(publish.Extractor):
hosts = ["resolve", "hiero", "traypublisher"] hosts = ["resolve", "hiero", "traypublisher"]
def process(self, instance): def process(self, instance):
if not instance.context.data.get("otioTimeline"):
return
# create representation data # create representation data
if "representations" not in instance.data: if "representations" not in instance.data:
instance.data["representations"] = [] instance.data["representations"] = []

View file

@ -36,31 +36,31 @@
"layout" "layout"
] ]
}, },
"ExtractBlendAnimation": {
"enabled": true,
"optional": true,
"active": true
},
"ExtractCamera": {
"enabled": true,
"optional": true,
"active": true
},
"ExtractFBX": { "ExtractFBX": {
"enabled": true, "enabled": true,
"optional": true, "optional": true,
"active": false "active": false
}, },
"ExtractAnimationFBX": {
"enabled": true,
"optional": true,
"active": false
},
"ExtractABC": { "ExtractABC": {
"enabled": true, "enabled": true,
"optional": true, "optional": true,
"active": false "active": false
}, },
"ExtractBlendAnimation": {
"enabled": true,
"optional": true,
"active": true
},
"ExtractAnimationFBX": {
"enabled": true,
"optional": true,
"active": false
},
"ExtractCamera": {
"enabled": true,
"optional": true,
"active": true
},
"ExtractLayout": { "ExtractLayout": {
"enabled": true, "enabled": true,
"optional": true, "optional": true,

View file

@ -407,7 +407,8 @@
"enabled": false "enabled": false
} }
], ],
"extra_folders": [] "extra_folders": [],
"workfile_lock_profiles": []
}, },
"loader": { "loader": {
"family_filter_profiles": [ "family_filter_profiles": [

View file

@ -1,4 +1,15 @@
{ {
"shelves": [
{
"shelf_set_name": "OpenPype Shelves",
"shelf_set_source_path": {
"windows": "",
"darwin": "",
"linux": ""
},
"shelf_definition": []
}
],
"create": { "create": {
"CreateArnoldAss": { "CreateArnoldAss": {
"enabled": true, "enabled": true,

View file

@ -678,25 +678,20 @@
"isolate_view": true, "isolate_view": true,
"off_screen": true "off_screen": true
}, },
"PanZoom": {
"pan_zoom": true
},
"Renderer": { "Renderer": {
"rendererName": "vp2Renderer" "rendererName": "vp2Renderer"
}, },
"Resolution": { "Resolution": {
"width": 1920, "width": 1920,
"height": 1080, "height": 1080
"percent": 1.0,
"mode": "Custom"
}, },
"Viewport Options": { "Viewport Options": {
"override_viewport_options": true, "override_viewport_options": true,
"displayLights": "default", "displayLights": "default",
"displayTextures": true,
"textureMaxResolution": 1024, "textureMaxResolution": 1024,
"renderDepthOfField": true, "renderDepthOfField": true,
"shadows": true, "shadows": true,
"textures": true,
"twoSidedLighting": true, "twoSidedLighting": true,
"lineAAEnable": true, "lineAAEnable": true,
"multiSample": 8, "multiSample": 8,
@ -719,7 +714,6 @@
"motionBlurShutterOpenFraction": 0.2, "motionBlurShutterOpenFraction": 0.2,
"cameras": false, "cameras": false,
"clipGhosts": false, "clipGhosts": false,
"controlVertices": false,
"deformers": false, "deformers": false,
"dimensions": false, "dimensions": false,
"dynamicConstraints": false, "dynamicConstraints": false,
@ -731,8 +725,7 @@
"grid": false, "grid": false,
"hairSystems": true, "hairSystems": true,
"handles": false, "handles": false,
"hud": false, "headsUpDisplay": false,
"hulls": false,
"ikHandles": false, "ikHandles": false,
"imagePlane": true, "imagePlane": true,
"joints": false, "joints": false,
@ -743,7 +736,9 @@
"nCloths": false, "nCloths": false,
"nParticles": false, "nParticles": false,
"nRigids": false, "nRigids": false,
"controlVertices": false,
"nurbsCurves": false, "nurbsCurves": false,
"hulls": false,
"nurbsSurfaces": false, "nurbsSurfaces": false,
"particleInstancers": false, "particleInstancers": false,
"pivots": false, "pivots": false,
@ -751,7 +746,8 @@
"pluginShapes": false, "pluginShapes": false,
"polymeshes": true, "polymeshes": true,
"strokes": false, "strokes": false,
"subdivSurfaces": false "subdivSurfaces": false,
"textures": false
}, },
"Camera Options": { "Camera Options": {
"displayGateMask": false, "displayGateMask": false,

View file

@ -5,6 +5,10 @@
"label": "Houdini", "label": "Houdini",
"is_file": true, "is_file": true,
"children": [ "children": [
{
"type": "schema",
"name": "schema_houdini_scriptshelf"
},
{ {
"type": "schema", "type": "schema",
"name": "schema_houdini_create" "name": "schema_houdini_create"

View file

@ -238,6 +238,31 @@
} }
] ]
} }
},
{
"type": "list",
"key": "workfile_lock_profiles",
"label": "Workfile lock profiles",
"use_label_wrap": true,
"object_type": {
"type": "dict",
"children": [
{
"type": "hosts-enum",
"key": "host_name",
"label": "Hosts",
"multiselection": true
},
{
"type": "splitter"
},
{
"key": "enabled",
"label": "Enabled",
"type": "boolean"
}
]
}
} }
] ]
}, },

View file

@ -0,0 +1,71 @@
{
"type": "list",
"key": "shelves",
"label": "Shelves Manager",
"is_group": true,
"use_label_wrap": true,
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "shelf_set_name",
"label": "Shelf Set Name"
},
{
"type": "path",
"key": "shelf_set_source_path",
"label": "Shelf Set Path (optional)",
"multipath": false,
"multiplatform": true
},
{
"type": "list",
"key": "shelf_definition",
"label": "Shelves",
"use_label_wrap": true,
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "shelf_name",
"label": "Shelf Name"
},
{
"type": "list",
"key": "tools_list",
"label": "Tools",
"use_label_wrap": true,
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "label",
"label": "Name"
},
{
"type": "path",
"key": "script",
"label": "Script"
},
{
"type": "path",
"key": "icon",
"label": "Icon"
},
{
"type": "text",
"key": "help",
"label": "Help"
}
]
}
}
]
}
}
]
}
}

View file

@ -94,18 +94,6 @@
} }
] ]
}, },
{
"type": "dict",
"key": "PanZoom",
"children": [
{
"type": "boolean",
"key": "pan_zoom",
"label": " Pan Zoom"
}
]
},
{ {
"type": "splitter" "type": "splitter"
}, },
@ -153,19 +141,6 @@
"decimal": 0, "decimal": 0,
"minimum": 0, "minimum": 0,
"maximum": 99999 "maximum": 99999
},
{
"type": "number",
"key": "percent",
"label": "percent",
"decimal": 1,
"minimum": 0,
"maximum": 200
},
{
"type": "text",
"key": "mode",
"label": "Mode"
} }
] ]
}, },
@ -195,6 +170,11 @@
{ "nolights": "No Lights"} { "nolights": "No Lights"}
] ]
}, },
{
"type": "boolean",
"key": "displayTextures",
"label": "Display Textures"
},
{ {
"type": "number", "type": "number",
"key": "textureMaxResolution", "key": "textureMaxResolution",
@ -217,11 +197,6 @@
"key": "shadows", "key": "shadows",
"label": "Display Shadows" "label": "Display Shadows"
}, },
{
"type": "boolean",
"key": "textures",
"label": "Display Textures"
},
{ {
"type": "boolean", "type": "boolean",
"key": "twoSidedLighting", "key": "twoSidedLighting",
@ -369,120 +344,114 @@
{ {
"type": "splitter" "type": "splitter"
}, },
{
"type": "label",
"label": "<b>Show</b>"
},
{ {
"type": "boolean", "type": "boolean",
"key": "cameras", "key": "cameras",
"label": "cameras" "label": "Cameras"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "clipGhosts", "key": "clipGhosts",
"label": "clipGhosts" "label": "Clip Ghosts"
},
{
"type": "boolean",
"key": "controlVertices",
"label": "controlVertices"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "deformers", "key": "deformers",
"label": "deformers" "label": "Deformers"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "dimensions", "key": "dimensions",
"label": "dimensions" "label": "Dimensions"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "dynamicConstraints", "key": "dynamicConstraints",
"label": "dynamicConstraints" "label": "Dynamic Constraints"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "dynamics", "key": "dynamics",
"label": "dynamics" "label": "Dynamics"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "fluids", "key": "fluids",
"label": "fluids" "label": "Fluids"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "follicles", "key": "follicles",
"label": "follicles" "label": "Follicles"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "gpuCacheDisplayFilter", "key": "gpuCacheDisplayFilter",
"label": "gpuCacheDisplayFilter" "label": "GPU Cache"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "greasePencils", "key": "greasePencils",
"label": "greasePencils" "label": "Grease Pencil"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "grid", "key": "grid",
"label": "grid" "label": "Grid"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "hairSystems", "key": "hairSystems",
"label": "hairSystems" "label": "Hair Systems"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "handles", "key": "handles",
"label": "handles" "label": "Handles"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "hud", "key": "headsUpDisplay",
"label": "hud" "label": "HUD"
},
{
"type": "boolean",
"key": "hulls",
"label": "hulls"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "ikHandles", "key": "ikHandles",
"label": "ikHandles" "label": "IK Handles"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "imagePlane", "key": "imagePlane",
"label": "imagePlane" "label": "Image Planes"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "joints", "key": "joints",
"label": "joints" "label": "Joints"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "lights", "key": "lights",
"label": "lights" "label": "Lights"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "locators", "key": "locators",
"label": "locators" "label": "Locators"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "manipulators", "key": "manipulators",
"label": "manipulators" "label": "Manipulators"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "motionTrails", "key": "motionTrails",
"label": "motionTrails" "label": "Motion Trails"
}, },
{ {
"type": "boolean", "type": "boolean",
@ -499,50 +468,65 @@
"key": "nRigids", "key": "nRigids",
"label": "nRigids" "label": "nRigids"
}, },
{
"type": "boolean",
"key": "controlVertices",
"label": "NURBS CVs"
},
{ {
"type": "boolean", "type": "boolean",
"key": "nurbsCurves", "key": "nurbsCurves",
"label": "nurbsCurves" "label": "NURBS Curves"
},
{
"type": "boolean",
"key": "hulls",
"label": "NURBS Hulls"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "nurbsSurfaces", "key": "nurbsSurfaces",
"label": "nurbsSurfaces" "label": "NURBS Surfaces"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "particleInstancers", "key": "particleInstancers",
"label": "particleInstancers" "label": "Particle Instancers"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "pivots", "key": "pivots",
"label": "pivots" "label": "Pivots"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "planes", "key": "planes",
"label": "planes" "label": "Planes"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "pluginShapes", "key": "pluginShapes",
"label": "pluginShapes" "label": "Plugin Shapes"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "polymeshes", "key": "polymeshes",
"label": "polymeshes" "label": "Polygons"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "strokes", "key": "strokes",
"label": "strokes" "label": "Strokes"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "subdivSurfaces", "key": "subdivSurfaces",
"label": "subdivSurfaces" "label": "Subdiv Surfaces"
},
{
"type": "boolean",
"key": "textures",
"label": "Texture Placements"
} }
] ]
}, },
@ -555,47 +539,47 @@
{ {
"type": "boolean", "type": "boolean",
"key": "displayGateMask", "key": "displayGateMask",
"label": "displayGateMask" "label": "Display Gate Mask"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "displayResolution", "key": "displayResolution",
"label": "displayResolution" "label": "Display Resolution"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "displayFilmGate", "key": "displayFilmGate",
"label": "displayFilmGate" "label": "Display Film Gate"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "displayFieldChart", "key": "displayFieldChart",
"label": "displayFieldChart" "label": "Display Field Chart"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "displaySafeAction", "key": "displaySafeAction",
"label": "displaySafeAction" "label": "Display Safe Action"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "displaySafeTitle", "key": "displaySafeTitle",
"label": "displaySafeTitle" "label": "Display Safe Title"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "displayFilmPivot", "key": "displayFilmPivot",
"label": "displayFilmPivot" "label": "Display Film Pivot"
}, },
{ {
"type": "boolean", "type": "boolean",
"key": "displayFilmOrigin", "key": "displayFilmOrigin",
"label": "displayFilmOrigin" "label": "Display Film Origin"
}, },
{ {
"type": "number", "type": "number",
"key": "overscan", "key": "overscan",
"label": "overscan", "label": "Overscan",
"decimal": 1, "decimal": 1,
"minimum": 0, "minimum": 0,
"maximum": 10 "maximum": 10

View file

@ -0,0 +1,28 @@
from Qt import QtWidgets, QtGui, QtCore
class LoadedInSceneDelegate(QtWidgets.QStyledItemDelegate):
    """Delegate for the "Loaded in Scene" state column.

    Displays "yes" for truthy values and "no" for falsy values, and
    tints the text green when loaded and dark grey when not.
    """

    def __init__(self, *args, **kwargs):
        super(LoadedInSceneDelegate, self).__init__(*args, **kwargs)
        self._colors = {
            True: QtGui.QColor(80, 170, 80),
            False: QtGui.QColor(90, 90, 90),
        }

    def displayText(self, value, locale):
        if value:
            return "yes"
        return "no"

    def initStyleOption(self, option, index):
        super(LoadedInSceneDelegate, self).initStyleOption(option, index)

        # Tint the text based on the cell's (truthy/falsy) value
        cell_value = index.data(QtCore.Qt.DisplayRole)
        brush_color = self._colors[bool(cell_value)]
        option.palette.setBrush(QtGui.QPalette.Text, brush_color)

View file

@ -17,6 +17,7 @@ from openpype.client import (
get_representations get_representations
) )
from openpype.pipeline import ( from openpype.pipeline import (
registered_host,
HeroVersionType, HeroVersionType,
schema, schema,
) )
@ -24,6 +25,7 @@ from openpype.pipeline import (
from openpype.style import get_default_entity_icon_color from openpype.style import get_default_entity_icon_color
from openpype.tools.utils.models import TreeModel, Item from openpype.tools.utils.models import TreeModel, Item
from openpype.tools.utils import lib from openpype.tools.utils import lib
from openpype.host import ILoadHost
from openpype.modules import ModulesManager from openpype.modules import ModulesManager
from openpype.tools.utils.constants import ( from openpype.tools.utils.constants import (
@ -136,6 +138,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
"duration", "duration",
"handles", "handles",
"step", "step",
"loaded_in_scene",
"repre_info" "repre_info"
] ]
@ -150,6 +153,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
"duration": "Duration", "duration": "Duration",
"handles": "Handles", "handles": "Handles",
"step": "Step", "step": "Step",
"loaded_in_scene": "In scene",
"repre_info": "Availability" "repre_info": "Availability"
} }
@ -231,8 +235,14 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
self._doc_fetching_stop = False self._doc_fetching_stop = False
self._doc_payload = {} self._doc_payload = {}
self.doc_fetched.connect(self._on_doc_fetched) self._host = registered_host()
self._loaded_representation_ids = set()
# Refresh loaded scene containers only every 3 seconds at most
self._host_loaded_refresh_timeout = 3
self._host_loaded_refresh_time = 0
self.doc_fetched.connect(self._on_doc_fetched)
self.refresh() self.refresh()
def get_item_by_id(self, item_id): def get_item_by_id(self, item_id):
@ -474,6 +484,27 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
last_versions_by_subset_id[subset_id] = hero_version last_versions_by_subset_id[subset_id] = hero_version
# Check loaded subsets
loaded_subset_ids = set()
ids = self._loaded_representation_ids
if ids:
if self._doc_fetching_stop:
return
# Get subset ids from loaded representations in workfile
# todo: optimize with aggregation query to distinct subset id
representations = get_representations(project_name,
representation_ids=ids,
fields=["parent"])
version_ids = set(repre["parent"] for repre in representations)
versions = get_versions(project_name,
version_ids=version_ids,
fields=["parent"])
loaded_subset_ids = set(version["parent"] for version in versions)
if self._doc_fetching_stop:
return
repre_info_by_version_id = {} repre_info_by_version_id = {}
if self.sync_server.enabled: if self.sync_server.enabled:
versions_by_id = {} versions_by_id = {}
@ -501,7 +532,8 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
"subset_docs_by_id": subset_docs_by_id, "subset_docs_by_id": subset_docs_by_id,
"subset_families": subset_families, "subset_families": subset_families,
"last_versions_by_subset_id": last_versions_by_subset_id, "last_versions_by_subset_id": last_versions_by_subset_id,
"repre_info_by_version_id": repre_info_by_version_id "repre_info_by_version_id": repre_info_by_version_id,
"subsets_loaded_by_id": loaded_subset_ids
} }
self.doc_fetched.emit() self.doc_fetched.emit()
@ -533,6 +565,20 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
self.doc_fetched.emit() self.doc_fetched.emit()
return return
# Collect scene container representations to compare loaded state
# This runs in the main thread because it involves the host DCC
if self._host:
time_since_refresh = time.time() - self._host_loaded_refresh_time
if time_since_refresh > self._host_loaded_refresh_timeout:
if isinstance(self._host, ILoadHost):
containers = self._host.get_containers()
else:
containers = self._host.ls()
repre_ids = {con.get("representation") for con in containers}
self._loaded_representation_ids = repre_ids
self._host_loaded_refresh_time = time.time()
self.fetch_subset_and_version() self.fetch_subset_and_version()
def _on_doc_fetched(self): def _on_doc_fetched(self):
@ -554,6 +600,10 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
"repre_info_by_version_id" "repre_info_by_version_id"
) )
subsets_loaded_by_id = self._doc_payload.get(
"subsets_loaded_by_id"
)
if ( if (
asset_docs_by_id is None asset_docs_by_id is None
or subset_docs_by_id is None or subset_docs_by_id is None
@ -568,7 +618,8 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
asset_docs_by_id, asset_docs_by_id,
subset_docs_by_id, subset_docs_by_id,
last_versions_by_subset_id, last_versions_by_subset_id,
repre_info_by_version_id repre_info_by_version_id,
subsets_loaded_by_id
) )
self.endResetModel() self.endResetModel()
self.refreshed.emit(True) self.refreshed.emit(True)
@ -596,8 +647,12 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
return merge_group return merge_group
def _fill_subset_items( def _fill_subset_items(
self, asset_docs_by_id, subset_docs_by_id, last_versions_by_subset_id, self,
repre_info_by_version_id asset_docs_by_id,
subset_docs_by_id,
last_versions_by_subset_id,
repre_info_by_version_id,
subsets_loaded_by_id
): ):
_groups_tuple = self.groups_config.split_subsets_for_groups( _groups_tuple = self.groups_config.split_subsets_for_groups(
subset_docs_by_id.values(), self._grouping subset_docs_by_id.values(), self._grouping
@ -621,6 +676,35 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
"index": self.index(group_item.row(), 0) "index": self.index(group_item.row(), 0)
} }
def _add_subset_item(subset_doc, parent_item, parent_index):
last_version = last_versions_by_subset_id.get(
subset_doc["_id"]
)
# do not show subset without version
if not last_version:
return
data = copy.deepcopy(subset_doc)
data["subset"] = subset_doc["name"]
asset_id = subset_doc["parent"]
data["asset"] = asset_docs_by_id[asset_id]["name"]
data["last_version"] = last_version
data["loaded_in_scene"] = subset_doc["_id"] in subsets_loaded_by_id
# Sync server data
data.update(
self._get_last_repre_info(repre_info_by_version_id,
last_version["_id"]))
item = Item()
item.update(data)
self.add_child(item, parent_item)
index = self.index(item.row(), 0, parent_index)
self.set_version(index, last_version)
subset_counter = 0 subset_counter = 0
for group_name, subset_docs_by_name in subset_docs_by_group.items(): for group_name, subset_docs_by_name in subset_docs_by_group.items():
parent_item = group_item_by_name[group_name]["item"] parent_item = group_item_by_name[group_name]["item"]
@ -643,31 +727,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
_parent_index = parent_index _parent_index = parent_index
for subset_doc in subset_docs: for subset_doc in subset_docs:
asset_id = subset_doc["parent"] _add_subset_item(subset_doc,
parent_item=_parent_item,
data = copy.deepcopy(subset_doc) parent_index=_parent_index)
data["subset"] = subset_name
data["asset"] = asset_docs_by_id[asset_id]["name"]
last_version = last_versions_by_subset_id.get(
subset_doc["_id"]
)
data["last_version"] = last_version
# do not show subset without version
if not last_version:
continue
data.update(
self._get_last_repre_info(repre_info_by_version_id,
last_version["_id"]))
item = Item()
item.update(data)
self.add_child(item, _parent_item)
index = self.index(item.row(), 0, _parent_index)
self.set_version(index, last_version)
for subset_name in sorted(subset_docs_without_group.keys()): for subset_name in sorted(subset_docs_without_group.keys()):
subset_docs = subset_docs_without_group[subset_name] subset_docs = subset_docs_without_group[subset_name]
@ -682,31 +744,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
subset_counter += 1 subset_counter += 1
for subset_doc in subset_docs: for subset_doc in subset_docs:
asset_id = subset_doc["parent"] _add_subset_item(subset_doc,
parent_item=parent_item,
data = copy.deepcopy(subset_doc) parent_index=parent_index)
data["subset"] = subset_name
data["asset"] = asset_docs_by_id[asset_id]["name"]
last_version = last_versions_by_subset_id.get(
subset_doc["_id"]
)
data["last_version"] = last_version
# do not show subset without version
if not last_version:
continue
data.update(
self._get_last_repre_info(repre_info_by_version_id,
last_version["_id"]))
item = Item()
item.update(data)
self.add_child(item, parent_item)
index = self.index(item.row(), 0, parent_index)
self.set_version(index, last_version)
def data(self, index, role): def data(self, index, role):
if not index.isValid(): if not index.isValid():

View file

@ -58,6 +58,7 @@ from .model import (
ITEM_ID_ROLE ITEM_ID_ROLE
) )
from . import lib from . import lib
from .delegates import LoadedInSceneDelegate
from openpype.tools.utils.constants import ( from openpype.tools.utils.constants import (
LOCAL_PROVIDER_ROLE, LOCAL_PROVIDER_ROLE,
@ -169,6 +170,7 @@ class SubsetWidget(QtWidgets.QWidget):
("duration", 60), ("duration", 60),
("handles", 55), ("handles", 55),
("step", 10), ("step", 10),
("loaded_in_scene", 25),
("repre_info", 65) ("repre_info", 65)
) )
@ -234,6 +236,10 @@ class SubsetWidget(QtWidgets.QWidget):
column = model.Columns.index("repre_info") column = model.Columns.index("repre_info")
view.setItemDelegateForColumn(column, avail_delegate) view.setItemDelegateForColumn(column, avail_delegate)
loaded_in_scene_delegate = LoadedInSceneDelegate(view)
column = model.Columns.index("loaded_in_scene")
view.setItemDelegateForColumn(column, loaded_in_scene_delegate)
layout = QtWidgets.QVBoxLayout(self) layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0) layout.setContentsMargins(0, 0, 0, 0)
layout.addLayout(top_bar_layout) layout.addLayout(top_bar_layout)

View file

@ -30,8 +30,8 @@ from .widgets import (
class PublisherWindow(QtWidgets.QDialog): class PublisherWindow(QtWidgets.QDialog):
"""Main window of publisher.""" """Main window of publisher."""
default_width = 1000 default_width = 1200
default_height = 600 default_height = 700
def __init__(self, parent=None, reset_on_show=None): def __init__(self, parent=None, reset_on_show=None):
super(PublisherWindow, self).__init__(parent) super(PublisherWindow, self).__init__(parent)

View file

@ -8,9 +8,15 @@ from Qt import QtWidgets, QtCore
from openpype.host import IWorkfileHost from openpype.host import IWorkfileHost
from openpype.client import get_asset_by_id from openpype.client import get_asset_by_id
from openpype.pipeline.workfile.lock_workfile import (
is_workfile_locked,
is_workfile_lock_enabled,
is_workfile_locked_for_current_process
)
from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils import PlaceholderLineEdit
from openpype.tools.utils.delegates import PrettyTimeDelegate from openpype.tools.utils.delegates import PrettyTimeDelegate
from openpype.lib import emit_event from openpype.lib import emit_event
from openpype.tools.workfiles.lock_dialog import WorkfileLockDialog
from openpype.pipeline import ( from openpype.pipeline import (
registered_host, registered_host,
legacy_io, legacy_io,
@ -452,8 +458,19 @@ class FilesWidget(QtWidgets.QWidget):
"host_name": self.host_name "host_name": self.host_name
} }
def _is_workfile_locked(self, filepath):
if not is_workfile_lock_enabled(self.host_name, self.project_name):
return False
if not is_workfile_locked(filepath):
return False
return not is_workfile_locked_for_current_process(filepath)
def open_file(self, filepath): def open_file(self, filepath):
host = self.host host = self.host
if self._is_workfile_locked(filepath):
# add lockfile dialog
WorkfileLockDialog(filepath)
if isinstance(host, IWorkfileHost): if isinstance(host, IWorkfileHost):
has_unsaved_changes = host.workfile_has_unsaved_changes() has_unsaved_changes = host.workfile_has_unsaved_changes()
else: else:
@ -561,7 +578,7 @@ class FilesWidget(QtWidgets.QWidget):
src = self._get_selected_filepath() src = self._get_selected_filepath()
dst = os.path.join(self._workfiles_root, work_file) dst = os.path.join(self._workfiles_root, work_file)
shutil.copy(src, dst) shutil.copyfile(src, dst)
self.workfile_created.emit(dst) self.workfile_created.emit(dst)
@ -658,7 +675,7 @@ class FilesWidget(QtWidgets.QWidget):
else: else:
self.host.save_file(filepath) self.host.save_file(filepath)
else: else:
shutil.copy(src_path, filepath) shutil.copyfile(src_path, filepath)
if isinstance(self.host, IWorkfileHost): if isinstance(self.host, IWorkfileHost):
self.host.open_workfile(filepath) self.host.open_workfile(filepath)
else: else:

View file

@ -0,0 +1,47 @@
from Qt import QtWidgets, QtCore, QtGui
from openpype.style import load_stylesheet, get_app_icon_path
from openpype.pipeline.workfile.lock_workfile import get_workfile_lock_data
class WorkfileLockDialog(QtWidgets.QDialog):
    """Dialog warning that a workfile is locked by someone else.

    Shows the "username" and "hostname" stored in the workfile's lock
    file and offers two choices: "Cancel" rejects the dialog and
    "Ignore lock" accepts it.

    Args:
        workfile_path (str): Path to the locked workfile.
        parent (QtWidgets.QWidget): Optional parent widget.
    """

    def __init__(self, workfile_path, parent=None):
        super(WorkfileLockDialog, self).__init__(parent)

        self.setWindowTitle("Warning")
        icon = QtGui.QIcon(get_app_icon_path())
        self.setWindowIcon(icon)

        data = get_workfile_lock_data(workfile_path)
        message = "{} on {} machine is working on the same workfile.".format(
            data["username"],
            data["hostname"]
        )

        msg_label = QtWidgets.QLabel(message, self)

        btns_widget = QtWidgets.QWidget(self)
        cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget)
        ignore_btn = QtWidgets.QPushButton("Ignore lock", btns_widget)

        btns_layout = QtWidgets.QHBoxLayout(btns_widget)
        btns_layout.setContentsMargins(0, 0, 0, 0)
        btns_layout.setSpacing(10)
        btns_layout.addStretch(1)
        btns_layout.addWidget(cancel_btn, 0)
        btns_layout.addWidget(ignore_btn, 0)

        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(15, 15, 15, 15)
        # Fix: removed a stray trailing comma that turned this statement
        # into a one-element tuple expression.
        main_layout.addWidget(msg_label, 1, QtCore.Qt.AlignCenter)
        main_layout.addSpacing(10)
        main_layout.addWidget(btns_widget, 0)

        cancel_btn.clicked.connect(self.reject)
        ignore_btn.clicked.connect(self.accept)

    def showEvent(self, event):
        super(WorkfileLockDialog, self).showEvent(event)
        # Apply stylesheet on show so the current style is picked up
        self.setStyleSheet(load_stylesheet())

View file

@ -0,0 +1,11 @@
---
id: admin_hosts_houdini
title: Houdini
sidebar_label: Houdini
---
## Shelves Manager
You can add your custom shelf set into Houdini by setting your shelf sets, shelves and tools in **Houdini -> Shelves Manager**.
![Custom menu definition](assets/houdini-admin_shelvesmanager.png)
The Shelf Set Path is used to load a `.shelf` file that generates your shelf set. If this path is specified, you do not need to define the shelves and tools manually.

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

View file

@ -101,6 +101,7 @@ module.exports = {
items: [ items: [
"admin_hosts_blender", "admin_hosts_blender",
"admin_hosts_hiero", "admin_hosts_hiero",
"admin_hosts_houdini",
"admin_hosts_maya", "admin_hosts_maya",
"admin_hosts_nuke", "admin_hosts_nuke",
"admin_hosts_resolve", "admin_hosts_resolve",
@ -142,7 +143,7 @@ module.exports = {
], ],
}, },
], ],
Dev: [ Dev: [
"dev_introduction", "dev_introduction",
"dev_requirements", "dev_requirements",
"dev_build", "dev_build",
@ -157,5 +158,5 @@ module.exports = {
"dev_publishing" "dev_publishing"
] ]
} }
] ]
}; };