Merge branch 'develop' into feature/OP-4245Data_Exchange_Geometry

Ondřej Samohel 2023-05-03 14:53:39 +02:00 committed by GitHub
commit 23df39087e
79 changed files with 4320 additions and 555 deletions

View file

@ -25,6 +25,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"blender",
"photoshop",
"tvpaint",
"substancepainter",
"aftereffects"
]

View file

@ -0,0 +1,37 @@
from openpype.lib import PreLaunchHook
from openpype.pipeline.colorspace import get_imageio_config
from openpype.pipeline.template_data import get_template_data
class PreLaunchHostSetOCIO(PreLaunchHook):
"""Set OCIO environment for the host"""
order = 0
app_groups = ["substancepainter"]
def execute(self):
"""Hook entry method."""
anatomy_data = get_template_data(
project_doc=self.data["project_doc"],
asset_doc=self.data["asset_doc"],
task_name=self.data["task_name"],
host_name=self.host_name,
system_settings=self.data["system_settings"]
)
ocio_config = get_imageio_config(
project_name=self.data["project_doc"]["name"],
host_name=self.host_name,
project_settings=self.data["project_settings"],
anatomy_data=anatomy_data,
anatomy=self.data["anatomy"]
)
if ocio_config:
ocio_path = ocio_config["path"]
self.log.info(f"Setting OCIO config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path
else:
self.log.debug("OCIO not set or enabled")

View file

@ -0,0 +1,46 @@
import pyblish.api
import hou
from openpype.pipeline.publish import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid nodes in Maya when plug-in failed.
To retrieve the invalid nodes this assumes a static `get_invalid()`
method is available on the plugin.
"""
label = "Select invalid"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning("Plug-in returned to be invalid, "
"but has no selectable nodes.")
hou.clearAllSelected()
if invalid:
self.log.info("Selecting invalid nodes: {}".format(
", ".join(node.path() for node in invalid)
))
for node in invalid:
node.setSelected(True)
node.setCurrent(True)
else:
self.log.info("No invalid nodes found.")

View file

@ -12,26 +12,43 @@ import tempfile
import logging
import os
from openpype.client import get_asset_by_name
from openpype.pipeline import registered_host
from openpype.pipeline.create import CreateContext
from openpype.resources import get_openpype_icon_filepath
import hou
import stateutils
import soptoolutils
import loptoolutils
import cop2toolutils
log = logging.getLogger(__name__)
CATEGORY_GENERIC_TOOL = {
hou.sopNodeTypeCategory(): soptoolutils.genericTool,
hou.cop2NodeTypeCategory(): cop2toolutils.genericTool,
hou.lopNodeTypeCategory(): loptoolutils.genericTool
}
CREATE_SCRIPT = """
from openpype.hosts.houdini.api.creator_node_shelves import create_interactive
create_interactive("{identifier}")
create_interactive("{identifier}", **kwargs)
"""
def create_interactive(creator_identifier):
def create_interactive(creator_identifier, **kwargs):
"""Create a Creator using its identifier interactively.
This is used by the generated shelf tools as callback when a user selects
the creator from the node tab search menu.
The `kwargs` should be what Houdini passes to the tool create scripts
context. For more information see:
https://www.sidefx.com/docs/houdini/hom/tool_script.html#arguments
Args:
creator_identifier (str): The creator identifier of the Creator plugin
to create.
@ -58,6 +75,33 @@ def create_interactive(creator_identifier):
host = registered_host()
context = CreateContext(host)
creator = context.manual_creators.get(creator_identifier)
if not creator:
raise RuntimeError("Invalid creator identifier: "
"{}".format(creator_identifier))
# TODO: Once more elaborate unique create behavior should exist per Creator
# instead of per network editor area then we should move this from here
# to a method on the Creators for which this could be the default
# implementation.
pane = stateutils.activePane(kwargs)
if isinstance(pane, hou.NetworkEditor):
pwd = pane.pwd()
subset_name = creator.get_subset_name(
variant=variant,
task_name=context.get_current_task_name(),
asset_doc=get_asset_by_name(
project_name=context.get_current_project_name(),
asset_name=context.get_current_asset_name()
),
project_name=context.get_current_project_name(),
host_name=context.host_name
)
tool_fn = CATEGORY_GENERIC_TOOL.get(pwd.childTypeCategory())
if tool_fn is not None:
out_null = tool_fn(kwargs, "null")
out_null.setName("OUT_{}".format(subset_name), unique_name=True)
before = context.instances_by_id.copy()
@ -135,12 +179,20 @@ def install():
log.debug("Writing OpenPype Creator nodes to shelf: {}".format(filepath))
tools = []
with shelves_change_block():
for identifier, creator in create_context.manual_creators.items():
# TODO: Allow the creator plug-in itself to override the categories
# for where they are shown, by e.g. defining
# `Creator.get_network_categories()`
# Allow the creator plug-in itself to override the categories
# for where they are shown with `Creator.get_network_categories()`
if not hasattr(creator, "get_network_categories"):
log.debug("Creator {} has no `get_network_categories` method "
"and will not be added to TAB search.")
continue
network_categories = creator.get_network_categories()
if not network_categories:
continue
key = "openpype_create.{}".format(identifier)
log.debug(f"Registering {key}")
@ -153,17 +205,13 @@ def install():
creator.label
),
"help_url": None,
"network_categories": [
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
],
"network_categories": network_categories,
"viewer_categories": [],
"cop_viewer_categories": [],
"network_op_type": None,
"viewer_op_type": None,
"locations": ["OpenPype"]
}
label = "Create {}".format(creator.label)
tool = hou.shelves.tool(key)
if tool:

View file

@ -276,3 +276,19 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
color = hou.Color((0.616, 0.871, 0.769))
node.setUserData('nodeshape', shape)
node.setColor(color)
def get_network_categories(self):
"""Return in which network view type this creator should show.
The node type categories returned here will be used to define where
the creator will show up in the TAB search for nodes in Houdini's
Network View.
This can be overridden in inherited classes to define where that
particular Creator should be visible in the TAB search.
Returns:
list: List of houdini node type categories
"""
return [hou.ropNodeTypeCategory()]

View file

@ -3,6 +3,8 @@
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance, CreatorError
import hou
class CreateAlembicCamera(plugin.HoudiniCreator):
"""Single baked camera from Alembic ROP."""
@ -47,3 +49,9 @@ class CreateAlembicCamera(plugin.HoudiniCreator):
self.lock_parameters(instance_node, to_lock)
instance_node.parm("trange").set(1)
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.objNodeTypeCategory()
]

View file

@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating composite sequences."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.pipeline import CreatedInstance, CreatorError
import hou
class CreateCompositeSequence(plugin.HoudiniCreator):
@ -35,8 +37,20 @@ class CreateCompositeSequence(plugin.HoudiniCreator):
"copoutput": filepath
}
if self.selected_nodes:
if len(self.selected_nodes) > 1:
raise CreatorError("More than one item selected.")
path = self.selected_nodes[0].path()
parms["coppath"] = path
instance_node.setParms(parms)
# Lock any parameters in this list
to_lock = ["prim_to_detail_pattern"]
self.lock_parameters(instance_node, to_lock)
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.cop2NodeTypeCategory()
]

View file

@ -3,6 +3,8 @@
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
import hou
class CreatePointCache(plugin.HoudiniCreator):
"""Alembic ROP to pointcache"""
@ -49,3 +51,9 @@ class CreatePointCache(plugin.HoudiniCreator):
# Lock any parameters in this list
to_lock = ["prim_to_detail_pattern"]
self.lock_parameters(instance_node, to_lock)
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]

View file

@ -3,6 +3,8 @@
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
import hou
class CreateUSD(plugin.HoudiniCreator):
"""Universal Scene Description"""
@ -13,7 +15,6 @@ class CreateUSD(plugin.HoudiniCreator):
enabled = False
def create(self, subset_name, instance_data, pre_create_data):
import hou # noqa
instance_data.pop("active", None)
instance_data.update({"node_type": "usd"})
@ -43,3 +44,9 @@ class CreateUSD(plugin.HoudiniCreator):
"id",
]
self.lock_parameters(instance_node, to_lock)
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.lopNodeTypeCategory()
]

View file

@ -3,6 +3,8 @@
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
import hou
class CreateVDBCache(plugin.HoudiniCreator):
"""OpenVDB from Geometry ROP"""
@ -34,3 +36,9 @@ class CreateVDBCache(plugin.HoudiniCreator):
parms["soppath"] = self.selected_nodes[0].path()
instance_node.setParms(parms)
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]

View file

@ -14,7 +14,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
identifier = "io.openpype.creators.houdini.workfile"
label = "Workfile"
family = "workfile"
icon = "document"
icon = "fa5.file"
default_variant = "Main"

View file

@ -19,6 +19,9 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
instance.data["handleEnd"] = 0
instance.data["fps"] = instance.context.data["fps"]
# Enable ftrack functionality
instance.data.setdefault("families", []).append('ftrack')
# Get the camera from the rop node to collect the focal length
ropnode_path = instance.data["instance_node"]
ropnode = hou.node(ropnode_path)
@ -26,8 +29,9 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
camera_path = ropnode.parm("camera").eval()
camera_node = hou.node(camera_path)
if not camera_node:
raise RuntimeError("No valid camera node found on review node: "
"{}".format(camera_path))
self.log.warning("No valid camera node found on review node: "
"{}".format(camera_path))
return
# Collect focal length.
focal_length_parm = camera_node.parm("focal")
@ -49,5 +53,3 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
# Store focal length in `burninDataMembers`
burnin_members = instance.data.setdefault("burninDataMembers", {})
burnin_members["focalLength"] = focal_length
instance.data.setdefault("families", []).append('ftrack')

View file

@ -2,27 +2,20 @@ import os
import pyblish.api
from openpype.pipeline import (
publish,
OptionalPyblishPluginMixin
)
from openpype.pipeline import publish
from openpype.hosts.houdini.api.lib import render_rop
import hou
class ExtractOpenGL(publish.Extractor,
OptionalPyblishPluginMixin):
class ExtractOpenGL(publish.Extractor):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract OpenGL"
families = ["review"]
hosts = ["houdini"]
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
ropnode = hou.node(instance.data.get("instance_node"))
output = ropnode.evalParm("picture")

View file

@ -1,21 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Scene setting</title>
<description>
## Invalid input node
VDB input must have the same number of VDBs, points, primitives and vertices as output.
</description>
<detail>
### __Detailed Info__ (optional)
A VDB is an inherited type of Prim, holds the following data:
- Primitives: 1
- Points: 1
- Vertices: 1
- VDBs: 1
</detail>
</error>
</root>

View file

@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Invalid VDB</title>
<description>
## Invalid VDB output
All primitives of the output geometry must be VDBs; no other primitive
types are allowed. That means that, regardless of the number of VDBs in
the geometry, it will have an equal number of VDBs, points, primitives and
vertices, since each VDB primitive is one point, one vertex and one VDB.
This validation only checks the geometry on the first frame of the export
frame range.
</description>
<detail>
### Detailed Info
ROP node `{rop_path}` is set to export SOP path `{sop_path}`.
{message}
</detail>
</error>
</root>

View file

@ -16,15 +16,19 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
label = "Scene Setting for review"
def process(self, instance):
invalid = self.get_invalid_scene_path(instance)
report = []
if invalid:
report.append(
"Scene path does not exist: '%s'" % invalid[0],
)
instance_node = hou.node(instance.data.get("instance_node"))
invalid = self.get_invalid_resolution(instance)
invalid = self.get_invalid_scene_path(instance_node)
if invalid:
report.append(invalid)
invalid = self.get_invalid_camera_path(instance_node)
if invalid:
report.append(invalid)
invalid = self.get_invalid_resolution(instance_node)
if invalid:
report.extend(invalid)
@ -33,26 +37,36 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
"\n\n".join(report),
title=self.label)
def get_invalid_scene_path(self, instance):
node = hou.node(instance.data.get("instance_node"))
scene_path_parm = node.parm("scenepath")
def get_invalid_scene_path(self, rop_node):
scene_path_parm = rop_node.parm("scenepath")
scene_path_node = scene_path_parm.evalAsNode()
if not scene_path_node:
return [scene_path_parm.evalAsString()]
path = scene_path_parm.evalAsString()
return "Scene path does not exist: '{}'".format(path)
def get_invalid_resolution(self, instance):
node = hou.node(instance.data.get("instance_node"))
def get_invalid_camera_path(self, rop_node):
camera_path_parm = rop_node.parm("camera")
camera_node = camera_path_parm.evalAsNode()
path = camera_path_parm.evalAsString()
if not camera_node:
return "Camera path does not exist: '{}'".format(path)
type_name = camera_node.type().name()
if type_name != "cam":
return "Camera path is not a camera: '{}' (type: {})".format(
path, type_name
)
def get_invalid_resolution(self, rop_node):
# The resolution setting is only used when Override Camera Resolution
# is enabled. So we skip validation if it is disabled.
override = node.parm("tres").eval()
override = rop_node.parm("tres").eval()
if not override:
return
invalid = []
res_width = node.parm("res1").eval()
res_height = node.parm("res2").eval()
res_width = rop_node.parm("res1").eval()
res_height = rop_node.parm("res2").eval()
if res_width == 0:
invalid.append("Override Resolution width is set to zero.")
if res_height == 0:

View file

@ -1,52 +0,0 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import (
PublishValidationError
)
class ValidateVDBInputNode(pyblish.api.InstancePlugin):
"""Validate that the node connected to the output node is of type VDB.
Regardless of the amount of VDBs create the output will need to have an
equal amount of VDBs, points, primitives and vertices
A VDB is an inherited type of Prim, holds the following data:
- Primitives: 1
- Points: 1
- Vertices: 1
- VDBs: 1
"""
order = pyblish.api.ValidatorOrder + 0.1
families = ["vdbcache"]
hosts = ["houdini"]
label = "Validate Input Node (VDB)"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError(
self,
"Node connected to the output node is not of type VDB",
title=self.label
)
@classmethod
def get_invalid(cls, instance):
node = instance.data["output_node"]
prims = node.geometry().prims()
nr_of_prims = len(prims)
nr_of_points = len(node.geometry().points())
if nr_of_points != nr_of_prims:
cls.log.error("The number of primitives and points do not match")
return [instance]
for prim in prims:
if prim.numVertices() != 1:
cls.log.error("Found primitive with more than 1 vertex!")
return [instance]

View file

@ -1,14 +1,73 @@
# -*- coding: utf-8 -*-
import contextlib
import pyblish.api
import hou
from openpype.pipeline import PublishValidationError
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.houdini.api.action import SelectInvalidAction
def group_consecutive_numbers(nums):
"""
Args:
nums (list): List of sorted integer numbers.
Yields:
str: Group ranges as {start}-{end} if more than one number in the range
else it yields {end}
"""
start = None
end = None
def _result(a, b):
if a == b:
return "{}".format(a)
else:
return "{}-{}".format(a, b)
for num in nums:
if start is None:
start = num
end = num
elif num == end + 1:
end = num
else:
yield _result(start, end)
start = num
end = num
if start is not None:
yield _result(start, end)
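A quick usage sketch of the helper above (values are illustrative):

nums = [0, 1, 2, 5, 7, 8]
print(list(group_consecutive_numbers(nums)))
# -> ['0-2', '5', '7-8']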
@contextlib.contextmanager
def update_mode_context(mode):
original = hou.updateModeSetting()
try:
hou.setUpdateMode(mode)
yield
finally:
hou.setUpdateMode(original)
def get_geometry_at_frame(sop_node, frame, force=True):
"""Return geometry at frame but force a cooked value."""
with update_mode_context(hou.updateMode.AutoUpdate):
sop_node.cook(force=force, frame_range=(frame, frame))
return sop_node.geometryAtFrame(frame)
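A usage sketch for the cook helper above (the SOP path and frame number are hypothetical):

sop = hou.node("/obj/geo1/OUT_vdbcache")
if sop is not None:
    geo = get_geometry_at_frame(sop, frame=1001)
    if geo is not None:
        print(geo.intrinsicValue("primitivecount"), "prims on frame 1001")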
class ValidateVDBOutputNode(pyblish.api.InstancePlugin):
"""Validate that the node connected to the output node is of type VDB.
Regardless of the amount of VDBs create the output will need to have an
equal amount of VDBs, points, primitives and vertices
All primitives of the output geometry must be VDBs; no other primitive
types are allowed. That means that, regardless of the number of VDBs in
the geometry, it will have an equal number of VDBs, points, primitives and
vertices, since each VDB primitive is one point, one vertex and one VDB.
This validation only checks the geometry on the first frame of the export
frame range for optimization purposes.
A VDB is an inherited type of Prim, which holds the following data:
- Primitives: 1
@ -22,54 +81,95 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin):
families = ["vdbcache"]
hosts = ["houdini"]
label = "Validate Output Node (VDB)"
actions = [SelectInvalidAction]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError(
"Node connected to the output node is not" " of type VDB!",
title=self.label
invalid_nodes, message = self.get_invalid_with_message(instance)
if invalid_nodes:
# instance_node is str, but output_node is hou.Node so we convert
output = instance.data.get("output_node")
output_path = output.path() if output else None
raise PublishXmlValidationError(
self,
"Invalid VDB content: {}".format(message),
formatting_data={
"message": message,
"rop_path": instance.data.get("instance_node"),
"sop_path": output_path
}
)
@classmethod
def get_invalid(cls, instance):
def get_invalid_with_message(cls, instance):
node = instance.data["output_node"]
node = instance.data.get("output_node")
if node is None:
cls.log.error(
instance_node = instance.data.get("instance_node")
error = (
"SOP path is not correctly set on "
"ROP node '%s'." % instance.data.get("instance_node")
"ROP node `{}`.".format(instance_node)
)
return [instance]
return [hou.node(instance_node), error]
frame = instance.data.get("frameStart", 0)
geometry = node.geometryAtFrame(frame)
geometry = get_geometry_at_frame(node, frame)
if geometry is None:
# No geometry data on this node, maybe the node hasn't cooked?
cls.log.error(
"SOP node has no geometry data. "
"Is it cooked? %s" % node.path()
error = (
"SOP node `{}` has no geometry data. "
"Was it unable to cook?".format(node.path())
)
return [node]
return [node, error]
prims = geometry.prims()
nr_of_prims = len(prims)
num_prims = geometry.intrinsicValue("primitivecount")
num_points = geometry.intrinsicValue("pointcount")
if num_prims == 0 and num_points == 0:
# Since we are only checking the first frame, there may still be VDB
# prims on later frames. As such we assume for now that the user knows
# what they are doing.
cls.log.warning(
"SOP node `{}` has no primitives on start frame {}. "
"Validation is skipped and it is assumed elsewhere in the "
"frame range VDB prims and only VDB prims will exist."
"".format(node.path(), int(frame))
)
return [None, None]
# All primitives must be hou.VDB
invalid_prim = False
for prim in prims:
if not isinstance(prim, hou.VDB):
cls.log.error("Found non-VDB primitive: %s" % prim)
invalid_prim = True
if invalid_prim:
return [instance]
num_vdb_prims = geometry.countPrimType(hou.primType.VDB)
cls.log.debug("Detected {} VDB primitives".format(num_vdb_prims))
if num_prims != num_vdb_prims:
# There's at least one primitive that is not a VDB.
# Search them and report them to the artist.
prims = geometry.prims()
invalid_prims = [prim for prim in prims
if not isinstance(prim, hou.VDB)]
if invalid_prims:
# Log prim numbers as consecutive ranges so logging isn't very
# slow for large number of primitives
error = (
"Found non-VDB primitives for `{}`. "
"Primitive indices {} are not VDB primitives.".format(
node.path(),
", ".join(group_consecutive_numbers(
prim.number() for prim in invalid_prims
))
)
)
return [node, error]
nr_of_points = len(geometry.points())
if nr_of_points != nr_of_prims:
cls.log.error("The number of primitives and points do not match")
return [instance]
if num_points != num_vdb_prims:
# We have points unrelated to the VDB primitives.
error = (
"The number of primitives and points do not match in '{}'. "
"This likely means you have unconnected points, which we do "
"not allow in the VDB output.".format(node.path()))
return [node, error]
for prim in prims:
if prim.numVertices() != 1:
cls.log.error("Found primitive with more than 1 vertex!")
return [instance]
return [None, None]
@classmethod
def get_invalid(cls, instance):
nodes, _ = cls.get_invalid_with_message(instance)
return nodes

View file

@ -162,9 +162,15 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
with parent_nodes(roots, parent=None):
cmds.xform(group_name, zeroTransformPivots=True)
cmds.setAttr("{}.displayHandle".format(group_name), 1)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
display_handle = settings['maya']['load'].get(
'reference_loader', {}
).get('display_handle', True)
cmds.setAttr(
"{}.displayHandle".format(group_name), display_handle
)
colors = settings['maya']['load']['colors']
c = colors.get(family)
if c is not None:
@ -174,7 +180,9 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
(float(c[1]) / 255),
(float(c[2]) / 255))
cmds.setAttr("{}.displayHandle".format(group_name), 1)
cmds.setAttr(
"{}.displayHandle".format(group_name), display_handle
)
# get bounding box
bbox = cmds.exactWorldBoundingBox(group_name)
# get pivot position on world space

View file

@ -217,7 +217,11 @@ class ExtractPlayblast(publish.Extractor):
instance.data["panel"], edit=True, **viewport_defaults
)
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
try:
cmds.setAttr(
"{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
except RuntimeError:
self.log.warning("Cannot restore Pan/Zoom settings.")
collected_files = os.listdir(stagingdir)
patterns = [clique.PATTERNS["frames"]]

View file

@ -6,7 +6,7 @@ import pyblish.api
from openpype.hosts.maya.api.lib import set_attribute
from openpype.pipeline.publish import (
RepairContextAction,
RepairAction,
ValidateContentsOrder,
)
@ -26,7 +26,7 @@ class ValidateAttributes(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
label = "Attributes"
hosts = ["maya"]
actions = [RepairContextAction]
actions = [RepairAction]
optional = True
attributes = None
@ -81,7 +81,7 @@ class ValidateAttributes(pyblish.api.InstancePlugin):
if node_name not in attributes:
continue
for attr_name, expected in attributes.items():
for attr_name, expected in attributes[node_name].items():
# Skip if attribute does not exist
if not cmds.attributeQuery(attr_name, node=node, exists=True):

View file

@ -7,28 +7,26 @@ from openpype.pipeline import (
from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
class PSWorkfileCreator(AutoCreator):
identifier = "workfile"
family = "workfile"
default_variant = "Main"
class PSAutoCreator(AutoCreator):
"""Generic autocreator to extend."""
def get_instance_attr_defs(self):
return []
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
creator_id = instance_data.get("creator_identifier")
if creator_id == self.identifier:
subset_name = instance_data["subset"]
instance = CreatedInstance(
self.family, subset_name, instance_data, self
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
# nothing to change on workfiles
pass
self.log.debug("update_list:: {}".format(update_list))
for created_inst, _changes in update_list:
api.stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store())
def create(self, options=None):
existing_instance = None
@ -58,6 +56,9 @@ class PSWorkfileCreator(AutoCreator):
project_name, host_name, None
))
if not self.active_on_create:
data["active"] = False
new_instance = CreatedInstance(
self.family, subset_name, data, self
)

View file

@ -0,0 +1,120 @@
from openpype.pipeline import CreatedInstance
from openpype.lib import BoolDef
import openpype.hosts.photoshop.api as api
from openpype.hosts.photoshop.lib import PSAutoCreator
from openpype.pipeline.create import get_subset_name
from openpype.client import get_asset_by_name
class AutoImageCreator(PSAutoCreator):
"""Creates flatten image from all visible layers.
Used in simplified publishing as auto created instance.
Must be enabled in Setting and template for subset name provided
"""
identifier = "auto_image"
family = "image"
# Settings
default_variant = ""
# - Mark by default instance for review
mark_for_review = True
active_on_create = True
def create(self, options=None):
existing_instance = None
for instance in self.create_context.instances:
if instance.creator_identifier == self.identifier:
existing_instance = instance
break
context = self.create_context
project_name = context.get_current_project_name()
asset_name = context.get_current_asset_name()
task_name = context.get_current_task_name()
host_name = context.host_name
asset_doc = get_asset_by_name(project_name, asset_name)
if existing_instance is None:
subset_name = get_subset_name(
self.family, self.default_variant, task_name, asset_doc,
project_name, host_name
)
publishable_ids = [layer.id for layer in api.stub().get_layers()
if layer.visible]
data = {
"asset": asset_name,
"task": task_name,
# ids are "virtual" layers, won't get grouped as 'members' do
# same difference in color coded layers in WP
"ids": publishable_ids
}
if not self.active_on_create:
data["active"] = False
creator_attributes = {"mark_for_review": self.mark_for_review}
data.update({"creator_attributes": creator_attributes})
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
self._add_instance_to_context(new_instance)
api.stub().imprint(new_instance.get("instance_id"),
new_instance.data_to_store())
elif ( # existing instance from different context
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
subset_name = get_subset_name(
self.family, self.default_variant, task_name, asset_doc,
project_name, host_name
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
api.stub().imprint(existing_instance.get("instance_id"),
existing_instance.data_to_store())
def get_pre_create_attr_defs(self):
return [
BoolDef(
"mark_for_review",
label="Review",
default=self.mark_for_review
)
]
def get_instance_attr_defs(self):
return [
BoolDef(
"mark_for_review",
label="Review"
)
]
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["photoshop"]["create"]["AutoImageCreator"]
)
self.active_on_create = plugin_settings["active_on_create"]
self.default_variant = plugin_settings["default_variant"]
self.mark_for_review = plugin_settings["mark_for_review"]
self.enabled = plugin_settings["enabled"]
def get_detail_description(self):
return """Creator for flatten image.
Studio might configure simple publishing workflow. In that case
`image` instance is automatically created which will publish flat
image from all visible layers.
Artist might disable this instance from publishing or from creating
review for it though.
"""

View file

@ -23,6 +23,11 @@ class ImageCreator(Creator):
family = "image"
description = "Image creator"
# Settings
default_variants = ""
mark_for_review = False
active_on_create = True
def create(self, subset_name_from_ui, data, pre_create_data):
groups_to_create = []
top_layers_to_wrap = []
@ -94,6 +99,12 @@ class ImageCreator(Creator):
data.update({"layer_name": layer_name})
data.update({"long_name": "_".join(layer_names_in_hierarchy)})
creator_attributes = {"mark_for_review": self.mark_for_review}
data.update({"creator_attributes": creator_attributes})
if not self.active_on_create:
data["active"] = False
new_instance = CreatedInstance(self.family, subset_name, data,
self)
@ -134,11 +145,6 @@ class ImageCreator(Creator):
self.host.remove_instance(instance)
self._remove_instance_from_context(instance)
def get_default_variants(self):
return [
"Main"
]
def get_pre_create_attr_defs(self):
output = [
BoolDef("use_selection", default=True,
@ -148,10 +154,34 @@ class ImageCreator(Creator):
label="Create separate instance for each selected"),
BoolDef("use_layer_name",
default=False,
label="Use layer name in subset")
label="Use layer name in subset"),
BoolDef(
"mark_for_review",
label="Create separate review",
default=False
)
]
return output
def get_instance_attr_defs(self):
return [
BoolDef(
"mark_for_review",
label="Review"
)
]
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["photoshop"]["create"]["ImageCreator"]
)
self.active_on_create = plugin_settings["active_on_create"]
self.default_variants = plugin_settings["default_variants"]
self.mark_for_review = plugin_settings["mark_for_review"]
self.enabled = plugin_settings["enabled"]
def get_detail_description(self):
return """Creator for Image instances
@ -180,6 +210,11 @@ class ImageCreator(Creator):
but layer name should be used (set explicitly in UI or implicitly if
multiple images should be created), it is added in capitalized form
as a suffix to subset name.
Each image can have its own review created if necessary via the
`Create separate review` toggle.
The more common use case, though, is to use a separate `review` instance
to create a review from all published items.
"""
def _handle_legacy(self, instance_data):

View file

@ -0,0 +1,28 @@
from openpype.hosts.photoshop.lib import PSAutoCreator
class ReviewCreator(PSAutoCreator):
"""Creates review instance which might be disabled from publishing."""
identifier = "review"
family = "review"
default_variant = "Main"
def get_detail_description(self):
return """Auto creator for review.
A Photoshop review is created from all published images, or from all
visible layers if no `image` instances were created.
The review might be disabled by an artist (the instance shouldn't be
deleted, as it will get recreated in the next publish either way).
"""
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["photoshop"]["create"]["ReviewCreator"]
)
self.default_variant = plugin_settings["default_variant"]
self.active_on_create = plugin_settings["active_on_create"]
self.enabled = plugin_settings["enabled"]

View file

@ -0,0 +1,28 @@
from openpype.hosts.photoshop.lib import PSAutoCreator
class WorkfileCreator(PSAutoCreator):
identifier = "workfile"
family = "workfile"
default_variant = "Main"
def get_detail_description(self):
return """Auto creator for workfile.
It is expected that each publish will also publish its source workfile
for safekeeping. This creator triggers automatically without the artist
needing to remember to trigger it explicitly.
The workfile instance can be disabled if publishing the workfile is not
required. (The instance shouldn't be deleted though, as it will be
recreated automatically on the next publish.)
"""
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["photoshop"]["create"]["WorkfileCreator"]
)
self.active_on_create = plugin_settings["active_on_create"]
self.enabled = plugin_settings["enabled"]

View file

@ -0,0 +1,101 @@
import pyblish.api
from openpype.hosts.photoshop import api as photoshop
from openpype.pipeline.create import get_subset_name
class CollectAutoImage(pyblish.api.ContextPlugin):
"""Creates auto image in non artist based publishes (Webpublisher).
'remotepublish' should be renamed to 'autopublish' or similar in the future
"""
label = "Collect Auto Image"
order = pyblish.api.CollectorOrder
hosts = ["photoshop"]
order = pyblish.api.CollectorOrder + 0.2
targets = ["remotepublish"]
def process(self, context):
family = "image"
for instance in context:
creator_identifier = instance.data.get("creator_identifier")
if creator_identifier and creator_identifier == "auto_image":
self.log.debug("Auto image instance found, won't create new")
return
project_name = context.data["anatomyData"]["project"]["name"]
proj_settings = context.data["project_settings"]
task_name = context.data["anatomyData"]["task"]["name"]
host_name = context.data["hostName"]
asset_doc = context.data["assetEntity"]
asset_name = asset_doc["name"]
auto_creator = proj_settings.get(
"photoshop", {}).get(
"create", {}).get(
"AutoImageCreator", {})
if not auto_creator or not auto_creator["enabled"]:
self.log.debug("Auto image creator disabled, won't create new")
return
stub = photoshop.stub()
stored_items = stub.get_layers_metadata()
for item in stored_items:
if item.get("creator_identifier") == "auto_image":
if not item.get("active"):
self.log.debug("Auto_image instance disabled")
return
layer_items = stub.get_layers()
publishable_ids = [layer.id for layer in layer_items
if layer.visible]
# collect stored image instances
instance_names = []
for layer_item in layer_items:
layer_meta_data = stub.read(layer_item, stored_items)
# Skip layers without metadata.
if layer_meta_data is None:
continue
# Skip containers.
if "container" in layer_meta_data["id"]:
continue
# active might not be in legacy meta
if layer_meta_data.get("active", True) and layer_item.visible:
instance_names.append(layer_meta_data["subset"])
if len(instance_names) == 0:
variants = proj_settings.get(
"photoshop", {}).get(
"create", {}).get(
"CreateImage", {}).get(
"default_variants", [''])
family = "image"
variant = context.data.get("variant") or variants[0]
subset_name = get_subset_name(
family, variant, task_name, asset_doc,
project_name, host_name
)
instance = context.create_instance(subset_name)
instance.data["family"] = family
instance.data["asset"] = asset_name
instance.data["subset"] = subset_name
instance.data["ids"] = publishable_ids
instance.data["publish"] = True
instance.data["creator_identifier"] = "auto_image"
if auto_creator["mark_for_review"]:
instance.data["creator_attributes"] = {"mark_for_review": True}
instance.data["families"] = ["review"]
self.log.info("auto image instance: {} ".format(instance.data))

View file

@ -0,0 +1,92 @@
"""
Requires:
None
Provides:
instance -> family ("review")
"""
import pyblish.api
from openpype.hosts.photoshop import api as photoshop
from openpype.pipeline.create import get_subset_name
class CollectAutoReview(pyblish.api.ContextPlugin):
"""Create review instance in non artist based workflow.
Called only if PS is triggered in Webpublisher or in tests.
"""
label = "Collect Auto Review"
hosts = ["photoshop"]
order = pyblish.api.CollectorOrder + 0.2
targets = ["remotepublish"]
publish = True
def process(self, context):
family = "review"
has_review = False
for instance in context:
if instance.data["family"] == family:
self.log.debug("Review instance found, won't create new")
has_review = True
creator_attributes = instance.data.get("creator_attributes", {})
if (creator_attributes.get("mark_for_review") and
"review" not in instance.data["families"]):
instance.data["families"].append("review")
if has_review:
return
stub = photoshop.stub()
stored_items = stub.get_layers_metadata()
for item in stored_items:
if item.get("creator_identifier") == family:
if not item.get("active"):
self.log.debug("Review instance disabled")
return
auto_creator = context.data["project_settings"].get(
"photoshop", {}).get(
"create", {}).get(
"ReviewCreator", {})
if not auto_creator or not auto_creator["enabled"]:
self.log.debug("Review creator disabled, won't create new")
return
variant = (context.data.get("variant") or
auto_creator["default_variant"])
project_name = context.data["anatomyData"]["project"]["name"]
proj_settings = context.data["project_settings"]
task_name = context.data["anatomyData"]["task"]["name"]
host_name = context.data["hostName"]
asset_doc = context.data["assetEntity"]
asset_name = asset_doc["name"]
subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
project_name,
host_name=host_name,
project_settings=proj_settings
)
instance = context.create_instance(subset_name)
instance.data.update({
"subset": subset_name,
"label": subset_name,
"name": subset_name,
"family": family,
"families": [],
"representations": [],
"asset": asset_name,
"publish": self.publish
})
self.log.debug("auto review created::{}".format(instance.data))

View file

@ -0,0 +1,99 @@
import os
import pyblish.api
from openpype.hosts.photoshop import api as photoshop
from openpype.pipeline.create import get_subset_name
class CollectAutoWorkfile(pyblish.api.ContextPlugin):
"""Collect current script for publish."""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Workfile"
hosts = ["photoshop"]
targets = ["remotepublish"]
def process(self, context):
family = "workfile"
file_path = context.data["currentFile"]
_, ext = os.path.splitext(file_path)
staging_dir = os.path.dirname(file_path)
base_name = os.path.basename(file_path)
workfile_representation = {
"name": ext[1:],
"ext": ext[1:],
"files": base_name,
"stagingDir": staging_dir,
}
for instance in context:
if instance.data["family"] == family:
self.log.debug("Workfile instance found, won't create new")
instance.data.update({
"label": base_name,
"name": base_name,
"representations": [],
})
# creating representation
_, ext = os.path.splitext(file_path)
instance.data["representations"].append(
workfile_representation)
return
stub = photoshop.stub()
stored_items = stub.get_layers_metadata()
for item in stored_items:
if item.get("creator_identifier") == family:
if not item.get("active"):
self.log.debug("Workfile instance disabled")
return
project_name = context.data["anatomyData"]["project"]["name"]
proj_settings = context.data["project_settings"]
auto_creator = proj_settings.get(
"photoshop", {}).get(
"create", {}).get(
"WorkfileCreator", {})
if not auto_creator or not auto_creator["enabled"]:
self.log.debug("Workfile creator disabled, won't create new")
return
# context.data["variant"] might come only from collect_batch_data
variant = (context.data.get("variant") or
auto_creator["default_variant"])
task_name = context.data["anatomyData"]["task"]["name"]
host_name = context.data["hostName"]
asset_doc = context.data["assetEntity"]
asset_name = asset_doc["name"]
subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
project_name,
host_name=host_name,
project_settings=proj_settings
)
# Create instance
instance = context.create_instance(subset_name)
instance.data.update({
"subset": subset_name,
"label": base_name,
"name": base_name,
"family": family,
"families": [],
"representations": [],
"asset": asset_name
})
# creating representation
instance.data["representations"].append(workfile_representation)
self.log.debug("auto workfile review created:{}".format(instance.data))

View file

@ -1,116 +0,0 @@
import pprint
import pyblish.api
from openpype.settings import get_project_settings
from openpype.hosts.photoshop import api as photoshop
from openpype.lib import prepare_template_data
from openpype.pipeline import legacy_io
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by LayerSet and file metadata
Collects publishable instances from file metadata or enhance
already collected by creator (family == "image").
If no image instances are explicitly created, it looks if there is value
in `flatten_subset_template` (configurable in Settings), in that case it
produces flatten image with all visible layers.
Identifier:
id (str): "pyblish.avalon.instance"
"""
label = "Collect Instances"
order = pyblish.api.CollectorOrder
hosts = ["photoshop"]
families_mapping = {
"image": []
}
# configurable in Settings
flatten_subset_template = ""
def process(self, context):
instance_by_layer_id = {}
for instance in context:
if (
instance.data["family"] == "image" and
instance.data.get("members")):
layer_id = str(instance.data["members"][0])
instance_by_layer_id[layer_id] = instance
stub = photoshop.stub()
layer_items = stub.get_layers()
layers_meta = stub.get_layers_metadata()
instance_names = []
all_layer_ids = []
for layer_item in layer_items:
layer_meta_data = stub.read(layer_item, layers_meta)
all_layer_ids.append(layer_item.id)
# Skip layers without metadata.
if layer_meta_data is None:
continue
# Skip containers.
if "container" in layer_meta_data["id"]:
continue
# active might not be in legacy meta
if not layer_meta_data.get("active", True):
continue
instance = instance_by_layer_id.get(str(layer_item.id))
if instance is None:
instance = context.create_instance(layer_meta_data["subset"])
instance.data["layer"] = layer_item
instance.data.update(layer_meta_data)
instance.data["families"] = self.families_mapping[
layer_meta_data["family"]
]
instance.data["publish"] = layer_item.visible
instance_names.append(layer_meta_data["subset"])
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
self.log.info("Found: \"%s\" " % instance.data["name"])
self.log.info("instance: {} ".format(
pprint.pformat(instance.data, indent=4)))
if len(instance_names) != len(set(instance_names)):
self.log.warning("Duplicate instances found. " +
"Remove unwanted via Publisher")
if len(instance_names) == 0 and self.flatten_subset_template:
project_name = context.data["projectEntity"]["name"]
variants = get_project_settings(project_name).get(
"photoshop", {}).get(
"create", {}).get(
"CreateImage", {}).get(
"defaults", [''])
family = "image"
task_name = legacy_io.Session["AVALON_TASK"]
asset_name = context.data["assetEntity"]["name"]
variant = context.data.get("variant") or variants[0]
fill_pairs = {
"variant": variant,
"family": family,
"task": task_name
}
subset = self.flatten_subset_template.format(
**prepare_template_data(fill_pairs))
instance = context.create_instance(subset)
instance.data["family"] = family
instance.data["asset"] = asset_name
instance.data["subset"] = subset
instance.data["ids"] = all_layer_ids
instance.data["families"] = self.families_mapping[family]
instance.data["publish"] = True
self.log.info("flatten instance: {} ".format(instance.data))

View file

@ -14,10 +14,7 @@ from openpype.pipeline.create import get_subset_name
class CollectReview(pyblish.api.ContextPlugin):
"""Gather the active document as review instance.
Triggers once even if no 'image' is published as by defaults it creates
flatten image from a workfile.
"""Adds review to families for instances marked to be reviewable.
"""
label = "Collect Review"
@ -28,25 +25,8 @@ class CollectReview(pyblish.api.ContextPlugin):
publish = True
def process(self, context):
family = "review"
subset = get_subset_name(
family,
context.data.get("variant", ''),
context.data["anatomyData"]["task"]["name"],
context.data["assetEntity"],
context.data["anatomyData"]["project"]["name"],
host_name=context.data["hostName"],
project_settings=context.data["project_settings"]
)
instance = context.create_instance(subset)
instance.data.update({
"subset": subset,
"label": subset,
"name": subset,
"family": family,
"families": [],
"representations": [],
"asset": os.environ["AVALON_ASSET"],
"publish": self.publish
})
for instance in context:
creator_attributes = instance.data["creator_attributes"]
if (creator_attributes.get("mark_for_review") and
"review" not in instance.data["families"]):
instance.data["families"].append("review")

View file

@ -14,50 +14,19 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
default_variant = "Main"
def process(self, context):
existing_instance = None
for instance in context:
if instance.data["family"] == "workfile":
self.log.debug("Workfile instance found, won't create new")
existing_instance = instance
break
file_path = context.data["currentFile"]
_, ext = os.path.splitext(file_path)
staging_dir = os.path.dirname(file_path)
base_name = os.path.basename(file_path)
family = "workfile"
# context.data["variant"] might come only from collect_batch_data
variant = context.data.get("variant") or self.default_variant
subset = get_subset_name(
family,
variant,
context.data["anatomyData"]["task"]["name"],
context.data["assetEntity"],
context.data["anatomyData"]["project"]["name"],
host_name=context.data["hostName"],
project_settings=context.data["project_settings"]
)
file_path = context.data["currentFile"]
staging_dir = os.path.dirname(file_path)
base_name = os.path.basename(file_path)
# Create instance
if existing_instance is None:
instance = context.create_instance(subset)
instance.data.update({
"subset": subset,
"label": base_name,
"name": base_name,
"family": family,
"families": [],
"representations": [],
"asset": os.environ["AVALON_ASSET"]
})
else:
instance = existing_instance
# creating representation
_, ext = os.path.splitext(file_path)
instance.data["representations"].append({
"name": ext[1:],
"ext": ext[1:],
"files": base_name,
"stagingDir": staging_dir,
})
# creating representation
_, ext = os.path.splitext(file_path)
instance.data["representations"].append({
"name": ext[1:],
"ext": ext[1:],
"files": base_name,
"stagingDir": staging_dir,
})
return

View file

@ -47,32 +47,42 @@ class ExtractReview(publish.Extractor):
layers = self._get_layers_from_image_instances(instance)
self.log.info("Layers image instance found: {}".format(layers))
repre_name = "jpg"
repre_skeleton = {
"name": repre_name,
"ext": "jpg",
"stagingDir": staging_dir,
"tags": self.jpg_options['tags'],
}
if instance.data["family"] != "review":
# enable creation of review, without this jpg review would clash
# with jpg of the image family
output_name = repre_name
repre_name = "{}_{}".format(repre_name, output_name)
repre_skeleton.update({"name": repre_name,
"outputName": output_name})
if self.make_image_sequence and len(layers) > 1:
self.log.info("Extract layers to image sequence.")
img_list = self._save_sequence_images(staging_dir, layers)
instance.data["representations"].append({
"name": "jpg",
"ext": "jpg",
"files": img_list,
repre_skeleton.update({
"frameStart": 0,
"frameEnd": len(img_list),
"fps": fps,
"stagingDir": staging_dir,
"tags": self.jpg_options['tags'],
"files": img_list,
})
instance.data["representations"].append(repre_skeleton)
processed_img_names = img_list
else:
self.log.info("Extract layers to flatten image.")
img_list = self._save_flatten_image(staging_dir, layers)
instance.data["representations"].append({
"name": "jpg",
"ext": "jpg",
"files": img_list, # cannot be [] for single frame
"stagingDir": staging_dir,
"tags": self.jpg_options['tags']
repre_skeleton.update({
"files": img_list,
})
instance.data["representations"].append(repre_skeleton)
processed_img_names = [img_list]
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")

View file

@ -0,0 +1,10 @@
from .addon import (
SubstanceAddon,
SUBSTANCE_HOST_DIR,
)
__all__ = (
"SubstanceAddon",
"SUBSTANCE_HOST_DIR"
)

View file

@ -0,0 +1,34 @@
import os
from openpype.modules import OpenPypeModule, IHostAddon
SUBSTANCE_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
class SubstanceAddon(OpenPypeModule, IHostAddon):
name = "substancepainter"
host_name = "substancepainter"
def initialize(self, module_settings):
self.enabled = True
def add_implementation_envs(self, env, _app):
# Add requirements to SUBSTANCE_PAINTER_PLUGINS_PATH
plugin_path = os.path.join(SUBSTANCE_HOST_DIR, "deploy")
plugin_path = plugin_path.replace("\\", "/")
if env.get("SUBSTANCE_PAINTER_PLUGINS_PATH"):
plugin_path += os.pathsep + env["SUBSTANCE_PAINTER_PLUGINS_PATH"]
env["SUBSTANCE_PAINTER_PLUGINS_PATH"] = plugin_path
# Logging in Substance Painter doesn't support custom terminal colors
env["OPENPYPE_LOG_NO_COLORS"] = "Yes"
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(SUBSTANCE_HOST_DIR, "hooks")
]
def get_workfile_extensions(self):
return [".spp", ".toc"]

View file

@ -0,0 +1,8 @@
from .pipeline import (
SubstanceHost,
)
__all__ = [
"SubstanceHost",
]

View file

@ -0,0 +1,157 @@
"""Substance Painter OCIO management
Adobe Substance 3D Painter supports OCIO color management using a per project
configuration. Output color spaces are defined at the project level.
For more information, see:
- https://substance3d.adobe.com/documentation/spdoc/color-management-223053233.html # noqa
- https://substance3d.adobe.com/documentation/spdoc/color-management-with-opencolorio-225969419.html # noqa
"""
import substance_painter.export
import substance_painter.js
import json
from .lib import (
get_document_structure,
get_channel_format
)
def _iter_document_stack_channels():
"""Yield all stack paths and channels project"""
for material in get_document_structure()["materials"]:
material_name = material["name"]
for stack in material["stacks"]:
stack_name = stack["name"]
if stack_name:
stack_path = [material_name, stack_name]
else:
stack_path = material_name
for channel in stack["channels"]:
yield stack_path, channel
def _get_first_color_and_data_stack_and_channel():
"""Return first found color channel and data channel."""
color_channel = None
data_channel = None
for stack_path, channel in _iter_document_stack_channels():
channel_format = get_channel_format(stack_path, channel)
if channel_format["color"]:
color_channel = (stack_path, channel)
else:
data_channel = (stack_path, channel)
if color_channel and data_channel:
return color_channel, data_channel
return color_channel, data_channel
def get_project_channel_data():
"""Return colorSpace settings for the current substance painter project.
In Substance Painter only color channels have Color Management enabled
whereas data channels have no color management applied. This can't be
changed. The artist can only customize the export color space for color
channels per bit-depth for 8 bpc, 16 bpc and 32 bpc.
As such this returns the color space for 'data' and, per bit-depth,
for color channels.
Example output:
{
"data": {"colorSpace": "Utility - Raw"},
"color8": {"colorSpace": "ACES - ACEScg"},
"color16": {"colorSpace": "ACES - ACEScg"},
"color16f": {"colorSpace": "ACES - ACEScg"},
"color32f": {"colorSpace": "ACES - ACEScg"}
}
"""
keys = ["colorSpace"]
query = {key: f"${key}" for key in keys}
config = {
"exportPath": "/",
"exportShaderParams": False,
"defaultExportPreset": "query_preset",
"exportPresets": [{
"name": "query_preset",
# List of maps making up this export preset.
"maps": [{
"fileName": json.dumps(query),
# List of source/destination defining which channels will
# make up the texture file.
"channels": [],
"parameters": {
"fileFormat": "exr",
"bitDepth": "32f",
"dithering": False,
"sizeLog2": 4,
"paddingAlgorithm": "passthrough",
"dilationDistance": 16
}
}]
}],
}
def _get_query_output(config):
# Return the basename of the single output path we defined
result = substance_painter.export.list_project_textures(config)
path = next(iter(result.values()))[0]
# strip extension and slash since we know relevant json data starts
# and ends with { and } characters
path = path.strip("/\\.exr")
return json.loads(path)
# Query for each type of channel (color and data)
color_channel, data_channel = _get_first_color_and_data_stack_and_channel()
colorspaces = {}
for key, channel_data in {
"data": data_channel,
"color": color_channel
}.items():
if channel_data is None:
# No channel of that datatype anywhere in the Stack. We're
# unable to identify the output color space of the project
colorspaces[key] = None
continue
stack, channel = channel_data
# Stack must be a string
if not isinstance(stack, str):
# Assume iterable
stack = "/".join(stack)
# Define the temp output config
config["exportList"] = [{"rootPath": stack}]
config_map = config["exportPresets"][0]["maps"][0]
config_map["channels"] = [
{
"destChannel": x,
"srcChannel": x,
"srcMapType": "documentMap",
"srcMapName": channel
} for x in "RGB"
]
if key == "color":
# Query for each bit depth
# Color space definition can have a different OCIO config set
# for 8-bit, 16-bit and 32-bit outputs so we need to check each
# bit depth
for depth in ["8", "16", "16f", "32f"]:
config_map["parameters"]["bitDepth"] = depth # noqa
colorspaces[key + depth] = _get_query_output(config)
else:
# Data channel (not color managed)
colorspaces[key] = _get_query_output(config)
return colorspaces
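A usage sketch for the query above; it must run with a Substance Painter project open, and the returned color space names are illustrative:

colorspaces = get_project_channel_data()
# e.g. {
#     "data": {"colorSpace": "Utility - Raw"},
#     "color8": {"colorSpace": "ACES - ACEScg"},
#     "color16": {"colorSpace": "ACES - ACEScg"},
#     "color16f": {"colorSpace": "ACES - ACEScg"},
#     "color32f": {"colorSpace": "ACES - ACEScg"}
# }
print(colorspaces.get("data"), colorspaces.get("color32f"))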

View file

@ -0,0 +1,649 @@
import os
import re
import json
from collections import defaultdict
import substance_painter.project
import substance_painter.resource
import substance_painter.js
import substance_painter.export
from qtpy import QtGui, QtWidgets, QtCore
def get_export_presets():
"""Return Export Preset resource URLs for all available Export Presets.
Returns:
dict: {Resource url: GUI Label}
"""
# TODO: Find more optimal way to find all export templates
preset_resources = {}
for shelf in substance_painter.resource.Shelves.all():
shelf_path = os.path.normpath(shelf.path())
presets_path = os.path.join(shelf_path, "export-presets")
if not os.path.exists(presets_path):
continue
for filename in os.listdir(presets_path):
if filename.endswith(".spexp"):
template_name = os.path.splitext(filename)[0]
resource = substance_painter.resource.ResourceID(
context=shelf.name(),
name=template_name
)
resource_url = resource.url()
preset_resources[resource_url] = template_name
# Sort by template name
export_templates = dict(sorted(preset_resources.items(),
key=lambda x: x[1]))
# Add default built-ins at the start
# TODO: find the built-ins automatically; scraped with https://gist.github.com/BigRoy/97150c7c6f0a0c916418207b9a2bc8f1 # noqa
result = {
"export-preset-generator://viewport2d": "2D View", # noqa
"export-preset-generator://doc-channel-normal-no-alpha": "Document channels + Normal + AO (No Alpha)", # noqa
"export-preset-generator://doc-channel-normal-with-alpha": "Document channels + Normal + AO (With Alpha)", # noqa
"export-preset-generator://sketchfab": "Sketchfab", # noqa
"export-preset-generator://adobe-standard-material": "Substance 3D Stager", # noqa
"export-preset-generator://usd": "USD PBR Metal Roughness", # noqa
"export-preset-generator://gltf": "glTF PBR Metal Roughness", # noqa
"export-preset-generator://gltf-displacement": "glTF PBR Metal Roughness + Displacement texture (experimental)" # noqa
}
result.update(export_templates)
return result
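A usage sketch for the preset listing above, e.g. to fill a UI dropdown (the example pair is one of the built-ins listed in the code):

for resource_url, label in get_export_presets().items():
    print(label, "->", resource_url)
# e.g. "2D View" -> "export-preset-generator://viewport2d"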
def _convert_stack_path_to_cmd_str(stack_path):
"""Convert stack path `str` or `[str, str]` for javascript query
Example usage:
>>> stack_path = _convert_stack_path_to_cmd_str(stack_path)
>>> cmd = f"alg.mapexport.channelIdentifiers({stack_path})"
>>> substance_painter.js.evaluate(cmd)
Args:
stack_path (list or str): Path to the stack, could be
"Texture set name" or ["Texture set name", "Stack name"]
Returns:
str: Stack path usable as argument in javascript query.
"""
return json.dumps(stack_path)
def get_channel_identifiers(stack_path=None):
"""Return the list of channel identifiers.
If a context is passed (texture set/stack),
return only used channels with resolved user channels.
Channel identifiers are:
basecolor, height, specular, opacity, emissive, displacement,
glossiness, roughness, anisotropylevel, anisotropyangle, transmissive,
scattering, reflection, ior, metallic, normal, ambientOcclusion,
diffuse, specularlevel, blendingmask, [custom user names].
Args:
stack_path (list or str, Optional): Path to the stack, could be
"Texture set name" or ["Texture set name", "Stack name"]
Returns:
list: List of channel identifiers.
"""
if stack_path is None:
stack_path = ""
else:
stack_path = _convert_stack_path_to_cmd_str(stack_path)
cmd = f"alg.mapexport.channelIdentifiers({stack_path})"
return substance_painter.js.evaluate(cmd)
def get_channel_format(stack_path, channel):
"""Retrieve the channel format of a specific stack channel.
See `alg.mapexport.channelFormat` (javascript API) for more details.
The channel format data is:
"label" (str): The channel format label: could be one of
[sRGB8, L8, RGB8, L16, RGB16, L16F, RGB16F, L32F, RGB32F]
"color" (bool): True if the format is in color, False is grayscale
"floating" (bool): True if the format uses floating point
representation, false otherwise
"bitDepth" (int): Bit per color channel (could be 8, 16 or 32 bpc)
Arguments:
stack_path (list or str): Path to the stack, could be
"Texture set name" or ["Texture set name", "Stack name"]
channel (str): Identifier of the channel to export
(see `get_channel_identifiers`)
Returns:
dict: The channel format data.
"""
stack_path = _convert_stack_path_to_cmd_str(stack_path)
cmd = f"alg.mapexport.channelFormat({stack_path}, '{channel}')"
return substance_painter.js.evaluate(cmd)
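# A minimal sketch combining the two queries above, assuming an open project
# with a texture set named "DefaultMaterial" (hypothetical name).
def _usage_sketch_channel_formats(stack_path="DefaultMaterial"):
    for channel in get_channel_identifiers(stack_path):
        fmt = get_channel_format(stack_path, channel)
        print(channel, fmt["label"], fmt["color"], fmt["bitDepth"])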
def get_document_structure():
"""Dump the document structure.
See `alg.mapexport.documentStructure` (javascript API) for more details.
Returns:
dict: Document structure or None when no project is open
"""
return substance_painter.js.evaluate("alg.mapexport.documentStructure()")
def get_export_templates(config, format="png", strip_folder=True):
"""Return export config outputs.
This uses the Javascript API `alg.mapexport.getPathsExportDocumentMaps`
which returns a different output than using the Python equivalent
`substance_painter.export.list_project_textures(config)`.
The nice thing about the Javascript API version is that it returns the
output textures grouped by filename template.
A downside is that it doesn't return all the UDIM tiles but per template
always returns a single file.
Note:
The file format needs to be explicitly passed to the Javascript API
but upon exporting through the Python API the file format can be based
on the output preset. So it's likely the file extension will mismatch
Warning:
Even though the function appears to solely get the expected outputs
the Javascript API will actually create the config's texture output
folder if it does not exist yet. As such, a valid path must be set.
Example output:
{
"DefaultMaterial": {
"$textureSet_BaseColor(_$colorSpace)(.$udim)": "DefaultMaterial_BaseColor_ACES - ACEScg.1002.png", # noqa
"$textureSet_Emissive(_$colorSpace)(.$udim)": "DefaultMaterial_Emissive_ACES - ACEScg.1002.png", # noqa
"$textureSet_Height(_$colorSpace)(.$udim)": "DefaultMaterial_Height_Utility - Raw.1002.png", # noqa
"$textureSet_Metallic(_$colorSpace)(.$udim)": "DefaultMaterial_Metallic_Utility - Raw.1002.png", # noqa
"$textureSet_Normal(_$colorSpace)(.$udim)": "DefaultMaterial_Normal_Utility - Raw.1002.png", # noqa
"$textureSet_Roughness(_$colorSpace)(.$udim)": "DefaultMaterial_Roughness_Utility - Raw.1002.png" # noqa
}
}
Arguments:
config (dict): Export config
format (str, Optional): Output format to write to, defaults to 'png'
strip_folder (bool, Optional): Whether to strip the output folder
from the output filenames.
Returns:
dict: The expected output maps.
"""
folder = config["exportPath"].replace("\\", "/")
preset = config["defaultExportPreset"]
cmd = f'alg.mapexport.getPathsExportDocumentMaps("{preset}", "{folder}", "{format}")' # noqa
result = substance_painter.js.evaluate(cmd)
if strip_folder:
for _stack, maps in result.items():
for map_template, map_filepath in maps.items():
map_filepath = map_filepath.replace("\\", "/")
assert map_filepath.startswith(folder)
map_filename = map_filepath[len(folder):].lstrip("/")
maps[map_template] = map_filename
return result
def _templates_to_regex(templates,
texture_set,
colorspaces,
project,
mesh):
"""Return regex based on a Substance Painter expot filename template.
This converts Substance Painter export filename templates like
`$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)` into a regex
which can be used to query an output filename to help retrieve:
- Which template filename the file belongs to.
- Which color space the file is written with.
- Which udim tile it is exactly.
This is used by `get_parsed_export_maps` which tries to match the filename
pattern as explicitly as possible against the known possible outputs.
That's why the Texture Set name, color spaces, project path and mesh path
must be provided. Doing so gives the best shot at correctly matching the
right template, because otherwise $textureSet could basically be any string
and thus also match a color space or mesh name.
Arguments:
templates (list): List of templates to convert to regex.
texture_set (str): The texture set to match against.
colorspaces (list): The colorspaces defined in the current project.
project (str): Filepath of current substance project.
mesh (str): Path to mesh file used in current project.
Returns:
dict: Template: Template regex pattern
"""
def _filename_no_ext(path):
return os.path.splitext(os.path.basename(path))[0]
if colorspaces and any(colorspaces):
colorspace_match = "|".join(re.escape(c) for c in set(colorspaces))
colorspace_match = f"({colorspace_match})"
else:
# No colorspace support enabled
colorspace_match = ""
# Key to regex valid search values
key_matches = {
"$project": re.escape(_filename_no_ext(project)),
"$mesh": re.escape(_filename_no_ext(mesh)),
"$textureSet": re.escape(texture_set),
"$colorSpace": colorspace_match,
"$udim": "([0-9]{4})"
}
# Turn the templates into regexes
regexes = {}
for template in templates:
# We need to tweak the template a bit to be able to use it as a regex
search_regex = re.escape(template)
# Let's assume that any ( and ) character in the file template was
# intended as an optional template key and do a simple `str.replace`
# Note: we are matching against re.escape(template) so will need to
# search for the escaped brackets.
search_regex = search_regex.replace(re.escape("("), "(")
search_regex = search_regex.replace(re.escape(")"), ")?")
# Substitute each key into a named group
for key, key_expected_regex in key_matches.items():
# We want to use the template as a regex basis in the end so will
# escape the whole thing first. Note that thus we'll need to
# search for the escaped versions of the keys too.
escaped_key = re.escape(key)
key_label = key[1:] # key without $ prefix
key_expected_grp_regex = f"(?P<{key_label}>{key_expected_regex})"
search_regex = search_regex.replace(escaped_key,
key_expected_grp_regex)
# The filename templates don't include the extension so we add it
# to be able to match the out filename beginning to end
ext_regex = r"(?P<ext>\.[A-Za-z][A-Za-z0-9-]*)"
search_regex = rf"^{search_regex}{ext_regex}$"
regexes[template] = search_regex
return regexes
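# A minimal sketch of the template-to-regex conversion above, using a
# hypothetical project path, mesh path and color space.
def _usage_sketch_template_regex():
    template = "$textureSet_BaseColor(_$colorSpace)(.$udim)"
    regexes = _templates_to_regex([template],
                                  texture_set="DefaultMaterial",
                                  colorspaces=["ACES - ACEScg"],
                                  project="/path/to/workfile.spp",
                                  mesh="/path/to/mesh.fbx")
    pattern = re.compile(regexes[template])
    match = pattern.match("DefaultMaterial_BaseColor_ACES - ACEScg.1002.png")
    # Parsed groups: textureSet, colorSpace, udim and ext
    return match.groupdict()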
def strip_template(template, strip="._ "):
"""Return static characters in a substance painter filename template.
>>> strip_template("$textureSet_HELLO(.$udim)")
# HELLO
>>> strip_template("$mesh_$textureSet_HELLO_WORLD_$colorSpace(.$udim)")
# HELLO_WORLD
>>> strip_template("$textureSet_HELLO(.$udim)", strip=None)
# _HELLO
>>> strip_template("$mesh_$textureSet_$colorSpace(.$udim)", strip=None)
# _HELLO_
>>> strip_template("$textureSet_HELLO(.$udim)")
# _HELLO
Arguments:
template (str): Filename template to strip.
strip (str, optional): Characters to strip from beginning and end
of the static string in template. Defaults to: `._ `.
Returns:
str: The static string in filename template.
"""
# Return only characters that were part of the template that were static.
# Remove all keys
keys = ["$project", "$mesh", "$textureSet", "$udim", "$colorSpace"]
stripped_template = template
for key in keys:
stripped_template = stripped_template.replace(key, "")
# Everything inside an optional bracket space is excluded since it's not
# static. We keep a counter to track whether we are currently iterating
# over parts of the template that are inside an 'optional' group or not.
counter = 0
result = ""
for char in stripped_template:
if char == "(":
counter += 1
elif char == ")":
counter -= 1
if counter < 0:
counter = 0
else:
if counter == 0:
result += char
if strip:
# Strip of any trailing start/end characters. Technically these are
# static but usually start and end separators like space or underscore
# aren't wanted.
result = result.strip(strip)
return result
def get_parsed_export_maps(config):
"""Return Export Config's expected output textures with parsed data.
This tries to parse the texture outputs using a Python API export config.
Parses template keys: $project, $mesh, $textureSet, $colorSpace, $udim
Example:
{("DefaultMaterial", ""): {
"$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)": [
{
// OUTPUT DATA FOR FILE #1 OF THE TEMPLATE
},
{
// OUTPUT DATA FOR FILE #2 OF THE TEMPLATE
},
]
},
}
File output data (all outputs are `str`).
1) Parsed tokens: These are parsed tokens from the template, they will
only exist if found in the filename template and output filename.
project: Workfile filename without extension
mesh: Filename of the loaded mesh without extension
textureSet: The texture set, e.g. "DefaultMaterial",
colorSpace: The color space, e.g. "ACES - ACEScg",
udim: The udim tile, e.g. "1001"
2) Template output and filepath
filepath: Full path to the resulting texture map, e.g.
"/path/to/mesh_DefaultMaterial_BaseColor_ACES - ACEScg.1002.png",
output: "mesh_DefaultMaterial_BaseColor_ACES - ACEScg.1002.png"
Note: if template had slashes (folders) then `output` will too.
So `output` might include a folder.
Returns:
dict: [texture_set, stack]: {template: [file1_data, file2_data]}
"""
# Import is here to avoid recursive lib <-> colorspace imports
from .colorspace import get_project_channel_data
outputs = substance_painter.export.list_project_textures(config)
templates = get_export_templates(config, strip_folder=False)
# Get all color spaces set for the current project
project_colorspaces = set(
data["colorSpace"] for data in get_project_channel_data().values()
)
# Get current project mesh path and project path to explicitly match
# the $mesh and $project tokens
project_mesh_path = substance_painter.project.last_imported_mesh_path()
project_path = substance_painter.project.file_path()
# Get the current export path to strip it off the beginning of the filepath
# results; since the filename templates don't include it we'll match without
# that part of the filename.
export_path = config["exportPath"]
export_path = export_path.replace("\\", "/")
if not export_path.endswith("/"):
export_path += "/"
# Parse the outputs
result = {}
for key, filepaths in outputs.items():
texture_set, stack = key
if stack:
stack_path = f"{texture_set}/{stack}"
else:
stack_path = texture_set
stack_templates = list(templates[stack_path].keys())
template_regex = _templates_to_regex(stack_templates,
texture_set=texture_set,
colorspaces=project_colorspaces,
mesh=project_mesh_path,
project=project_path)
# Let's precompile the regexes
for template, regex in template_regex.items():
template_regex[template] = re.compile(regex)
stack_results = defaultdict(list)
for filepath in sorted(filepaths):
# We strip explicitly using the full parent export path instead of
# using `os.path.basename` because the export template is allowed to
# contain subfolders, which we want to match against
filepath = filepath.replace("\\", "/")
assert filepath.startswith(export_path), (
f"Filepath {filepath} must start with folder {export_path}"
)
filename = filepath[len(export_path):]
for template, regex in template_regex.items():
match = regex.match(filename)
if match:
parsed = match.groupdict(default={})
# Include some special outputs for convenience
parsed["filepath"] = filepath
parsed["output"] = filename
stack_results[template].append(parsed)
break
else:
raise ValueError(f"Unable to match {filename} against any "
f"template in: {list(template_regex.keys())}")
result[key] = dict(stack_results)
return result
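# A minimal sketch of consuming the parsed result, assuming `config` is an
# export config dict like the one built by the texture set collector plug-in.
def _usage_sketch_parsed_maps(config):
    parsed = get_parsed_export_maps(config)
    for (texture_set, stack), templates in parsed.items():
        for template, files in templates.items():
            for data in files:
                print(texture_set, stack, template,
                      data["output"], data.get("udim"))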
def load_shelf(path, name=None):
"""Add shelf to substance painter (for current application session)
This will dynamically add a Shelf for the current session. It's good
to note however that these will *not* persist on restart of the host.
Note:
Consider the loaded shelf a static library of resources.
The shelf will *not* be visible in application preferences in
Edit > Settings > Libraries.
The shelf will *not* show in the Assets browser if it has no existing
assets
The shelf will *not* be a selectable option for selecting it as a
destination to import resources to.
"""
# Ensure expanded path with forward slashes
path = os.path.expandvars(path)
path = os.path.abspath(path)
path = path.replace("\\", "/")
# Path must exist
if not os.path.isdir(path):
raise ValueError(f"Path is not an existing folder: {path}")
# This name must be unique and must only contain lowercase letters,
# numbers, underscores or hyphens.
if name is None:
name = os.path.basename(path)
name = name.lower()
name = re.sub(r"[^a-z0-9_\-]", "_", name) # sanitize to underscores
if substance_painter.resource.Shelves.exists(name):
shelf = next(
shelf for shelf in substance_painter.resource.Shelves.all()
if shelf.name() == name
)
if os.path.normpath(shelf.path()) != os.path.normpath(path):
raise ValueError(f"Shelf with name '{name}' already exists "
f"for a different path: '{shelf.path()}")
return
print(f"Adding Shelf '{name}' to path: {path}")
substance_painter.resource.Shelves.add(name, path)
return name
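# A minimal sketch, assuming a resources folder exists at the given
# (hypothetical) path.
def _usage_sketch_load_shelf():
    return load_shelf("Z:/studio/substance_shelf", name="studio_shelf")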
def _get_new_project_action():
"""Return QAction which triggers Substance Painter's new project dialog"""
main_window = substance_painter.ui.get_main_window()
# Find the file menu's New file action
menubar = main_window.menuBar()
new_action = None
for action in menubar.actions():
menu = action.menu()
if not menu:
continue
if menu.objectName() != "file":
continue
# Find the action with the CTRL+N key sequence
new_action = next((action for action in menu.actions()
if action.shortcut() == QtGui.QKeySequence.New), None)
break
return new_action
def prompt_new_file_with_mesh(mesh_filepath):
"""Prompts the user for a new file using Substance Painter's own dialog.
This will set the mesh path to load to the given mesh and disable the
mesh selection in the dialog so the user cannot change the path. This way
we can allow user configuration of a project but set the mesh path ourselves.
Warning:
This is very hacky and experimental.
Note:
If a project is currently open using the same mesh filepath, this can't
accurately detect whether the user actually accepted the new project
dialog or whether the project afterwards is still the original project,
for example when the user cancelled the operation.
"""
app = QtWidgets.QApplication.instance()
assert os.path.isfile(mesh_filepath), \
f"Mesh filepath does not exist: {mesh_filepath}"
def _setup_file_dialog():
"""Set filepath in QFileDialog and trigger accept result"""
file_dialog = app.activeModalWidget()
assert isinstance(file_dialog, QtWidgets.QFileDialog)
# Quickly hide the dialog
file_dialog.hide()
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000)
file_dialog.setDirectory(os.path.dirname(mesh_filepath))
url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
file_dialog.selectUrl(url)
# Give the explorer window time to refresh to the folder and select
# the file
while not file_dialog.selectedFiles():
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000)
print(f"Selected: {file_dialog.selectedFiles()}")
# Set it again now we know the path is refreshed - without this
# accepting the dialog will often not trigger the correct filepath
file_dialog.setDirectory(os.path.dirname(mesh_filepath))
url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
file_dialog.selectUrl(url)
file_dialog.done(file_dialog.Accepted)
app.processEvents(QtCore.QEventLoop.AllEvents)
def _setup_prompt():
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
dialog = app.activeModalWidget()
assert dialog.objectName() == "NewProjectDialog"
# Set the window title
mesh = os.path.basename(mesh_filepath)
dialog.setWindowTitle(f"New Project with mesh: {mesh}")
# Get the select mesh file button
mesh_select = dialog.findChild(QtWidgets.QPushButton, "meshSelect")
# Hide the select mesh button from the user to block changing the mesh
mesh_select.setVisible(False)
# Ensure UI is visually up-to-date
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
# Trigger the 'select file' dialog to set the path and have the
# new file dialog to use the path.
QtCore.QTimer.singleShot(10, _setup_file_dialog)
mesh_select.click()
app.processEvents(QtCore.QEventLoop.AllEvents, 5000)
mesh_filename = dialog.findChild(QtWidgets.QFrame, "meshFileName")
mesh_filename_label = mesh_filename.findChild(QtWidgets.QLabel)
if not mesh_filename_label.text():
dialog.close()
raise RuntimeError(f"Failed to set mesh path: {mesh_filepath}")
new_action = _get_new_project_action()
if not new_action:
raise RuntimeError("Unable to detect new file action..")
QtCore.QTimer.singleShot(0, _setup_prompt)
new_action.trigger()
app.processEvents(QtCore.QEventLoop.AllEvents, 5000)
if not substance_painter.project.is_open():
return
# Confirm mesh was set as expected
project_mesh = substance_painter.project.last_imported_mesh_path()
if os.path.normpath(project_mesh) != os.path.normpath(mesh_filepath):
return
return project_mesh

View file

@ -0,0 +1,427 @@
# -*- coding: utf-8 -*-
"""Pipeline tools for OpenPype Substance Painter integration."""
import os
import logging
from functools import partial
# Substance 3D Painter modules
import substance_painter.ui
import substance_painter.event
import substance_painter.project
import pyblish.api
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from openpype.settings import (
get_current_project_settings,
get_system_settings
)
from openpype.pipeline.template_data import get_template_data_with_names
from openpype.pipeline import (
register_creator_plugin_path,
register_loader_plugin_path,
AVALON_CONTAINER_ID,
Anatomy
)
from openpype.lib import (
StringTemplate,
register_event_callback,
emit_event,
)
from openpype.pipeline.load import any_outdated_containers
from openpype.hosts.substancepainter import SUBSTANCE_HOST_DIR
from . import lib
log = logging.getLogger("openpype.hosts.substance")
PLUGINS_DIR = os.path.join(SUBSTANCE_HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
OPENPYPE_METADATA_KEY = "OpenPype"
OPENPYPE_METADATA_CONTAINERS_KEY = "containers" # child key
OPENPYPE_METADATA_CONTEXT_KEY = "context" # child key
OPENPYPE_METADATA_INSTANCES_KEY = "instances" # child key
class SubstanceHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "substancepainter"
def __init__(self):
super(SubstanceHost, self).__init__()
self._has_been_setup = False
self.menu = None
self.callbacks = []
self.shelves = []
def install(self):
pyblish.api.register_host("substancepainter")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info("Installing callbacks ... ")
# register_event_callback("init", on_init)
self._register_callbacks()
# register_event_callback("before.save", before_save)
# register_event_callback("save", on_save)
register_event_callback("open", on_open)
# register_event_callback("new", on_new)
log.info("Installing menu ... ")
self._install_menu()
project_settings = get_current_project_settings()
self._install_shelves(project_settings)
self._has_been_setup = True
def uninstall(self):
self._uninstall_shelves()
self._uninstall_menu()
self._deregister_callbacks()
def has_unsaved_changes(self):
if not substance_painter.project.is_open():
return False
return substance_painter.project.needs_saving()
def get_workfile_extensions(self):
return [".spp", ".toc"]
def save_workfile(self, dst_path=None):
if not substance_painter.project.is_open():
return False
if not dst_path:
dst_path = self.get_current_workfile()
full_save_mode = substance_painter.project.ProjectSaveMode.Full
substance_painter.project.save_as(dst_path, full_save_mode)
return dst_path
def open_workfile(self, filepath):
if not os.path.exists(filepath):
raise RuntimeError("File does not exist: {}".format(filepath))
# We must first explicitly close current project before opening another
if substance_painter.project.is_open():
substance_painter.project.close()
substance_painter.project.open(filepath)
return filepath
def get_current_workfile(self):
if not substance_painter.project.is_open():
return None
filepath = substance_painter.project.file_path()
if filepath and filepath.endswith(".spt"):
# When currently in a Substance Painter template assume our
# scene isn't saved. This can be the case directly after doing
# "New project", the path will then be the template used. This
# avoids Workfiles tool trying to save as .spt extension if the
# file hasn't been saved before.
return
return filepath
def get_containers(self):
if not substance_painter.project.is_open():
return
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY)
if containers:
for key, container in containers.items():
container["objectName"] = key
yield container
def update_context_data(self, data, changes):
if not substance_painter.project.is_open():
return
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
metadata.set(OPENPYPE_METADATA_CONTEXT_KEY, data)
def get_context_data(self):
if not substance_painter.project.is_open():
return
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
return metadata.get(OPENPYPE_METADATA_CONTEXT_KEY) or {}
def _install_menu(self):
from PySide2 import QtWidgets
from openpype.tools.utils import host_tools
parent = substance_painter.ui.get_main_window()
menu = QtWidgets.QMenu("OpenPype")
action = menu.addAction("Create...")
action.triggered.connect(
lambda: host_tools.show_publisher(parent=parent,
tab="create")
)
action = menu.addAction("Load...")
action.triggered.connect(
lambda: host_tools.show_loader(parent=parent, use_context=True)
)
action = menu.addAction("Publish...")
action.triggered.connect(
lambda: host_tools.show_publisher(parent=parent,
tab="publish")
)
action = menu.addAction("Manage...")
action.triggered.connect(
lambda: host_tools.show_scene_inventory(parent=parent)
)
action = menu.addAction("Library...")
action.triggered.connect(
lambda: host_tools.show_library_loader(parent=parent)
)
menu.addSeparator()
action = menu.addAction("Work Files...")
action.triggered.connect(
lambda: host_tools.show_workfiles(parent=parent)
)
substance_painter.ui.add_menu(menu)
def on_menu_destroyed():
self.menu = None
menu.destroyed.connect(on_menu_destroyed)
self.menu = menu
def _uninstall_menu(self):
if self.menu:
self.menu.destroy()
self.menu = None
def _register_callbacks(self):
# Prepare emit event callbacks
open_callback = partial(emit_event, "open")
# Connect to the Substance Painter events
dispatcher = substance_painter.event.DISPATCHER
for event, callback in [
(substance_painter.event.ProjectOpened, open_callback)
]:
dispatcher.connect(event, callback)
# Keep a reference so we can deregister if needed
self.callbacks.append((event, callback))
def _deregister_callbacks(self):
for event, callback in self.callbacks:
substance_painter.event.DISPATCHER.disconnect(event, callback)
self.callbacks.clear()
def _install_shelves(self, project_settings):
shelves = project_settings["substancepainter"].get("shelves", {})
if not shelves:
return
# Prepare formatting data if we detect any path which might have
# template tokens like {asset} in there.
formatting_data = {}
has_formatting_entries = any("{" in path for path in shelves.values())
if has_formatting_entries:
project_name = self.get_current_project_name()
asset_name = self.get_current_asset_name()
task_name = self.get_current_task_name()
system_settings = get_system_settings()
formatting_data = get_template_data_with_names(project_name,
asset_name,
task_name,
system_settings)
anatomy = Anatomy(project_name)
formatting_data["root"] = anatomy.roots
for name, path in shelves.items():
shelf_name = None
# Allow formatting with anatomy for the paths
if "{" in path:
path = StringTemplate.format_template(path, formatting_data)
try:
shelf_name = lib.load_shelf(path, name=name)
except ValueError as exc:
print(f"Failed to load shelf -> {exc}")
if shelf_name:
self.shelves.append(shelf_name)
def _uninstall_shelves(self):
for shelf_name in self.shelves:
substance_painter.resource.Shelves.remove(shelf_name)
self.shelves.clear()
def on_open():
log.info("Running callback on open..")
if any_outdated_containers():
from openpype.widgets import popup
log.warning("Scene has outdated content.")
# Get main window
parent = substance_painter.ui.get_main_window()
if parent is None:
log.info("Skipping outdated content pop-up "
"because Substance window can't be found.")
else:
# Show outdated pop-up
def _on_show_inventory():
from openpype.tools.utils import host_tools
host_tools.show_scene_inventory(parent=parent)
dialog = popup.Popup(parent=parent)
dialog.setWindowTitle("Substance scene has outdated content")
dialog.setMessage("There are outdated containers in "
"your Substance scene.")
dialog.on_clicked.connect(_on_show_inventory)
dialog.show()
def imprint_container(container,
name,
namespace,
context,
loader):
"""Imprint a loaded container with metadata.
Containerisation enables tracking of version, author and origin
for loaded assets.
Arguments:
container (dict): The (substance metadata) dictionary to imprint into.
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
context (dict): Asset information
loader (load.LoaderPlugin): loader instance used to produce container.
Returns:
None
"""
data = [
("schema", "openpype:container-2.0"),
("id", AVALON_CONTAINER_ID),
("name", str(name)),
("namespace", str(namespace) if namespace else None),
("loader", str(loader.__class__.__name__)),
("representation", str(context["representation"]["_id"])),
]
for key, value in data:
container[key] = value
def set_container_metadata(object_name, container_data, update=False):
"""Helper method to directly set the data for a specific container
Args:
object_name (str): The unique object name identifier for the container
container_data (dict): The data for the container.
Note: 'objectName' is derived from `object_name`; any 'objectName'
key in `container_data` will be ignored.
update (bool): Whether to only update the dict data.
"""
# The objectName is derived from the key in the metadata, so it is not
# stored inside the container's data itself.
container_data.pop("objectName", None)
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) or {}
if update:
existing_data = containers.setdefault(object_name, {})
existing_data.update(container_data) # mutable dict, in-place update
else:
containers[object_name] = container_data
metadata.set("containers", containers)
def remove_container_metadata(object_name):
"""Helper method to remove the data for a specific container"""
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY)
if containers:
containers.pop(object_name, None)
metadata.set("containers", containers)
def set_instance(instance_id, instance_data, update=False):
"""Helper method to directly set the data for a specific container
Args:
instance_id (str): Unique identifier for the instance
instance_data (dict): The instance data to store in the metaadata.
"""
set_instances({instance_id: instance_data}, update=update)
def set_instances(instance_data_by_id, update=False):
"""Store data for multiple instances at the same time.
This is more optimal than querying and setting them in the metadata one
by one.
"""
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
for instance_id, instance_data in instance_data_by_id.items():
if update:
existing_data = instances.get(instance_id, {})
existing_data.update(instance_data)
else:
instances[instance_id] = instance_data
metadata.set("instances", instances)
def remove_instance(instance_id):
"""Helper method to remove the data for a specific container"""
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
instances.pop(instance_id, None)
metadata.set("instances", instances)
def get_instances_by_id():
"""Return all instances stored in the project instances metadata"""
if not substance_painter.project.is_open():
return {}
metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
return metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
def get_instances():
"""Return all instances stored in the project instances as a list"""
return list(get_instances_by_id().values())
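# A minimal sketch of the instance metadata helpers above, assuming an open
# project and a hypothetical instance id.
def _usage_sketch_instance_metadata():
    set_instance("example-instance-id", {"family": "textureSet",
                                         "variant": "Main"})
    set_instances({"example-instance-id": {"active": True}}, update=True)
    data = get_instances_by_id().get("example-instance-id")
    remove_instance("example-instance-id")
    return data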

View file

@ -0,0 +1,36 @@
def cleanup_openpype_qt_widgets():
"""
Workaround for Substance failing to shut down correctly
when a Qt window was still open at the time of shutting down.
This seems to work sometimes, but not all the time.
"""
# TODO: Create a more reliable method to close down all OpenPype Qt widgets
from PySide2 import QtWidgets
import substance_painter.ui
# Kill OpenPype Qt widgets
print("Killing OpenPype Qt widgets..")
for widget in QtWidgets.QApplication.topLevelWidgets():
if widget.__module__.startswith("openpype."):
print(f"Deleting widget: {widget.__class__.__name__}")
substance_painter.ui.delete_ui_element(widget)
def start_plugin():
from openpype.pipeline import install_host
from openpype.hosts.substancepainter.api import SubstanceHost
install_host(SubstanceHost())
def close_plugin():
from openpype.pipeline import uninstall_host
cleanup_openpype_qt_widgets()
uninstall_host()
if __name__ == "__main__":
start_plugin()

View file

@ -0,0 +1,43 @@
"""Ease the OpenPype on-boarding process by loading the plug-in on first run"""
OPENPYPE_PLUGIN_NAME = "openpype_plugin"
def start_plugin():
try:
# This isn't exposed in the official API so we keep it in a try-except
from painter_plugins_ui import (
get_settings,
LAUNCH_AT_START_KEY,
ON_STATE,
PLUGINS_MENU,
plugin_manager
)
# The `painter_plugins_ui` plug-in itself is also a startup plug-in, so
# we need to take into account that it could run either earlier or
# later than this startup script; we check whether its menu has initialized
is_before_plugins_menu = PLUGINS_MENU is None
settings = get_settings(OPENPYPE_PLUGIN_NAME)
if settings.value(LAUNCH_AT_START_KEY, None) is None:
print("Initializing OpenPype plug-in on first run...")
if is_before_plugins_menu:
print("- running before 'painter_plugins_ui'")
# Delay the launch to the painter_plugins_ui initialization
settings.setValue(LAUNCH_AT_START_KEY, ON_STATE)
else:
# Launch now
print("- running after 'painter_plugins_ui'")
plugin_manager(OPENPYPE_PLUGIN_NAME)(True)
# Set the checked state in the menu to avoid confusion
action = next((action for action in PLUGINS_MENU._menu.actions()
if action.text() == OPENPYPE_PLUGIN_NAME), None)
if action is not None:
action.blockSignals(True)
action.setChecked(True)
action.blockSignals(False)
except Exception as exc:
print(exc)

View file

@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating textures."""
from openpype.pipeline import CreatedInstance, Creator, CreatorError
from openpype.lib import (
EnumDef,
UILabelDef,
NumberDef,
BoolDef
)
from openpype.hosts.substancepainter.api.pipeline import (
get_instances,
set_instance,
set_instances,
remove_instance
)
from openpype.hosts.substancepainter.api.lib import get_export_presets
import substance_painter.project
class CreateTextures(Creator):
"""Create a texture set."""
identifier = "io.openpype.creators.substancepainter.textureset"
label = "Textures"
family = "textureSet"
icon = "picture-o"
default_variant = "Main"
def create(self, subset_name, instance_data, pre_create_data):
if not substance_painter.project.is_open():
raise CreatorError("Can't create a Texture Set instance without "
"an open project.")
instance = self.create_instance_in_context(subset_name,
instance_data)
set_instance(
instance_id=instance["instance_id"],
instance_data=instance.data_to_store()
)
def collect_instances(self):
for instance in get_instances():
if (instance.get("creator_identifier") == self.identifier or
instance.get("family") == self.family):
self.create_instance_in_context_from_existing(instance)
def update_instances(self, update_list):
instance_data_by_id = {}
for instance, _changes in update_list:
# Persist the data
instance_id = instance.get("instance_id")
instance_data = instance.data_to_store()
instance_data_by_id[instance_id] = instance_data
set_instances(instance_data_by_id, update=True)
def remove_instances(self, instances):
for instance in instances:
remove_instance(instance["instance_id"])
self._remove_instance_from_context(instance)
# Helper methods (this might get moved into Creator class)
def create_instance_in_context(self, subset_name, data):
instance = CreatedInstance(
self.family, subset_name, data, self
)
self.create_context.creator_adds_instance(instance)
return instance
def create_instance_in_context_from_existing(self, data):
instance = CreatedInstance.from_existing(data, self)
self.create_context.creator_adds_instance(instance)
return instance
def get_instance_attr_defs(self):
return [
EnumDef("exportPresetUrl",
items=get_export_presets(),
label="Output Template"),
BoolDef("allowSkippedMaps",
label="Allow Skipped Output Maps",
tooltip="When enabled this allows the publish to ignore "
"output maps in the used output template if one "
"or more maps are skipped due to the required "
"channels not being present in the current file.",
default=True),
EnumDef("exportFileFormat",
items={
None: "Based on output template",
# TODO: Get available extensions from substance API
"bmp": "bmp",
"ico": "ico",
"jpeg": "jpeg",
"jng": "jng",
"pbm": "pbm",
"pgm": "pgm",
"png": "png",
"ppm": "ppm",
"tga": "targa",
"tif": "tiff",
"wap": "wap",
"wbmp": "wbmp",
"xpm": "xpm",
"gif": "gif",
"hdr": "hdr",
"exr": "exr",
"j2k": "j2k",
"jp2": "jp2",
"pfm": "pfm",
"webp": "webp",
# TODO: Unsure why jxr format fails to export
# "jxr": "jpeg-xr",
# TODO: File formats that combine the exported textures
# like psd are not correctly supported due to
# publishing only a single file
# "psd": "psd",
# "sbsar": "sbsar",
},
default=None,
label="File type"),
EnumDef("exportSize",
items={
None: "Based on each Texture Set's size",
# The key is size of the texture file in log2.
# (i.e. 10 means 2^10 = 1024)
7: "128",
8: "256",
9: "512",
10: "1024",
11: "2048",
12: "4096"
},
default=None,
label="Size"),
EnumDef("exportPadding",
items={
"passthrough": "No padding (passthrough)",
"infinite": "Dilation infinite",
"transparent": "Dilation + transparent",
"color": "Dilation + default background color",
"diffusion": "Dilation + diffusion"
},
default="infinite",
label="Padding"),
NumberDef("exportDilationDistance",
minimum=0,
maximum=256,
decimals=0,
default=16,
label="Dilation Distance"),
UILabelDef("*only used with "
"'Dilation + <x>' padding"),
]
def get_pre_create_attr_defs(self):
# Use same attributes as for instance attributes
return self.get_instance_attr_defs()

View file

@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating workfiles."""
from openpype.pipeline import CreatedInstance, AutoCreator
from openpype.client import get_asset_by_name
from openpype.hosts.substancepainter.api.pipeline import (
set_instances,
set_instance,
get_instances
)
import substance_painter.project
class CreateWorkfile(AutoCreator):
"""Workfile auto-creator."""
identifier = "io.openpype.creators.substancepainter.workfile"
label = "Workfile"
family = "workfile"
icon = "document"
default_variant = "Main"
def create(self):
if not substance_painter.project.is_open():
return
variant = self.default_variant
project_name = self.project_name
asset_name = self.create_context.get_current_asset_name()
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
# Workfile instance should always exist and must only exist once.
# As such we'll first check if it already exists and is collected.
current_instance = next(
(
instance for instance in self.create_context.instances
if instance.creator_identifier == self.identifier
), None)
if current_instance is None:
self.log.info("Auto-creating workfile instance...")
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": variant
}
current_instance = self.create_instance_in_context(subset_name,
data)
elif (
current_instance["asset"] != asset_name
or current_instance["task"] != task_name
):
# Update instance context if is not the same
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
current_instance["asset"] = asset_name
current_instance["task"] = task_name
current_instance["subset"] = subset_name
set_instance(
instance_id=current_instance.get("instance_id"),
instance_data=current_instance.data_to_store()
)
def collect_instances(self):
for instance in get_instances():
if (instance.get("creator_identifier") == self.identifier or
instance.get("family") == self.family):
self.create_instance_in_context_from_existing(instance)
def update_instances(self, update_list):
instance_data_by_id = {}
for instance, _changes in update_list:
# Persist the data
instance_id = instance.get("instance_id")
instance_data = instance.data_to_store()
instance_data_by_id[instance_id] = instance_data
set_instances(instance_data_by_id, update=True)
# Helper methods (this might get moved into Creator class)
def create_instance_in_context(self, subset_name, data):
instance = CreatedInstance(
self.family, subset_name, data, self
)
self.create_context.creator_adds_instance(instance)
return instance
def create_instance_in_context_from_existing(self, data):
instance = CreatedInstance.from_existing(data, self)
self.create_context.creator_adds_instance(instance)
return instance

View file

@ -0,0 +1,124 @@
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.pipeline.load import LoadError
from openpype.hosts.substancepainter.api.pipeline import (
imprint_container,
set_container_metadata,
remove_container_metadata
)
from openpype.hosts.substancepainter.api.lib import prompt_new_file_with_mesh
import substance_painter.project
import qargparse
class SubstanceLoadProjectMesh(load.LoaderPlugin):
"""Load mesh for project"""
families = ["*"]
representations = ["abc", "fbx", "obj", "gltf"]
label = "Load mesh"
order = -10
icon = "code-fork"
color = "orange"
options = [
qargparse.Boolean(
"preserve_strokes",
default=True,
help="Preserve strokes positions on mesh.\n"
"(only relevant when loading into existing project)"
),
qargparse.Boolean(
"import_cameras",
default=True,
help="Import cameras from the mesh file."
)
]
def load(self, context, name, namespace, data):
# Get user inputs
import_cameras = data.get("import_cameras", True)
preserve_strokes = data.get("preserve_strokes", True)
if not substance_painter.project.is_open():
# Allow to 'initialize' a new project
result = prompt_new_file_with_mesh(mesh_filepath=self.fname)
if not result:
self.log.info("User cancelled new project prompt.")
return
else:
# Reload the mesh
settings = substance_painter.project.MeshReloadingSettings(
import_cameras=import_cameras,
preserve_strokes=preserve_strokes
)
def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus): # noqa
if status == substance_painter.project.ReloadMeshStatus.SUCCESS: # noqa
self.log.info("Reload succeeded")
else:
raise LoadError("Reload of mesh failed")
path = self.fname
substance_painter.project.reload_mesh(path,
settings,
on_mesh_reload)
# Store container
container = {}
project_mesh_object_name = "_ProjectMesh_"
imprint_container(container,
name=project_mesh_object_name,
namespace=project_mesh_object_name,
context=context,
loader=self)
# We want to store some options for updating to keep consistent behavior
# with the user's original choice. We don't store 'preserve_strokes'
# as we always preserve strokes on updates.
container["options"] = {
"import_cameras": import_cameras,
}
set_container_metadata(project_mesh_object_name, container)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
path = get_representation_path(representation)
# Reload the mesh
container_options = container.get("options", {})
settings = substance_painter.project.MeshReloadingSettings(
import_cameras=container_options.get("import_cameras", True),
preserve_strokes=True
)
def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus):
if status == substance_painter.project.ReloadMeshStatus.SUCCESS:
self.log.info("Reload succeeded")
else:
raise LoadError("Reload of mesh failed")
substance_painter.project.reload_mesh(path, settings, on_mesh_reload)
# Update container representation
object_name = container["objectName"]
update_data = {"representation": str(representation["_id"])}
set_container_metadata(object_name, update_data, update=True)
def remove(self, container):
# Remove OpenPype related settings about what model was loaded
# or close the project?
# TODO: This is likely best 'hidden' away from the user because
# this will leave the project's mesh unmanaged.
remove_container_metadata(container["objectName"])

View file

@ -0,0 +1,17 @@
import pyblish.api
from openpype.pipeline import registered_host
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.49
label = "Current Workfile"
hosts = ["substancepainter"]
def process(self, context):
host = registered_host()
path = host.get_current_workfile()
context.data["currentFile"] = path
self.log.debug(f"Current workfile: {path}")

View file

@ -0,0 +1,196 @@
import os
import copy
import pyblish.api
from openpype.pipeline import publish
import substance_painter.textureset
from openpype.hosts.substancepainter.api.lib import (
get_parsed_export_maps,
strip_template
)
from openpype.pipeline.create import get_subset_name
from openpype.client import get_asset_by_name
class CollectTextureSet(pyblish.api.InstancePlugin):
"""Extract Textures using an output template config"""
# TODO: Production-test usage of color spaces
# TODO: Detect what source data channels end up in each file
label = "Collect Texture Set images"
hosts = ["substancepainter"]
families = ["textureSet"]
order = pyblish.api.CollectorOrder
def process(self, instance):
config = self.get_export_config(instance)
asset_doc = get_asset_by_name(
project_name=instance.context.data["projectName"],
asset_name=instance.data["asset"]
)
instance.data["exportConfig"] = config
maps = get_parsed_export_maps(config)
# Let's break the instance into multiple instances to integrate
# a subset per generated texture or texture UDIM sequence
for (texture_set_name, stack_name), template_maps in maps.items():
self.log.info(f"Processing {texture_set_name}/{stack_name}")
for template, outputs in template_maps.items():
self.log.info(f"Processing {template}")
self.create_image_instance(instance, template, outputs,
asset_doc=asset_doc,
texture_set_name=texture_set_name,
stack_name=stack_name)
def create_image_instance(self, instance, template, outputs,
asset_doc, texture_set_name, stack_name):
"""Create a new instance per image or UDIM sequence.
The new instances will be of family `image`.
"""
context = instance.context
first_filepath = outputs[0]["filepath"]
fnames = [os.path.basename(output["filepath"]) for output in outputs]
ext = os.path.splitext(first_filepath)[1]
assert ext.lstrip("."), f"No extension: {ext}"
always_include_texture_set_name = False # todo: make this configurable
all_texture_sets = substance_painter.textureset.all_texture_sets()
texture_set = substance_painter.textureset.TextureSet.from_name(
texture_set_name
)
# Define the suffix we want to give this particular texture
# set and set up a remapped subset naming for it.
suffix = ""
if always_include_texture_set_name or len(all_texture_sets) > 1:
# More than one texture set, include texture set name
suffix += f".{texture_set_name}"
if texture_set.is_layered_material() and stack_name:
# More than one stack, include stack name
suffix += f".{stack_name}"
# Always include the map identifier
map_identifier = strip_template(template)
suffix += f".{map_identifier}"
image_subset = get_subset_name(
# TODO: The family actually isn't 'texture' currently but for now
# this is only done so the subset name starts with 'texture'
family="texture",
variant=instance.data["variant"] + suffix,
task_name=instance.data.get("task"),
asset_doc=asset_doc,
project_name=context.data["projectName"],
host_name=context.data["hostName"],
project_settings=context.data["project_settings"]
)
# Prepare representation
representation = {
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": fnames if len(fnames) > 1 else fnames[0],
}
# Mark as UDIM explicitly if it has UDIM tiles.
if bool(outputs[0].get("udim")):
# The representation for a UDIM sequence should have a `udim` key
# that is a list of all udim tiles (str) like: ["1001", "1002"]
# strings. See CollectTextures plug-in and Integrators.
representation["udim"] = [output["udim"] for output in outputs]
# Set up the representation for thumbnail generation
# TODO: Simplify this once thumbnail extraction is refactored
staging_dir = os.path.dirname(first_filepath)
representation["tags"] = ["review"]
representation["stagingDir"] = staging_dir
# Clone the instance
image_instance = context.create_instance(image_subset)
image_instance[:] = instance[:]
image_instance.data.update(copy.deepcopy(instance.data))
image_instance.data["name"] = image_subset
image_instance.data["label"] = image_subset
image_instance.data["subset"] = image_subset
image_instance.data["family"] = "image"
image_instance.data["families"] = ["image", "textures"]
image_instance.data["representations"] = [representation]
# Group the textures together in the loader
image_instance.data["subsetGroup"] = instance.data["subset"]
# Store the texture set name and stack name on the instance
image_instance.data["textureSetName"] = texture_set_name
image_instance.data["textureStackName"] = stack_name
# Store color space with the instance
# Note: The extractor will assign it to the representation
colorspace = outputs[0].get("colorSpace")
if colorspace:
self.log.debug(f"{image_subset} colorspace: {colorspace}")
image_instance.data["colorspace"] = colorspace
# Store the instance in the original instance as a member
instance.append(image_instance)
def get_export_config(self, instance):
"""Return an export configuration dict for texture exports.
This config can be supplied to:
- `substance_painter.export.export_project_textures`
- `substance_painter.export.list_project_textures`
See documentation on substance_painter.export module about the
formatting of the configuration dictionary.
Args:
instance (pyblish.api.Instance): Texture Set instance to be
published.
Returns:
dict: Export config
"""
creator_attrs = instance.data["creator_attributes"]
preset_url = creator_attrs["exportPresetUrl"]
self.log.debug(f"Exporting using preset: {preset_url}")
# See: https://substance3d.adobe.com/documentation/ptpy/api/substance_painter/export # noqa
config = { # noqa
"exportShaderParams": True,
"exportPath": publish.get_instance_staging_dir(instance),
"defaultExportPreset": preset_url,
# Custom overrides to the exporter
"exportParameters": [
{
"parameters": {
"fileFormat": creator_attrs["exportFileFormat"],
"sizeLog2": creator_attrs["exportSize"],
"paddingAlgorithm": creator_attrs["exportPadding"],
"dilationDistance": creator_attrs["exportDilationDistance"] # noqa
}
}
]
}
# Create the list of Texture Sets to export.
config["exportList"] = []
for texture_set in substance_painter.textureset.all_texture_sets():
config["exportList"].append({"rootPath": texture_set.name()})
# Consider None values from the creator attributes optionals
for override in config["exportParameters"]:
parameters = override.get("parameters")
for key, value in dict(parameters).items():
if value is None:
parameters.pop(key)
return config
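# A minimal sketch of how the export config produced above is consumed: the
# same dict drives both the dry listing of expected textures and the actual
# export performed by the extractor.
def _usage_sketch_export(config):
    import substance_painter.export
    expected = substance_painter.export.list_project_textures(config)
    result = substance_painter.export.export_project_textures(config)
    return expected, result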

View file

@ -0,0 +1,26 @@
import os
import pyblish.api
class CollectWorkfileRepresentation(pyblish.api.InstancePlugin):
"""Create a publish representation for the current workfile instance."""
order = pyblish.api.CollectorOrder
label = "Workfile representation"
hosts = ["substancepainter"]
families = ["workfile"]
def process(self, instance):
context = instance.context
current_file = context.data["currentFile"]
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
instance.data["representations"] = [{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": file,
"stagingDir": folder,
}]

View file

@ -0,0 +1,62 @@
import substance_painter.export
from openpype.pipeline import KnownPublishError, publish
class ExtractTextures(publish.Extractor,
publish.ColormanagedPyblishPluginMixin):
"""Extract Textures using an output template config.
Note:
This Extractor assumes that `collect_textureset_images` has prepared
the relevant export config and has also collected the individual image
instances for publishing, including their representations. That is why this
particular Extractor doesn't specify representations to integrate.
"""
label = "Extract Texture Set"
hosts = ["substancepainter"]
families = ["textureSet"]
# Run before thumbnail extractors
order = publish.Extractor.order - 0.1
def process(self, instance):
config = instance.data["exportConfig"]
result = substance_painter.export.export_project_textures(config)
if result.status != substance_painter.export.ExportStatus.Success:
raise KnownPublishError(
"Failed to export texture set: {}".format(result.message)
)
# Log what files we generated
for (texture_set_name, stack_name), maps in result.textures.items():
# Log our texture outputs
self.log.info(f"Exported stack: {texture_set_name} {stack_name}")
for texture_map in maps:
self.log.info(f"Exported texture: {texture_map}")
# We'll insert the color space data for each image instance that we
# added into this texture set. The collector couldn't do so because
# some anatomy and other instance data needs to be collected first.
context = instance.context
for image_instance in instance:
representation = next(iter(image_instance.data["representations"]))
colorspace = image_instance.data.get("colorspace")
if not colorspace:
self.log.debug("No color space data present for instance: "
f"{image_instance}")
continue
self.set_representation_colorspace(representation,
context=context,
colorspace=colorspace)
# The TextureSet instance should not be integrated. It generates no
# output data. Instead the separated texture instances are generated
# from it which themselves integrate into the database.
instance.data["integrate"] = False

View file

@ -0,0 +1,23 @@
import pyblish.api
from openpype.lib import version_up
from openpype.pipeline import registered_host
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
"""Increment current workfile version."""
order = pyblish.api.IntegratorOrder + 1
label = "Increment Workfile Version"
optional = True
hosts = ["substancepainter"]
def process(self, context):
assert all(result["success"] for result in context.data["results"]), (
"Publishing not successful so version is not increased.")
host = registered_host()
path = context.data["currentFile"]
self.log.info(f"Incrementing current workfile to: {path}")
host.save_workfile(version_up(path))

View file

@ -0,0 +1,27 @@
import pyblish.api
from openpype.pipeline import (
registered_host,
KnownPublishError
)
class SaveCurrentWorkfile(pyblish.api.ContextPlugin):
"""Save current workfile"""
label = "Save current workfile"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["substancepainter"]
def process(self, context):
host = registered_host()
if context.data["currentFile"] != host.get_current_workfile():
raise KnownPublishError("Workfile has changed during publishing!")
if host.has_unsaved_changes():
self.log.info("Saving current file..")
host.save_workfile()
else:
self.log.debug("Skipping workfile save because there are no "
"unsaved changes.")

View file

@ -0,0 +1,109 @@
import copy
import os
import pyblish.api
import substance_painter.export
from openpype.pipeline import PublishValidationError
class ValidateOutputMaps(pyblish.api.InstancePlugin):
"""Validate all output maps for Output Template are generated.
Output maps will be skipped by Substance Painter if it is an output
map in the Substance Output Template which uses channels that the current
substance painter project has not painted or generated.
"""
order = pyblish.api.ValidatorOrder
label = "Validate output maps"
hosts = ["substancepainter"]
families = ["textureSet"]
def process(self, instance):
config = instance.data["exportConfig"]
# The Substance Painter API does not allow querying the actual output maps
# it will generate without actually exporting the files. So we try to
# generate the smallest / fastest export possible
config = copy.deepcopy(config)
parameters = config["exportParameters"][0]["parameters"]
parameters["sizeLog2"] = [1, 1] # output 2x2 images (smallest)
parameters["paddingAlgorithm"] = "passthrough" # no dilation (faster)
parameters["dithering"] = False # no dithering (faster)
result = substance_painter.export.export_project_textures(config)
if result.status != substance_painter.export.ExportStatus.Success:
raise PublishValidationError(
"Failed to export texture set: {}".format(result.message)
)
generated_files = set()
for texture_maps in result.textures.values():
for texture_map in texture_maps:
generated_files.add(os.path.normpath(texture_map))
# Directly clean up our temporary export
os.remove(texture_map)
creator_attributes = instance.data.get("creator_attributes", {})
allow_skipped_maps = creator_attributes.get("allowSkippedMaps", True)
error_report_missing = []
for image_instance in instance:
# Confirm whether the instance has its expected files generated.
# We assume there's just one representation and that it is
# the actual texture representation from the collector.
representation = next(iter(image_instance.data["representations"]))
staging_dir = representation["stagingDir"]
filenames = representation["files"]
if not isinstance(filenames, (list, tuple)):
# Convert single file to list
filenames = [filenames]
missing = []
for filename in filenames:
filepath = os.path.join(staging_dir, filename)
filepath = os.path.normpath(filepath)
if filepath not in generated_files:
self.log.warning(f"Missing texture: {filepath}")
missing.append(filepath)
if not missing:
continue
if allow_skipped_maps:
# TODO: This is changing state on the instances, which
# should not be done during validation.
self.log.warning(f"Disabling texture instance: "
f"{image_instance}")
image_instance.data["active"] = False
image_instance.data["integrate"] = False
representation.setdefault("tags", []).append("delete")
continue
else:
error_report_missing.append((image_instance, missing))
if error_report_missing:
message = (
"The Texture Set skipped exporting some output maps which are "
"defined in the Output Template. This happens if the Output "
"Templates exports maps from channels which you do not "
"have in your current Substance Painter project.\n\n"
"To allow this enable the *Allow Skipped Output Maps* setting "
"on the instance.\n\n"
f"Instance {instance} skipped exporting output maps:\n"
""
)
for image_instance, missing in error_report_missing:
missing_str = ", ".join(missing)
message += f"- **{image_instance}** skipped: {missing_str}\n"
raise PublishValidationError(
message=message,
title="Missing output maps"
)

View file

@ -1,15 +1,20 @@
import os
import shutil
from time import sleep
from openpype.client.entities import (
get_last_version_by_subset_id,
get_representations,
get_subsets,
get_project
)
from openpype.lib import PreLaunchHook
from openpype.lib.local_settings import get_local_site_id
from openpype.lib.profiles_filtering import filter_profiles
from openpype.pipeline.load.utils import get_representation_path
from openpype.modules.sync_server.sync_server import (
download_last_published_workfile,
)
from openpype.pipeline.template_data import get_template_data
from openpype.pipeline.workfile.path_resolving import (
get_workfile_template_key,
)
from openpype.settings.lib import get_project_settings
@ -22,7 +27,11 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
# Before `AddLastWorkfileToLaunchArgs`
order = -1
app_groups = ["blender", "photoshop", "tvpaint", "aftereffects"]
# any DCC could be used except TrayPublisher and other specials
app_groups = ["blender", "photoshop", "tvpaint", "aftereffects",
"nuke", "nukeassist", "nukex", "hiero", "nukestudio",
"maya", "harmony", "celaction", "flame", "fusion",
"houdini", "tvpaint"]
def execute(self):
"""Check if local workfile doesn't exist, else copy it.
@ -31,11 +40,11 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
2- Check if workfile in work area doesn't exist
3- Check if published workfile exists and is copied locally in publish
4- Substitute copied published workfile as first workfile
           with its version incremented by 1
Returns:
None: This is a void method.
"""
sync_server = self.modules_manager.get("sync_server")
if not sync_server or not sync_server.enabled:
self.log.debug("Sync server module is not enabled or available")
@ -53,6 +62,7 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
# Get data
project_name = self.data["project_name"]
asset_name = self.data["asset_name"]
task_name = self.data["task_name"]
task_type = self.data["task_type"]
host_name = self.application.host_name
@ -68,6 +78,8 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
"hosts": host_name,
}
last_workfile_settings = filter_profiles(profiles, filter_data)
if not last_workfile_settings:
return
use_last_published_workfile = last_workfile_settings.get(
"use_last_published_workfile"
)
@ -92,57 +104,27 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
)
return
max_retries = int((sync_server.sync_project_settings[project_name]
["config"]
["retry_cnt"]))
self.log.info("Trying to fetch last published workfile...")
project_doc = self.data.get("project_doc")
asset_doc = self.data.get("asset_doc")
anatomy = self.data.get("anatomy")
# Check it can proceed
if not project_doc and not asset_doc:
return
context_filters = {
"asset": asset_name,
"family": "workfile",
"task": {"name": task_name, "type": task_type}
}
# Get subset id
subset_id = next(
(
subset["_id"]
for subset in get_subsets(
project_name,
asset_ids=[asset_doc["_id"]],
fields=["_id", "data.family", "data.families"],
)
if subset["data"].get("family") == "workfile"
# Legacy compatibility
or "workfile" in subset["data"].get("families", {})
),
None,
)
if not subset_id:
self.log.debug(
'No any workfile for asset "{}".'.format(asset_doc["name"])
)
return
workfile_representations = list(get_representations(
project_name,
context_filters=context_filters
))
# Get workfile representation
last_version_doc = get_last_version_by_subset_id(
project_name, subset_id, fields=["_id"]
)
if not last_version_doc:
self.log.debug("Subset does not have any versions")
return
workfile_representation = next(
(
representation
for representation in get_representations(
project_name, version_ids=[last_version_doc["_id"]]
)
if representation["context"]["task"]["name"] == task_name
),
None,
)
if not workfile_representation:
if not workfile_representations:
self.log.debug(
'No published workfile for task "{}" and host "{}".'.format(
task_name, host_name
@ -150,28 +132,55 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
)
return
local_site_id = get_local_site_id()
sync_server.add_site(
project_name,
workfile_representation["_id"],
local_site_id,
force=True,
priority=99,
reset_timer=True,
filtered_repres = filter(
lambda r: r["context"].get("version") is not None,
workfile_representations
)
while not sync_server.is_representation_on_site(
project_name, workfile_representation["_id"], local_site_id
):
sleep(5)
# Get paths
published_workfile_path = get_representation_path(
workfile_representation, root=anatomy.roots
workfile_representation = max(
filtered_repres, key=lambda r: r["context"]["version"]
)
local_workfile_dir = os.path.dirname(last_workfile)
# Copy file and substitute path
self.data["last_workfile_path"] = shutil.copy(
published_workfile_path, local_workfile_dir
last_published_workfile_path = download_last_published_workfile(
host_name,
project_name,
task_name,
workfile_representation,
max_retries,
anatomy=anatomy
)
if not last_published_workfile_path:
self.log.debug(
"Couldn't download {}".format(last_published_workfile_path)
)
return
project_doc = self.data["project_doc"]
project_settings = self.data["project_settings"]
template_key = get_workfile_template_key(
task_name, host_name, project_name, project_settings
)
# Get workfile data
workfile_data = get_template_data(
project_doc, asset_doc, task_name, host_name
)
extension = last_published_workfile_path.split(".")[-1]
workfile_data["version"] = (
workfile_representation["context"]["version"] + 1)
workfile_data["ext"] = extension
anatomy_result = anatomy.format(workfile_data)
local_workfile_path = anatomy_result[template_key]["path"]
# Copy last published workfile to local workfile directory
shutil.copy(
last_published_workfile_path,
local_workfile_path,
)
self.data["last_workfile_path"] = local_workfile_path
# Keep source filepath for further path conformation
self.data["source_filepath"] = last_published_workfile_path

View file

@ -3,10 +3,15 @@ import os
import asyncio
import threading
import concurrent.futures
from concurrent.futures._base import CancelledError
from time import sleep
from .providers import lib
from openpype.client.entity_links import get_linked_representation_id
from openpype.lib import Logger
from openpype.lib.local_settings import get_local_site_id
from openpype.modules.base import ModulesManager
from openpype.pipeline import Anatomy
from openpype.pipeline.load.utils import get_representation_path_with_anatomy
from .utils import SyncStatus, ResumableError
@ -189,6 +194,98 @@ def _site_is_working(module, project_name, site_name, site_config):
return handler.is_active()
def download_last_published_workfile(
host_name: str,
project_name: str,
task_name: str,
workfile_representation: dict,
max_retries: int,
anatomy: Anatomy = None,
) -> str:
"""Download the last published workfile
Args:
host_name (str): Host name.
project_name (str): Project name.
task_name (str): Task name.
workfile_representation (dict): Workfile representation.
        max_retries (int): number of attempts before the download is considered failed
anatomy (Anatomy, optional): Anatomy (Used for optimization).
Defaults to None.
Returns:
        str: Localized path to the last published workfile.
"""
if not anatomy:
anatomy = Anatomy(project_name)
# Get sync server module
sync_server = ModulesManager().modules_by_name.get("sync_server")
if not sync_server or not sync_server.enabled:
print("Sync server module is disabled or unavailable.")
return
if not workfile_representation:
print(
"Not published workfile for task '{}' and host '{}'.".format(
task_name, host_name
)
)
return
last_published_workfile_path = get_representation_path_with_anatomy(
workfile_representation, anatomy
)
if (not last_published_workfile_path or
not os.path.exists(last_published_workfile_path)):
return
# If representation isn't available on remote site, then return.
if not sync_server.is_representation_on_site(
project_name,
workfile_representation["_id"],
sync_server.get_remote_site(project_name),
):
print(
"Representation for task '{}' and host '{}'".format(
task_name, host_name
)
)
return
# Get local site
local_site_id = get_local_site_id()
# Add workfile representation to local site
representation_ids = {workfile_representation["_id"]}
representation_ids.update(
get_linked_representation_id(
project_name, repre_id=workfile_representation["_id"]
)
)
for repre_id in representation_ids:
if not sync_server.is_representation_on_site(project_name, repre_id,
local_site_id):
sync_server.add_site(
project_name,
repre_id,
local_site_id,
force=True,
priority=99
)
sync_server.reset_timer()
print("Starting to download:{}".format(last_published_workfile_path))
# While representation unavailable locally, wait.
while not sync_server.is_representation_on_site(
project_name, workfile_representation["_id"], local_site_id,
max_retries=max_retries
):
sleep(5)
return last_published_workfile_path
class SyncServerThread(threading.Thread):
"""
Separate thread running synchronization server with asyncio loop.
@ -358,7 +455,6 @@ class SyncServerThread(threading.Thread):
duration = time.time() - start_time
self.log.debug("One loop took {:.2f}s".format(duration))
delay = self.module.get_loop_delay(project_name)
self.log.debug(
"Waiting for {} seconds to new loop".format(delay)
@ -370,8 +466,8 @@ class SyncServerThread(threading.Thread):
self.log.warning(
"ConnectionResetError in sync loop, trying next loop",
exc_info=True)
except CancelledError:
# just stopping server
except asyncio.exceptions.CancelledError:
# cancelling timer
pass
except ResumableError:
self.log.warning(

View file

@ -838,6 +838,18 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
return ret_dict
def get_launch_hook_paths(self):
"""Implementation for applications launch hooks.
Returns:
            (str): full absolute path to directory with hooks for the module
"""
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"launch_hooks"
)
# Needs to be refactored after Settings are updated
    # # Methods for Settings to get appropriate values to fill forms
# def get_configurable_items(self, scope=None):
@ -1045,9 +1057,23 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
self.sync_server_thread.reset_timer()
def is_representation_on_site(
self, project_name, representation_id, site_name
self, project_name, representation_id, site_name, max_retries=None
):
"""Checks if 'representation_id' has all files avail. on 'site_name'"""
"""Checks if 'representation_id' has all files avail. on 'site_name'
Args:
project_name (str)
representation_id (str)
site_name (str)
            max_retries (int, optional): provide only if the method is used in
                a while loop to be able to bail out
Returns:
(bool): True if 'representation_id' has all files correctly on the
'site_name'
Raises:
            (ValueError): Only if 'max_retries' is provided and the upload/download
                failed too many times, to prevent an infinite loop.
"""
representation = get_representation_by_id(project_name,
representation_id,
fields=["_id", "files"])
@ -1060,6 +1086,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
if site["name"] != site_name:
continue
if max_retries:
tries = self._get_tries_count_from_rec(site)
if tries >= max_retries:
raise ValueError("Failed too many times")
if (site.get("progress") or site.get("error") or
not site.get("created_dt")):
return False

View file

@ -19,9 +19,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
order = pyblish.api.ExtractorOrder
families = [
"imagesequence", "render", "render2d", "prerender",
"source", "clip", "take", "online"
"source", "clip", "take", "online", "image"
]
hosts = ["shell", "fusion", "resolve", "traypublisher"]
hosts = ["shell", "fusion", "resolve", "traypublisher", "substancepainter"]
enabled = False
# presetable attribute

View file

@ -163,6 +163,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"Instance is marked to be processed on farm. Skipping")
return
# Instance is marked to not get integrated
if not instance.data.get("integrate", True):
self.log.info("Instance is marked to skip integrating. Skipping")
return
filtered_repres = self.filter_representations(instance)
# Skip instance if there are not representations to integrate
# all representations should not be integrated

Binary file not shown.

After

Width:  |  Height:  |  Size: 104 KiB

View file

@ -1460,7 +1460,8 @@
},
"reference_loader": {
"namespace": "{asset_name}_{subset}_##_",
"group_name": "_GRP"
"group_name": "_GRP",
"display_handle": true
}
},
"workfile_build": {

View file

@ -10,23 +10,40 @@
}
},
"create": {
"CreateImage": {
"defaults": [
"ImageCreator": {
"enabled": true,
"active_on_create": true,
"mark_for_review": false,
"default_variants": [
"Main"
]
},
"AutoImageCreator": {
"enabled": false,
"active_on_create": true,
"mark_for_review": false,
"default_variant": ""
},
"ReviewCreator": {
"enabled": true,
"active_on_create": true,
"default_variant": ""
},
"WorkfileCreator": {
"enabled": true,
"active_on_create": true,
"default_variant": "Main"
}
},
"publish": {
"CollectColorCodedInstances": {
"enabled": true,
"create_flatten_image": "no",
"flatten_subset_template": "",
"color_code_mapping": []
},
"CollectInstances": {
"flatten_subset_template": ""
},
"CollectReview": {
"publish": true
"enabled": true
},
"CollectVersion": {
"enabled": false

View file

@ -0,0 +1,13 @@
{
"imageio": {
"ocio_config": {
"enabled": true,
"filepath": []
},
"file_rules": {
"enabled": true,
"rules": {}
}
},
"shelves": {}
}

View file

@ -1479,6 +1479,33 @@
}
}
},
"substancepainter": {
"enabled": true,
"label": "Substance Painter",
"icon": "app_icons/substancepainter.png",
"host_name": "substancepainter",
"environment": {},
"variants": {
"8-2-0": {
"executables": {
"windows": [
"C:\\Program Files\\Adobe\\Adobe Substance 3D Painter\\Adobe Substance 3D Painter.exe"
],
"darwin": [],
"linux": []
},
"arguments": {
"windows": [],
"darwin": [],
"linux": []
},
"environment": {}
},
"__dynamic_keys_labels__": {
"8-2-0": "8.2.0"
}
}
},
"unreal": {
"enabled": true,
"label": "Unreal Editor",

View file

@ -168,6 +168,7 @@ class HostsEnumEntity(BaseEnumEntity):
"tvpaint",
"unreal",
"standalonepublisher",
"substancepainter",
"traypublisher",
"webpublisher"
]

View file

@ -122,6 +122,10 @@
"type": "schema",
"name": "schema_project_photoshop"
},
{
"type": "schema",
"name": "schema_project_substancepainter"
},
{
"type": "schema",
"name": "schema_project_harmony"

View file

@ -31,16 +31,126 @@
{
"type": "dict",
"collapsible": true,
"key": "CreateImage",
"key": "ImageCreator",
"label": "Create Image",
"checkbox_key": "enabled",
"children": [
{
"type": "label",
"label": "Manually create instance from layer or group of layers. \n Separate review could be created for this image to be sent to Asset Management System."
},
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "active_on_create",
"label": "Active by default"
},
{
"type": "boolean",
"key": "mark_for_review",
"label": "Review by default"
},
{
"type": "list",
"key": "defaults",
"label": "Default Subsets",
"key": "default_variants",
"label": "Default Variants",
"object_type": "text"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "AutoImageCreator",
"label": "Create Flatten Image",
"checkbox_key": "enabled",
"children": [
{
"type": "label",
"label": "Auto create image for all visible layers, used for simplified processing. \n Separate review could be created for this image to be sent to Asset Management System."
},
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "active_on_create",
"label": "Active by default"
},
{
"type": "boolean",
"key": "mark_for_review",
"label": "Review by default"
},
{
"type": "text",
"key": "default_variant",
"label": "Default variant"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ReviewCreator",
"label": "Create Review",
"checkbox_key": "enabled",
"children": [
{
"type": "label",
"label": "Auto create review instance containing all published image instances or visible layers if no image instance."
},
{
"type": "boolean",
"key": "enabled",
"label": "Enabled",
"default": true
},
{
"type": "boolean",
"key": "active_on_create",
"label": "Active by default"
},
{
"type": "text",
"key": "default_variant",
"label": "Default variant"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "WorkfileCreator",
"label": "Create Workfile",
"checkbox_key": "enabled",
"children": [
{
"type": "label",
"label": "Auto create workfile instance"
},
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "active_on_create",
"label": "Active by default"
},
{
"type": "text",
"key": "default_variant",
"label": "Default variant"
}
]
}
]
},
@ -56,11 +166,18 @@
"is_group": true,
"key": "CollectColorCodedInstances",
"label": "Collect Color Coded Instances",
"checkbox_key": "enabled",
"children": [
{
"type": "label",
"label": "Set color for publishable layers, set its resulting family and template for subset name. \nCan create flatten image from published instances.(Applicable only for remote publishing!)"
},
{
"type": "boolean",
"key": "enabled",
"label": "Enabled",
"default": true
},
{
"key": "create_flatten_image",
"label": "Create flatten image",
@ -131,40 +248,26 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "CollectInstances",
"label": "Collect Instances",
"children": [
{
"type": "label",
"label": "Name for flatten image created if no image instance present"
},
{
"type": "text",
"key": "flatten_subset_template",
"label": "Subset template for flatten image"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "CollectReview",
"label": "Collect Review",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "publish",
"label": "Active"
}
]
"key": "enabled",
"label": "Enabled",
"default": true
}
]
},
{
"type": "dict",
"key": "CollectVersion",
"label": "Collect Version",
"checkbox_key": "enabled",
"children": [
{
"type": "label",

View file

@ -0,0 +1,35 @@
{
"type": "dict",
"collapsible": true,
"key": "substancepainter",
"label": "Substance Painter",
"is_file": true,
"children": [
{
"key": "imageio",
"type": "dict",
"label": "Color Management (ImageIO)",
"is_group": true,
"children": [
{
"type": "schema",
"name": "schema_imageio_config"
},
{
"type": "schema",
"name": "schema_imageio_file_rules"
}
]
},
{
"type": "dict-modifiable",
"key": "shelves",
"label": "Shelves",
"use_label_wrap": true,
"object_type": {
"type": "text"
}
}
]
}

View file

@ -111,6 +111,14 @@
{
"type": "label",
"label": "Here's a link to the doc where you can find explanations about customing the naming of referenced assets: https://openpype.io/docs/admin_hosts_maya#load-plugins"
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "display_handle",
"label": "Display Handle On Load References"
}
]
}

View file

@ -0,0 +1,40 @@
{
"type": "dict",
"key": "substancepainter",
"label": "Substance Painter",
"collapsible": true,
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "schema_template",
"name": "template_host_unchangables"
},
{
"key": "environment",
"label": "Environment",
"type": "raw-json"
},
{
"type": "dict-modifiable",
"key": "variants",
"collapsible_key": true,
"use_label_wrap": false,
"object_type": {
"type": "dict",
"collapsible": true,
"children": [
{
"type": "schema_template",
"name": "template_host_variant_items",
"skip_paths": ["use_python_2"]
}
]
}
}
]
}

View file

@ -93,6 +93,10 @@
"type": "schema",
"name": "schema_celaction"
},
{
"type": "schema",
"name": "schema_substancepainter"
},
{
"type": "schema",
"name": "schema_unreal"

View file

@ -132,6 +132,7 @@ QPushButton {
border-radius: 0.2em;
padding: 3px 5px 3px 5px;
background: {color:bg-buttons};
min-width: 0px; /* Substance Painter fix */
}
QPushButton:hover {
@ -337,7 +338,15 @@ QTabWidget::tab-bar {
alignment: left;
}
/* avoid QTabBar overrides in Substance Painter */
QTabBar {
text-transform: none;
font-weight: normal;
}
QTabBar::tab {
text-transform: none;
font-weight: normal;
border-top: 1px solid {color:border};
border-left: 1px solid {color:border};
border-right: 1px solid {color:border};
@ -377,6 +386,7 @@ QHeaderView {
QHeaderView::section {
background: {color:bg-view-header};
padding: 4px;
border-top: 0px; /* Substance Painter fix */
border-right: 1px solid {color:bg-view};
border-radius: 0px;
text-align: center;

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.15.6-nightly.2"
__version__ = "3.15.6-nightly.3"

View file

@ -0,0 +1,93 @@
import logging
from tests.lib.assert_classes import DBAssert
from tests.integration.hosts.photoshop.lib import PhotoshopTestClass
log = logging.getLogger("test_publish_in_photoshop")
class TestPublishInPhotoshopAutoImage(PhotoshopTestClass):
"""Test for publish in Phohoshop with different review configuration.
Workfile contains 3 layers, auto image and review instances created.
Test contains updates to Settings!!!
"""
PERSIST = True
TEST_FILES = [
("1iLF6aNI31qlUCD1rGg9X9eMieZzxL-rc",
"test_photoshop_publish_auto_image.zip", "")
]
APP_GROUP = "photoshop"
# keep empty to locate latest installed variant or explicit
APP_VARIANT = ""
APP_NAME = "{}/{}".format(APP_GROUP, APP_VARIANT)
TIMEOUT = 120 # publish timeout
def test_db_asserts(self, dbcon, publish_finished):
"""Host and input data dependent expected results in DB."""
print("test_db_asserts")
failures = []
failures.append(DBAssert.count_of_types(dbcon, "version", 3))
failures.append(
DBAssert.count_of_types(dbcon, "version", 0, name={"$ne": 1}))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 0,
name="imageMainForeground"))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 0,
name="imageMainBackground"))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="workfileTest_task"))
failures.append(
DBAssert.count_of_types(dbcon, "representation", 5))
additional_args = {"context.subset": "imageMainForeground",
"context.ext": "png"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 0,
additional_args=additional_args))
additional_args = {"context.subset": "imageMainBackground",
"context.ext": "png"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 0,
additional_args=additional_args))
# review from image
additional_args = {"context.subset": "imageBeautyMain",
"context.ext": "jpg",
"name": "jpg_jpg"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
additional_args = {"context.subset": "imageBeautyMain",
"context.ext": "jpg",
"name": "jpg"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
additional_args = {"context.subset": "review"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
assert not any(failures)
if __name__ == "__main__":
test_case = TestPublishInPhotoshopAutoImage()

View file

@ -0,0 +1,111 @@
import logging
from tests.lib.assert_classes import DBAssert
from tests.integration.hosts.photoshop.lib import PhotoshopTestClass
log = logging.getLogger("test_publish_in_photoshop")
class TestPublishInPhotoshopImageReviews(PhotoshopTestClass):
"""Test for publish in Phohoshop with different review configuration.
Workfile contains 2 image instance, one has review flag, second doesn't.
Regular `review` family is disabled.
    Expected result is for `imageMainForeground` to have an additional file with a
    review, `imageMainBackground` without. No separate `review` family.
    `test_project_test_asset_imageMainForeground_v001_jpg.jpg` is the expected name
    of the imageMainForeground review; the `_jpg` suffix is needed to differentiate
    between the image and the review file.
"""
PERSIST = True
TEST_FILES = [
("12WGbNy9RJ3m9jlnk0Ib9-IZmONoxIz_p",
"test_photoshop_publish_review.zip", "")
]
APP_GROUP = "photoshop"
# keep empty to locate latest installed variant or explicit
APP_VARIANT = ""
APP_NAME = "{}/{}".format(APP_GROUP, APP_VARIANT)
TIMEOUT = 120 # publish timeout
def test_db_asserts(self, dbcon, publish_finished):
"""Host and input data dependent expected results in DB."""
print("test_db_asserts")
failures = []
failures.append(DBAssert.count_of_types(dbcon, "version", 3))
failures.append(
DBAssert.count_of_types(dbcon, "version", 0, name={"$ne": 1}))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="imageMainForeground"))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="imageMainBackground"))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="workfileTest_task"))
failures.append(
DBAssert.count_of_types(dbcon, "representation", 6))
additional_args = {"context.subset": "imageMainForeground",
"context.ext": "png"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
additional_args = {"context.subset": "imageMainForeground",
"context.ext": "jpg"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 2,
additional_args=additional_args))
additional_args = {"context.subset": "imageMainForeground",
"context.ext": "jpg",
"context.representation": "jpg_jpg"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
additional_args = {"context.subset": "imageMainBackground",
"context.ext": "png"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
additional_args = {"context.subset": "imageMainBackground",
"context.ext": "jpg"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
additional_args = {"context.subset": "imageMainBackground",
"context.ext": "jpg",
"context.representation": "jpg_jpg"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 0,
additional_args=additional_args))
additional_args = {"context.subset": "review"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 0,
additional_args=additional_args))
assert not any(failures)
if __name__ == "__main__":
test_case = TestPublishInPhotoshopImageReviews()

View file

@ -0,0 +1,127 @@
---
id: admin_hosts_photoshop
title: Photoshop Settings
sidebar_label: Photoshop
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Photoshop settings
There are a couple of settings that configure the publishing process for **Photoshop**.
All of them are project-based, i.e. each project can have a different configuration.
Location: Settings > Project > Photoshop
![Photoshop Project Settings](assets/admin_hosts_photoshop_settings.png)
## Color Management (ImageIO)
Placeholder for Color Management. Currently not implemented yet.
## Creator plugins
Contains configurable items for creators used during publishing from Photoshop.
### Create Image
Provides a list of [variants](artist_concepts.md#variant) that will be shown to the artist in the Publisher. The default value is `Main`.
### Create Flatten Image
Provides a simplified publishing process. It will create a single `image` instance for the artist automatically. This instance will
produce a flattened image from all visible layers in the workfile.
- Subset template for flatten image - provides the template for the subset name of this instance (example `imageBeauty`)
- Review - whether a separate review should be created for this instance
### Create Review
Creates a single `review` instance automatically. This allows artists to disable it if needed.
### Create Workfile
Creates a single `workfile` instance automatically. This allows artists to disable it if needed.
## Publish plugins
Contains configurable items for publish plugins used during publishing from Photoshop.
### Collect Color Coded Instances
Used only in remote publishing!
Allows automatic creation of `image` instances based on a configurable highlight color set on a layer or group in the workfile.
#### Create flatten image
- Flatten with images - produce additional `image` with all published `image` instances merged
- Flatten only - produce only merged `image` instance
- No - produce only separate `image` instances
#### Subset template for flatten image
Template used to create subset name automatically (example `image{layer}Main` - uses layer name in subset name)
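As a rough illustration of how such a template could resolve - assuming plain Python `str.format()`-style placeholders, which is an assumption made for illustration rather than the plug-in's actual implementation:

```python
# Hedged sketch: resolving a flatten-image subset template.
# "Foreground" is a made-up layer name used only for illustration.
flatten_subset_template = "image{layer}Main"
subset_name = flatten_subset_template.format(layer="Foreground")
print(subset_name)  # imageForegroundMain
```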
### Collect Review
Disable if no review should be created
### Collect Version
If enabled, the version from the workfile name is pushed to all published items. E.g. if an artist publishes `test_asset_workfile_v005.psd`,
the produced `image` and `review` files will contain `v005` (even if some previous versions were skipped for a particular family).
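A minimal sketch of the idea - pulling the version out of a workfile name with a regular expression; the pattern below is illustrative only and not the plug-in's actual code:

```python
import re

def workfile_version(filename):
    """Return the numeric version parsed from a workfile name, or None."""
    match = re.search(r"_v(\d+)", filename)
    return int(match.group(1)) if match else None

print(workfile_version("test_asset_workfile_v005.psd"))  # 5
```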
### Validate Containers
Checks whether all assets imported into the workfile through the `Loader` are at their latest version. This limits cases where an older version of an asset would be used.
If enabled, artists might still decide to disable the validation for each publish (for special use cases).
Limit this optionality by toggling `Optional`.
The `Active` toggle denotes that artists see the optional validation as enabled by default.
### Validate naming of subsets and layers
A subset cannot contain invalid characters, otherwise extraction to a file would fail.
#### Regex pattern of invalid characters
Contains problematic characters such as `\` or `/`; these might cause an issue when a file (which contains the subset name) is created on the OS disk.
#### Replacement character
Replaces all offending characters with this one. `_` is the default.
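A minimal sketch of what this effectively does - replacing characters matched by the configured pattern; the pattern and names below are examples, not the plug-in's defaults:

```python
import re

invalid_chars = r"[/\\]"   # example "Regex pattern of invalid characters"
replacement = "_"          # default "Replacement character"
subset_name = "image/Beauty\\Main"

print(re.sub(invalid_chars, replacement, subset_name))  # image_Beauty_Main
```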
### Extract Image
Controls the extension formats of published instances of the `image` family. `png` and `jpg` are the defaults.
### Extract Review
Controls output definitions of extracted reviews to upload on Asset Management (AM).
#### Makes an image sequence instead of flatten image
If multiple `image` instances are produced, glues the created images into an image sequence (`mov`) to review all of them separately.
Without it, only a flattened image would be produced.
#### Maximum size of sources for review
Sets a byte limit for the review file. Applicable if gigantic `image` instances are produced and the full image size is unnecessary to upload to the AM.
#### Extract jpg Options
Handles tags for produced `.jpg` representation. `Create review` and `Add review to Ftrack` are defaults.
#### Extract mov Options
Handles tags for produced `.mov` representation. `Create review` and `Add review to Ftrack` are defaults.
### Workfile Builder
Allows opening a prepared workfile for an artist when no workfile exists yet. Useful to share standards or additional helpful content in the workfile.
Can be configured per `Task type`, e.g. the `composition` task type could use a different `.psd` template file than the `art` task.
The workfile template must be accessible to all artists.
(Currently not handled by [SiteSync](module_site_sync.md))

View file

@ -0,0 +1,107 @@
---
id: artist_hosts_substancepainter
title: Substance Painter
sidebar_label: Substance Painter
---
## OpenPype global tools
- [Work Files](artist_tools.md#workfiles)
- [Load](artist_tools.md#loader)
- [Manage (Inventory)](artist_tools.md#inventory)
- [Publish](artist_tools.md#publisher)
- [Library Loader](artist_tools.md#library-loader)
## Working with OpenPype in Substance Painter
The Substance Painter OpenPype integration allows you to:
- Set the project mesh and easily keep it in sync with updates of the model
- Easily export your textures as versioned publishes for others to load and update.
## Setting the project mesh
Substance Painter requires a project file to have a mesh path configured.
As such, you can't start a workfile without choosing a mesh path.
To start a new project using a published model you can _without an open project_
use OpenPype > Load.. > Load Mesh on a supported publish. This will show a
New Project prompt preset to that particular mesh file.
If you already have a project open, you can also replace (reload) your mesh
using the same Load Mesh functionality.
After having the project mesh loaded or reloaded through the loader
tool the mesh will be _managed_ by OpenPype. For example, you'll be notified
on workfile open whether the mesh in your workfile is outdated. You can also
set it to a specific version using OpenPype > Manage.. where you can right click
on the project mesh to perform _Set Version_.
:::info
A Substance Painter project will always have only one mesh set. Whenever you
trigger _Load Mesh_ from the loader this will **replace** your currently loaded
mesh for your open project.
:::
## Publishing textures
To publish your textures we must first create a `textureSet`
publish instance.
To create a **TextureSet instance** we will use OpenPype's publisher tool. Go
to **OpenPype → Publish... → TextureSet**
The texture set instance will define what Substance Painter export template (`.spexp`) to
use and thus defines what texture maps will be exported from your workfile. This
can be set with the **Output Template** attribute on the instance.
:::info
The TextureSet instance gets saved with your Substance Painter project. As such,
you will only need to configure this once for your workfile. Next time you can
just click **OpenPype → Publish...** and start publishing directly with the
same settings.
:::
#### Publish per output map of the Substance Painter preset
The Texture Set instance generates a publish per output map that is defined in
the Substance Painter's export preset. For example a publish from a default
PBR Metallic Roughness texture set results in six separate published subsets
(if all the channels exist in your file).
![Substance Painter PBR Metallic Roughness Export Preset](assets/substancepainter_pbrmetallicroughness_export_preset.png)
When publishing for example a texture set with variant **Main** six instances will
be published with the variants:
- Main.**BaseColor**
- Main.**Emissive**
- Main.**Height**
- Main.**Metallic**
- Main.**Normal**
- Main.**Roughness**
The bold output map name for the publish is based on the string that is pulled
from what is considered to be the static part of the filename template in
the export preset. Tokens like `$mesh` and `(_$colorSpace)` are ignored.
So `$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)` becomes `BaseColor`.
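A rough sketch of that derivation - stripping the optional groups and `$` tokens from the template to keep the static part; this is only an illustration and may differ from the integration's actual parsing:

```python
import re

def guess_output_map_name(template):
    # Drop optional groups such as (_$colorSpace) or (.$udim)
    static = re.sub(r"\([^)]*\)", "", template)
    # Drop remaining tokens such as $mesh or $textureSet
    static = re.sub(r"\$[A-Za-z]+", "", static)
    # Whatever is left, minus separators, is the static part
    return static.strip("_. ")

print(guess_output_map_name("$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)"))
# BaseColor
```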
An example output for PBR Metallic Roughness would be:
![Substance Painter PBR Metallic Roughness Publish Example in Loader](assets/substancepainter_pbrmetallicroughness_published.png)
## Known issues
#### Can't see the OpenPype menu?
If you're unable to see the OpenPype top level menu in Substance Painter make
sure you have launched Substance Painter through OpenPype and that the OpenPype
Integration plug-in is loaded inside Substance Painter: **Python > openpype_plugin**
#### Substance Painter + Steam
Running the Steam version of Substance Painter within OpenPype will require you
to close the Steam executable before launching Substance Painter through OpenPype.
Otherwise the Substance Painter process is launched using Steam's existing
environment and thus will not be able to pick up the pipeline integration.
This appears to be a limitation of how Steam works.

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.3 KiB

View file

@ -7,80 +7,112 @@ sidebar_label: Site Sync
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Site Sync allows users and studios to synchronize published assets between
multiple 'sites'. Site denotes a storage location,
which could be a physical disk, server, cloud storage. To be able to use site
sync, it first needs to be configured.
:::warning
**This feature is** currently **in a beta stage** and it is not recommended to rely on it fully for production.
:::
Site Sync allows users and studios to synchronize published assets between multiple 'sites'. Site denotes a storage location,
which could be a physical disk, server, cloud storage. To be able to use site sync, it first needs to be configured.
The general idea is that each user acts as an individual site and can download and upload any published project files when they are needed. that way, artist can have access to the whole project, but only every store files that are relevant to them on their home workstation.
The general idea is that each user acts as an individual site and can download
and upload any published project files when they are needed. That way, artists
can have access to the whole project, but only ever store the files that are
relevant to them on their home workstation.
:::note
At the moment site sync is only able to deal with publishes files. No workfiles will be synchronized unless they are published. We are working on making workfile synchronization possible as well.
At the moment site sync is only able to deal with published files. No workfiles
will be synchronized unless they are published. We are working on making
workfile synchronization possible as well.
:::
## System Settings
To use synchronization, *Site Sync* needs to be enabled globally in **OpenPype Settings/System/Modules/Site Sync**.
To use synchronization, *Site Sync* needs to be enabled globally in **OpenPype
Settings/System/Modules/Site Sync**.
![Configure module](assets/site_sync_system.png)
### Sites
### Sites
By default there are two sites created for each OpenPype installation:
- **studio** - default site - usually a centralized mounted disk accessible to all artists. Studio site is used if Site Sync is disabled.
- **local** - each workstation or server running OpenPype Tray receives its own with unique site name. Workstation refers to itself as "local"however all other sites will see it under it's unique ID.
Artists can explore their site ID by opening OpenPype Info tool by clicking on a version number in the tray app.
- **studio** - default site - usually a centralized mounted disk accessible to
all artists. Studio site is used if Site Sync is disabled.
- **local** - each workstation or server running OpenPype Tray receives its own
site with a unique name. A workstation refers to itself as "local", however all
other sites will see it under its unique ID.
Many different sites can be created and configured on the system level, and some or all can be assigned to each project.
Artists can explore their site ID by opening OpenPype Info tool by clicking on
a version number in the tray app.
Each OpenPype Tray app works with two sites at one time. (Sites can be the same, and no syncing is done in this setup).
Many different sites can be created and configured on the system level, and
some or all can be assigned to each project.
Sites could be configured differently per project basis.
Each OpenPype Tray app works with two sites at one time. (Sites can be the
same, and no syncing is done in this setup).
Each new site needs to be created first in `System Settings`. Most important feature of site is its Provider, select one from already prepared Providers.
Sites can be configured differently on a per-project basis.
#### Alternative sites
Each new site needs to be created first in `System Settings`. Most important
feature of site is its Provider, select one from already prepared Providers.
#### Alternative sites
This attribute is meant for special use cases only.
One of the use cases is sftp site vendoring (exposing) same data as regular site (studio). Each site is accessible for different audience. 'studio' for artists in a studio via shared disk, 'sftp' for externals via sftp server with mounted 'studio' drive.
One of the use cases is sftp site vendoring (exposing) same data as regular
site (studio). Each site is accessible for different audience. 'studio' for
artists in a studio via shared disk, 'sftp' for externals via sftp server with
mounted 'studio' drive.
Change of file status on one site actually means same change on 'alternate' site occurred too. (eg. artists publish to 'studio', 'sftp' is using
same location >> file is accessible on 'sftp' site right away, no need to sync it anyhow.)
Change of file status on one site actually means same change on 'alternate'
site occurred too. (eg. artists publish to 'studio', 'sftp' is using
same location >> file is accessible on 'sftp' site right away, no need to sync
it anyhow.)
##### Example
![Configure module](assets/site_sync_system_sites.png)
Admin created new `sftp` site which is handled by `SFTP` provider. Somewhere in the studio SFTP server is deployed on a machine that has access to `studio` drive.
Admin created new `sftp` site which is handled by `SFTP` provider. Somewhere in
the studio SFTP server is deployed on a machine that has access to `studio`
drive.
Alternative sites work both way:
- everything published to `studio` is accessible on a `sftp` site too
- everything published to `sftp` (most probably via artist's local disk - artists publishes locally, representation is marked to be synced to `sftp`. Immediately after it is synced, it is marked to be available on `studio` too for artists in the studio to use.)
- everything published to `sftp` (most probably via artist's local disk -
artists publishes locally, representation is marked to be synced to `sftp`.
Immediately after it is synced, it is marked to be available on `studio` too
for artists in the studio to use.)
## Project Settings
Sites need to be made available for each project. Of course this is possible to do on the default project as well, in which case all other projects will inherit these settings until overridden explicitly.
Sites need to be made available for each project. Of course this is possible to
do on the default project as well, in which case all other projects will
inherit these settings until overridden explicitly.
You'll find the setting in **Settings/Project/Global/Site Sync**
The attributes that can be configured will vary between sites and their providers.
The attributes that can be configured will vary between sites and their
providers.
## Local settings
Each user should configure root folder for their 'local' site via **Local Settings** in OpenPype Tray. This folder will be used for all files that the user publishes or downloads while working on a project. Artist has the option to set the folder as "default"in which case it is used for all the projects, or it can be set on a project level individually.
Each user should configure root folder for their 'local' site via **Local
Settings** in OpenPype Tray. This folder will be used for all files that the
user publishes or downloads while working on a project. Artists have the option
to set the folder as "default", in which case it is used for all the projects, or
it can be set on a project level individually.
Artists can also override which site they use as active and remote if need be.
Artists can also override which site they use as active and remote if need be.
![Local overrides](assets/site_sync_local_setting.png)
## Providers
Each site implements a so called `provider` which handles most common operations (list files, copy files etc.) and provides interface with a particular type of storage. (disk, gdrive, aws, etc.)
Multiple configured sites could share the same provider with different settings (multiple mounted disks - each disk can be a separate site, while
Each site implements a so called `provider` which handles most common
operations (list files, copy files etc.) and provides interface with a
particular type of storage. (disk, gdrive, aws, etc.)
Multiple configured sites could share the same provider with different
settings (multiple mounted disks - each disk can be a separate site, while
all share the same provider).
**Currently implemented providers:**
@ -89,21 +121,30 @@ all share the same provider).
Handles files stored on disk storage.
Local drive provider is the most basic one that is used for accessing all standard hard disk storage scenarios. It will work with any storage that can be mounted on your system in a standard way. This could correspond to a physical external hard drive, network mounted storage, internal drive or even VPN connected network drive. It doesn't care about how the drive is mounted, but you must be able to point to it with a simple directory path.
Local drive provider is the most basic one that is used for accessing all
standard hard disk storage scenarios. It will work with any storage that can be
mounted on your system in a standard way. This could correspond to a physical
external hard drive, network mounted storage, internal drive or even VPN
connected network drive. It doesn't care about how the drive is mounted, but
you must be able to point to it with a simple directory path.
Default sites `local` and `studio` both use local drive provider.
### Google Drive
Handles files on Google Drive (this). GDrive is provided as a production example for implementing other cloud providers
Handles files on Google Drive (this). GDrive is provided as a production
example for implementing other cloud providers
Let's imagine a small globally distributed studio which wants all published work for all their freelancers uploaded to Google Drive folder.
Let's imagine a small globally distributed studio which wants all published
work for all their freelancers uploaded to Google Drive folder.
For this use case admin needs to configure:
- how many times it tries to synchronize file in case of some issue (network, permissions)
- how many times it tries to synchronize file in case of some issue (network,
permissions)
- how often should synchronization check for new assets
- sites for synchronization - 'local' and 'gdrive' (this can be overridden in local settings)
- sites for synchronization - 'local' and 'gdrive' (this can be overridden in
local settings)
- user credentials
- root folder location on Google Drive side
@ -111,30 +152,43 @@ Configuration would look like this:
![Configure project](assets/site_sync_project_settings.png)
*Site Sync* for Google Drive works using its API: https://developers.google.com/drive/api/v3/about-sdk
*Site Sync* for Google Drive works using its
API: https://developers.google.com/drive/api/v3/about-sdk
To configure Google Drive side you would need to have access to Google Cloud Platform project: https://console.cloud.google.com/
To configure Google Drive side you would need to have access to Google Cloud
Platform project: https://console.cloud.google.com/
To get working connection to Google Drive there are some necessary steps:
- first you need to enable GDrive API: https://developers.google.com/drive/api/v3/enable-drive-api
- next you need to create user, choose **Service Account** (for basic configuration no roles for account are necessary)
- first you need to enable GDrive
API: https://developers.google.com/drive/api/v3/enable-drive-api
- next you need to create user, choose **Service Account** (for basic
configuration no roles for account are necessary)
- add new key for created account and download .json file with credentials
- share destination folder on the Google Drive with created account (directly in GDrive web application)
- add new site back in OpenPype Settings, name as you want, provider needs to be 'gdrive'
- share destination folder on the Google Drive with created account (directly
in GDrive web application)
- add new site back in OpenPype Settings, name as you want, provider needs to
be 'gdrive'
- distribute credentials file via shared mounted disk location
:::note
If you are using regular personal GDrive for testing don't forget adding `/My Drive` as the prefix in root configuration. Business accounts and share drives don't need this.
If you are using regular personal GDrive for testing don't forget
adding `/My Drive` as the prefix in root configuration. Business accounts and
share drives don't need this.
:::
### SFTP
SFTP provider is used to connect to SFTP server. Currently authentication with `user:password` or `user:ssh key` is implemented.
Please provide only one combination, don't forget to provide password for ssh key if ssh key was created with a passphrase.
SFTP provider is used to connect to SFTP server. Currently authentication
with `user:password` or `user:ssh key` is implemented.
Please provide only one combination, don't forget to provide password for ssh
key if ssh key was created with a passphrase.
(SFTP connection could be a bit finicky, use FileZilla or WinSCP for testing connection, it will be mush faster.)
(SFTP connection could be a bit finicky, use FileZilla or WinSCP for testing
connection, it will be much faster.)
Beware that ssh key expects OpenSSH format (`.pem`) not a Putty format (`.ppk`)!
Beware that ssh key expects OpenSSH format (`.pem`) not a Putty
format (`.ppk`)!
#### How to set SFTP site
@ -143,60 +197,101 @@ Beware that ssh key expects OpenSSH format (`.pem`) not a Putty format (`.ppk`)!
![Enable syncing and create site](assets/site_sync_sftp_system.png)
- In Projects setting enable Site Sync (on default project - all project will be synched, or on specific project)
- Configure SFTP connection and destination folder on a SFTP server (in screenshot `/upload`)
- In Projects settings enable Site Sync (on the default project - all projects will
be synced, or on a specific project)
- Configure SFTP connection and destination folder on a SFTP server (in
screenshot `/upload`)
![SFTP connection](assets/site_sync_project_sftp_settings.png)
- if you want to force syncing between local and sftp site for all users, use combination `active site: local`, `remote site: NAME_OF_SFTP_SITE`
- if you want to allow only specific users to use SFTP syncing (external users, not located in the office), use `active site: studio`, `remote site: studio`.
- if you want to force syncing between local and sftp site for all users, use
combination `active site: local`, `remote site: NAME_OF_SFTP_SITE`
- if you want to allow only specific users to use SFTP syncing (external users,
not located in the office), use `active site: studio`, `remote site: studio`.
![Select active and remote site on a project](assets/site_sync_sftp_project_setting_not_forced.png)
- Each artist can decide and configure syncing from his/her local to SFTP via `Local Settings`
- Each artist can decide and configure syncing from his/her local to SFTP
via `Local Settings`
![Select active and remote site on a project](assets/site_sync_sftp_settings_local.png)
### Custom providers
If a studio needs to use other services for cloud storage, or want to implement totally different storage providers, they can do so by writing their own provider plugin. We're working on a developer documentation, however, for now we recommend looking at `abstract_provider.py`and `gdrive.py` inside `openpype/modules/sync_server/providers` and using it as a template.
If a studio needs to use other services for cloud storage, or wants to implement
totally different storage providers, it can do so by writing its own
provider plugin. We're working on developer documentation; for now
we recommend looking at `abstract_provider.py` and `gdrive.py`
inside `openpype/modules/sync_server/providers` and using them as a template.
### Running Site Sync in background
Site Sync server synchronizes new published files from artist machine into configured remote location by default.
Site Sync server synchronizes new published files from artist machine into
configured remote location by default.
There might be a use case where you need to synchronize between "non-artist" sites, for example between studio site and cloud. In this case
you need to run Site Sync as a background process from a command line (via service etc) 24/7.
There might be a use case where you need to synchronize between "non-artist"
sites, for example between studio site and cloud. In this case
you need to run Site Sync as a background process from a command line (via
service etc) 24/7.
To configure all sites where all published files should be synced eventually you need to configure `project_settings/global/sync_server/config/always_accessible_on` property in Settings (per project) first.
To configure all sites where all published files should be synced eventually
you need to
configure `project_settings/global/sync_server/config/always_accessible_on`
property in Settings (per project) first.
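For a background process it can be handy to read that value programmatically. A minimal sketch using `get_project_settings` (the exact nesting of the keys is an assumption that mirrors the settings path above, and the project name is a placeholder):

```python
from openpype.settings.lib import get_project_settings

project_settings = get_project_settings("my_project")  # placeholder project name
always_accessible_on = (
    project_settings["global"]["sync_server"]["config"]["always_accessible_on"]
)
print(always_accessible_on)  # e.g. ["sftp"]
```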
![Set another non artist remote site](assets/site_sync_always_on.png)
This is an example of:
- Site Sync is enabled for a project
- default active and remote sites are set to `studio` - eg. standard process: everyone is working in a studio, publishing to shared location etc.
- (but this also allows any of the artists to work remotely, they would change their active site in their own Local Settings to `local` and configure local root.
This would result in everything artist publishes is saved first onto his local folder AND synchronized to `studio` site eventually.)
- default active and remote sites are set to `studio` - eg. standard process:
everyone is working in a studio, publishing to shared location etc.
- (but this also allows any of the artists to work remotely, they would change
their active site in their own Local Settings to `local` and configure local
root.
This would result in everything the artist publishes being saved first onto their
local folder AND synchronized to the `studio` site eventually.)
- everything exported must also be eventually uploaded to `sftp` site
This eventual synchronization between `studio` and `sftp` sites must be physically handled by background process.
This eventual synchronization between `studio` and `sftp` sites must be
physically handled by background process.
As current implementation relies heavily on Settings and Local Settings, background process for a specific site ('studio' for example) must be configured via Tray first to `syncserver` command to work.
As the current implementation relies heavily on Settings and Local Settings, the
background process for a specific site ('studio' for example) must be
configured via Tray first for the `syncserver` command to work.
To do this:
- run OP `Tray` with environment variable OPENPYPE_LOCAL_ID set to name of active (source) site. In most use cases it would be studio (for cases of backups of everything published to studio site to different cloud site etc.)
- run OP `Tray` with environment variable OPENPYPE_LOCAL_ID set to name of
active (source) site. In most use cases it would be studio (for cases of
backups of everything published to studio site to different cloud site etc.)
- start `Tray`
- check `Local ID` in information dialog after clicking on version number in the Tray
- check `Local ID` in information dialog after clicking on version number in
the Tray
- open `Local Settings` in the `Tray`
- configure for each project necessary active site and remote site
- close `Tray`
- run OP from a command line with `syncserver` and `--active_site` arguments
This is an example how to trigger background syncing process where active (source) site is `studio`.
(It is expected that OP is installed on a machine, `openpype_console` is on PATH. If not, add full path to executable.
This is an example of how to trigger the background syncing process where the
active (source) site is `studio`.
(It is expected that OP is installed on the machine and `openpype_console` is on
PATH. If not, add the full path to the executable.
)
```shell
openpype_console syncserver --active_site studio
```
```
### Syncing of last published workfile
Some DCCs might have
`project_settings/global/tools/Workfiles/last_workfile_on_startup` enabled, i.e.
the DCC opens with the last opened workfile.
The `use_last_published_workfile` flag tells OpenPype that the last published
workfile should be used if no workfile is present locally.
This can happen when an artist starts working on a new task locally and
doesn't have any workfile present yet. In that case the last published workfile
will be synchronized locally and its version bumped by 1 (as the workfile's
version is always +1 from the published version).
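Under the hood this is handled by the `download_last_published_workfile` helper
in the Site Sync module. A minimal sketch of calling it directly - the
representation dict and all names below are placeholders for illustration only:

```python
from openpype.modules.sync_server.sync_server import (
    download_last_published_workfile,
)

# Placeholder representation document; in practice this comes from the database.
workfile_representation = {"_id": "<representation id>", "context": {}, "files": []}

path = download_last_published_workfile(
    host_name="maya",            # placeholder host
    project_name="my_project",   # placeholder project
    task_name="modeling",        # placeholder task
    workfile_representation=workfile_representation,
    max_retries=3,
)
if path:
    print("Last published workfile localized to:", path)
```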

View file

@ -126,6 +126,7 @@ module.exports = {
"admin_hosts_nuke",
"admin_hosts_resolve",
"admin_hosts_harmony",
"admin_hosts_photoshop",
"admin_hosts_aftereffects",
"admin_hosts_tvpaint"
],