mirror of
https://github.com/ynput/ayon-core.git
synced 2026-01-01 16:34:53 +01:00
Merge branch 'feature/validation_exceptions' into feature/OP-2765_AE-to-new-publisher
This commit is contained in:
commit
12b69bf840
68 changed files with 1307 additions and 338 deletions
|
|
@ -0,0 +1,21 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Subset context</title>
|
||||
<description>
|
||||
## Invalid subset context
|
||||
|
||||
Context of the given subset doesn't match your current scene.
|
||||
|
||||
### How to repair?
|
||||
|
||||
You can fix this with "repair" button on the right.
|
||||
</description>
|
||||
<detail>
|
||||
### __Detailed Info__ (optional)
|
||||
|
||||
This might happen if you are reuse old workfile and open it in different context.
|
||||
(Eg. you created subset "renderCompositingDefault" from asset "Robot' in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but existing subset for "Robot" asset stayed in the workfile.)
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Scene setting</title>
|
||||
<description>
|
||||
## Invalid scene setting found
|
||||
|
||||
One of the settings in a scene doesn't match to asset settings in database.
|
||||
|
||||
{invalid_setting_str}
|
||||
|
||||
### How to repair?
|
||||
|
||||
Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
|
||||
</description>
|
||||
<detail>
|
||||
### __Detailed Info__ (optional)
|
||||
|
||||
This error is shown when for example resolution in the scene doesn't match to resolution set on the asset in the database.
|
||||
Either value in the database or in the scene is wrong.
|
||||
</detail>
|
||||
</error>
|
||||
<error id="file_not_found">
|
||||
<title>Scene file doesn't exist</title>
|
||||
<description>
|
||||
## Scene file doesn't exist
|
||||
|
||||
Collected scene {scene_url} doesn't exist.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Re-save file, start publish from the beginning again.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -1,6 +1,7 @@
|
|||
from avalon import api
|
||||
import pyblish.api
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.hosts.aftereffects.api import get_stub
|
||||
|
||||
|
||||
|
|
@ -53,9 +54,8 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin):
|
|||
current_asset = api.Session["AVALON_ASSET"]
|
||||
msg = (
|
||||
f"Instance asset {instance_asset} is not the same "
|
||||
f"as current context {current_asset}. PLEASE DO:\n"
|
||||
f"Repair with 'A' action to use '{current_asset}'.\n"
|
||||
f"If that's not correct value, close workfile and "
|
||||
f"reopen via Workfiles!"
|
||||
f"as current context {current_asset}."
|
||||
)
|
||||
assert instance_asset == current_asset, msg
|
||||
|
||||
if instance_asset != current_asset:
|
||||
raise PublishXmlValidationError(self, msg)
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import re
|
|||
|
||||
import pyblish.api
|
||||
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.hosts.aftereffects.api import get_asset_settings
|
||||
|
||||
|
||||
|
|
@ -99,12 +100,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
|
|||
self.log.info("current_settings:: {}".format(current_settings))
|
||||
|
||||
invalid_settings = []
|
||||
invalid_keys = set()
|
||||
for key, value in expected_settings.items():
|
||||
if value != current_settings[key]:
|
||||
invalid_settings.append(
|
||||
"{} expected: {} found: {}".format(key, value,
|
||||
current_settings[key])
|
||||
)
|
||||
invalid_keys.add(key)
|
||||
|
||||
if ((expected_settings.get("handleStart")
|
||||
or expected_settings.get("handleEnd"))
|
||||
|
|
@ -116,7 +119,27 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
|
|||
msg = "Found invalid settings:\n{}".format(
|
||||
"\n".join(invalid_settings)
|
||||
)
|
||||
assert not invalid_settings, msg
|
||||
assert os.path.exists(instance.data.get("source")), (
|
||||
"Scene file not found (saved under wrong name)"
|
||||
)
|
||||
|
||||
if invalid_settings:
|
||||
invalid_keys_str = ",".join(invalid_keys)
|
||||
break_str = "<br/>"
|
||||
invalid_setting_str = "<b>Found invalid settings:</b><br/>{}".\
|
||||
format(break_str.join(invalid_settings))
|
||||
|
||||
formatting_data = {
|
||||
"invalid_setting_str": invalid_setting_str,
|
||||
"invalid_keys_str": invalid_keys_str
|
||||
}
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
||||
if not os.path.exists(instance.data.get("source")):
|
||||
scene_url = instance.data.get("source")
|
||||
msg = "Scene file {} not found (saved under wrong name)".format(
|
||||
scene_url
|
||||
)
|
||||
formatting_data = {
|
||||
"scene_url": scene_url
|
||||
}
|
||||
raise PublishXmlValidationError(self, msg, key="file_not_found",
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,15 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Missing audio file</title>
|
||||
<description>
|
||||
## Cannot locate linked audio file
|
||||
|
||||
Audio file at {audio_url} cannot be found.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Copy audio file to the highlighted location or remove audio link in the workfile.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Subset context</title>
|
||||
<description>
|
||||
## Invalid subset context
|
||||
|
||||
Asset name found '{found}' in subsets, expected '{expected}'.
|
||||
|
||||
### How to repair?
|
||||
|
||||
You can fix this with `Repair` button on the right. This will use '{expected}' asset name and overwrite '{found}' asset name in scene metadata.
|
||||
|
||||
After that restart `Publish` with a `Reload button`.
|
||||
|
||||
If this is unwanted, close workfile and open again, that way different asset value would be used for context information.
|
||||
</description>
|
||||
<detail>
|
||||
### __Detailed Info__ (optional)
|
||||
|
||||
This might happen if you are reuse old workfile and open it in different context.
|
||||
(Eg. you created subset "renderCompositingDefault" from asset "Robot' in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but existing subset for "Robot" asset stayed in the workfile.)
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Scene setting</title>
|
||||
<description>
|
||||
## Invalid scene setting found
|
||||
|
||||
One of the settings in a scene doesn't match to asset settings in database.
|
||||
|
||||
{invalid_setting_str}
|
||||
|
||||
### How to repair?
|
||||
|
||||
Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
|
||||
</description>
|
||||
<detail>
|
||||
### __Detailed Info__ (optional)
|
||||
|
||||
This error is shown when for example resolution in the scene doesn't match to resolution set on the asset in the database.
|
||||
Either value in the database or in the scene is wrong.
|
||||
</detail>
|
||||
</error>
|
||||
<error id="file_not_found">
|
||||
<title>Scene file doesn't exist</title>
|
||||
<description>
|
||||
## Scene file doesn't exist
|
||||
|
||||
Collected scene {scene_url} doesn't exist.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Re-save file, start publish from the beginning again.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -4,6 +4,8 @@ import pyblish.api
|
|||
|
||||
import openpype.hosts.harmony.api as harmony
|
||||
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateAudio(pyblish.api.InstancePlugin):
|
||||
"""Ensures that there is an audio file in the scene.
|
||||
|
|
@ -42,4 +44,9 @@ class ValidateAudio(pyblish.api.InstancePlugin):
|
|||
|
||||
msg = "You are missing audio file:\n{}".format(audio_path)
|
||||
|
||||
assert os.path.isfile(audio_path), msg
|
||||
formatting_data = {
|
||||
"audio_url": audio_path
|
||||
}
|
||||
if os.path.isfile(audio_path):
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,9 @@
|
|||
import os
|
||||
|
||||
from avalon import harmony
|
||||
import pyblish.api
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
import openpype.hosts.harmony.api as harmony
|
||||
|
||||
|
||||
|
|
@ -45,4 +47,11 @@ class ValidateInstance(pyblish.api.InstancePlugin):
|
|||
"Instance asset is not the same as current asset:"
|
||||
f"\nInstance: {instance_asset}\nCurrent: {current_asset}"
|
||||
)
|
||||
assert instance_asset == current_asset, msg
|
||||
|
||||
formatting_data = {
|
||||
"found": instance_asset,
|
||||
"expected": current_asset
|
||||
}
|
||||
if instance_asset != current_asset:
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import re
|
|||
import pyblish.api
|
||||
|
||||
import openpype.hosts.harmony.api as harmony
|
||||
import openpype.hosts.harmony
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateSceneSettingsRepair(pyblish.api.Action):
|
||||
|
|
@ -19,12 +19,12 @@ class ValidateSceneSettingsRepair(pyblish.api.Action):
|
|||
|
||||
def process(self, context, plugin):
|
||||
"""Repair action entry point."""
|
||||
expected = openpype.hosts.harmony.api.get_asset_settings()
|
||||
expected = harmony.get_asset_settings()
|
||||
asset_settings = _update_frames(dict.copy(expected))
|
||||
asset_settings["frameStart"] = 1
|
||||
asset_settings["frameEnd"] = asset_settings["frameEnd"] + \
|
||||
asset_settings["handleEnd"]
|
||||
openpype.hosts.harmony.api.set_scene_settings(asset_settings)
|
||||
harmony.set_scene_settings(asset_settings)
|
||||
if not os.path.exists(context.data["scenePath"]):
|
||||
self.log.info("correcting scene name")
|
||||
scene_dir = os.path.dirname(context.data["currentFile"])
|
||||
|
|
@ -55,7 +55,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
|
|||
|
||||
def process(self, instance):
|
||||
"""Plugin entry point."""
|
||||
expected_settings = openpype.hosts.harmony.api.get_asset_settings()
|
||||
expected_settings = harmony.get_asset_settings()
|
||||
self.log.info("scene settings from DB:".format(expected_settings))
|
||||
|
||||
expected_settings = _update_frames(dict.copy(expected_settings))
|
||||
|
|
@ -102,6 +102,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
|
|||
self.log.debug("current scene settings {}".format(current_settings))
|
||||
|
||||
invalid_settings = []
|
||||
invalid_keys = set()
|
||||
for key, value in expected_settings.items():
|
||||
if value != current_settings[key]:
|
||||
invalid_settings.append({
|
||||
|
|
@ -109,6 +110,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
|
|||
"expected": value,
|
||||
"current": current_settings[key]
|
||||
})
|
||||
invalid_keys.add(key)
|
||||
|
||||
if ((expected_settings["handleStart"]
|
||||
or expected_settings["handleEnd"])
|
||||
|
|
@ -120,10 +122,30 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
|
|||
msg = "Found invalid settings:\n{}".format(
|
||||
json.dumps(invalid_settings, sort_keys=True, indent=4)
|
||||
)
|
||||
assert not invalid_settings, msg
|
||||
assert os.path.exists(instance.context.data.get("scenePath")), (
|
||||
"Scene file not found (saved under wrong name)"
|
||||
)
|
||||
|
||||
if invalid_settings:
|
||||
invalid_keys_str = ",".join(invalid_keys)
|
||||
break_str = "<br/>"
|
||||
invalid_setting_str = "<b>Found invalid settings:</b><br/>{}".\
|
||||
format(break_str.join(invalid_settings))
|
||||
|
||||
formatting_data = {
|
||||
"invalid_setting_str": invalid_setting_str,
|
||||
"invalid_keys_str": invalid_keys_str
|
||||
}
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
||||
scene_url = instance.context.data.get("scenePath")
|
||||
if not os.path.exists(scene_url):
|
||||
msg = "Scene file {} not found (saved under wrong name)".format(
|
||||
scene_url
|
||||
)
|
||||
formatting_data = {
|
||||
"scene_url": scene_url
|
||||
}
|
||||
raise PublishXmlValidationError(self, msg, key="file_not_found",
|
||||
formatting_data=formatting_data)
|
||||
|
||||
|
||||
def _update_frames(expected_settings):
|
||||
|
|
|
|||
|
|
@ -0,0 +1,15 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<warning id="main">
|
||||
<title>Primitive to Detail</title>
|
||||
<description>## Invalid Primitive to Detail Attributes
|
||||
|
||||
Primitives with inconsistent primitive to detail attributes were found.
|
||||
|
||||
{message}
|
||||
|
||||
</description>
|
||||
<detail>
|
||||
</detail>
|
||||
</warning>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<warning id="main">
|
||||
<title>Alembic ROP Face Sets</title>
|
||||
<description>## Invalid Alembic ROP Face Sets
|
||||
|
||||
When groups are saved as Face Sets with the Alembic these show up
|
||||
as shadingEngine connections in Maya - however, with animated groups
|
||||
these connections in Maya won't work as expected, it won't update per
|
||||
frame. Additionally, it can break shader assignments in some cases
|
||||
where it requires to first break this connection to allow a shader to
|
||||
be assigned.
|
||||
|
||||
It is allowed to include Face Sets, so only an issue is logged to
|
||||
identify that it could introduce issues down the pipeline.
|
||||
|
||||
|
||||
</description>
|
||||
<detail>
|
||||
</detail>
|
||||
</warning>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Alembic input</title>
|
||||
<description>## Invalid Alembic input
|
||||
|
||||
The node connected to the output is incorrect.
|
||||
It contains primitive types that are not supported for alembic output.
|
||||
|
||||
Problematic primitive is of type {primitive_type}
|
||||
|
||||
|
||||
</description>
|
||||
<detail>
|
||||
|
||||
The connected node cannot be of the following types for Alembic:
|
||||
- VDB
|
||||
- Volume
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Output frame token</title>
|
||||
<description>## Output path is missing frame token
|
||||
|
||||
This validator will check the output parameter of the node if
|
||||
the Valid Frame Range is not set to 'Render Current Frame'
|
||||
|
||||
No frame token found in: **{nodepath}**
|
||||
|
||||
### How to repair?
|
||||
|
||||
You need to add `$F4` or similar frame based token to your path.
|
||||
|
||||
**Example:**
|
||||
Good: 'my_vbd_cache.$F4.vdb'
|
||||
Bad: 'my_vbd_cache.vdb'
|
||||
|
||||
</description>
|
||||
<detail>
|
||||
|
||||
If you render out a frame range it is mandatory to have the
|
||||
frame token - '$F4' or similar - to ensure that each frame gets
|
||||
written. If this is not the case you will override the same file
|
||||
every time a frame is written out.
|
||||
|
||||
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>VDB output node</title>
|
||||
<description>## Invalid VDB output nodes
|
||||
|
||||
Validate that the node connected to the output node is of type VDB.
|
||||
|
||||
Regardless of the amount of VDBs created the output will need to have an
|
||||
equal amount of VDBs, points, primitives and vertices
|
||||
|
||||
A VDB is an inherited type of Prim, holds the following data:
|
||||
|
||||
- Primitives: 1
|
||||
- Points: 1
|
||||
- Vertices: 1
|
||||
- VDBs: 1
|
||||
|
||||
</description>
|
||||
<detail>
|
||||
</detail>
|
||||
</error>
|
||||
|
||||
<error id="noSOP">
|
||||
<title>No SOP path</title>
|
||||
<description>## No SOP Path in output node
|
||||
|
||||
SOP Output node in '{node}' does not exist. Ensure a valid SOP output path is set.
|
||||
|
||||
</description>
|
||||
<detail>
|
||||
</detail>
|
||||
</error>
|
||||
|
||||
<error id="wrongSOP">
|
||||
<title>Wrong SOP path</title>
|
||||
<description>## Wrong SOP Path in output node
|
||||
|
||||
Output node {nodepath} is not a SOP node.
|
||||
SOP Path must point to a SOP node,
|
||||
instead found category type: {categoryname}
|
||||
|
||||
</description>
|
||||
<detail>
|
||||
</detail>
|
||||
</error>
|
||||
|
||||
</root>
|
||||
|
|
@ -1,47 +0,0 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
|
||||
|
||||
class ValidateVDBInputNode(pyblish.api.InstancePlugin):
|
||||
"""Validate that the node connected to the output node is of type VDB.
|
||||
|
||||
Regardless of the amount of VDBs create the output will need to have an
|
||||
equal amount of VDBs, points, primitives and vertices
|
||||
|
||||
A VDB is an inherited type of Prim, holds the following data:
|
||||
- Primitives: 1
|
||||
- Points: 1
|
||||
- Vertices: 1
|
||||
- VDBs: 1
|
||||
|
||||
"""
|
||||
|
||||
order = openpype.api.ValidateContentsOrder + 0.1
|
||||
families = ["vdbcache"]
|
||||
hosts = ["houdini"]
|
||||
label = "Validate Input Node (VDB)"
|
||||
|
||||
def process(self, instance):
|
||||
invalid = self.get_invalid(instance)
|
||||
if invalid:
|
||||
raise RuntimeError(
|
||||
"Node connected to the output node is not" "of type VDB!"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance):
|
||||
|
||||
node = instance.data["output_node"]
|
||||
|
||||
prims = node.geometry().prims()
|
||||
nr_of_prims = len(prims)
|
||||
|
||||
nr_of_points = len(node.geometry().points())
|
||||
if nr_of_points != nr_of_prims:
|
||||
cls.log.error("The number of primitives and points do not match")
|
||||
return [instance]
|
||||
|
||||
for prim in prims:
|
||||
if prim.numVertices() != 1:
|
||||
cls.log.error("Found primitive with more than 1 vertex!")
|
||||
return [instance]
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
import pyblish.api
|
||||
|
||||
from openpype.hosts.houdini.api import lib
|
||||
|
||||
|
||||
class ValidateAnimationSettings(pyblish.api.InstancePlugin):
|
||||
"""Validate if the unexpanded string contains the frame ('$F') token
|
||||
|
||||
This validator will only check the output parameter of the node if
|
||||
the Valid Frame Range is not set to 'Render Current Frame'
|
||||
|
||||
Rules:
|
||||
If you render out a frame range it is mandatory to have the
|
||||
frame token - '$F4' or similar - to ensure that each frame gets
|
||||
written. If this is not the case you will override the same file
|
||||
every time a frame is written out.
|
||||
|
||||
Examples:
|
||||
Good: 'my_vbd_cache.$F4.vdb'
|
||||
Bad: 'my_vbd_cache.vdb'
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
label = "Validate Frame Settings"
|
||||
families = ["vdbcache"]
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
invalid = self.get_invalid(instance)
|
||||
if invalid:
|
||||
raise RuntimeError(
|
||||
"Output settings do no match for '%s'" % instance
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance):
|
||||
|
||||
node = instance[0]
|
||||
|
||||
# Check trange parm, 0 means Render Current Frame
|
||||
frame_range = node.evalParm("trange")
|
||||
if frame_range == 0:
|
||||
return []
|
||||
|
||||
output_parm = lib.get_output_parameter(node)
|
||||
unexpanded_str = output_parm.unexpandedString()
|
||||
|
||||
if "$F" not in unexpanded_str:
|
||||
cls.log.error("No frame token found in '%s'" % node.path())
|
||||
return [instance]
|
||||
|
|
@ -1,12 +1,12 @@
|
|||
import pyblish.api
|
||||
|
||||
from openpype.hosts.houdini.api import lib
|
||||
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
class ValidateFrameToken(pyblish.api.InstancePlugin):
|
||||
"""Validate if the unexpanded string contains the frame ('$F') token.
|
||||
"""Validate if the unexpanded string contains the frame ('$F') token
|
||||
|
||||
This validator will *only* check the output parameter of the node if
|
||||
This validator will only check the output parameter of the node if
|
||||
the Valid Frame Range is not set to 'Render Current Frame'
|
||||
|
||||
Rules:
|
||||
|
|
@ -28,9 +28,14 @@ class ValidateFrameToken(pyblish.api.InstancePlugin):
|
|||
def process(self, instance):
|
||||
|
||||
invalid = self.get_invalid(instance)
|
||||
data = {
|
||||
"nodepath": instance
|
||||
}
|
||||
if invalid:
|
||||
raise RuntimeError(
|
||||
"Output settings do no match for '%s'" % instance
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"Output path for '%s' is missing $F4 token" % instance,
|
||||
formatting_data=data
|
||||
)
|
||||
|
||||
@classmethod
|
||||
|
|
@ -47,5 +52,5 @@ class ValidateFrameToken(pyblish.api.InstancePlugin):
|
|||
unexpanded_str = output_parm.unexpandedString()
|
||||
|
||||
if "$F" not in unexpanded_str:
|
||||
cls.log.error("No frame token found in '%s'" % node.path())
|
||||
# cls.log.info("No frame token found in '%s'" % node.path())
|
||||
return [instance]
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
|
|||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
families = ["pointcache", "vdbcache"]
|
||||
families = ["pointcache"]
|
||||
hosts = ["houdini"]
|
||||
label = "Validate Output Node"
|
||||
|
||||
|
|
|
|||
|
|
@ -1,47 +0,0 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
|
||||
|
||||
class ValidateVDBInputNode(pyblish.api.InstancePlugin):
|
||||
"""Validate that the node connected to the output node is of type VDB.
|
||||
|
||||
Regardless of the amount of VDBs create the output will need to have an
|
||||
equal amount of VDBs, points, primitives and vertices
|
||||
|
||||
A VDB is an inherited type of Prim, holds the following data:
|
||||
- Primitives: 1
|
||||
- Points: 1
|
||||
- Vertices: 1
|
||||
- VDBs: 1
|
||||
|
||||
"""
|
||||
|
||||
order = openpype.api.ValidateContentsOrder + 0.1
|
||||
families = ["vdbcache"]
|
||||
hosts = ["houdini"]
|
||||
label = "Validate Input Node (VDB)"
|
||||
|
||||
def process(self, instance):
|
||||
invalid = self.get_invalid(instance)
|
||||
if invalid:
|
||||
raise RuntimeError(
|
||||
"Node connected to the output node is not" "of type VDB!"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance):
|
||||
|
||||
node = instance.data["output_node"]
|
||||
|
||||
prims = node.geometry().prims()
|
||||
nr_of_prims = len(prims)
|
||||
|
||||
nr_of_points = len(node.geometry().points())
|
||||
if nr_of_points != nr_of_prims:
|
||||
cls.log.error("The number of primitives and points do not match")
|
||||
return [instance]
|
||||
|
||||
for prim in prims:
|
||||
if prim.numVertices() != 1:
|
||||
cls.log.error("Found primitive with more than 1 vertex!")
|
||||
return [instance]
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
import hou
|
||||
|
||||
|
||||
|
|
@ -23,32 +24,61 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin):
|
|||
label = "Validate Output Node (VDB)"
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
data = {
|
||||
"node": instance
|
||||
}
|
||||
|
||||
output_node = instance.data["output_node"]
|
||||
if output_node is None:
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"SOP Output node in '{node}' does not exist. Ensure a valid "
|
||||
"SOP output path is set.".format(**data),
|
||||
key="noSOP",
|
||||
formatting_data=data
|
||||
)
|
||||
|
||||
# Output node must be a Sop node.
|
||||
if not isinstance(output_node, hou.SopNode):
|
||||
data = {
|
||||
"nodepath": output_node.path(),
|
||||
"categoryname": output_node.type().category().name()
|
||||
}
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"Output node {nodepath} is not a SOP node. SOP Path must"
|
||||
"point to a SOP node, instead found category"
|
||||
"type: {categoryname}".format(**data),
|
||||
key="wrongSOP",
|
||||
formatting_data=data
|
||||
)
|
||||
|
||||
invalid = self.get_invalid(instance)
|
||||
|
||||
if invalid:
|
||||
raise RuntimeError(
|
||||
"Node connected to the output node is not" " of type VDB!"
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"Output node(s) `{}` are incorrect. See plug-in"
|
||||
"log for details.".format(invalid),
|
||||
formatting_data=data
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance):
|
||||
|
||||
node = instance.data["output_node"]
|
||||
if node is None:
|
||||
cls.log.error(
|
||||
"SOP path is not correctly set on "
|
||||
"ROP node '%s'." % instance[0].path()
|
||||
)
|
||||
return [instance]
|
||||
output_node = instance.data["output_node"]
|
||||
|
||||
frame = instance.data.get("frameStart", 0)
|
||||
geometry = node.geometryAtFrame(frame)
|
||||
geometry = output_node.geometryAtFrame(frame)
|
||||
if geometry is None:
|
||||
# No geometry data on this node, maybe the node hasn't cooked?
|
||||
cls.log.error(
|
||||
# No geometry data on this output_node
|
||||
# - maybe the node hasn't cooked?
|
||||
cls.log.debug(
|
||||
"SOP node has no geometry data. "
|
||||
"Is it cooked? %s" % node.path()
|
||||
"Is it cooked? %s" % output_node.path()
|
||||
)
|
||||
return [node]
|
||||
return [output_node]
|
||||
|
||||
prims = geometry.prims()
|
||||
nr_of_prims = len(prims)
|
||||
|
|
@ -57,17 +87,17 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin):
|
|||
invalid_prim = False
|
||||
for prim in prims:
|
||||
if not isinstance(prim, hou.VDB):
|
||||
cls.log.error("Found non-VDB primitive: %s" % prim)
|
||||
cls.log.debug("Found non-VDB primitive: %s" % prim)
|
||||
invalid_prim = True
|
||||
if invalid_prim:
|
||||
return [instance]
|
||||
|
||||
nr_of_points = len(geometry.points())
|
||||
if nr_of_points != nr_of_prims:
|
||||
cls.log.error("The number of primitives and points do not match")
|
||||
cls.log.debug("The number of primitives and points do not match")
|
||||
return [instance]
|
||||
|
||||
for prim in prims:
|
||||
if prim.numVertices() != 1:
|
||||
cls.log.error("Found primitive with more than 1 vertex!")
|
||||
cls.log.debug("Found primitive with more than 1 vertex!")
|
||||
return [instance]
|
||||
|
|
|
|||
|
|
@ -81,14 +81,10 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
parsed_subset = instance.data["subset"].replace(
|
||||
instance.data["family"], '')
|
||||
|
||||
fill_pairs = {
|
||||
explicit_data = {
|
||||
"subset": parsed_subset
|
||||
}
|
||||
|
||||
fill_pairs = prepare_template_data(fill_pairs)
|
||||
workfile_subset = format_template_with_optional_keys(
|
||||
fill_pairs, self.workfile_subset_template)
|
||||
|
||||
processed_instance = False
|
||||
for repre in instance.data["representations"]:
|
||||
ext = repre["ext"].replace('.', '')
|
||||
|
|
@ -102,6 +98,21 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
if ext in self.main_workfile_extensions or \
|
||||
ext in self.other_workfile_extensions:
|
||||
|
||||
formatting_data = self._get_parsed_groups(
|
||||
repre_file,
|
||||
self.input_naming_patterns["workfile"],
|
||||
self.input_naming_groups["workfile"],
|
||||
self.color_space
|
||||
)
|
||||
self.log.info("Parsed groups from workfile "
|
||||
"name '{}': {}".format(repre_file,
|
||||
formatting_data))
|
||||
|
||||
formatting_data.update(explicit_data)
|
||||
fill_pairs = prepare_template_data(formatting_data)
|
||||
workfile_subset = format_template_with_optional_keys(
|
||||
fill_pairs, self.workfile_subset_template)
|
||||
|
||||
asset_build = self._get_asset_build(
|
||||
repre_file,
|
||||
self.input_naming_patterns["workfile"],
|
||||
|
|
@ -148,11 +159,23 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
resource_files[workfile_subset].append(item)
|
||||
|
||||
if ext in self.texture_extensions:
|
||||
formatting_data = self._get_parsed_groups(
|
||||
repre_file,
|
||||
self.input_naming_patterns["textures"],
|
||||
self.input_naming_groups["textures"],
|
||||
self.color_space
|
||||
)
|
||||
|
||||
self.log.info("Parsed groups from texture "
|
||||
"name '{}': {}".format(repre_file,
|
||||
formatting_data))
|
||||
|
||||
c_space = self._get_color_space(
|
||||
repre_file,
|
||||
self.color_space
|
||||
)
|
||||
|
||||
# optional value
|
||||
channel = self._get_channel_name(
|
||||
repre_file,
|
||||
self.input_naming_patterns["textures"],
|
||||
|
|
@ -160,6 +183,7 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
self.color_space
|
||||
)
|
||||
|
||||
# optional value
|
||||
shader = self._get_shader_name(
|
||||
repre_file,
|
||||
self.input_naming_patterns["textures"],
|
||||
|
|
@ -167,13 +191,15 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
self.color_space
|
||||
)
|
||||
|
||||
formatting_data = {
|
||||
explicit_data = {
|
||||
"color_space": c_space or '', # None throws exception
|
||||
"channel": channel or '',
|
||||
"shader": shader or '',
|
||||
"subset": parsed_subset or ''
|
||||
}
|
||||
|
||||
formatting_data.update(explicit_data)
|
||||
|
||||
fill_pairs = prepare_template_data(formatting_data)
|
||||
subset = format_template_with_optional_keys(
|
||||
fill_pairs, self.texture_subset_template)
|
||||
|
|
@ -243,6 +269,13 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
for asset_build, version, subset, family in asset_builds:
|
||||
if not main_version:
|
||||
main_version = version
|
||||
|
||||
try:
|
||||
version_int = int(version or main_version or 1)
|
||||
except ValueError:
|
||||
self.log.error("Parsed version {} is not "
|
||||
"an number".format(version))
|
||||
|
||||
new_instance = context.create_instance(subset)
|
||||
new_instance.data.update(
|
||||
{
|
||||
|
|
@ -251,7 +284,7 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
"label": subset,
|
||||
"name": subset,
|
||||
"family": family,
|
||||
"version": int(version or main_version or 1),
|
||||
"version": version_int,
|
||||
"asset_build": asset_build # remove in validator
|
||||
}
|
||||
)
|
||||
|
|
@ -320,13 +353,14 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
"""
|
||||
asset_name = "NOT_AVAIL"
|
||||
|
||||
return self._parse(name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces, 'asset') or asset_name
|
||||
return (self._parse_key(name, input_naming_patterns,
|
||||
input_naming_groups, color_spaces, 'asset') or
|
||||
asset_name)
|
||||
|
||||
def _get_version(self, name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces):
|
||||
found = self._parse(name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces, 'version')
|
||||
found = self._parse_key(name, input_naming_patterns,
|
||||
input_naming_groups, color_spaces, 'version')
|
||||
|
||||
if found:
|
||||
return found.replace('v', '')
|
||||
|
|
@ -336,8 +370,8 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
def _get_udim(self, name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces):
|
||||
"""Parses from 'name' udim value."""
|
||||
found = self._parse(name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces, 'udim')
|
||||
found = self._parse_key(name, input_naming_patterns,
|
||||
input_naming_groups, color_spaces, 'udim')
|
||||
if found:
|
||||
return found
|
||||
|
||||
|
|
@ -375,12 +409,15 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
Unknown format of channel name and color spaces >> cs are known
|
||||
list - 'color_space' used as a placeholder
|
||||
"""
|
||||
found = self._parse(name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces, 'shader')
|
||||
if found:
|
||||
return found
|
||||
found = None
|
||||
try:
|
||||
found = self._parse_key(name, input_naming_patterns,
|
||||
input_naming_groups, color_spaces,
|
||||
'shader')
|
||||
except ValueError:
|
||||
self.log.warning("Didn't find shader in {}".format(name))
|
||||
|
||||
self.log.warning("Didn't find shader in {}".format(name))
|
||||
return found
|
||||
|
||||
def _get_channel_name(self, name, input_naming_patterns,
|
||||
input_naming_groups, color_spaces):
|
||||
|
|
@ -389,15 +426,18 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
Unknown format of channel name and color spaces >> cs are known
|
||||
list - 'color_space' used as a placeholder
|
||||
"""
|
||||
found = self._parse(name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces, 'channel')
|
||||
if found:
|
||||
return found
|
||||
found = None
|
||||
try:
|
||||
found = self._parse_key(name, input_naming_patterns,
|
||||
input_naming_groups, color_spaces,
|
||||
'channel')
|
||||
except ValueError:
|
||||
self.log.warning("Didn't find channel in {}".format(name))
|
||||
|
||||
self.log.warning("Didn't find channel in {}".format(name))
|
||||
return found
|
||||
|
||||
def _parse(self, name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces, key):
|
||||
def _parse_key(self, name, input_naming_patterns, input_naming_groups,
|
||||
color_spaces, key):
|
||||
"""Universal way to parse 'name' with configurable regex groups.
|
||||
|
||||
Args:
|
||||
|
|
@ -411,23 +451,47 @@ class CollectTextures(pyblish.api.ContextPlugin):
|
|||
Raises:
|
||||
ValueError - if broken 'input_naming_groups'
|
||||
"""
|
||||
parsed_groups = self._get_parsed_groups(name,
|
||||
input_naming_patterns,
|
||||
input_naming_groups,
|
||||
color_spaces)
|
||||
|
||||
try:
|
||||
parsed_value = parsed_groups[key]
|
||||
return parsed_value
|
||||
except (IndexError, KeyError):
|
||||
msg = ("'Textures group positions' must " +
|
||||
"have '{}' key".format(key))
|
||||
raise ValueError(msg)
|
||||
|
||||
def _get_parsed_groups(self, name, input_naming_patterns,
|
||||
input_naming_groups, color_spaces):
|
||||
"""Universal way to parse 'name' with configurable regex groups.
|
||||
|
||||
Args:
|
||||
name (str): workfile name or texture name
|
||||
input_naming_patterns (list):
|
||||
[workfile_pattern] or [texture_pattern]
|
||||
input_naming_groups (list)
|
||||
ordinal position of regex groups matching to input_naming..
|
||||
color_spaces (list) - predefined color spaces
|
||||
|
||||
Returns:
|
||||
(dict) {group_name:parsed_value}
|
||||
"""
|
||||
for input_pattern in input_naming_patterns:
|
||||
for cs in color_spaces:
|
||||
pattern = input_pattern.replace('{color_space}', cs)
|
||||
regex_result = re.findall(pattern, name)
|
||||
if regex_result:
|
||||
idx = list(input_naming_groups).index(key)
|
||||
if idx < 0:
|
||||
msg = "input_naming_groups must " +\
|
||||
"have '{}' key".format(key)
|
||||
raise ValueError(msg)
|
||||
if len(regex_result[0]) == len(input_naming_groups):
|
||||
return dict(zip(input_naming_groups, regex_result[0]))
|
||||
else:
|
||||
self.log.warning("No of parsed groups doesn't match "
|
||||
"no of group labels")
|
||||
|
||||
try:
|
||||
parsed_value = regex_result[0][idx]
|
||||
return parsed_value
|
||||
except IndexError:
|
||||
self.log.warning("Wrong index, probably "
|
||||
"wrong name {}".format(name))
|
||||
raise ValueError("Name '{}' cannot be parsed by any "
|
||||
"'{}' patterns".format(name, input_naming_patterns))
|
||||
|
||||
def _update_representations(self, upd_representations):
|
||||
"""Frames dont have sense for textures, add collected udims instead."""
|
||||
|
|
|
|||
|
|
@ -0,0 +1,17 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Missing source video file</title>
|
||||
<description>
|
||||
## No attached video file found
|
||||
|
||||
Process expects presence of source video file with same name prefix as an editorial file in same folder.
|
||||
(example `simple_editorial_setup_Layer1.edl` expects `simple_editorial_setup.mp4` in same folder)
|
||||
|
||||
|
||||
### How to repair?
|
||||
|
||||
Copy source video file to the folder next to `.edl` file. (On a disk, do not put it into Standalone Publisher.)
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Invalid frame range</title>
|
||||
<description>
|
||||
## Invalid frame range
|
||||
|
||||
Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Modify configuration in the database or tweak frame range in the workfile.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Duplicate shots</title>
|
||||
<description>
|
||||
## Duplicate shot names
|
||||
|
||||
Process contains duplicated shot names '{duplicates_str}'.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Remove shot duplicates.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Files not found</title>
|
||||
<description>
|
||||
## Source files not found
|
||||
|
||||
Process contains duplicated shot names:
|
||||
'{files_not_found}'
|
||||
|
||||
### How to repair?
|
||||
|
||||
Add missing files or run Publish again to collect new publishable files.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Task not found</title>
|
||||
<description>
|
||||
## Task not found in database
|
||||
|
||||
Process contains tasks that don't exist in database:
|
||||
'{task_not_found}'
|
||||
|
||||
### How to repair?
|
||||
|
||||
Remove set task or add task into database into proper place.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>No texture files found</title>
|
||||
<description>
|
||||
## Batch doesn't contain texture files
|
||||
|
||||
Batch must contain at least one texture file.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Add texture file to the batch or check name if it follows naming convention to match texture files to the batch.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>No workfile found</title>
|
||||
<description>
|
||||
## Batch should contain workfile
|
||||
|
||||
It is expected that published contains workfile that served as a source for textures.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Add workfile to the batch, or disable this validator if you do not want workfile published.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Asset name not found</title>
|
||||
<description>
|
||||
## Couldn't parse asset name from a file
|
||||
|
||||
Unable to parse asset name from '{file_name}'. File name doesn't match configured naming convention.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Check Settings: project_settings/standalonepublisher/publish/CollectTextures for naming convention.
|
||||
</description>
|
||||
<detail>
|
||||
### __Detailed Info__ (optional)
|
||||
|
||||
This error happens when parsing cannot figure out name of asset texture files belong under.
|
||||
</detail>
|
||||
</error>
|
||||
<error id="missing_values">
|
||||
<title>Missing keys</title>
|
||||
<description>
|
||||
## Texture file name is missing some required keys
|
||||
|
||||
Texture '{file_name}' is missing values for {missing_str} keys.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Fix name of texture file and Publish again.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Texture version</title>
|
||||
<description>
|
||||
## Texture version mismatch with workfile
|
||||
|
||||
Workfile '{file_name}' version doesn't match with '{version}' of a texture.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Rename either workfile or texture to contain matching versions
|
||||
</description>
|
||||
<detail>
|
||||
### __Detailed Info__ (optional)
|
||||
|
||||
This might happen if you are trying to publish textures for older version of workfile (or the other way).
|
||||
(Eg. publishing 'workfile_v001' and 'texture_file_v002')
|
||||
</detail>
|
||||
</error>
|
||||
<error id="too_many">
|
||||
<title>Too many versions</title>
|
||||
<description>
|
||||
## Too many versions published at same time
|
||||
|
||||
It is currently expected to publish only batch with single version.
|
||||
|
||||
Found {found} versions.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Please remove files with different version and split publishing into multiple steps.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>No secondary workfile</title>
|
||||
<description>
|
||||
## No secondary workfile found
|
||||
|
||||
Current process expects that primary workfile (for example with a extension '{extension}') will contain also 'secondary' workfile.
|
||||
|
||||
Secondary workfile for '{file_name}' wasn't found.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Attach secondary workfile or disable this validator and Publish again.
|
||||
</description>
|
||||
<detail>
|
||||
### __Detailed Info__ (optional)
|
||||
|
||||
This process was implemented for a possible use case of first workfile coming from Mari, secondary workfile for textures from Substance.
|
||||
Publish should contain both if primary workfile is present.
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateEditorialResources(pyblish.api.InstancePlugin):
|
||||
|
|
@ -19,5 +20,7 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
|
|||
f"Instance: {instance}, Families: "
|
||||
f"{[instance.data['family']] + instance.data['families']}")
|
||||
check_file = instance.data["editorialSourcePath"]
|
||||
msg = f"Missing \"{check_file}\"."
|
||||
assert check_file, msg
|
||||
msg = "Missing source video file."
|
||||
|
||||
if not check_file:
|
||||
raise PublishXmlValidationError(self, msg)
|
||||
|
|
|
|||
|
|
@ -1,8 +1,10 @@
|
|||
import re
|
||||
|
||||
import pyblish.api
|
||||
|
||||
import openpype.api
|
||||
from openpype import lib
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateFrameRange(pyblish.api.InstancePlugin):
|
||||
|
|
@ -48,9 +50,15 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
|
|||
files = [files]
|
||||
frames = len(files)
|
||||
|
||||
err_msg = "Frame duration from DB:'{}' ". format(int(duration)) +\
|
||||
" doesn't match number of files:'{}'".format(frames) +\
|
||||
" Please change frame range for Asset or limit no. of files"
|
||||
assert frames == duration, err_msg
|
||||
msg = "Frame duration from DB:'{}' ". format(int(duration)) +\
|
||||
" doesn't match number of files:'{}'".format(frames) +\
|
||||
" Please change frame range for Asset or limit no. of files"
|
||||
|
||||
self.log.debug("Valid ranges {} - {}".format(int(duration), frames))
|
||||
formatting_data = {"duration": duration,
|
||||
"found": frames}
|
||||
if frames != duration:
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
||||
self.log.debug("Valid ranges expected '{}' - found '{}'".
|
||||
format(int(duration), frames))
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
class ValidateShotDuplicates(pyblish.api.ContextPlugin):
|
||||
"""Validating no duplicate names are in context."""
|
||||
|
|
@ -20,4 +21,8 @@ class ValidateShotDuplicates(pyblish.api.ContextPlugin):
|
|||
shot_names.append(name)
|
||||
|
||||
msg = "There are duplicate shot names:\n{}".format(duplicate_names)
|
||||
assert not duplicate_names, msg
|
||||
|
||||
formatting_data = {"duplicates_str": ','.join(duplicate_names)}
|
||||
if duplicate_names:
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -1,8 +1,10 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
|
||||
import os
|
||||
|
||||
import pyblish.api
|
||||
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateSources(pyblish.api.InstancePlugin):
|
||||
"""Validates source files.
|
||||
|
|
@ -11,7 +13,6 @@ class ValidateSources(pyblish.api.InstancePlugin):
|
|||
got deleted between starting of SP and now.
|
||||
|
||||
"""
|
||||
|
||||
order = openpype.api.ValidateContentsOrder
|
||||
label = "Check source files"
|
||||
|
||||
|
|
@ -22,6 +23,7 @@ class ValidateSources(pyblish.api.InstancePlugin):
|
|||
def process(self, instance):
|
||||
self.log.info("instance {}".format(instance.data))
|
||||
|
||||
missing_files = set()
|
||||
for repre in instance.data.get("representations") or []:
|
||||
files = []
|
||||
if isinstance(repre["files"], str):
|
||||
|
|
@ -34,4 +36,10 @@ class ValidateSources(pyblish.api.InstancePlugin):
|
|||
file_name)
|
||||
|
||||
if not os.path.exists(source_file):
|
||||
raise ValueError("File {} not found".format(source_file))
|
||||
missing_files.add(source_file)
|
||||
|
||||
msg = "Files '{}' not found".format(','.join(missing_files))
|
||||
formatting_data = {"files_not_found": ' - {}'.join(missing_files)}
|
||||
if missing_files:
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import pyblish.api
|
||||
from avalon import io
|
||||
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateTaskExistence(pyblish.api.ContextPlugin):
|
||||
"""Validating tasks on instances are filled and existing."""
|
||||
|
|
@ -53,4 +55,9 @@ class ValidateTaskExistence(pyblish.api.ContextPlugin):
|
|||
"Asset: \"{}\" Task: \"{}\"".format(*missing_pair)
|
||||
)
|
||||
|
||||
raise AssertionError(msg.format("\n".join(pair_msgs)))
|
||||
msg = msg.format("\n".join(pair_msgs))
|
||||
|
||||
formatting_data = {"task_not_found": ' - {}'.join(pair_msgs)}
|
||||
if pair_msgs:
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateTextureBatch(pyblish.api.InstancePlugin):
|
||||
"""Validates that some texture files are present."""
|
||||
|
|
@ -15,8 +17,10 @@ class ValidateTextureBatch(pyblish.api.InstancePlugin):
|
|||
present = False
|
||||
for instance in instance.context:
|
||||
if instance.data["family"] == "textures":
|
||||
self.log.info("Some textures present.")
|
||||
self.log.info("At least some textures present.")
|
||||
|
||||
return
|
||||
|
||||
assert present, "No textures found in published batch!"
|
||||
msg = "No textures found in published batch!"
|
||||
if not present:
|
||||
raise PublishXmlValidationError(self, msg)
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
import pyblish.api
|
||||
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin):
|
||||
|
|
@ -17,4 +19,6 @@ class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin):
|
|||
def process(self, instance):
|
||||
wfile = instance.data["versionData"].get("workfile")
|
||||
|
||||
assert wfile, "Textures are missing attached workfile"
|
||||
msg = "Textures are missing attached workfile"
|
||||
if not wfile:
|
||||
raise PublishXmlValidationError(self, msg)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
|
||||
"""Validates that all instances had properly formatted name."""
|
||||
|
|
@ -19,9 +20,13 @@ class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
|
|||
msg = "Couldn't find asset name in '{}'\n".format(file_name) + \
|
||||
"File name doesn't follow configured pattern.\n" + \
|
||||
"Please rename the file."
|
||||
assert "NOT_AVAIL" not in instance.data["asset_build"], msg
|
||||
|
||||
instance.data.pop("asset_build")
|
||||
formatting_data = {"file_name": file_name}
|
||||
if "NOT_AVAIL" in instance.data["asset_build"]:
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data)
|
||||
|
||||
instance.data.pop("asset_build") # not needed anymore
|
||||
|
||||
if instance.data["family"] == "textures":
|
||||
file_name = instance.data["representations"][0]["files"][0]
|
||||
|
|
@ -47,4 +52,10 @@ class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
|
|||
"Name of the texture file doesn't match expected pattern.\n" + \
|
||||
"Please rename file(s) {}".format(file_name)
|
||||
|
||||
assert not missing_key_values, msg
|
||||
missing_str = ','.join(["'{}'".format(key)
|
||||
for key in missing_key_values])
|
||||
formatting_data = {"file_name": file_name,
|
||||
"missing_str": missing_str}
|
||||
if missing_key_values:
|
||||
raise PublishXmlValidationError(self, msg, key="missing_values",
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
import pyblish.api
|
||||
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateTextureBatchVersions(pyblish.api.InstancePlugin):
|
||||
|
|
@ -25,14 +27,21 @@ class ValidateTextureBatchVersions(pyblish.api.InstancePlugin):
|
|||
self.log.info("No workfile present for textures")
|
||||
return
|
||||
|
||||
msg = "Not matching version: texture v{:03d} - workfile {}"
|
||||
assert version_str in wfile, \
|
||||
if version_str not in wfile:
|
||||
msg = "Not matching version: texture v{:03d} - workfile {}"
|
||||
msg.format(
|
||||
instance.data["version"], wfile
|
||||
)
|
||||
raise PublishXmlValidationError(self, msg)
|
||||
|
||||
present_versions = set()
|
||||
for instance in instance.context:
|
||||
present_versions.add(instance.data["version"])
|
||||
|
||||
assert len(present_versions) == 1, "Too many versions in a batch!"
|
||||
if len(present_versions) != 1:
|
||||
msg = "Too many versions in a batch!"
|
||||
found = ','.join(["'{}'".format(val) for val in present_versions])
|
||||
formatting_data = {"found": found}
|
||||
|
||||
raise PublishXmlValidationError(self, msg, key="too_many",
|
||||
formatting_data=formatting_data)
|
||||
|
|
|
|||
|
|
@ -1,11 +1,13 @@
|
|||
import pyblish.api
|
||||
|
||||
import openpype.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
|
||||
"""Validates that textures workfile has collected resources (optional).
|
||||
|
||||
Collected recourses means secondary workfiles (in most cases).
|
||||
Collected resources means secondary workfiles (in most cases).
|
||||
"""
|
||||
|
||||
label = "Validate Texture Workfile Has Resources"
|
||||
|
|
@ -24,6 +26,13 @@ class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
|
|||
self.log.warning("Only secondary workfile present!")
|
||||
return
|
||||
|
||||
msg = "No secondary workfiles present for workfile {}".\
|
||||
format(instance.data["name"])
|
||||
assert instance.data.get("resources"), msg
|
||||
if not instance.data.get("resources"):
|
||||
msg = "No secondary workfile present for workfile '{}'". \
|
||||
format(instance.data["name"])
|
||||
ext = self.main_workfile_extensions[0]
|
||||
formatting_data = {"file_name": instance.data["name"],
|
||||
"extension": ext}
|
||||
|
||||
raise PublishXmlValidationError(self, msg,
|
||||
formatting_data=formatting_data
|
||||
)
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import pyblish.api
|
|||
from openpype.pipeline import PublishValidationError
|
||||
|
||||
|
||||
|
||||
class ValidateInstanceAssetRepair(pyblish.api.Action):
|
||||
"""Repair the instance asset."""
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,22 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Subset context</title>
|
||||
<description>## Invalid subset context
|
||||
|
||||
Context of the given subset doesn't match your current scene.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Yout can fix this with "Repair" button on the right. This will use '{expected_asset}' asset name and overwrite '{found_asset}' asset name in scene metadata.
|
||||
|
||||
After that restart publishing with Reload button.
|
||||
</description>
|
||||
<detail>
|
||||
### How could this happen?
|
||||
|
||||
The subset was created in different scene with different context
|
||||
or the scene file was copy pasted from different context.
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Layer names</title>
|
||||
<description>## Duplicated layer names
|
||||
|
||||
Can't determine which layers should be published because there are duplicated layer names in the scene.
|
||||
|
||||
### Duplicated layer names
|
||||
|
||||
{layer_names}
|
||||
|
||||
*Check layer names for all subsets in list on left side.*
|
||||
|
||||
### How to repair?
|
||||
|
||||
Hide/rename/remove layers that should not be published.
|
||||
|
||||
If all of them should be published then you have duplicated subset names in the scene. In that case you have to recrete them and use different variant name.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Layers visiblity</title>
|
||||
<description>## All layers are not visible
|
||||
|
||||
Layers visibility was changed during publishing which caused that all layers for subset "{instance_name}" are hidden.
|
||||
|
||||
### Layer names for **{instance_name}**
|
||||
|
||||
{layer_names}
|
||||
|
||||
*Check layer names for all subsets in the list on the left side.*
|
||||
|
||||
### How to repair?
|
||||
|
||||
Reset publishing and do not change visibility of layers after hitting publish button.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Frame range</title>
|
||||
<description>## Invalid render frame range
|
||||
|
||||
Scene frame range which will be rendered is defined by MarkIn and MarkOut. Expected frame range is {expected_frame_range} and current frame range is {current_frame_range}.
|
||||
|
||||
It is also required that MarkIn and MarkOut are enabled in the scene. Their color is highlighted on timeline when are enabled.
|
||||
|
||||
- MarkIn is {mark_in_enable_state}
|
||||
- MarkOut is {mark_out_enable_state}
|
||||
|
||||
### How to repair?
|
||||
|
||||
Yout can fix this with "Repair" button on the right. That will change MarkOut to {expected_mark_out}.
|
||||
|
||||
Or you can manually modify MarkIn and MarkOut in the scene timeline.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Missing layers</title>
|
||||
<description>## Missing layers for render pass
|
||||
|
||||
Render pass subset "{instance_name}" has stored layer names that belong to it's rendering scope but layers were not found in scene.
|
||||
|
||||
### Missing layer names
|
||||
|
||||
{layer_names}
|
||||
|
||||
### How to repair?
|
||||
|
||||
Find layers that belong to subset {instance_name} and rename them back to expected layer names or remove the subset and create new with right layers.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Render pass group</title>
|
||||
<description>## Invalid group of Render Pass layers
|
||||
|
||||
Layers of Render Pass {instance_name} belong to Render Group which is defined by TVPaint color group {expected_group}. But the layers are not in the group.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Change the color group to {expected_group} on layers {layer_names}.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Scene settings</title>
|
||||
<description>## Invalid scene settings
|
||||
|
||||
Scene settings do not match to expected values.
|
||||
|
||||
**FPS**
|
||||
- Expected value: {expected_fps}
|
||||
- Current value: {current_fps}
|
||||
|
||||
**Resolution**
|
||||
- Expected value: {expected_width}x{expected_height}
|
||||
- Current value: {current_width}x{current_height}
|
||||
|
||||
**Pixel ratio**
|
||||
- Expected value: {expected_pixel_ratio}
|
||||
- Current value: {current_pixel_ratio}
|
||||
|
||||
### How to repair?
|
||||
|
||||
FPS and Pixel ratio can be modified in scene setting. Wrong resolution can be fixed with changing resolution of scene but due to TVPaint limitations it is possible that you will need to create new scene.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>First frame</title>
|
||||
<description>## MarkIn is not set to 0
|
||||
|
||||
MarkIn in your scene must start from 0 fram index but MarkIn is set to {current_start_frame}.
|
||||
|
||||
### How to repair?
|
||||
|
||||
You can modify MarkIn manually or hit the "Repair" button on the right which will change MarkIn to 0 (does not change MarkOut).
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Missing metadata</title>
|
||||
<description>## Your scene miss context metadata
|
||||
|
||||
Your scene does not contain metadata about {missing_metadata}.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Resave the scene using Workfiles tool or hit the "Repair" button on the right.
|
||||
</description>
|
||||
<detail>
|
||||
### How this could happend?
|
||||
|
||||
You're using scene file that was not created using Workfiles tool.
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Project name</title>
|
||||
<description>## Your scene is from different project
|
||||
|
||||
It is not possible to publish into project "{workfile_project_name}" when TVPaint was opened with project "{env_project_name}" in context.
|
||||
|
||||
### How to repair?
|
||||
|
||||
If the workfile belongs to project "{env_project_name}" then use Workfiles tool to resave it.
|
||||
|
||||
Otherwise close TVPaint and launch it again from project you want to publish in.
|
||||
</description>
|
||||
<detail>
|
||||
### How this could happend?
|
||||
|
||||
You've opened workfile from different project. You've opened TVPaint on a task from "{env_project_name}" then you've opened TVPaint again on task from "{workfile_project_name}" without closing the TVPaint. Because TVPaint can run only once the project didn't change.
|
||||
|
||||
### Why it is important?
|
||||
Because project may affect how TVPaint works or change publishing behavior it is dangerous to allow change project context in many ways. For example publishing will not run as expected.
|
||||
</detail>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -1,4 +1,5 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.hosts.tvpaint.api import pipeline
|
||||
|
||||
|
||||
|
|
@ -27,7 +28,7 @@ class FixAssetNames(pyblish.api.Action):
|
|||
pipeline._write_instances(new_instance_items)
|
||||
|
||||
|
||||
class ValidateMissingLayers(pyblish.api.ContextPlugin):
|
||||
class ValidateAssetNames(pyblish.api.ContextPlugin):
|
||||
"""Validate assset name present on instance.
|
||||
|
||||
Asset name on instance should be the same as context's.
|
||||
|
|
@ -48,8 +49,18 @@ class ValidateMissingLayers(pyblish.api.ContextPlugin):
|
|||
instance_label = (
|
||||
instance.data.get("label") or instance.data["name"]
|
||||
)
|
||||
raise AssertionError((
|
||||
"Different asset name on instance then context's."
|
||||
" Instance \"{}\" has asset name: \"{}\""
|
||||
" Context asset name is: \"{}\""
|
||||
).format(instance_label, asset_name, context_asset_name))
|
||||
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
(
|
||||
"Different asset name on instance then context's."
|
||||
" Instance \"{}\" has asset name: \"{}\""
|
||||
" Context asset name is: \"{}\""
|
||||
).format(
|
||||
instance_label, asset_name, context_asset_name
|
||||
),
|
||||
formatting_data={
|
||||
"expected_asset": context_asset_name,
|
||||
"found_asset": asset_name
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
||||
|
|
@ -30,14 +31,20 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
|||
"\"{}\"".format(layer_name)
|
||||
for layer_name in duplicated_layer_names
|
||||
])
|
||||
|
||||
# Raise an error
|
||||
raise AssertionError(
|
||||
detail_lines = [
|
||||
"- {}".format(layer_name)
|
||||
for layer_name in set(duplicated_layer_names)
|
||||
]
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
(
|
||||
"Layers have duplicated names for instance {}."
|
||||
# Description what's wrong
|
||||
" There are layers with same name and one of them is marked"
|
||||
" for publishing so it is not possible to know which should"
|
||||
" be published. Please look for layers with names: {}"
|
||||
).format(instance.data["label"], layers_msg)
|
||||
).format(instance.data["label"], layers_msg),
|
||||
formatting_data={
|
||||
"layer_names": "<br/>".join(detail_lines)
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
# TODO @iLLiCiTiT add repair action to disable instances?
|
||||
class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
|
||||
"""Validate existence of renderPass layers."""
|
||||
|
||||
|
|
@ -9,8 +11,26 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
|
|||
families = ["review", "renderPass", "renderLayer"]
|
||||
|
||||
def process(self, instance):
|
||||
layer_names = set()
|
||||
for layer in instance.data["layers"]:
|
||||
layer_names.add(layer["name"])
|
||||
if layer["visible"]:
|
||||
return
|
||||
|
||||
raise AssertionError("All layers of instance are not visible.")
|
||||
instance_label = (
|
||||
instance.data.get("label") or instance.data["name"]
|
||||
)
|
||||
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"All layers of instance \"{}\" are not visible.".format(
|
||||
instance_label
|
||||
),
|
||||
formatting_data={
|
||||
"instance_name": instance_label,
|
||||
"layer_names": "<br/>".join([
|
||||
"- {}".format(layer_name)
|
||||
for layer_name in layer_names
|
||||
])
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
import json
|
||||
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.hosts.tvpaint.api import lib
|
||||
|
||||
|
||||
|
|
@ -73,9 +74,34 @@ class ValidateMarks(pyblish.api.ContextPlugin):
|
|||
"expected": expected_data[k]
|
||||
}
|
||||
|
||||
if invalid:
|
||||
raise AssertionError(
|
||||
"Marks does not match database:\n{}".format(
|
||||
json.dumps(invalid, sort_keys=True, indent=4)
|
||||
)
|
||||
)
|
||||
# Validation ends
|
||||
if not invalid:
|
||||
return
|
||||
|
||||
current_frame_range = (
|
||||
(current_data["markOut"] - current_data["markIn"]) + 1
|
||||
)
|
||||
expected_frame_range = (
|
||||
(expected_data["markOut"] - expected_data["markIn"]) + 1
|
||||
)
|
||||
mark_in_enable_state = "disabled"
|
||||
if current_data["markInState"]:
|
||||
mark_in_enable_state = "enabled"
|
||||
|
||||
mark_out_enable_state = "disabled"
|
||||
if current_data["markOutState"]:
|
||||
mark_out_enable_state = "enabled"
|
||||
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"Marks does not match database:\n{}".format(
|
||||
json.dumps(invalid, sort_keys=True, indent=4)
|
||||
),
|
||||
formatting_data={
|
||||
"current_frame_range": str(current_frame_range),
|
||||
"expected_frame_range": str(expected_frame_range),
|
||||
"mark_in_enable_state": mark_in_enable_state,
|
||||
"mark_out_enable_state": mark_out_enable_state,
|
||||
"expected_mark_out": expected_data["markOut"]
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateMissingLayers(pyblish.api.InstancePlugin):
|
||||
|
|
@ -30,13 +31,25 @@ class ValidateMissingLayers(pyblish.api.InstancePlugin):
|
|||
"\"{}\"".format(layer_name)
|
||||
for layer_name in missing_layer_names
|
||||
])
|
||||
instance_label = (
|
||||
instance.data.get("label") or instance.data["name"]
|
||||
)
|
||||
description_layer_names = "<br/>".join([
|
||||
"- {}".format(layer_name)
|
||||
for layer_name in missing_layer_names
|
||||
])
|
||||
|
||||
# Raise an error
|
||||
raise AssertionError(
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
(
|
||||
"Layers were not found by name for instance \"{}\"."
|
||||
# Description what's wrong
|
||||
" Layer names marked for publishing are not available"
|
||||
" in layers list. Missing layer names: {}"
|
||||
).format(instance.data["label"], layers_msg)
|
||||
).format(instance.data["label"], layers_msg),
|
||||
formatting_data={
|
||||
"instance_name": instance_label,
|
||||
"layer_names": description_layer_names
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,34 +0,0 @@
|
|||
import json
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class ValidateProjectSettings(pyblish.api.ContextPlugin):
|
||||
"""Validate project settings against database.
|
||||
"""
|
||||
|
||||
label = "Validate Project Settings"
|
||||
order = pyblish.api.ValidatorOrder
|
||||
optional = True
|
||||
|
||||
def process(self, context):
|
||||
scene_data = {
|
||||
"fps": context.data.get("sceneFps"),
|
||||
"resolutionWidth": context.data.get("sceneWidth"),
|
||||
"resolutionHeight": context.data.get("sceneHeight"),
|
||||
"pixelAspect": context.data.get("scenePixelAspect")
|
||||
}
|
||||
invalid = {}
|
||||
for k in scene_data.keys():
|
||||
expected_value = context.data["assetEntity"]["data"][k]
|
||||
if scene_data[k] != expected_value:
|
||||
invalid[k] = {
|
||||
"current": scene_data[k], "expected": expected_value
|
||||
}
|
||||
|
||||
if invalid:
|
||||
raise AssertionError(
|
||||
"Project settings does not match database:\n{}".format(
|
||||
json.dumps(invalid, sort_keys=True, indent=4)
|
||||
)
|
||||
)
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
import collections
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
||||
|
|
@ -26,11 +27,13 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
|||
layer_names = instance.data["layer_names"]
|
||||
# Check if all layers from render pass are in right group
|
||||
invalid_layers_by_group_id = collections.defaultdict(list)
|
||||
invalid_layer_names = set()
|
||||
for layer_name in layer_names:
|
||||
layer = layers_by_name.get(layer_name)
|
||||
_group_id = layer["group_id"]
|
||||
if _group_id != group_id:
|
||||
invalid_layers_by_group_id[_group_id].append(layer)
|
||||
invalid_layer_names.add(layer_name)
|
||||
|
||||
# Everything is OK and skip exception
|
||||
if not invalid_layers_by_group_id:
|
||||
|
|
@ -61,16 +64,27 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
|||
)
|
||||
|
||||
# Raise an error
|
||||
raise AssertionError((
|
||||
# Short message
|
||||
"Layers in wrong group."
|
||||
# Description what's wrong
|
||||
" Layers from render pass \"{}\" must be in group {} (id: {})."
|
||||
# Detailed message
|
||||
" Layers in wrong group: {}"
|
||||
).format(
|
||||
instance.data["label"],
|
||||
correct_group["name"],
|
||||
correct_group["group_id"],
|
||||
" | ".join(per_group_msgs)
|
||||
))
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
(
|
||||
# Short message
|
||||
"Layers in wrong group."
|
||||
# Description what's wrong
|
||||
" Layers from render pass \"{}\" must be in group {} (id: {})."
|
||||
# Detailed message
|
||||
" Layers in wrong group: {}"
|
||||
).format(
|
||||
instance.data["label"],
|
||||
correct_group["name"],
|
||||
correct_group["group_id"],
|
||||
" | ".join(per_group_msgs)
|
||||
),
|
||||
formatting_data={
|
||||
"instance_name": (
|
||||
instance.data.get("label") or instance.data["name"]
|
||||
),
|
||||
"expected_group": correct_group["name"],
|
||||
"layer_names": ", ".join(invalid_layer_names)
|
||||
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,49 @@
|
|||
import json
|
||||
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
# TODO @iLliCiTiT add fix action for fps
|
||||
class ValidateProjectSettings(pyblish.api.ContextPlugin):
    """Validate scene settings against asset data in the database.

    Compares fps, resolution and pixel aspect of the current scene with the
    values stored on the asset entity. When any value differs, raises
    ``PublishXmlValidationError`` whose ``formatting_data`` feeds the
    publish-report XML description (expected/current fps, width, height and
    pixel ratio placeholders).
    """

    label = "Validate Scene Settings"
    order = pyblish.api.ValidatorOrder
    optional = True

    def process(self, context):
        expected_data = context.data["assetEntity"]["data"]
        # Scene values are collected by an earlier collector plugin;
        # missing keys become None and will compare as invalid.
        scene_data = {
            "fps": context.data.get("sceneFps"),
            "resolutionWidth": context.data.get("sceneWidth"),
            "resolutionHeight": context.data.get("sceneHeight"),
            "pixelAspect": context.data.get("scenePixelAspect")
        }
        # Collect every key whose scene value differs from the database value
        invalid = {}
        for key, current_value in scene_data.items():
            expected_value = expected_data[key]
            if current_value != expected_value:
                invalid[key] = {
                    "current": current_value, "expected": expected_value
                }

        if not invalid:
            return

        raise PublishXmlValidationError(
            self,
            "Scene settings does not match database:\n{}".format(
                json.dumps(invalid, sort_keys=True, indent=4)
            ),
            formatting_data={
                "expected_fps": expected_data["fps"],
                "current_fps": scene_data["fps"],
                "expected_width": expected_data["resolutionWidth"],
                "expected_height": expected_data["resolutionHeight"],
                "current_width": scene_data["resolutionWidth"],
                # BUGFIX: previously read "resolutionWidth", which reported
                # the scene width as the current height in the report.
                "current_height": scene_data["resolutionHeight"],
                "expected_pixel_ratio": expected_data["pixelAspect"],
                "current_pixel_ratio": scene_data["pixelAspect"]
            }
        )
|
||||
|
|
@ -1,4 +1,5 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.hosts.tvpaint.api import lib
|
||||
|
||||
|
||||
|
|
@ -24,4 +25,13 @@ class ValidateStartFrame(pyblish.api.ContextPlugin):
|
|||
|
||||
def process(self, context):
    """Validate that the workfile's start frame is 0.

    Raises:
        PublishXmlValidationError: When the TVPaint start frame is not 0;
            ``current_start_frame`` is provided for the report template.
    """
    # BUGFIX: tv_startframe returns its result as a string, so comparing
    # it directly against the integer 0 was always False and the plugin
    # raised even for valid scenes. Convert before comparing (the removed
    # pre-refactor code did `int(start_frame) == 0` for the same reason).
    start_frame = int(lib.execute_george("tv_startframe"))
    if start_frame == 0:
        return

    raise PublishXmlValidationError(
        self,
        "Start frame has to be frame 0.",
        formatting_data={
            "current_start_frame": start_frame
        }
    )
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.hosts.tvpaint.api import save_file
|
||||
|
||||
|
||||
|
|
@ -42,8 +43,12 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
|
|||
missing_keys.append(key)
|
||||
|
||||
if missing_keys:
|
||||
raise AssertionError(
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"Current workfile is missing metadata about {}.".format(
|
||||
", ".join(missing_keys)
|
||||
)
|
||||
),
|
||||
formatting_data={
|
||||
"missing_metadata": ", ".join(missing_keys)
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
|
||||
|
|
@ -31,15 +32,23 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
|
|||
return
|
||||
|
||||
# Raise an error
|
||||
raise AssertionError((
|
||||
# Short message
|
||||
"Workfile from different Project ({})."
|
||||
# Description what's wrong
|
||||
" It is not possible to publish when TVPaint was launched in"
|
||||
"context of different project. Current context project is \"{}\"."
|
||||
" Launch TVPaint in context of project \"{}\" and then publish."
|
||||
).format(
|
||||
workfile_project_name,
|
||||
env_project_name,
|
||||
workfile_project_name,
|
||||
))
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
(
|
||||
# Short message
|
||||
"Workfile from different Project ({})."
|
||||
# Description what's wrong
|
||||
" It is not possible to publish when TVPaint was launched in"
|
||||
"context of different project. Current context project is"
|
||||
" \"{}\". Launch TVPaint in context of project \"{}\""
|
||||
" and then publish."
|
||||
).format(
|
||||
workfile_project_name,
|
||||
env_project_name,
|
||||
workfile_project_name,
|
||||
),
|
||||
formatting_data={
|
||||
"workfile_project_name": workfile_project_name,
|
||||
"expected_project_name": env_project_name
|
||||
}
|
||||
)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue