Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-25 21:32:15 +01:00)

Commit 3e92c00546: Merge remote-tracking branch 'origin/enhancement/resolve_save_current_file' into enhancement/resolve_save_current_file

271 changed files with 20178 additions and 2269 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 18 changed lines)
@@ -35,6 +35,15 @@ body:
       label: Version
       description: What version are you running? Look to OpenPype Tray
       options:
+        - 3.17.4
+        - 3.17.4-nightly.2
+        - 3.17.4-nightly.1
+        - 3.17.3
+        - 3.17.3-nightly.2
+        - 3.17.3-nightly.1
+        - 3.17.2
+        - 3.17.2-nightly.4
+        - 3.17.2-nightly.3
        - 3.17.2-nightly.2
        - 3.17.2-nightly.1
        - 3.17.1
@@ -126,15 +135,6 @@ body:
        - 3.15.1-nightly.2
        - 3.15.1-nightly.1
        - 3.15.0
-        - 3.15.0-nightly.1
-        - 3.14.11-nightly.4
-        - 3.14.11-nightly.3
-        - 3.14.11-nightly.2
-        - 3.14.11-nightly.1
-        - 3.14.10
-        - 3.14.10-nightly.9
-        - 3.14.10-nightly.8
-        - 3.14.10-nightly.7
     validations:
       required: true
   - type: dropdown
CHANGELOG.md (1112 changed lines): file diff suppressed because it is too large
@@ -279,7 +279,7 @@ arguments and it will create zip file that OpenPype can use.
 Building documentation
 ----------------------
 
-Top build API documentation, run `.\tools\make_docs(.ps1|.sh)`. It will create html documentation
+To build API documentation, run `.\tools\make_docs(.ps1|.sh)`. It will create html documentation
 from current sources in `.\docs\build`.
 
 **Note that it needs existing virtual environment.**
@@ -75,9 +75,9 @@ def _get_subsets(
     ):
         fields.add(key)
 
-    active = None
+    active = True
     if archived:
-        active = False
+        active = None
 
     for subset in con.get_products(
         project_name,
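The behavioral change above hinges on the three-state `active` filter these queries pass downstream. A minimal sketch of the convention assumed here (the helper function is hypothetical, written only to mirror the new logic):

# Three-state `active` filter, as assumed by the queries above:
#   active=True  -> only active entities are returned
#   active=False -> only inactive (archived) entities are returned
#   active=None  -> no filter; active and inactive entities are returned
def resolve_active_filter(archived):
    # hypothetical helper mirroring the new behavior above
    active = True
    if archived:
        active = None  # include archived entities alongside active ones
    return active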
@@ -196,7 +196,7 @@ def get_assets(
 
     active = True
     if archived:
-        active = False
+        active = None
 
     con = get_server_api_connection()
     fields = folder_fields_v3_to_v4(fields, con)
@@ -422,7 +422,7 @@ def failed_json_default(value):
 
 
 class ServerCreateOperation(CreateOperation):
-    """Opeartion to create an entity.
+    """Operation to create an entity.
 
     Args:
         project_name (str): On which project operation will happen.
@@ -634,7 +634,7 @@ class ServerUpdateOperation(UpdateOperation):
 
 
 class ServerDeleteOperation(DeleteOperation):
-    """Opeartion to delete an entity.
+    """Operation to delete an entity.
 
     Args:
         project_name (str): On which project operation will happen.
@@ -647,7 +647,7 @@ class ServerDeleteOperation(DeleteOperation):
         self._session = session
 
         if entity_type == "asset":
-            entity_type == "folder"
+            entity_type = "folder"
 
         elif entity_type == "hero_version":
             entity_type = "version"
@@ -2,7 +2,7 @@ import subprocess
 from openpype.lib.applications import PreLaunchHook, LaunchTypes
 
 
-class LaunchFoundryAppsWindows(PreLaunchHook):
+class LaunchNewConsoleApps(PreLaunchHook):
     """Foundry applications have specific way how to launch them.
 
     Nuke is executed "like" python process so it is required to pass
@@ -13,13 +13,15 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
 
     # Should be as last hook because must change launch arguments to string
     order = 1000
-    app_groups = {"nuke", "nukeassist", "nukex", "hiero", "nukestudio"}
+    app_groups = {
+        "nuke", "nukeassist", "nukex", "hiero", "nukestudio", "mayapy"
+    }
     platforms = {"windows"}
     launch_types = {LaunchTypes.local}
 
     def execute(self):
         # Change `creationflags` to CREATE_NEW_CONSOLE
-        # - on Windows nuke will create new window using its console
+        # - on Windows some apps will create new window using its console
         # Set `stdout` and `stderr` to None so new created console does not
         #   have redirected output to DEVNULL in build
         self.launch_context.kwargs.update({
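For context, a minimal sketch of the launch kwargs the comments above describe; the exact dictionary beyond what the comments state is an assumption:

import subprocess

# Open the application in its own console window and keep stdout/stderr
# unredirected (None) so the new console is not tied to a DEVNULL pipe.
launch_kwargs = {
    "creationflags": subprocess.CREATE_NEW_CONSOLE,  # Windows-only flag
    "stdout": None,
    "stderr": None,
}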
@@ -148,13 +148,14 @@ def applied_view(window, camera, isolate=None, options=None):
 
     area.ui_type = "VIEW_3D"
 
-    meshes = [obj for obj in window.scene.objects if obj.type == "MESH"]
+    types = {"MESH", "GPENCIL"}
+    objects = [obj for obj in window.scene.objects if obj.type in types]
 
     if camera == "AUTO":
         space.region_3d.view_perspective = "ORTHO"
-        isolate_objects(window, isolate or meshes)
+        isolate_objects(window, isolate or objects)
     else:
-        isolate_objects(window, isolate or meshes)
+        isolate_objects(window, isolate or objects)
         space.camera = window.scene.objects.get(camera)
         space.region_3d.view_perspective = "CAMERA"
 
@@ -284,6 +284,8 @@ class LaunchLoader(LaunchQtApp):
     _tool_name = "loader"
 
     def before_window_show(self):
+        if AYON_SERVER_ENABLED:
+            return
         self._window.set_context(
             {"asset": get_current_asset_name()},
             refresh=True
@@ -309,6 +311,8 @@ class LaunchManager(LaunchQtApp):
     _tool_name = "sceneinventory"
 
     def before_window_show(self):
+        if AYON_SERVER_ENABLED:
+            return
         self._window.refresh()
 
 
@@ -320,6 +324,8 @@ class LaunchLibrary(LaunchQtApp):
     _tool_name = "libraryloader"
 
     def before_window_show(self):
+        if AYON_SERVER_ENABLED:
+            return
         self._window.refresh()
 
 
@@ -340,6 +346,8 @@ class LaunchWorkFiles(LaunchQtApp):
         return result
 
     def before_window_show(self):
+        if AYON_SERVER_ENABLED:
+            return
         self._window.root = str(Path(
             os.environ.get("AVALON_WORKDIR", ""),
             os.environ.get("AVALON_SCENEDIR", ""),
@@ -460,36 +460,6 @@ def ls() -> Iterator:
         yield parse_container(container)
 
 
-def update_hierarchy(containers):
-    """Hierarchical container support
-
-    This is the function to support Scene Inventory to draw hierarchical
-    view for containers.
-
-    We need both parent and children to visualize the graph.
-
-    """
-
-    all_containers = set(ls())  # lookup set
-
-    for container in containers:
-        # Find parent
-        # FIXME (jasperge): re-evaluate this. How would it be possible
-        #   to 'nest' assets? Collections can have several parents, for
-        #   now assume it has only 1 parent
-        parent = [
-            coll for coll in bpy.data.collections if container in coll.children
-        ]
-        for node in parent:
-            if node in all_containers:
-                container["parent"] = node
-                break
-
-        log.debug("Container: %s", container)
-
-        yield container
-
-
 def publish():
     """Shorthand to publish from within host."""
 
@@ -31,7 +31,7 @@ class InstallPySideToBlender(PreLaunchHook):
 
     def inner_execute(self):
         # Get blender's python directory
-        version_regex = re.compile(r"^[2-3]\.[0-9]+$")
+        version_regex = re.compile(r"^[2-4]\.[0-9]+$")
 
         platform = system().lower()
         executable = self.launch_context.executable.executable_path
@@ -3,11 +3,11 @@
 import bpy
 
 from openpype.pipeline import get_current_task_name
-import openpype.hosts.blender.api.plugin
-from openpype.hosts.blender.api import lib
+from openpype.hosts.blender.api import plugin, lib, ops
+from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
 
 
-class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
+class CreatePointcache(plugin.Creator):
     """Polygonal static geometry"""
 
     name = "pointcacheMain"
@@ -16,20 +16,36 @@ class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
     icon = "gears"
 
     def process(self):
+        """ Run the creator on Blender main thread"""
+        mti = ops.MainThreadItem(self._process)
+        ops.execute_in_main_thread(mti)
+
+    def _process(self):
+        # Get Instance Container or create it if it does not exist
+        instances = bpy.data.collections.get(AVALON_INSTANCES)
+        if not instances:
+            instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+            bpy.context.scene.collection.children.link(instances)
+
+        # Create instance object
         asset = self.data["asset"]
         subset = self.data["subset"]
-        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
-        collection = bpy.data.collections.new(name=name)
-        bpy.context.scene.collection.children.link(collection)
+        name = plugin.asset_name(asset, subset)
+        asset_group = bpy.data.objects.new(name=name, object_data=None)
+        asset_group.empty_display_type = 'SINGLE_ARROW'
+        instances.objects.link(asset_group)
         self.data['task'] = get_current_task_name()
-        lib.imprint(collection, self.data)
+        lib.imprint(asset_group, self.data)
 
         # Add selected objects to instance
         if (self.options or {}).get("useSelection"):
-            objects = lib.get_selection()
-            for obj in objects:
-                collection.objects.link(obj)
-                if obj.type == 'EMPTY':
-                    objects.extend(obj.children)
+            bpy.context.view_layer.objects.active = asset_group
+            selected = lib.get_selection()
+            for obj in selected:
+                if obj.parent in selected:
+                    obj.select_set(False)
+                    continue
+            selected.append(asset_group)
+            bpy.ops.object.parent_set(keep_transform=True)
 
-        return collection
+        return asset_group
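The new `process`/`_process` split follows the deferred-execution pattern this Blender integration uses: `bpy` data may only be touched from Blender's main thread, so the creator queues the real work. A minimal sketch of the pattern, reusing the `ops` helpers imported above:

from openpype.hosts.blender.api import ops

def do_scene_work():
    # any bpy.* calls that must run on Blender's main thread
    pass

# Queue the callable; the integration executes it on the main thread
mti = ops.MainThreadItem(do_scene_work)
ops.execute_in_main_thread(mti)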
@@ -26,8 +26,7 @@ class CacheModelLoader(plugin.AssetLoader):
     Note:
         At least for now it only supports Alembic files.
     """
-
-    families = ["model", "pointcache"]
+    families = ["model", "pointcache", "animation"]
     representations = ["abc"]
 
     label = "Load Alembic"
@@ -53,32 +52,43 @@ class CacheModelLoader(plugin.AssetLoader):
     def _process(self, libpath, asset_group, group_name):
         plugin.deselect_all()
 
-        collection = bpy.context.view_layer.active_layer_collection.collection
-
         relative = bpy.context.preferences.filepaths.use_relative_paths
         bpy.ops.wm.alembic_import(
             filepath=libpath,
             relative_path=relative
         )
 
-        parent = bpy.context.scene.collection
-
         imported = lib.get_selection()
 
-        # Children must be linked before parents,
-        # otherwise the hierarchy will break
+        # Use first EMPTY without parent as container
+        container = next(
+            (obj for obj in imported
+             if obj.type == "EMPTY" and not obj.parent),
+            None
+        )
+
         objects = []
+        if container:
+            nodes = list(container.children)
 
-        for obj in imported:
-            obj.parent = asset_group
+            for obj in nodes:
+                obj.parent = asset_group
 
-        for obj in imported:
-            objects.append(obj)
-            imported.extend(list(obj.children))
+            bpy.data.objects.remove(container)
 
-        objects.reverse()
+            objects.extend(nodes)
+            for obj in nodes:
+                objects.extend(obj.children_recursive)
+        else:
+            for obj in imported:
+                obj.parent = asset_group
+            objects = imported
 
         for obj in objects:
             # Unlink the object from all collections
             collections = obj.users_collection
             for collection in collections:
                 collection.objects.unlink(obj)
             name = obj.name
             obj.name = f"{group_name}:{name}"
             if obj.type != 'EMPTY':
@@ -90,7 +100,7 @@ class CacheModelLoader(plugin.AssetLoader):
                 material_slot.material.name = f"{group_name}:{name_mat}"
 
             if not obj.get(AVALON_PROPERTY):
-                obj[AVALON_PROPERTY] = dict()
+                obj[AVALON_PROPERTY] = {}
 
             avalon_info = obj[AVALON_PROPERTY]
             avalon_info.update({"container_name": group_name})
@@ -99,6 +109,18 @@ class CacheModelLoader(plugin.AssetLoader):
 
         return objects
 
+    def _link_objects(self, objects, collection, containers, asset_group):
+        # Link the imported objects to any collection where the asset group is
+        # linked to, except the AVALON_CONTAINERS collection
+        group_collections = [
+            collection
+            for collection in asset_group.users_collection
+            if collection != containers]
+
+        for obj in objects:
+            for collection in group_collections:
+                collection.objects.link(obj)
+
     def process_asset(
         self, context: dict, name: str, namespace: Optional[str] = None,
         options: Optional[Dict] = None
@@ -120,18 +142,22 @@ class CacheModelLoader(plugin.AssetLoader):
         group_name = plugin.asset_name(asset, subset, unique_number)
         namespace = namespace or f"{asset}_{unique_number}"
 
-        avalon_containers = bpy.data.collections.get(AVALON_CONTAINERS)
-        if not avalon_containers:
-            avalon_containers = bpy.data.collections.new(
-                name=AVALON_CONTAINERS)
-            bpy.context.scene.collection.children.link(avalon_containers)
+        containers = bpy.data.collections.get(AVALON_CONTAINERS)
+        if not containers:
+            containers = bpy.data.collections.new(name=AVALON_CONTAINERS)
+            bpy.context.scene.collection.children.link(containers)
 
         asset_group = bpy.data.objects.new(group_name, object_data=None)
-        avalon_containers.objects.link(asset_group)
+        asset_group.empty_display_type = 'SINGLE_ARROW'
+        containers.objects.link(asset_group)
 
         objects = self._process(libpath, asset_group, group_name)
 
-        bpy.context.scene.collection.objects.link(asset_group)
+        # Link the asset group to the active collection
+        collection = bpy.context.view_layer.active_layer_collection.collection
+        collection.objects.link(asset_group)
+
+        self._link_objects(objects, asset_group, containers, asset_group)
 
         asset_group[AVALON_PROPERTY] = {
             "schema": "openpype:container-2.0",
@@ -207,7 +233,11 @@ class CacheModelLoader(plugin.AssetLoader):
         mat = asset_group.matrix_basis.copy()
         self._remove(asset_group)
 
-        self._process(str(libpath), asset_group, object_name)
+        objects = self._process(str(libpath), asset_group, object_name)
+
+        containers = bpy.data.collections.get(AVALON_CONTAINERS)
+        self._link_objects(objects, asset_group, containers, asset_group)
 
         asset_group.matrix_basis = mat
 
         metadata["libpath"] = str(libpath)
@@ -19,85 +19,51 @@ class CollectInstances(pyblish.api.ContextPlugin):
 
     @staticmethod
     def get_asset_groups() -> Generator:
-        """Return all 'model' collections.
-
-        Check if the family is 'model' and if it doesn't have the
-        representation set. If the representation is set, it is a loaded model
-        and we don't want to publish it.
+        """Return all instances that are empty objects asset groups.
         """
         instances = bpy.data.collections.get(AVALON_INSTANCES)
-        for obj in instances.objects:
-            avalon_prop = obj.get(AVALON_PROPERTY) or dict()
+        for obj in list(instances.objects) + list(instances.children):
+            avalon_prop = obj.get(AVALON_PROPERTY) or {}
             if avalon_prop.get('id') == 'pyblish.avalon.instance':
                 yield obj
 
     @staticmethod
-    def get_collections() -> Generator:
-        """Return all 'model' collections.
-
-        Check if the family is 'model' and if it doesn't have the
-        representation set. If the representation is set, it is a loaded model
-        and we don't want to publish it.
-        """
-        for collection in bpy.data.collections:
-            avalon_prop = collection.get(AVALON_PROPERTY) or dict()
-            if avalon_prop.get('id') == 'pyblish.avalon.instance':
-                yield collection
+    def create_instance(context, group):
+        avalon_prop = group[AVALON_PROPERTY]
+        asset = avalon_prop['asset']
+        family = avalon_prop['family']
+        subset = avalon_prop['subset']
+        task = avalon_prop['task']
+        name = f"{asset}_{subset}"
+        return context.create_instance(
+            name=name,
+            family=family,
+            families=[family],
+            subset=subset,
+            asset=asset,
+            task=task,
+        )
 
     def process(self, context):
         """Collect the models from the current Blender scene."""
         asset_groups = self.get_asset_groups()
-        collections = self.get_collections()
 
         for group in asset_groups:
-            avalon_prop = group[AVALON_PROPERTY]
-            asset = avalon_prop['asset']
-            family = avalon_prop['family']
-            subset = avalon_prop['subset']
-            task = avalon_prop['task']
-            name = f"{asset}_{subset}"
-            instance = context.create_instance(
-                name=name,
-                family=family,
-                families=[family],
-                subset=subset,
-                asset=asset,
-                task=task,
-            )
-            objects = list(group.children)
-            members = set()
-            for obj in objects:
-                objects.extend(list(obj.children))
-                members.add(obj)
-            members.add(group)
-            instance[:] = list(members)
-            self.log.debug(json.dumps(instance.data, indent=4))
-            for obj in instance:
-                self.log.debug(obj)
+            instance = self.create_instance(context, group)
+            members = []
+            if isinstance(group, bpy.types.Collection):
+                members = list(group.objects)
+                family = instance.data["family"]
+                if family == "animation":
+                    for obj in group.objects:
+                        if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY):
+                            members.extend(
+                                child for child in obj.children
+                                if child.type == 'ARMATURE')
+            else:
+                members = group.children_recursive
 
-        for collection in collections:
-            avalon_prop = collection[AVALON_PROPERTY]
-            asset = avalon_prop['asset']
-            family = avalon_prop['family']
-            subset = avalon_prop['subset']
-            task = avalon_prop['task']
-            name = f"{asset}_{subset}"
-            instance = context.create_instance(
-                name=name,
-                family=family,
-                families=[family],
-                subset=subset,
-                asset=asset,
-                task=task,
-            )
-            members = list(collection.objects)
-            if family == "animation":
-                for obj in collection.objects:
-                    if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY):
-                        for child in obj.children:
-                            if child.type == 'ARMATURE':
-                                members.append(child)
-            members.append(collection)
+            members.append(group)
             instance[:] = members
             self.log.debug(json.dumps(instance.data, indent=4))
             for obj in instance:
@@ -12,8 +12,7 @@ class ExtractABC(publish.Extractor):
 
     label = "Extract ABC"
     hosts = ["blender"]
-    families = ["model", "pointcache"]
-    optional = True
+    families = ["pointcache"]
 
     def process(self, instance):
         # Define extract output file path
@@ -62,3 +61,12 @@ class ExtractABC(publish.Extractor):
 
         self.log.info("Extracted instance '%s' to: %s",
                       instance.name, representation)
+
+
+class ExtractModelABC(ExtractABC):
+    """Extract model as ABC."""
+
+    label = "Extract Model ABC"
+    hosts = ["blender"]
+    families = ["model"]
+    optional = True
@@ -10,7 +10,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
     optional = True
     hosts = ["blender"]
     families = ["animation", "model", "rig", "action", "layout", "blendScene",
-                "render"]
+                "pointcache", "render"]
 
     def process(self, context):
 
@@ -165,7 +165,8 @@ class CreateSaver(NewCreator):
         filepath = self.temp_rendering_path_template.format(
             **formatting_data)
 
-        tool["Clip"] = os.path.normpath(filepath)
+        comp = get_current_comp()
+        tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath))
 
         # Rename tool
         if tool.Name != subset:
@@ -161,7 +161,7 @@ class FusionLoadSequence(load.LoaderPlugin):
         with comp_lock_and_undo_chunk(comp, "Create Loader"):
             args = (-32768, -32768)
             tool = comp.AddTool("Loader", *args)
-            tool["Clip"] = path
+            tool["Clip"] = comp.ReverseMapPath(path)
 
             # Set global in point to start frame (if in version.data)
             start = self._get_start(context["version"], tool)
@@ -244,7 +244,7 @@ class FusionLoadSequence(load.LoaderPlugin):
                 "TimeCodeOffset",
             ),
         ):
-            tool["Clip"] = path
+            tool["Clip"] = comp.ReverseMapPath(path)
 
             # Set the global in to the start frame of the sequence
             global_in_changed = loader_shift(tool, start, relative=False)
@@ -145,9 +145,11 @@ class CollectFusionRender(
         start = render_instance.frameStart - render_instance.handleStart
         end = render_instance.frameEnd + render_instance.handleEnd
 
-        path = (
-            render_instance.tool["Clip"]
-            [render_instance.workfileComp.TIME_UNDEFINED]
+        comp = render_instance.workfileComp
+        path = comp.MapPath(
+            render_instance.tool["Clip"][
+                render_instance.workfileComp.TIME_UNDEFINED
+            ]
         )
         output_dir = os.path.dirname(path)
         render_instance.outputDir = output_dir
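The Fusion changes in this commit are symmetric: the creator and loader store the clip path through `ReverseMapPath` (absolute path converted to Fusion's path-mapped form), and the render collector resolves it back through `MapPath`. A hedged sketch of the round trip; the "renders:" path map shown is an assumption about a studio's configuration:

# Hypothetical composition with a "renders:" path map configured
clip = comp.ReverseMapPath("C:/projects/show/renders/beauty.0001.exr")
# clip might now read something like "renders:/beauty.0001.exr"

resolved = comp.MapPath(clip)
# resolved is an absolute path again, usable with os.path.dirname()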
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 import sys
+import os
 import errno
 import re
 import uuid
 import logging
@@ -9,9 +10,21 @@ import json
 
 import six
 
+from openpype.lib import StringTemplate
 from openpype.client import get_asset_by_name
-from openpype.pipeline import get_current_project_name, get_current_asset_name
-from openpype.pipeline.context_tools import get_current_project_asset
+from openpype.settings import get_current_project_settings
+from openpype.pipeline import (
+    get_current_project_name,
+    get_current_asset_name,
+    registered_host
+)
+from openpype.pipeline.context_tools import (
+    get_current_context_template_data,
+    get_current_project_asset
+)
+from openpype.widgets import popup
+from openpype.tools.utils.host_tools import get_tool_by_name
+from openpype.pipeline.create import CreateContext
 
 import hou
 
@@ -160,8 +173,6 @@ def validate_fps():
 
     if current_fps != fps:
 
-        from openpype.widgets import popup
-
         # Find main window
         parent = hou.ui.mainQtWindow()
         if parent is None:
@@ -321,52 +332,61 @@ def imprint(node, data, update=False):
         return
 
     current_parms = {p.name(): p for p in node.spareParms()}
-    update_parms = []
-    templates = []
+    update_parm_templates = []
+    new_parm_templates = []
 
     for key, value in data.items():
         if value is None:
            continue
 
-        parm = get_template_from_value(key, value)
+        parm_template = get_template_from_value(key, value)
 
         if key in current_parms:
-            if node.evalParm(key) == data[key]:
+            if node.evalParm(key) == value:
                 continue
             if not update:
                 log.debug(f"{key} already exists on {node}")
             else:
                 log.debug(f"replacing {key}")
-                update_parms.append(parm)
+                update_parm_templates.append(parm_template)
             continue
 
-        templates.append(parm)
+        new_parm_templates.append(parm_template)
 
-    parm_group = node.parmTemplateGroup()
-    parm_folder = parm_group.findFolder("Extra")
-
-    # if folder doesn't exist yet, create one and append to it,
-    # else append to existing one
-    if not parm_folder:
-        parm_folder = hou.FolderParmTemplate("folder", "Extra")
-        parm_folder.setParmTemplates(templates)
-        parm_group.append(parm_folder)
-    else:
-        for template in templates:
-            parm_group.appendToFolder(parm_folder, template)
-            # this is needed because the pointer to folder
-            # is for some reason lost every call to `appendToFolder()`
-            parm_folder = parm_group.findFolder("Extra")
-
-    node.setParmTemplateGroup(parm_group)
-
-    # TODO: Updating is done here, by calling probably deprecated functions.
-    # This needs to be addressed in the future.
-    if not update_parms:
+    if not new_parm_templates and not update_parm_templates:
         return
 
-    for parm in update_parms:
-        node.replaceSpareParmTuple(parm.name(), parm)
+    parm_group = node.parmTemplateGroup()
+
+    # Add new parm templates
+    if new_parm_templates:
+        parm_folder = parm_group.findFolder("Extra")
+
+        # if folder doesn't exist yet, create one and append to it,
+        # else append to existing one
+        if not parm_folder:
+            parm_folder = hou.FolderParmTemplate("folder", "Extra")
+            parm_folder.setParmTemplates(new_parm_templates)
+            parm_group.append(parm_folder)
+        else:
+            # Add to parm template folder instance then replace with updated
+            # one in parm template group
+            for template in new_parm_templates:
+                parm_folder.addParmTemplate(template)
+            parm_group.replace(parm_folder.name(), parm_folder)
+
+    # Update existing parm templates
+    for parm_template in update_parm_templates:
+        parm_group.replace(parm_template.name(), parm_template)
+
+        # When replacing a parm with a parm of the same name it preserves its
+        # value if before the replacement the parm was not at the default,
+        # because it has a value override set. Since we're trying to update the
+        # parm by using the new value as `default` we enforce the parm is at
+        # default state
+        node.parm(parm_template.name()).revertToDefaults()
+
+    node.setParmTemplateGroup(parm_group)
 
 
 def lsattr(attr, value=None, root="/"):
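A short usage sketch of the reworked `imprint`; the node path and keys are hypothetical. New keys land as spare parms in the "Extra" folder, while `update=True` replaces the templates of keys that already exist and reverts them to the new defaults:

node = hou.node("/out/my_rop")  # hypothetical ROP node

# First call creates the "Extra" spare-parm folder with these keys
imprint(node, {"family": "pointcache", "frameStart": 1001})

# A later call updates an existing key in place
imprint(node, {"frameStart": 1005}, update=True)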
@@ -548,29 +568,64 @@ def get_template_from_value(key, value):
     return parm
 
 
-def get_frame_data(node):
-    """Get the frame data: start frame, end frame and steps.
+def get_frame_data(node, handle_start=0, handle_end=0, log=None):
+    """Get the frame data: start frame, end frame, steps,
+    start frame with start handle and end frame with end handle.
+
+    This function uses the Houdini node's `trange`, `f1`, `f2` and `f3`
+    parameters as the source of truth for the full inclusive frame
+    range to render, as such these are considered as the frame
+    range including the handles.
+
+    The non-inclusive frame start and frame end without handles
+    are computed by subtracting the handles from the inclusive
+    frame range.
 
     Args:
-        node(hou.Node)
+        node (hou.Node): ROP node to retrieve frame range from,
+            the frame range is assumed to be the frame range
+            *including* the start and end handles.
+        handle_start (int): Start handles.
+        handle_end (int): End handles.
+        log (logging.Logger): Logger to log to.
 
     Returns:
-        dict: frame data for star, end and steps.
+        dict: frame data for start, end, steps,
+            start with handle and end with handle
+
     """
 
+    if log is None:
+        log = logging.getLogger(__name__)
+
     data = {}
 
     if node.parm("trange") is None:
+        log.debug(
+            "Node has no 'trange' parameter: {}".format(node.path())
+        )
         return data
 
     if node.evalParm("trange") == 0:
-        self.log.debug("trange is 0")
-        return data
+        data["frameStartHandle"] = hou.intFrame()
+        data["frameEndHandle"] = hou.intFrame()
+        data["byFrameStep"] = 1.0
 
-    data["frameStart"] = node.evalParm("f1")
-    data["frameEnd"] = node.evalParm("f2")
-    data["steps"] = node.evalParm("f3")
+        log.info(
+            "Node '{}' has 'Render current frame' set.\n"
+            "Asset Handles are ignored.\n"
+            "frameStart and frameEnd are set to the "
+            "current frame.".format(node.path())
+        )
+    else:
+        data["frameStartHandle"] = int(node.evalParm("f1"))
+        data["frameEndHandle"] = int(node.evalParm("f2"))
+        data["byFrameStep"] = node.evalParm("f3")
+
+    data["handleStart"] = handle_start
+    data["handleEnd"] = handle_end
+    data["frameStart"] = data["frameStartHandle"] + data["handleStart"]
+    data["frameEnd"] = data["frameEndHandle"] - data["handleEnd"]
 
     return data
 
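A worked example of the handle arithmetic above, under the stated convention that the ROP's `f1`/`f2` range is inclusive of handles (values hypothetical):

# ROP set to trange=1 with f1=995, f2=1010, f3=1; asset handles 5 and 10
data = get_frame_data(node, handle_start=5, handle_end=10)

assert data["frameStartHandle"] == 995   # ROP start, handles included
assert data["frameEndHandle"] == 1010    # ROP end, handles included
assert data["frameStart"] == 1000        # 995 + handle_start
assert data["frameEnd"] == 1000          # 1010 - handle_end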
@@ -747,3 +802,193 @@ def get_camera_from_container(container):
 
     assert len(cameras) == 1, "Camera instance must have only one camera"
     return cameras[0]
+
+
+def get_context_var_changes():
+    """Get context var changes."""
+
+    houdini_vars_to_update = {}
+
+    project_settings = get_current_project_settings()
+    houdini_vars_settings = \
+        project_settings["houdini"]["general"]["update_houdini_var_context"]
+
+    if not houdini_vars_settings["enabled"]:
+        return houdini_vars_to_update
+
+    houdini_vars = houdini_vars_settings["houdini_vars"]
+
+    # No vars specified - nothing to do
+    if not houdini_vars:
+        return houdini_vars_to_update
+
+    # Get Template data
+    template_data = get_current_context_template_data()
+
+    # Set Houdini Vars
+    for item in houdini_vars:
+        # For consistency reasons we always force all vars to be uppercase
+        # Also remove any leading and trailing whitespaces.
+        var = item["var"].strip().upper()
+
+        # get and resolve template in value
+        item_value = StringTemplate.format_template(
+            item["value"],
+            template_data
+        )
+
+        if var == "JOB" and item_value == "":
+            # sync $JOB to $HIP if $JOB is empty
+            item_value = os.environ["HIP"]
+
+        if item["is_directory"]:
+            item_value = item_value.replace("\\", "/")
+
+        current_value = hou.hscript("echo -n `${}`".format(var))[0]
+
+        if current_value != item_value:
+            houdini_vars_to_update[var] = (
+                current_value, item_value, item["is_directory"]
+            )
+
+    return houdini_vars_to_update
+
+
+def update_houdini_vars_context():
+    """Update asset context variables"""
+
+    for var, (_old, new, is_directory) in get_context_var_changes().items():
+        if is_directory:
+            try:
+                os.makedirs(new)
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    print(
+                        "Failed to create ${} dir. Maybe due to "
+                        "insufficient permissions.".format(var)
+                    )
+
+        hou.hscript("set {}={}".format(var, new))
+        os.environ[var] = new
+        print("Updated ${} to {}".format(var, new))
+
+
+def update_houdini_vars_context_dialog():
+    """Show pop-up to update asset context variables"""
+    update_vars = get_context_var_changes()
+    if not update_vars:
+        # Nothing to change
+        print("Nothing to change, Houdini vars are already up to date.")
+        return
+
+    message = "\n".join(
+        "${}: {} -> {}".format(var, old or "None", new or "None")
+        for var, (old, new, _is_directory) in update_vars.items()
+    )
+
+    # TODO: Use better UI!
+    parent = hou.ui.mainQtWindow()
+    dialog = popup.Popup(parent=parent)
+    dialog.setModal(True)
+    dialog.setWindowTitle("Houdini scene has outdated asset variables")
+    dialog.setMessage(message)
+    dialog.setButtonText("Fix")
+
+    # on_clicked is emitted when the Fix button is clicked
+    dialog.on_clicked.connect(update_houdini_vars_context)
+
+    dialog.show()
+
+
+def publisher_show_and_publish(comment=None):
+    """Open publisher window and trigger publishing action.
+
+    Args:
+        comment (Optional[str]): Comment to set in publisher window.
+    """
+
+    main_window = get_main_window()
+    publisher_window = get_tool_by_name(
+        tool_name="publisher",
+        parent=main_window,
+    )
+    publisher_window.show_and_publish(comment)
+
+
+def find_rop_input_dependencies(input_tuple):
+    """Find the ROP node paths inside a ROP's input dependencies.
+
+    Arguments:
+        input_tuple (hou.RopNode.inputDependencies): possibly nested
+            tuples representing the input dependencies of the ROP node,
+            consisting of ROPs and the frames that need to be rendered
+            prior to rendering the ROP.
+
+    Returns:
+        list of the RopNode.path() that can be found inside
+        the input tuple.
+    """
+
+    out_list = []
+    if isinstance(input_tuple[0], hou.RopNode):
+        return input_tuple[0].path()
+
+    if isinstance(input_tuple[0], tuple):
+        for item in input_tuple:
+            out_list.append(find_rop_input_dependencies(item))
+
+    return out_list
+
+
+def self_publish():
+    """Self publish from ROP nodes.
+
+    First it gets the node and its dependencies,
+    then it deactivates all other ROPs,
+    and finally it triggers the publishing action.
+    """
+
+    result, comment = hou.ui.readInput(
+        "Add Publish Comment",
+        buttons=("Publish", "Cancel"),
+        title="Publish comment",
+        close_choice=1
+    )
+
+    if result:
+        return
+
+    current_node = hou.node(".")
+    inputs_paths = find_rop_input_dependencies(
+        current_node.inputDependencies()
+    )
+    inputs_paths.append(current_node.path())
+
+    host = registered_host()
+    context = CreateContext(host, reset=True)
+
+    for instance in context.instances:
+        node_path = instance.data.get("instance_node")
+        instance["active"] = node_path and node_path in inputs_paths
+
+    context.save_changes()
+
+    publisher_show_and_publish(comment)
+
+
+def add_self_publish_button(node):
+    """Adds a self publish button to the rop node."""
+
+    label = os.environ.get("AVALON_LABEL") or "OpenPype"
+
+    button_parm = hou.ButtonParmTemplate(
+        "ayon_self_publish",
+        "{} Publish".format(label),
+        script_callback="from openpype.hosts.houdini.api.lib import "
+                        "self_publish; self_publish()",
+        script_callback_language=hou.scriptLanguage.Python,
+        join_with_next=True
+    )
+
+    template = node.parmTemplateGroup()
+    template.insertBefore((0,), button_parm)
+    node.setParmTemplateGroup(template)
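For reference, `get_context_var_changes()` maps each outdated variable to a `(current_value, new_value, is_directory)` tuple, which both consumers above unpack. A sketch with hypothetical values:

changes = get_context_var_changes()
# e.g. {"JOB": ("", "P:/projects/alpha/work", True)}  (values hypothetical)

for var, (old, new, is_directory) in changes.items():
    print("${}: {} -> {}".format(var, old or "None", new or "None"))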
@@ -300,6 +300,9 @@ def on_save():
 
     log.info("Running callback on save..")
 
+    # update houdini vars
+    lib.update_houdini_vars_context_dialog()
+
     nodes = lib.get_id_required_nodes()
     for node, new_id in lib.generate_ids(nodes):
         lib.set_id(node, new_id, overwrite=False)
@@ -335,6 +338,9 @@ def on_open():
 
     log.info("Running callback on open..")
 
+    # update houdini vars
+    lib.update_houdini_vars_context_dialog()
+
     # Validate FPS after update_task_from_path to
     # ensure it is using correct FPS for the asset
     lib.validate_fps()
@@ -399,6 +405,7 @@ def _set_context_settings():
     """
 
     lib.reset_framerange()
+    lib.update_houdini_vars_context()
 
 
 def on_pyblish_instance_toggled(instance, new_value, old_value):
@@ -13,7 +13,7 @@ from openpype.pipeline import (
     CreatedInstance
 )
 from openpype.lib import BoolDef
-from .lib import imprint, read, lsattr
+from .lib import imprint, read, lsattr, add_self_publish_button
 
 
 class OpenPypeCreatorError(CreatorError):
@@ -168,6 +168,7 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
     """Base class for most of the Houdini creator plugins."""
     selected_nodes = []
     settings_name = None
+    add_publish_button = False
 
     def create(self, subset_name, instance_data, pre_create_data):
         try:
@@ -195,6 +196,10 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
                 self)
             self._add_instance_to_context(instance)
             self.imprint(instance_node, instance.data_to_store())
+
+            if self.add_publish_button:
+                add_self_publish_button(instance_node)
 
             return instance
 
         except hou.Error as er:
@@ -245,6 +250,7 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
                 key: changes[key].new_value
                 for key in changes.changed_keys
             }
+            # Update parm templates and values
             self.imprint(
                 instance_node,
                 new_values,
@@ -316,6 +322,12 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
     def apply_settings(self, project_settings):
         """Method called on initialization of plugin to apply settings."""
 
+        # Apply General Settings
+        houdini_general_settings = project_settings["houdini"]["general"]
+        self.add_publish_button = houdini_general_settings.get(
+            "add_self_publish_button", False)
+
+        # Apply Creator Settings
         settings_name = self.settings_name
         if settings_name is None:
             settings_name = self.__class__.__name__
@@ -6,6 +6,9 @@ import platform
 from openpype.settings import get_project_settings
 from openpype.pipeline import get_current_project_name
 
+from openpype.lib import StringTemplate
+from openpype.pipeline.context_tools import get_current_context_template_data
+
 import hou
 
 log = logging.getLogger("openpype.hosts.houdini.shelves")
@@ -26,10 +29,16 @@ def generate_shelves():
         log.debug("No custom shelves found in project settings.")
         return
 
+    # Get Template data
+    template_data = get_current_context_template_data()
+
     for shelf_set_config in shelves_set_config:
         shelf_set_filepath = shelf_set_config.get('shelf_set_source_path')
         shelf_set_os_filepath = shelf_set_filepath[current_os]
         if shelf_set_os_filepath:
+            shelf_set_os_filepath = get_path_using_template_data(
+                shelf_set_os_filepath, template_data
+            )
             if not os.path.isfile(shelf_set_os_filepath):
                 log.error("Shelf path doesn't exist - "
                           "{}".format(shelf_set_os_filepath))
@@ -81,7 +90,9 @@ def generate_shelves():
                           "script path of the tool.")
                 continue
 
-            tool = get_or_create_tool(tool_definition, shelf)
+            tool = get_or_create_tool(
+                tool_definition, shelf, template_data
+            )
 
             if not tool:
                 continue
@@ -144,7 +155,7 @@ def get_or_create_shelf(shelf_label):
     return new_shelf
 
 
-def get_or_create_tool(tool_definition, shelf):
+def get_or_create_tool(tool_definition, shelf, template_data):
     """This function verifies if the tool exists and updates it. If not, creates
     a new one.
 
@@ -162,10 +173,16 @@ def get_or_create_tool(tool_definition, shelf):
         return
 
     script_path = tool_definition["script"]
+    script_path = get_path_using_template_data(script_path, template_data)
     if not script_path or not os.path.exists(script_path):
         log.warning("This path doesn't exist - {}".format(script_path))
         return
 
+    icon_path = tool_definition["icon"]
+    if icon_path:
+        icon_path = get_path_using_template_data(icon_path, template_data)
+        tool_definition["icon"] = icon_path
+
     existing_tools = shelf.tools()
     existing_tool = next(
         (tool for tool in existing_tools if tool.label() == tool_label),
@@ -184,3 +201,10 @@ def get_or_create_tool(tool_definition, shelf):
 
     tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower()
     return hou.shelves.newTool(name=tool_name, **tool_definition)
+
+
+def get_path_using_template_data(path, template_data):
+    path = StringTemplate.format_template(path, template_data)
+    path = path.replace("\\", "/")
+
+    return path
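A small usage sketch of the new `get_path_using_template_data` helper; the template keys shown are hypothetical but follow the anatomy-style data returned by `get_current_context_template_data()`:

template_data = get_current_context_template_data()

# Hypothetical per-project shelf tool script location
script = get_path_using_template_data(
    "{root[work]}/{project[name]}/config/shelf_tools/export.py",
    template_data
)
# Backslashes are normalized to forward slashes for Houdini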
@@ -1,4 +1,5 @@
+import os
 import platform
 import subprocess
 
 from openpype.lib.vendor_bin_utils import find_executable
@@ -8,17 +9,31 @@ from openpype.pipeline import load
 class ShowInUsdview(load.LoaderPlugin):
     """Open USD file in usdview"""
 
-    families = ["colorbleed.usd"]
     label = "Show in usdview"
-    representations = ["usd", "usda", "usdlc", "usdnc"]
-    order = 10
+    representations = ["*"]
+    families = ["*"]
+    extensions = {"usd", "usda", "usdlc", "usdnc", "abc"}
+    order = 15
 
     icon = "code-fork"
     color = "white"
 
     def load(self, context, name=None, namespace=None, data=None):
-        usdview = find_executable("usdview")
+        from pathlib import Path
+
+        if platform.system() == "Windows":
+            executable = "usdview.bat"
+        else:
+            executable = "usdview"
+
+        usdview = find_executable(executable)
         if not usdview:
             raise RuntimeError("Unable to find usdview")
 
+        # For some reason Windows can return the path like:
+        #   C:/PROGRA~1/SIDEEF~1/HOUDIN~1.435/bin/usdview
+        # convert to resolved path so `subprocess` can take it
+        usdview = str(Path(usdview).resolve().as_posix())
+
         filepath = self.filepath_from_context(context)
+        filepath = os.path.normpath(filepath)
@@ -30,14 +45,4 @@ class ShowInUsdview(load.LoaderPlugin):
 
         self.log.info("Start houdini variant of usdview...")
 
-        # For now avoid some pipeline environment variables that initialize
-        # Avalon in Houdini as it is redundant for usdview and slows boot time
-        env = os.environ.copy()
-        env.pop("PYTHONPATH", None)
-        env.pop("HOUDINI_SCRIPT_PATH", None)
-        env.pop("HOUDINI_MENU_PATH", None)
-
-        # Force string to avoid unicode issues
-        env = {str(key): str(value) for key, value in env.items()}
-
-        subprocess.Popen([usdview, filepath, "--renderer", "GL"], env=env)
+        subprocess.Popen([usdview, filepath, "--renderer", "GL"])
@@ -20,7 +20,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
     """
 
     label = "Arnold ROP Render Products"
-    order = pyblish.api.CollectorOrder + 0.4
+    # This specific order value is used so that
+    # this plugin runs after CollectRopFrameRange
+    order = pyblish.api.CollectorOrder + 0.4999
     hosts = ["houdini"]
     families = ["arnold_rop"]
 
@@ -126,8 +128,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
             return path
 
         expected_files = []
-        start = instance.data["frameStart"]
-        end = instance.data["frameEnd"]
+        start = instance.data["frameStartHandle"]
+        end = instance.data["frameEndHandle"]
 
         for i in range(int(start), (int(end) + 1)):
             expected_files.append(
                 os.path.join(dir, (file % i)).replace("\\", "/"))
@@ -1,56 +0,0 @@
-import hou
-
-import pyblish.api
-
-
-class CollectInstanceNodeFrameRange(pyblish.api.InstancePlugin):
-    """Collect time range frame data for the instance node."""
-
-    order = pyblish.api.CollectorOrder + 0.001
-    label = "Instance Node Frame Range"
-    hosts = ["houdini"]
-
-    def process(self, instance):
-
-        node_path = instance.data.get("instance_node")
-        node = hou.node(node_path) if node_path else None
-        if not node_path or not node:
-            self.log.debug("No instance node found for instance: "
-                           "{}".format(instance))
-            return
-
-        frame_data = self.get_frame_data(node)
-        if not frame_data:
-            return
-
-        self.log.info("Collected time data: {}".format(frame_data))
-        instance.data.update(frame_data)
-
-    def get_frame_data(self, node):
-        """Get the frame data: start frame, end frame and steps
-        Args:
-            node(hou.Node)
-
-        Returns:
-            dict
-
-        """
-
-        data = {}
-
-        if node.parm("trange") is None:
-            self.log.debug("Node has no 'trange' parameter: "
-                           "{}".format(node.path()))
-            return data
-
-        if node.evalParm("trange") == 0:
-            # Ignore 'render current frame'
-            self.log.debug("Node '{}' has 'Render current frame' set. "
-                           "Time range data ignored.".format(node.path()))
-            return data
-
-        data["frameStart"] = node.evalParm("f1")
-        data["frameEnd"] = node.evalParm("f2")
-        data["byFrameStep"] = node.evalParm("f3")
-
-        return data
@@ -91,27 +91,3 @@ class CollectInstances(pyblish.api.ContextPlugin):
         context[:] = sorted(context, key=sort_by_family)
 
         return context
-
-    def get_frame_data(self, node):
-        """Get the frame data: start frame, end frame and steps
-        Args:
-            node(hou.Node)
-
-        Returns:
-            dict
-
-        """
-
-        data = {}
-
-        if node.parm("trange") is None:
-            return data
-
-        if node.evalParm("trange") == 0:
-            return data
-
-        data["frameStart"] = node.evalParm("f1")
-        data["frameEnd"] = node.evalParm("f2")
-        data["byFrameStep"] = node.evalParm("f3")
-
-        return data
@@ -24,7 +24,9 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
     """
 
     label = "Karma ROP Render Products"
-    order = pyblish.api.CollectorOrder + 0.4
+    # This specific order value is used so that
+    # this plugin runs after CollectRopFrameRange
+    order = pyblish.api.CollectorOrder + 0.4999
     hosts = ["houdini"]
     families = ["karma_rop"]
 
@@ -95,8 +97,9 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
             return path
 
         expected_files = []
-        start = instance.data["frameStart"]
-        end = instance.data["frameEnd"]
+        start = instance.data["frameStartHandle"]
+        end = instance.data["frameEndHandle"]
 
         for i in range(int(start), (int(end) + 1)):
             expected_files.append(
                 os.path.join(dir, (file % i)).replace("\\", "/"))
@@ -24,7 +24,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
     """
 
     label = "Mantra ROP Render Products"
-    order = pyblish.api.CollectorOrder + 0.4
+    # This specific order value is used so that
+    # this plugin runs after CollectRopFrameRange
+    order = pyblish.api.CollectorOrder + 0.4999
     hosts = ["houdini"]
     families = ["mantra_rop"]
 
@@ -118,8 +120,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
             return path
 
         expected_files = []
-        start = instance.data["frameStart"]
-        end = instance.data["frameEnd"]
+        start = instance.data["frameStartHandle"]
+        end = instance.data["frameEndHandle"]
 
         for i in range(int(start), (int(end) + 1)):
             expected_files.append(
                 os.path.join(dir, (file % i)).replace("\\", "/"))
@@ -24,7 +24,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
     """
 
     label = "Redshift ROP Render Products"
-    order = pyblish.api.CollectorOrder + 0.4
+    # This specific order value is used so that
+    # this plugin runs after CollectRopFrameRange
+    order = pyblish.api.CollectorOrder + 0.4999
    hosts = ["houdini"]
    families = ["redshift_rop"]
 
@@ -132,8 +134,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
             return path
 
         expected_files = []
-        start = instance.data["frameStart"]
-        end = instance.data["frameEnd"]
+        start = instance.data["frameStartHandle"]
+        end = instance.data["frameEndHandle"]
 
         for i in range(int(start), (int(end) + 1)):
             expected_files.append(
                 os.path.join(dir, (file % i)).replace("\\", "/"))
@@ -2,40 +2,106 @@
 """Collector plugin for frames data on ROP instances."""
 import hou  # noqa
 import pyblish.api
+from openpype.lib import BoolDef
 from openpype.hosts.houdini.api import lib
+from openpype.pipeline import OpenPypePyblishPluginMixin
 
 
-class CollectRopFrameRange(pyblish.api.InstancePlugin):
+class CollectRopFrameRange(pyblish.api.InstancePlugin,
+                           OpenPypePyblishPluginMixin):
     """Collect all frames which would be saved from the ROP nodes"""
 
-    order = pyblish.api.CollectorOrder
     hosts = ["houdini"]
+    # This specific order value is used so that
+    # this plugin runs after CollectAnatomyInstanceData
+    order = pyblish.api.CollectorOrder + 0.499
     label = "Collect RopNode Frame Range"
+    use_asset_handles = True
 
     def process(self, instance):
 
         node_path = instance.data.get("instance_node")
         if node_path is None:
             # Instance without instance node like a workfile instance
             self.log.debug(
                 "No instance node found for instance: {}".format(instance)
             )
             return
 
         ropnode = hou.node(node_path)
-        frame_data = lib.get_frame_data(ropnode)
 
-        if "frameStart" in frame_data and "frameEnd" in frame_data:
+        attr_values = self.get_attr_values_from_data(instance.data)
 
-            # Log artist friendly message about the collected frame range
-            message = (
-                "Frame range {0[frameStart]} - {0[frameEnd]}"
-            ).format(frame_data)
-            if frame_data.get("step", 1.0) != 1.0:
-                message += " with step {0[step]}".format(frame_data)
-            self.log.info(message)
+        if attr_values.get("use_handles", self.use_asset_handles):
+            asset_data = instance.data["assetEntity"]["data"]
+            handle_start = asset_data.get("handleStart", 0)
+            handle_end = asset_data.get("handleEnd", 0)
+        else:
+            handle_start = 0
+            handle_end = 0
 
-            instance.data.update(frame_data)
+        frame_data = lib.get_frame_data(
+            ropnode, handle_start, handle_end, self.log
+        )
 
-        # Add frame range to label if the instance has a frame range.
-        label = instance.data.get("label", instance.data["name"])
-        instance.data["label"] = (
-            "{0} [{1[frameStart]} - {1[frameEnd]}]".format(label,
-                                                           frame_data)
+        if not frame_data:
+            return
 
+        # Log debug message about the collected frame range
+        frame_start = frame_data["frameStart"]
+        frame_end = frame_data["frameEnd"]
+
+        if attr_values.get("use_handles", self.use_asset_handles):
+            self.log.debug(
+                "Full Frame range with Handles "
+                "[{frame_start_handle} - {frame_end_handle}]"
+                .format(
+                    frame_start_handle=frame_data["frameStartHandle"],
+                    frame_end_handle=frame_data["frameEndHandle"]
+                )
+            )
+        else:
+            self.log.debug(
+                "Use handles is deactivated for this instance, "
+                "start and end handles are set to 0."
+            )
+
+        # Log collected frame range to the user
+        message = "Frame range [{frame_start} - {frame_end}]".format(
+            frame_start=frame_start,
+            frame_end=frame_end
+        )
+        if handle_start or handle_end:
+            message += " with handles [{handle_start}]-[{handle_end}]".format(
+                handle_start=handle_start,
+                handle_end=handle_end
+            )
+        self.log.info(message)
+
+        if frame_data.get("byFrameStep", 1.0) != 1.0:
+            self.log.info("Frame steps {}".format(frame_data["byFrameStep"]))
+
+        instance.data.update(frame_data)
+
+        # Add frame range to label if the instance has a frame range.
+        label = instance.data.get("label", instance.data["name"])
+        instance.data["label"] = (
+            "{label} [{frame_start} - {frame_end}]"
+            .format(
+                label=label,
+                frame_start=frame_start,
+                frame_end=frame_end
+            )
+        )
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            BoolDef("use_handles",
+                    tooltip="Disable this if you want the publisher to"
+                            " ignore start and end handles specified in the"
+                            " asset data for this publish instance",
+                    default=cls.use_asset_handles,
+                    label="Use asset handles")
+        ]
@@ -24,7 +24,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
     """
 
     label = "VRay ROP Render Products"
-    order = pyblish.api.CollectorOrder + 0.4
+    # This specific order value is used so that
+    # this plugin runs after CollectRopFrameRange
+    order = pyblish.api.CollectorOrder + 0.4999
     hosts = ["houdini"]
     families = ["vray_rop"]
 
@@ -115,8 +117,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
             return path
 
         expected_files = []
-        start = instance.data["frameStart"]
-        end = instance.data["frameEnd"]
+        start = instance.data["frameStartHandle"]
+        end = instance.data["frameEndHandle"]
 
         for i in range(int(start), (int(end) + 1)):
             expected_files.append(
                 os.path.join(dir, (file % i)).replace("\\", "/"))
@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.pipeline.publish import RepairAction
from openpype.hosts.houdini.api.action import SelectInvalidAction

import hou


class DisableUseAssetHandlesAction(RepairAction):
    label = "Disable use asset handles"
    icon = "mdi.toggle-switch-off"


class ValidateFrameRange(pyblish.api.InstancePlugin):
    """Validate Frame Range.

    Due to the usage of start and end handles, the frame range
    must be >= (start handle + end handle), otherwise frameEnd
    ends up smaller than frameStart.
    """

    order = pyblish.api.ValidatorOrder - 0.1
    hosts = ["houdini"]
    label = "Validate Frame Range"
    actions = [DisableUseAssetHandlesAction, SelectInvalidAction]

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError(
                title="Invalid Frame Range",
                message=(
                    "Invalid frame range because the instance "
                    "start frame ({0[frameStart]}) is higher than "
                    "the end frame ({0[frameEnd]})"
                    .format(instance.data)
                ),
                description=(
                    "## Invalid Frame Range\n"
                    "The frame range for the instance is invalid because "
                    "the start frame is higher than the end frame.\n\nThis "
                    "is likely due to asset handles being applied to your "
                    "instance or the ROP node's start frame "
                    "is set higher than the end frame.\n\nIf your ROP frame "
                    "range is correct and you do not want to apply asset "
                    "handles make sure to disable Use asset handles on the "
                    "publish instance."
                )
            )

    @classmethod
    def get_invalid(cls, instance):

        if not instance.data.get("instance_node"):
            return

        rop_node = hou.node(instance.data["instance_node"])
        if instance.data["frameStart"] > instance.data["frameEnd"]:
            cls.log.info(
                "The ROP node render range is set to "
                "{0[frameStartHandle]} - {0[frameEndHandle]} "
                "The asset handles applied to the instance are start handle "
                "{0[handleStart]} and end handle {0[handleEnd]}"
                .format(instance.data)
            )
            return [rop_node]

    @classmethod
    def repair(cls, instance):

        if not cls.get_invalid(instance):
            # Already fixed
            return

        # Disable use asset handles
        context = instance.context
        create_context = context.data["create_context"]
        instance_id = instance.data.get("instance_id")
        if not instance_id:
            cls.log.debug("'{}' must have instance id"
                          .format(instance))
            return

        created_instance = create_context.get_instance_by_id(instance_id)
        if not created_instance:
            cls.log.debug("Unable to find instance '{}' by id"
                          .format(instance))
            return

        created_instance.publish_attributes["CollectRopFrameRange"]["use_handles"] = False  # noqa

        create_context.save_changes()
        cls.log.debug("use asset handles is turned off for '{}'"
                      .format(instance))
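For orientation, a minimal sketch of the instance data this validator inspects; the numbers are invented for illustration:

# Hypothetical instance data where handles have pushed start past end
instance_data = {
    "frameStart": 1005,        # start frame after handles are applied
    "frameEnd": 1003,          # end frame after handles are applied
    "handleStart": 8,
    "handleEnd": 8,
}

# start > end, so get_invalid() returns the ROP node and publishing stops
assert instance_data["frameStart"] > instance_data["frameEnd"]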
@@ -86,6 +86,14 @@ openpype.hosts.houdini.api.lib.reset_framerange()
]]></scriptCode>
  </scriptItem>

  <scriptItem id="update_context_vars">
    <label>Update Houdini Vars</label>
    <scriptCode><![CDATA[
import openpype.hosts.houdini.api.lib
openpype.hosts.houdini.api.lib.update_houdini_vars_context_dialog()
]]></scriptCode>
  </scriptItem>

  <separatorItem/>
  <scriptItem id="experimental_tools">
    <label>Experimental tools...</label>
@@ -234,27 +234,40 @@ def reset_scene_resolution():
    set_scene_resolution(width, height)


def get_frame_range() -> Union[Dict[str, Any], None]:
def get_frame_range(asset_doc=None) -> Union[Dict[str, Any], None]:
    """Get the current assets frame range and handles.

    Args:
        asset_doc (dict): Asset Entity Data

    Returns:
        dict: with frame start, frame end, handle start, handle end.
    """
    # Set frame start/end
    asset = get_current_project_asset()
    frame_start = asset["data"].get("frameStart")
    frame_end = asset["data"].get("frameEnd")
    if asset_doc is None:
        asset_doc = get_current_project_asset()

    data = asset_doc["data"]
    frame_start = data.get("frameStart")
    frame_end = data.get("frameEnd")

    if frame_start is None or frame_end is None:
        return
        return {}

    frame_start = int(frame_start)
    frame_end = int(frame_end)
    handle_start = int(data.get("handleStart", 0))
    handle_end = int(data.get("handleEnd", 0))
    frame_start_handle = frame_start - handle_start
    frame_end_handle = frame_end + handle_end

    handle_start = asset["data"].get("handleStart", 0)
    handle_end = asset["data"].get("handleEnd", 0)
    return {
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "handleStart": handle_start,
        "handleEnd": handle_end
        "handleEnd": handle_end,
        "frameStartHandle": frame_start_handle,
        "frameEndHandle": frame_end_handle,
    }
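For reference, a sketch of what the updated get_frame_range() returns for a hypothetical asset document (values invented for the example):

from openpype.hosts.max.api.lib import get_frame_range

# Hypothetical asset document
asset_doc = {"data": {
    "frameStart": 1001, "frameEnd": 1050,
    "handleStart": 10, "handleEnd": 10,
}}

frame_range = get_frame_range(asset_doc)
# {"frameStart": 1001, "frameEnd": 1050,
#  "handleStart": 10, "handleEnd": 10,
#  "frameStartHandle": 991,    # 1001 - 10
#  "frameEndHandle": 1060}     # 1050 + 10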
@@ -274,12 +287,11 @@ def reset_frame_range(fps: bool = True):
    fps_number = float(data_fps["data"]["fps"])
    rt.frameRate = fps_number
    frame_range = get_frame_range()
    frame_start_handle = frame_range["frameStart"] - int(
        frame_range["handleStart"]
    )
    frame_end_handle = frame_range["frameEnd"] + int(frame_range["handleEnd"])
    set_timeline(frame_start_handle, frame_end_handle)
    set_render_frame_range(frame_start_handle, frame_end_handle)

    set_timeline(
        frame_range["frameStartHandle"], frame_range["frameEndHandle"])
    set_render_frame_range(
        frame_range["frameStartHandle"], frame_range["frameEndHandle"])


def set_context_setting():
@@ -321,21 +333,6 @@ def is_headless():
    return rt.maxops.isInNonInteractiveMode()


@contextlib.contextmanager
def viewport_camera(camera):
    original = rt.viewport.getCamera()
    if not original:
        # if there is no original camera
        # use the current camera as original
        original = rt.getNodeByName(camera)
    review_camera = rt.getNodeByName(camera)
    try:
        rt.viewport.setCamera(review_camera)
        yield
    finally:
        rt.viewport.setCamera(original)


def set_timeline(frameStart, frameEnd):
    """Set frame range for timeline editor in Max
    """
@@ -497,3 +494,22 @@ def get_plugins() -> list:
        plugin_info_list.append(plugin_info)

    return plugin_info_list


@contextlib.contextmanager
def render_resolution(width, height):
    """Set render resolution option during context

    Args:
        width (int): render width
        height (int): render height
    """
    current_renderWidth = rt.renderWidth
    current_renderHeight = rt.renderHeight
    try:
        rt.renderWidth = width
        rt.renderHeight = height
        yield
    finally:
        rt.renderWidth = current_renderWidth
        rt.renderHeight = current_renderHeight
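A minimal usage sketch for the new context manager; the resolution values are arbitrary:

from pymxs import runtime as rt
from openpype.hosts.max.api.lib import render_resolution

with render_resolution(1920, 1080):
    # render calls inside the block see the temporary resolution
    print(rt.renderWidth, rt.renderHeight)  # 1920 1080
# the previous renderWidth/renderHeight are restored afterwards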
openpype/hosts/max/api/preview_animation.py (new file, 309 lines)

@@ -0,0 +1,309 @@
import logging
import contextlib
from pymxs import runtime as rt
from .lib import get_max_version, render_resolution

log = logging.getLogger("openpype.hosts.max")


@contextlib.contextmanager
def play_preview_when_done(has_autoplay):
    """Set preview playback option during context

    Args:
        has_autoplay (bool): autoplay during creating
            preview animation
    """
    current_playback = rt.preferences.playPreviewWhenDone
    try:
        rt.preferences.playPreviewWhenDone = has_autoplay
        yield
    finally:
        rt.preferences.playPreviewWhenDone = current_playback


@contextlib.contextmanager
def viewport_camera(camera):
    """Set viewport camera during context
    ***For 3dsMax 2024+
    Args:
        camera (str): viewport camera
    """
    original = rt.viewport.getCamera()
    if not original:
        # if there is no original camera
        # use the current camera as original
        original = rt.getNodeByName(camera)
    review_camera = rt.getNodeByName(camera)
    try:
        rt.viewport.setCamera(review_camera)
        yield
    finally:
        rt.viewport.setCamera(original)


@contextlib.contextmanager
def viewport_preference_setting(general_viewport,
                                nitrous_viewport,
                                vp_button_mgr):
    """Function to set viewport setting during context
    ***For Max Version < 2024
    Args:
        general_viewport (dict): General viewport setting
        nitrous_viewport (dict): Nitrous setting for
            preview animation
        vp_button_mgr (dict): Viewport button manager Setting
    """
    orig_vp_grid = rt.viewport.getGridVisibility(1)
    orig_vp_bkg = rt.viewport.IsSolidBackgroundColorMode()

    nitrousGraphicMgr = rt.NitrousGraphicsManager
    viewport_setting = nitrousGraphicMgr.GetActiveViewportSetting()
    vp_button_mgr_original = {
        key: getattr(rt.ViewportButtonMgr, key) for key in vp_button_mgr
    }
    nitrous_viewport_original = {
        key: getattr(viewport_setting, key) for key in nitrous_viewport
    }

    try:
        rt.viewport.setGridVisibility(1, general_viewport["dspGrid"])
        rt.viewport.EnableSolidBackgroundColorMode(general_viewport["dspBkg"])
        for key, value in vp_button_mgr.items():
            setattr(rt.ViewportButtonMgr, key, value)
        for key, value in nitrous_viewport.items():
            if nitrous_viewport[key] != nitrous_viewport_original[key]:
                setattr(viewport_setting, key, value)
        yield

    finally:
        rt.viewport.setGridVisibility(1, orig_vp_grid)
        rt.viewport.EnableSolidBackgroundColorMode(orig_vp_bkg)
        for key, value in vp_button_mgr_original.items():
            setattr(rt.ViewportButtonMgr, key, value)
        for key, value in nitrous_viewport_original.items():
            setattr(viewport_setting, key, value)


def _render_preview_animation_max_2024(
        filepath, start, end, percentSize, ext, viewport_options):
    """Render viewport preview with MaxScript using `CreatePreview`.
    ****For 3dsMax 2024+
    Args:
        filepath (str): filepath for render output without frame number and
            extension, for example: /path/to/file
        start (int): startFrame
        end (int): endFrame
        percentSize (float): render resolution multiplier by 100
            e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x
        ext (str): image extension
        viewport_options (dict): viewport setting options, e.g.
            {"vpStyle": "defaultshading", "vpPreset": "highquality"}
    Returns:
        list: Created files
    """
    # the percentSize argument must be integer
    percent = int(percentSize)
    filepath = filepath.replace("\\", "/")
    preview_output = f"{filepath}..{ext}"
    frame_template = f"{filepath}.{{:04d}}.{ext}"
    job_args = []
    for key, value in viewport_options.items():
        if isinstance(value, bool):
            if value:
                job_args.append(f"{key}:{value}")
        elif isinstance(value, str):
            if key == "vpStyle":
                if value == "Realistic":
                    value = "defaultshading"
                elif value == "Shaded":
                    log.warning(
                        "'Shaded' Mode not supported in "
                        "preview animation in Max 2024.\n"
                        "Using 'defaultshading' instead.")
                    value = "defaultshading"
                elif value == "ConsistentColors":
                    value = "flatcolor"
                else:
                    value = value.lower()
            elif key == "vpPreset":
                if value == "Quality":
                    value = "highquality"
                elif value == "Customize":
                    value = "userdefined"
                else:
                    value = value.lower()
            job_args.append(f"{key}: #{value}")

    job_str = (
        f'CreatePreview filename:"{preview_output}" outputAVI:false '
        f"percentSize:{percent} start:{start} end:{end} "
        f"{' '.join(job_args)} "
        "autoPlay:false"
    )
    rt.completeRedraw()
    rt.execute(job_str)
    # Return the created files
    return [frame_template.format(frame) for frame in range(start, end + 1)]


def _render_preview_animation_max_pre_2024(
        filepath, startFrame, endFrame, percentSize, ext):
    """Render viewport animation by creating bitmaps
    ***For 3dsMax Version <2024
    Args:
        filepath (str): filepath without frame numbers and extension
        startFrame (int): start frame
        endFrame (int): end frame
        percentSize (float): render resolution multiplier by 100
            e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x
        ext (str): image extension
    Returns:
        list: Created filepaths
    """
    # get the screenshot
    percent = percentSize / 100.0
    res_width = int(round(rt.renderWidth * percent))
    res_height = int(round(rt.renderHeight * percent))
    viewportRatio = float(res_width / res_height)
    frame_template = "{}.{{:04}}.{}".format(filepath, ext)
    frame_template = frame_template.replace("\\", "/")
    files = []
    user_cancelled = False
    for frame in range(startFrame, endFrame + 1):
        rt.sliderTime = frame
        filepath = frame_template.format(frame)
        preview_res = rt.bitmap(
            res_width, res_height, filename=filepath
        )
        dib = rt.gw.getViewportDib()
        dib_width = float(dib.width)
        dib_height = float(dib.height)
        renderRatio = float(dib_width / dib_height)
        if viewportRatio <= renderRatio:
            heightCrop = (dib_width / renderRatio)
            topEdge = int((dib_height - heightCrop) / 2.0)
            tempImage_bmp = rt.bitmap(dib_width, heightCrop)
            src_box_value = rt.Box2(0, topEdge, dib_width, heightCrop)
        else:
            widthCrop = dib_height * renderRatio
            leftEdge = int((dib_width - widthCrop) / 2.0)
            tempImage_bmp = rt.bitmap(widthCrop, dib_height)
            src_box_value = rt.Box2(0, leftEdge, dib_width, dib_height)
        rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0))
        # copy the bitmap and close it
        rt.copy(tempImage_bmp, preview_res)
        rt.close(tempImage_bmp)
        rt.save(preview_res)
        rt.close(preview_res)
        rt.close(dib)
        files.append(filepath)
        if rt.keyboard.escPressed:
            user_cancelled = True
            break
    # clean up the cache
    rt.gc(delayed=True)
    if user_cancelled:
        raise RuntimeError("User cancelled rendering of viewport animation.")
    return files


def render_preview_animation(
        filepath,
        ext,
        camera,
        start_frame=None,
        end_frame=None,
        percentSize=100.0,
        width=1920,
        height=1080,
        viewport_options=None):
    """Render camera review animation
    Args:
        filepath (str): filepath to render to, without frame number and
            extension
        ext (str): output file extension
        camera (str): viewport camera for preview render
        start_frame (int): start frame
        end_frame (int): end frame
        percentSize (float): render resolution multiplier by 100
            e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x
        width (int): render resolution width
        height (int): render resolution height
        viewport_options (dict): viewport setting options
    Returns:
        list: Rendered output files
    """
    if start_frame is None:
        start_frame = int(rt.animationRange.start)
    if end_frame is None:
        end_frame = int(rt.animationRange.end)

    if viewport_options is None:
        viewport_options = viewport_options_for_preview_animation()
    with play_preview_when_done(False):
        with viewport_camera(camera):
            with render_resolution(width, height):
                if int(get_max_version()) < 2024:
                    with viewport_preference_setting(
                        viewport_options["general_viewport"],
                        viewport_options["nitrous_viewport"],
                        viewport_options["vp_btn_mgr"]
                    ):
                        return _render_preview_animation_max_pre_2024(
                            filepath,
                            start_frame,
                            end_frame,
                            percentSize,
                            ext
                        )
                else:
                    return _render_preview_animation_max_2024(
                        filepath,
                        start_frame,
                        end_frame,
                        percentSize,
                        ext,
                        viewport_options
                    )


def viewport_options_for_preview_animation():
    """Get default viewport options for `render_preview_animation`.

    Returns:
        dict: viewport setting options
    """
    # viewport_options should be the dictionary
    if int(get_max_version()) < 2024:
        return {
            "visualStyleMode": "defaultshading",
            "viewportPreset": "highquality",
            "vpTexture": False,
            "dspGeometry": True,
            "dspShapes": False,
            "dspLights": False,
            "dspCameras": False,
            "dspHelpers": False,
            "dspParticles": True,
            "dspBones": False,
            "dspBkg": True,
            "dspGrid": False,
            "dspSafeFrame": False,
            "dspFrameNums": False
        }
    else:
        viewport_options = {}
        viewport_options["general_viewport"] = {
            "dspBkg": True,
            "dspGrid": False
        }
        viewport_options["nitrous_viewport"] = {
            "VisualStyleMode": "defaultshading",
            "ViewportPreset": "highquality",
            "UseTextureEnabled": False
        }
        viewport_options["vp_btn_mgr"] = {
            "EnableButtons": False}
        return viewport_options
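A minimal usage sketch of the new module; the camera name and output path are invented for the example:

from openpype.hosts.max.api.preview_animation import render_preview_animation

# Hypothetical scene camera and staging path
files = render_preview_animation(
    filepath="C:/temp/reviewMain",   # no frame number, no extension
    ext="png",
    camera="Camera001",
    start_frame=1001,
    end_frame=1010,
    percentSize=100.0,
    width=1920,
    height=1080,
)
# -> ["C:/temp/reviewMain.1001.png", ..., "C:/temp/reviewMain.1010.png"]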
@@ -13,31 +13,50 @@ class CreateReview(plugin.MaxCreator):
    icon = "video-camera"

    def create(self, subset_name, instance_data, pre_create_data):

        instance_data["imageFormat"] = pre_create_data.get("imageFormat")
        instance_data["keepImages"] = pre_create_data.get("keepImages")
        instance_data["percentSize"] = pre_create_data.get("percentSize")
        instance_data["rndLevel"] = pre_create_data.get("rndLevel")
        # Transfer settings from pre create to instance
        creator_attributes = instance_data.setdefault(
            "creator_attributes", dict())
        for key in ["imageFormat",
                    "keepImages",
                    "review_width",
                    "review_height",
                    "percentSize",
                    "visualStyleMode",
                    "viewportPreset",
                    "vpTexture"]:
            if key in pre_create_data:
                creator_attributes[key] = pre_create_data[key]

        super(CreateReview, self).create(
            subset_name,
            instance_data,
            pre_create_data)

    def get_pre_create_attr_defs(self):
        attrs = super(CreateReview, self).get_pre_create_attr_defs()
    def get_instance_attr_defs(self):
        image_format_enum = ["exr", "jpg", "png"]

        image_format_enum = [
            "bmp", "cin", "exr", "jpg", "hdr", "rgb", "png",
            "rla", "rpf", "dds", "sgi", "tga", "tif", "vrimg"
        visual_style_preset_enum = [
            "Realistic", "Shaded", "Facets",
            "ConsistentColors", "HiddenLine",
            "Wireframe", "BoundingBox", "Ink",
            "ColorInk", "Acrylic", "Tech", "Graphite",
            "ColorPencil", "Pastel", "Clay", "ModelAssist"
        ]
        preview_preset_enum = [
            "Quality", "Standard", "Performance",
            "DXMode", "Customize"]

        rndLevel_enum = [
            "smoothhighlights", "smooth", "facethighlights",
            "facet", "flat", "litwireframe", "wireframe", "box"
        ]

        return attrs + [
        return [
            NumberDef("review_width",
                      label="Review width",
                      decimals=0,
                      minimum=0,
                      default=1920),
            NumberDef("review_height",
                      label="Review height",
                      decimals=0,
                      minimum=0,
                      default=1080),
            BoolDef("keepImages",
                    label="Keep Image Sequences",
                    default=False),

@@ -50,8 +69,20 @@ class CreateReview(plugin.MaxCreator):
                      default=100,
                      minimum=1,
                      decimals=0),
            EnumDef("rndLevel",
                    rndLevel_enum,
                    default="smoothhighlights",
                    label="Preference")
            EnumDef("visualStyleMode",
                    visual_style_preset_enum,
                    default="Realistic",
                    label="Preference"),
            EnumDef("viewportPreset",
                    preview_preset_enum,
                    default="Quality",
                    label="Pre-View Preset"),
            BoolDef("vpTexture",
                    label="Viewport Texture",
                    default=False)
        ]

    def get_pre_create_attr_defs(self):
        # Use same attributes as for instance attributes
        attrs = super().get_pre_create_attr_defs()
        return attrs + self.get_instance_attr_defs()
openpype/hosts/max/plugins/create/create_tycache.py (new file, 11 lines)

@@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating TyCache."""
from openpype.hosts.max.api import plugin


class CreateTyCache(plugin.MaxCreator):
    """Creator plugin for TyCache."""
    identifier = "io.openpype.creators.max.tycache"
    label = "TyCache"
    family = "tycache"
    icon = "gear"
openpype/hosts/max/plugins/load/load_tycache.py (new file, 64 lines)

@@ -0,0 +1,64 @@
import os
from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.lib import unique_namespace
from openpype.hosts.max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    update_custom_attribute_data
)
from openpype.pipeline import get_representation_path, load


class TyCacheLoader(load.LoaderPlugin):
    """TyCache Loader."""

    families = ["tycache"]
    representations = ["tyc"]
    order = -8
    icon = "code-fork"
    color = "green"

    def load(self, context, name=None, namespace=None, data=None):
        """Load tyCache"""
        from pymxs import runtime as rt
        filepath = os.path.normpath(self.filepath_from_context(context))
        obj = rt.tyCache()
        obj.filename = filepath

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        obj.name = f"{namespace}:{obj.name}"

        return containerise(
            name, [obj], context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, representation):
        """update the container"""
        from pymxs import runtime as rt

        path = get_representation_path(representation)
        node = rt.GetNodeByName(container["instance_node"])
        node_list = get_previous_loaded_object(node)
        update_custom_attribute_data(node, node_list)
        with maintained_selection():
            for tyc in node_list:
                tyc.filename = path
        lib.imprint(container["instance_node"], {
            "representation": str(representation["_id"])
        })

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        """remove the container"""
        from pymxs import runtime as rt

        node = rt.GetNodeByName(container["instance_node"])
        rt.Delete(node)
openpype/hosts/max/plugins/publish/collect_frame_range.py (new file, 22 lines)

@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
import pyblish.api
from pymxs import runtime as rt


class CollectFrameRange(pyblish.api.InstancePlugin):
    """Collect Frame Range."""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect Frame Range"
    hosts = ['max']
    families = ["camera", "maxrender",
                "pointcache", "pointcloud",
                "review", "redshiftproxy"]

    def process(self, instance):
        if instance.data["family"] == "maxrender":
            instance.data["frameStartHandle"] = int(rt.rendStart)
            instance.data["frameEndHandle"] = int(rt.rendEnd)
        else:
            instance.data["frameStartHandle"] = int(rt.animationRange.start)
            instance.data["frameEndHandle"] = int(rt.animationRange.end)
@@ -14,7 +14,7 @@ from openpype.client import get_last_version_by_subset_name
class CollectRender(pyblish.api.InstancePlugin):
    """Collect Render for Deadline"""

    order = pyblish.api.CollectorOrder + 0.01
    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect 3dsmax Render Layers"
    hosts = ['max']
    families = ["maxrender"]
@@ -97,8 +97,8 @@ class CollectRender(pyblish.api.InstancePlugin):
            "renderer": renderer,
            "source": filepath,
            "plugin": "3dsmax",
            "frameStart": int(rt.rendStart),
            "frameEnd": int(rt.rendEnd),
            "frameStart": instance.data["frameStartHandle"],
            "frameEnd": instance.data["frameEndHandle"],
            "version": version_int,
            "farm": True
        }
@@ -5,7 +5,10 @@ import pyblish.api
from pymxs import runtime as rt
from openpype.lib import BoolDef
from openpype.hosts.max.api.lib import get_max_version
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
from openpype.pipeline.publish import (
    OpenPypePyblishPluginMixin,
    KnownPublishError
)


class CollectReview(pyblish.api.InstancePlugin,
@@ -19,30 +22,41 @@ class CollectReview(pyblish.api.InstancePlugin,

    def process(self, instance):
        nodes = instance.data["members"]
        focal_length = None
        camera_name = None
        for node in nodes:
            if rt.classOf(node) in rt.Camera.classes:
                camera_name = node.name
                focal_length = node.fov

        def is_camera(node):
            is_camera_class = rt.classOf(node) in rt.Camera.classes
            return is_camera_class and rt.isProperty(node, "fov")

        # Use first camera in instance
        cameras = [node for node in nodes if is_camera(node)]
        if cameras:
            if len(cameras) > 1:
                self.log.warning(
                    "Found more than one camera in instance, using first "
                    f"one found: {cameras[0]}"
                )
            camera = cameras[0]
            camera_name = camera.name
            focal_length = camera.fov
        else:
            raise KnownPublishError(
                "Unable to find a valid camera in 'Review' container."
                " Only native max Camera supported. "
                f"Found objects: {nodes}"
            )
        creator_attrs = instance.data["creator_attributes"]
        attr_values = self.get_attr_values_from_data(instance.data)
        data = {

        general_preview_data = {
            "review_camera": camera_name,
            "frameStart": instance.context.data["frameStart"],
            "frameEnd": instance.context.data["frameEnd"],
            "frameStart": instance.data["frameStartHandle"],
            "frameEnd": instance.data["frameEndHandle"],
            "percentSize": creator_attrs["percentSize"],
            "imageFormat": creator_attrs["imageFormat"],
            "keepImages": creator_attrs["keepImages"],
            "fps": instance.context.data["fps"],
            "dspGeometry": attr_values.get("dspGeometry"),
            "dspShapes": attr_values.get("dspShapes"),
            "dspLights": attr_values.get("dspLights"),
            "dspCameras": attr_values.get("dspCameras"),
            "dspHelpers": attr_values.get("dspHelpers"),
            "dspParticles": attr_values.get("dspParticles"),
            "dspBones": attr_values.get("dspBones"),
            "dspBkg": attr_values.get("dspBkg"),
            "dspGrid": attr_values.get("dspGrid"),
            "dspSafeFrame": attr_values.get("dspSafeFrame"),
            "dspFrameNums": attr_values.get("dspFrameNums")
            "review_width": creator_attrs["review_width"],
            "review_height": creator_attrs["review_height"],
        }

        if int(get_max_version()) >= 2024:

@@ -55,14 +69,46 @@ class CollectReview(pyblish.api.InstancePlugin,
        instance.data["colorspaceDisplay"] = display
        instance.data["colorspaceView"] = view_transform

            preview_data = {
                "vpStyle": creator_attrs["visualStyleMode"],
                "vpPreset": creator_attrs["viewportPreset"],
                "vpTextures": creator_attrs["vpTexture"],
                "dspGeometry": attr_values.get("dspGeometry"),
                "dspShapes": attr_values.get("dspShapes"),
                "dspLights": attr_values.get("dspLights"),
                "dspCameras": attr_values.get("dspCameras"),
                "dspHelpers": attr_values.get("dspHelpers"),
                "dspParticles": attr_values.get("dspParticles"),
                "dspBones": attr_values.get("dspBones"),
                "dspBkg": attr_values.get("dspBkg"),
                "dspGrid": attr_values.get("dspGrid"),
                "dspSafeFrame": attr_values.get("dspSafeFrame"),
                "dspFrameNums": attr_values.get("dspFrameNums")
            }
        else:
            general_viewport = {
                "dspBkg": attr_values.get("dspBkg"),
                "dspGrid": attr_values.get("dspGrid")
            }
            nitrous_viewport = {
                "VisualStyleMode": creator_attrs["visualStyleMode"],
                "ViewportPreset": creator_attrs["viewportPreset"],
                "UseTextureEnabled": creator_attrs["vpTexture"]
            }
            preview_data = {
                "general_viewport": general_viewport,
                "nitrous_viewport": nitrous_viewport,
                "vp_btn_mgr": {"EnableButtons": False}
            }

        # Enable ftrack functionality
        instance.data.setdefault("families", []).append('ftrack')

        burnin_members = instance.data.setdefault("burninDataMembers", {})
        burnin_members["focalLength"] = focal_length

        self.log.debug(f"data:{data}")
        instance.data.update(data)
        instance.data.update(general_preview_data)
        instance.data["viewport_options"] = preview_data

    @classmethod
    def get_attribute_defs(cls):
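To make the branching above concrete, a sketch of the two shapes `instance.data["viewport_options"]` can take; the keys mirror the code, the values are illustrative:

# Max 2024+: flat CreatePreview-style options
viewport_options = {
    "vpStyle": "Realistic",
    "vpPreset": "Quality",
    "vpTextures": False,
    "dspGeometry": True,
    # ... remaining dsp* toggles from the publish attributes
}

# Max < 2024: grouped settings consumed by viewport_preference_setting()
viewport_options = {
    "general_viewport": {"dspBkg": True, "dspGrid": False},
    "nitrous_viewport": {
        "VisualStyleMode": "Realistic",
        "ViewportPreset": "Quality",
        "UseTextureEnabled": False,
    },
    "vp_btn_mgr": {"EnableButtons": False},
}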
@@ -0,0 +1,76 @@
import pyblish.api

from openpype.lib import EnumDef, TextDef
from openpype.pipeline.publish import OpenPypePyblishPluginMixin


class CollectTyCacheData(pyblish.api.InstancePlugin,
                         OpenPypePyblishPluginMixin):
    """Collect Channel Attributes for TyCache Export"""

    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect tyCache attribute Data"
    hosts = ['max']
    families = ["tycache"]

    def process(self, instance):
        attr_values = self.get_attr_values_from_data(instance.data)
        attributes = {}
        for attr_key in attr_values.get("tycacheAttributes", []):
            attributes[attr_key] = True

        for key in ["tycacheLayer", "tycacheObjectName"]:
            attributes[key] = attr_values.get(key, "")

        # Collect the selected channel data before exporting
        instance.data["tyc_attrs"] = attributes
        self.log.debug(
            f"Found tycache attributes: {attributes}"
        )

    @classmethod
    def get_attribute_defs(cls):
        # TODO: Support the attributes with maxObject array
        tyc_attr_enum = ["tycacheChanAge", "tycacheChanGroups",
                         "tycacheChanPos", "tycacheChanRot",
                         "tycacheChanScale", "tycacheChanVel",
                         "tycacheChanSpin", "tycacheChanShape",
                         "tycacheChanMatID", "tycacheChanMapping",
                         "tycacheChanMaterials", "tycacheChanCustomFloat",
                         "tycacheChanCustomVector", "tycacheChanCustomTM",
                         "tycacheChanPhysX", "tycacheMeshBackup",
                         "tycacheCreateObject",
                         "tycacheCreateObjectIfNotCreated",
                         "tycacheAdditionalCloth",
                         "tycacheAdditionalSkin",
                         "tycacheAdditionalSkinID",
                         "tycacheAdditionalSkinIDValue",
                         "tycacheAdditionalTerrain",
                         "tycacheAdditionalVDB",
                         "tycacheAdditionalSplinePaths",
                         "tycacheAdditionalGeo",
                         "tycacheAdditionalGeoActivateModifiers",
                         "tycacheSplines",
                         "tycacheSplinesAdditionalSplines"
                         ]
        tyc_default_attrs = ["tycacheChanGroups", "tycacheChanPos",
                             "tycacheChanRot", "tycacheChanScale",
                             "tycacheChanVel", "tycacheChanShape",
                             "tycacheChanMatID", "tycacheChanMapping",
                             "tycacheChanMaterials",
                             "tycacheCreateObjectIfNotCreated"]
        return [
            EnumDef("tycacheAttributes",
                    tyc_attr_enum,
                    default=tyc_default_attrs,
                    multiselection=True,
                    label="TyCache Attributes"),
            TextDef("tycacheLayer",
                    label="TyCache Layer",
                    tooltip="Name of tycache layer",
                    default="$(tyFlowLayer)"),
            TextDef("tycacheObjectName",
                    label="TyCache Object Name",
                    tooltip="TyCache Object Name",
                    default="$(tyFlowName)_tyCache")
        ]
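For illustration, the kind of dictionary this collector ends up storing when the default attribute selection and the default text fields are left untouched:

# instance.data["tyc_attrs"] would look roughly like:
tyc_attrs = {
    "tycacheChanGroups": True,
    "tycacheChanPos": True,
    "tycacheChanRot": True,
    "tycacheChanScale": True,
    "tycacheChanVel": True,
    "tycacheChanShape": True,
    "tycacheChanMatID": True,
    "tycacheChanMapping": True,
    "tycacheChanMaterials": True,
    "tycacheCreateObjectIfNotCreated": True,
    "tycacheLayer": "$(tyFlowLayer)",
    "tycacheObjectName": "$(tyFlowName)_tyCache",
}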
@@ -19,8 +19,8 @@ class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
    def process(self, instance):
        if not self.is_active(instance.data):
            return
        start = float(instance.data.get("frameStartHandle", 1))
        end = float(instance.data.get("frameEndHandle", 1))
        start = instance.data["frameStartHandle"]
        end = instance.data["frameEndHandle"]

        self.log.info("Extracting Camera ...")
@@ -51,8 +51,8 @@ class ExtractAlembic(publish.Extractor):
    families = ["pointcache"]

    def process(self, instance):
        start = float(instance.data.get("frameStartHandle", 1))
        end = float(instance.data.get("frameEndHandle", 1))
        start = instance.data["frameStartHandle"]
        end = instance.data["frameEndHandle"]

        self.log.debug("Extracting pointcache ...")
@@ -36,11 +36,12 @@ class ExtractPointCloud(publish.Extractor):
    label = "Extract Point Cloud"
    hosts = ["max"]
    families = ["pointcloud"]
    settings = []

    def process(self, instance):
        self.settings = self.get_setting(instance)
        start = int(instance.context.data.get("frameStart"))
        end = int(instance.context.data.get("frameEnd"))
        start = instance.data["frameStartHandle"]
        end = instance.data["frameEndHandle"]
        self.log.info("Extracting PRT...")

        stagingdir = self.staging_dir(instance)
@@ -16,8 +16,8 @@ class ExtractRedshiftProxy(publish.Extractor):
    families = ["redshiftproxy"]

    def process(self, instance):
        start = int(instance.context.data.get("frameStart"))
        end = int(instance.context.data.get("frameEnd"))
        start = instance.data["frameStartHandle"]
        end = instance.data["frameEndHandle"]

        self.log.debug("Extracting Redshift Proxy...")
        stagingdir = self.staging_dir(instance)
@@ -1,8 +1,9 @@
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version
from openpype.hosts.max.api.preview_animation import (
    render_preview_animation
)


class ExtractReviewAnimation(publish.Extractor):
@@ -18,24 +19,26 @@ class ExtractReviewAnimation(publish.Extractor):
    def process(self, instance):
        staging_dir = self.staging_dir(instance)
        ext = instance.data.get("imageFormat")
        filename = "{0}..{1}".format(instance.name, ext)
        start = int(instance.data["frameStart"])
        end = int(instance.data["frameEnd"])
        fps = int(instance.data["fps"])
        filepath = os.path.join(staging_dir, filename)
        filepath = filepath.replace("\\", "/")
        filenames = self.get_files(
            instance.name, start, end, ext)

        filepath = os.path.join(staging_dir, instance.name)
        self.log.debug(
            "Writing Review Animation to"
            " '%s' to '%s'" % (filename, staging_dir))
            "Writing Review Animation to '{}'".format(filepath))

        review_camera = instance.data["review_camera"]
        with viewport_camera(review_camera):
            preview_arg = self.set_preview_arg(
                instance, filepath, start, end, fps)
            rt.execute(preview_arg)
        viewport_options = instance.data.get("viewport_options", {})
        files = render_preview_animation(
            filepath,
            ext,
            review_camera,
            start,
            end,
            percentSize=instance.data["percentSize"],
            width=instance.data["review_width"],
            height=instance.data["review_height"],
            viewport_options=viewport_options)

        filenames = [os.path.basename(path) for path in files]

        tags = ["review"]
        if not instance.data.get("keepImages"):

@@ -48,8 +51,8 @@ class ExtractReviewAnimation(publish.Extractor):
            "ext": instance.data["imageFormat"],
            "files": filenames,
            "stagingDir": staging_dir,
            "frameStart": instance.data["frameStart"],
            "frameEnd": instance.data["frameEnd"],
            "frameStart": instance.data["frameStartHandle"],
            "frameEnd": instance.data["frameEndHandle"],
            "tags": tags,
            "preview": True,
            "camera_name": review_camera
@@ -59,44 +62,3 @@ class ExtractReviewAnimation(publish.Extractor):
        if "representations" not in instance.data:
            instance.data["representations"] = []
        instance.data["representations"].append(representation)

    def get_files(self, filename, start, end, ext):
        file_list = []
        for frame in range(int(start), int(end) + 1):
            actual_name = "{}.{:04}.{}".format(
                filename, frame, ext)
            file_list.append(actual_name)

        return file_list

    def set_preview_arg(self, instance, filepath,
                        start, end, fps):
        job_args = list()
        default_option = f'CreatePreview filename:"{filepath}"'
        job_args.append(default_option)
        frame_option = f"outputAVI:false start:{start} end:{end} fps:{fps}"  # noqa
        job_args.append(frame_option)
        rndLevel = instance.data.get("rndLevel")
        if rndLevel:
            option = f"rndLevel:#{rndLevel}"
            job_args.append(option)
        options = [
            "percentSize", "dspGeometry", "dspShapes",
            "dspLights", "dspCameras", "dspHelpers", "dspParticles",
            "dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
        ]

        for key in options:
            enabled = instance.data.get(key)
            if enabled:
                job_args.append(f"{key}:{enabled}")

        if get_max_version() == 2024:
            # hardcoded for current stage
            auto_play_option = "autoPlay:false"
            job_args.append(auto_play_option)

        job_str = " ".join(job_args)
        self.log.debug(job_str)

        return job_str
@@ -1,14 +1,11 @@
import os
import tempfile
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version
from openpype.hosts.max.api.preview_animation import render_preview_animation


class ExtractThumbnail(publish.Extractor):
    """
    Extract Thumbnail for Review
    """Extract Thumbnail for Review
    """

    order = pyblish.api.ExtractorOrder
@@ -17,34 +14,33 @@ class ExtractThumbnail(publish.Extractor):
    families = ["review"]

    def process(self, instance):
        # TODO: Create temp directory for thumbnail
        # - this is to avoid "override" of source file
        tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
        self.log.debug(
            f"Create temp directory {tmp_staging} for thumbnail"
        )
        fps = int(instance.data["fps"])
        ext = instance.data.get("imageFormat")
        frame = int(instance.data["frameStart"])
        instance.context.data["cleanupFullPaths"].append(tmp_staging)
        filename = "{name}_thumbnail..png".format(**instance.data)
        filepath = os.path.join(tmp_staging, filename)
        filepath = filepath.replace("\\", "/")
        thumbnail = self.get_filename(instance.name, frame)
        staging_dir = self.staging_dir(instance)
        filepath = os.path.join(
            staging_dir, f"{instance.name}_thumbnail")
        self.log.debug("Writing Thumbnail to '{}'".format(filepath))

        self.log.debug(
            "Writing Thumbnail to"
            " '%s' to '%s'" % (filename, tmp_staging))
        review_camera = instance.data["review_camera"]
        with viewport_camera(review_camera):
            preview_arg = self.set_preview_arg(
                instance, filepath, fps, frame)
            rt.execute(preview_arg)
        viewport_options = instance.data.get("viewport_options", {})
        files = render_preview_animation(
            filepath,
            ext,
            review_camera,
            start_frame=frame,
            end_frame=frame,
            percentSize=instance.data["percentSize"],
            width=instance.data["review_width"],
            height=instance.data["review_height"],
            viewport_options=viewport_options)

        thumbnail = next(os.path.basename(path) for path in files)

        representation = {
            "name": "thumbnail",
            "ext": "png",
            "ext": ext,
            "files": thumbnail,
            "stagingDir": tmp_staging,
            "stagingDir": staging_dir,
            "thumbnail": True
        }

@@ -53,39 +49,3 @@ class ExtractThumbnail(publish.Extractor):
        if "representations" not in instance.data:
            instance.data["representations"] = []
        instance.data["representations"].append(representation)

    def get_filename(self, filename, target_frame):
        thumbnail_name = "{}_thumbnail.{:04}.png".format(
            filename, target_frame
        )
        return thumbnail_name

    def set_preview_arg(self, instance, filepath, fps, frame):
        job_args = list()
        default_option = f'CreatePreview filename:"{filepath}"'
        job_args.append(default_option)
        frame_option = f"outputAVI:false start:{frame} end:{frame} fps:{fps}"  # noqa
        job_args.append(frame_option)
        rndLevel = instance.data.get("rndLevel")
        if rndLevel:
            option = f"rndLevel:#{rndLevel}"
            job_args.append(option)
        options = [
            "percentSize", "dspGeometry", "dspShapes",
            "dspLights", "dspCameras", "dspHelpers", "dspParticles",
            "dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
        ]

        for key in options:
            enabled = instance.data.get(key)
            if enabled:
                job_args.append(f"{key}:{enabled}")
        if get_max_version() == 2024:
            # hardcoded for current stage
            auto_play_option = "autoPlay:false"
            job_args.append(auto_play_option)

        job_str = " ".join(job_args)
        self.log.debug(job_str)

        return job_str
openpype/hosts/max/plugins/publish/extract_tycache.py (new file, 157 lines)

@@ -0,0 +1,157 @@
import os

import pyblish.api
from pymxs import runtime as rt

from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import publish


class ExtractTyCache(publish.Extractor):
    """Extract tycache format with tyFlow operators.
    Notes:
        - TyCache only works for TyFlow Pro Plugin.

    Methods:
        self.get_export_particles_job_args(): sets up all job arguments
            for attributes to be exported in MAXscript

        self.get_operators(): get the export_particle operator

        self.get_files(): get the files with tyFlow naming convention
            before publishing
    """

    order = pyblish.api.ExtractorOrder - 0.2
    label = "Extract TyCache"
    hosts = ["max"]
    families = ["tycache"]

    def process(self, instance):
        # TODO: let user decide the param
        start = int(instance.context.data["frameStart"])
        end = int(instance.context.data.get("frameEnd"))
        self.log.debug("Extracting Tycache...")

        stagingdir = self.staging_dir(instance)
        filename = "{name}.tyc".format(**instance.data)
        path = os.path.join(stagingdir, filename)
        filenames = self.get_files(instance, start, end)
        additional_attributes = instance.data.get("tyc_attrs", {})

        with maintained_selection():
            job_args = self.get_export_particles_job_args(
                instance.data["members"],
                start, end, path,
                additional_attributes)
            for job in job_args:
                rt.Execute(job)
        representations = instance.data.setdefault("representations", [])
        representation = {
            'name': 'tyc',
            'ext': 'tyc',
            'files': filenames if len(filenames) > 1 else filenames[0],
            "stagingDir": stagingdir,
        }
        representations.append(representation)

        # Get the tyMesh filename for extraction
        mesh_filename = f"{instance.name}__tyMesh.tyc"
        mesh_repres = {
            'name': 'tyMesh',
            'ext': 'tyc',
            'files': mesh_filename,
            "stagingDir": stagingdir,
            "outputName": '__tyMesh'
        }
        representations.append(mesh_repres)
        self.log.debug(f"Extracted instance '{instance.name}' to: {filenames}")

    def get_files(self, instance, start_frame, end_frame):
        """Get file names for tyFlow in tyCache format.

        Set the filenames according to the tyCache file
        naming extension (.tyc) for publishing purposes.

        Actual file output from tyFlow in tyCache format:
            <InstanceName>__tyPart_<frame>.tyc

        e.g. tycacheMain__tyPart_00000.tyc

        Args:
            instance (pyblish.api.Instance): instance.
            start_frame (int): Start frame.
            end_frame (int): End frame.

        Returns:
            filenames(list): list of filenames

        """
        filenames = []
        for frame in range(int(start_frame), int(end_frame) + 1):
            filename = f"{instance.name}__tyPart_{frame:05}.tyc"
            filenames.append(filename)
        return filenames

    def get_export_particles_job_args(self, members, start, end,
                                      filepath, additional_attributes):
        """Sets up all job arguments for attributes.

        Those attributes are to be exported in MAX Script.

        Args:
            members (list): Member nodes of the instance.
            start (int): Start frame.
            end (int): End frame.
            filepath (str): Output path of the TyCache file.
            additional_attributes (dict): channel attributes data
                which needed to be exported

        Returns:
            list of arguments for MAX Script.

        """
        settings = {
            "exportMode": 2,
            "frameStart": start,
            "frameEnd": end,
            "tyCacheFilename": filepath.replace("\\", "/")
        }
        settings.update(additional_attributes)

        job_args = []
        for operator in self.get_operators(members):
            for key, value in settings.items():
                if isinstance(value, str):
                    # embed in quotes
                    value = f'"{value}"'

                job_args.append(f"{operator}.{key}={value}")
            job_args.append(f"{operator}.exportTyCache()")
        return job_args

    @staticmethod
    def get_operators(members):
        """Get Export Particles Operator.

        Args:
            members (list): Instance members.

        Returns:
            list of particle operators

        """
        opt_list = []
        for member in members:
            obj = member.baseobject
            # TODO: see if it can use maxscript instead
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                sub_anim = rt.GetSubAnim(obj, anim_name)
                boolean = rt.IsProperty(sub_anim, "Export_Particles")
                if boolean:
                    event_name = sub_anim.Name
                    opt = f"${member.Name}.{event_name}.export_particles"
                    opt_list.append(opt)

        return opt_list
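For reference, a sketch of how the loop above assembles the MAXScript statements for one hypothetical Export Particles operator; the operator path, frame range, and output path are invented for the example:

# Hypothetical operator path and settings, mirroring the loop above
operator = "$tyFlow001.Event001.export_particles"
settings = {
    "exportMode": 2,
    "frameStart": 1001,
    "frameEnd": 1010,
    "tyCacheFilename": "C:/temp/tycacheMain.tyc",
}

job_args = []
for key, value in settings.items():
    if isinstance(value, str):
        value = f'"{value}"'        # embed strings in quotes for MAXScript
    job_args.append(f"{operator}.{key}={value}")
job_args.append(f"{operator}.exportTyCache()")
# job_args now holds the MAXScript statements passed to rt.Execute()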
@@ -1,48 +0,0 @@
import pyblish.api

from pymxs import runtime as rt
from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder,
    PublishValidationError
)
from openpype.hosts.max.api.lib import get_frame_range, set_timeline


class ValidateAnimationTimeline(pyblish.api.InstancePlugin):
    """
    Validates Animation Timeline for Preview Animation in Max
    """

    label = "Animation Timeline for Review"
    order = ValidateContentsOrder
    families = ["review"]
    hosts = ["max"]
    actions = [RepairAction]

    def process(self, instance):
        frame_range = get_frame_range()
        frame_start_handle = frame_range["frameStart"] - int(
            frame_range["handleStart"]
        )
        frame_end_handle = frame_range["frameEnd"] + int(
            frame_range["handleEnd"]
        )
        if rt.animationRange.start != frame_start_handle or (
            rt.animationRange.end != frame_end_handle
        ):
            raise PublishValidationError("Incorrect animation timeline "
                                         "set for preview animation. "
                                         "\nYou can use the repair action to "
                                         "set the correct animation timeline")

    @classmethod
    def repair(cls, instance):
        frame_range = get_frame_range()
        frame_start_handle = frame_range["frameStart"] - int(
            frame_range["handleStart"]
        )
        frame_end_handle = frame_range["frameEnd"] + int(
            frame_range["handleEnd"]
        )
        set_timeline(frame_start_handle, frame_end_handle)
@@ -7,8 +7,10 @@ from openpype.pipeline import (
from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder,
    PublishValidationError
    PublishValidationError,
    KnownPublishError
)
from openpype.hosts.max.api.lib import get_frame_range, set_timeline


class ValidateFrameRange(pyblish.api.InstancePlugin,
@@ -27,38 +29,60 @@ class ValidateFrameRange(pyblish.api.InstancePlugin,

    label = "Validate Frame Range"
    order = ValidateContentsOrder
    families = ["maxrender"]
    families = ["camera", "maxrender",
                "pointcache", "pointcloud",
                "review", "redshiftproxy"]
    hosts = ["max"]
    optional = True
    actions = [RepairAction]

    def process(self, instance):
        if not self.is_active(instance.data):
            self.log.info("Skipping validation...")
            self.log.debug("Skipping Validate Frame Range...")
            return
        context = instance.context

        frame_start = int(context.data.get("frameStart"))
        frame_end = int(context.data.get("frameEnd"))

        inst_frame_start = int(instance.data.get("frameStart"))
        inst_frame_end = int(instance.data.get("frameEnd"))
        frame_range = get_frame_range(
            asset_doc=instance.data["assetEntity"])

        inst_frame_start = instance.data.get("frameStartHandle")
        inst_frame_end = instance.data.get("frameEndHandle")
        if inst_frame_start is None or inst_frame_end is None:
            raise KnownPublishError(
                "Missing frame start and frame end on "
                "instance to validate."
            )
        frame_start_handle = frame_range["frameStartHandle"]
        frame_end_handle = frame_range["frameEndHandle"]
        errors = []
        if frame_start != inst_frame_start:
        if frame_start_handle != inst_frame_start:
            errors.append(
                f"Start frame ({inst_frame_start}) on instance does not match "  # noqa
                f"with the start frame ({frame_start}) set on the asset data. ")  # noqa
        if frame_end != inst_frame_end:
                f"with the start frame ({frame_start_handle}) set on the asset data. ")  # noqa
        if frame_end_handle != inst_frame_end:
            errors.append(
                f"End frame ({inst_frame_end}) on instance does not match "
                f"with the end frame ({frame_start}) from the asset data. ")
                f"with the end frame ({frame_end_handle}) "
                "from the asset data. ")

        if errors:
            errors.append("You can use repair action to fix it.")
            raise PublishValidationError("\n".join(errors))
            bullet_point_errors = "\n".join(
                "- {}".format(error) for error in errors
            )
            report = (
                "Frame range settings are incorrect.\n\n"
                f"{bullet_point_errors}\n\n"
                "You can use repair action to fix it."
            )
            raise PublishValidationError(report, title="Frame Range incorrect")

    @classmethod
    def repair(cls, instance):
        rt.rendStart = instance.context.data.get("frameStart")
        rt.rendEnd = instance.context.data.get("frameEnd")
        frame_range = get_frame_range()
        frame_start_handle = frame_range["frameStartHandle"]
        frame_end_handle = frame_range["frameEndHandle"]

        if instance.data["family"] == "maxrender":
            rt.rendStart = frame_start_handle
            rt.rendEnd = frame_end_handle
        else:
            set_timeline(frame_start_handle, frame_end_handle)
@@ -1,21 +1,24 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt


class ValidateMaxContents(pyblish.api.InstancePlugin):
    """Validates Max contents.
class ValidateInstanceHasMembers(pyblish.api.InstancePlugin):
    """Validates Instance has members.

    Check if MaxScene container includes any contents underneath.
    Check if MaxScene containers include any contents underneath.
    """

    order = pyblish.api.ValidatorOrder
    families = ["camera",
                "model",
                "maxScene",
                "review"]
                "review",
                "pointcache",
                "pointcloud",
                "redshiftproxy"]
    hosts = ["max"]
    label = "Max Scene Contents"
    label = "Container Contents"

    def process(self, instance):
        if not instance.data["members"]:
@@ -14,29 +14,16 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
    def process(self, instance):
        """
        Notes:

            1. Validate the container only include tyFlow objects
            2. Validate if tyFlow operator Export Particle exists
            3. Validate if the export mode of Export Particle is at PRT format
            4. Validate the partition count and range set as default value
            1. Validate if the export mode of Export Particle is at PRT format
            2. Validate the partition count and range set as default value
                Partition Count : 100
                Partition Range : 1 to 1
            5. Validate if the custom attribute(s) exist as parameter(s)
            3. Validate if the custom attribute(s) exist as parameter(s)
                of export_particle operator

        """
        report = []

        invalid_object = self.get_tyflow_object(instance)
        if invalid_object:
            report.append(f"Non tyFlow object found: {invalid_object}")

        invalid_operator = self.get_tyflow_operator(instance)
        if invalid_operator:
            report.append((
                "tyFlow ExportParticle operator not "
                f"found: {invalid_operator}"))

        if self.validate_export_mode(instance):
            report.append("The export mode is not at PRT")

@@ -52,46 +39,6 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
        if report:
            raise PublishValidationError(f"{report}")

    def get_tyflow_object(self, instance):
        invalid = []
        container = instance.data["instance_node"]
        self.log.info(f"Validating tyFlow container for {container}")

        selection_list = instance.data["members"]
        for sel in selection_list:
            sel_tmp = str(sel)
            if rt.ClassOf(sel) in [rt.tyFlow,
                                   rt.Editable_Mesh]:
                if "tyFlow" not in sel_tmp:
                    invalid.append(sel)
            else:
                invalid.append(sel)

        return invalid

    def get_tyflow_operator(self, instance):
        invalid = []
        container = instance.data["instance_node"]
        self.log.info(f"Validating tyFlow object for {container}")
        selection_list = instance.data["members"]
        bool_list = []
        for sel in selection_list:
            obj = sel.baseobject
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                # get all the names of the related tyFlow nodes
                sub_anim = rt.GetSubAnim(obj, anim_name)
                # check if there is export particle operator
                boolean = rt.IsProperty(sub_anim, "Export_Particles")
                bool_list.append(str(boolean))
            # if the export_particles property is not there
            # it means there is not a "Export Particle" operator
            if "True" not in bool_list:
                self.log.error("Operator 'Export Particles' not found!")
                invalid.append(sel)

        return invalid

    def validate_custom_attribute(self, instance):
        invalid = []
        container = instance.data["instance_node"]

@@ -100,8 +47,8 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):

        selection_list = instance.data["members"]

        project_setting = instance.data["project_setting"]
        attr_settings = project_setting["max"]["PointCloud"]["attribute"]
        project_settings = instance.context.data["project_settings"]
        attr_settings = project_settings["max"]["PointCloud"]["attribute"]
        for sel in selection_list:
            obj = sel.baseobject
            anim_names = rt.GetSubAnimNames(obj)
@@ -21,7 +21,7 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
        if not self.is_active(instance.data):
            return
        width, height = self.get_db_resolution(instance)
        current_width = rt.renderwidth
        current_width = rt.renderWidth
        current_height = rt.renderHeight
        if current_width != width and current_height != height:
            raise PublishValidationError("Resolution Setting "
88
openpype/hosts/max/plugins/publish/validate_tyflow_data.py
Normal file

@@ -0,0 +1,88 @@
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt


class ValidateTyFlowData(pyblish.api.InstancePlugin):
"""Validate TyFlow plugins or relevant operators are set correctly."""

order = pyblish.api.ValidatorOrder
families = ["pointcloud", "tycache"]
hosts = ["max"]
label = "TyFlow Data"

def process(self, instance):
"""
Notes:
1. Validate the container only includes tyFlow objects
2. Validate if tyFlow operator Export Particle exists
"""
invalid_object = self.get_tyflow_object(instance)
if invalid_object:
self.log.error(f"Non tyFlow object found: {invalid_object}")

invalid_operator = self.get_tyflow_operator(instance)
if invalid_operator:
self.log.error(
"Operator 'Export Particles' not found in tyFlow editor.")
if invalid_object or invalid_operator:
raise PublishValidationError(
"issues occurred",
description="Container should only include tyFlow object "
"and tyflow operator 'Export Particle' should be in "
"the tyFlow editor.")

def get_tyflow_object(self, instance):
"""Get the nodes which are not tyFlow object(s)
and editable mesh(es)

Args:
instance (pyblish.api.Instance): instance

Returns:
list: invalid nodes which are not tyFlow
object(s) and editable mesh(es).
"""
container = instance.data["instance_node"]
self.log.debug(f"Validating tyFlow container for {container}")

allowed_classes = [rt.tyFlow, rt.Editable_Mesh]
return [
member for member in instance.data["members"]
if rt.ClassOf(member) not in allowed_classes
]

def get_tyflow_operator(self, instance):
"""Check if the Export Particle operators are in the node
connections.

Args:
instance (str): instance node

Returns:
invalid (list): list of invalid nodes which do not have
Export Particle operators among their node connections
"""
invalid = []
members = instance.data["members"]
for member in members:
obj = member.baseobject

# There must be at least one animation with export
# particles enabled
has_export_particles = False
anim_names = rt.GetSubAnimNames(obj)
for anim_name in anim_names:
# get name of the related tyFlow node
sub_anim = rt.GetSubAnim(obj, anim_name)
# check if there is export particle operator
if rt.IsProperty(sub_anim, "Export_Particles"):
has_export_particles = True
break

if not has_export_particles:
invalid.append(member)
return invalid
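The Export_Particles lookup above can also be exercised on its own. A minimal sketch, assuming a running 3ds Max session with pymxs available; the node name is hypothetical:

    from pymxs import runtime as rt

    def has_export_particles(node):
        # walk the sub-anims of the base object and look for the
        # "Export_Particles" property added by the Export Particle operator
        obj = node.baseobject
        for anim_name in rt.GetSubAnimNames(obj):
            sub_anim = rt.GetSubAnim(obj, anim_name)
            if rt.IsProperty(sub_anim, "Export_Particles"):
                return True
        return False

    node = rt.getNodeByName("PF_Source001")  # hypothetical node name
    print(has_export_particles(node))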
@@ -146,6 +146,10 @@ def suspended_refresh(suspend=True):

cmds.ogs(pause=True) is a toggle so we can't pass False.
"""
if IS_HEADLESS:
yield
return

original_state = cmds.ogs(query=True, pause=True)
try:
if suspend and not original_state:
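For context, a sketch of how the full context manager plausibly reads after this change; the try/finally tail is not shown in the hunk, so its exact body is an assumption:

    import contextlib
    from maya import cmds

    IS_HEADLESS = False  # stand-in for the module-level flag

    @contextlib.contextmanager
    def suspended_refresh(suspend=True):
        # cmds.ogs(pause=True) is a toggle, so only flip it when the
        # viewport is not already paused
        if IS_HEADLESS:
            yield
            return
        original_state = cmds.ogs(query=True, pause=True)
        try:
            if suspend and not original_state:
                cmds.ogs(pause=True)
            yield
        finally:
            if suspend and cmds.ogs(query=True, pause=True) != original_state:
                cmds.ogs(pause=True)  # toggle back to the original state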
@@ -95,6 +95,8 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
self.log.info("Installing callbacks ... ")
register_event_callback("init", on_init)

_set_project()

if lib.IS_HEADLESS:
self.log.info((
"Running in headless mode, skipping Maya save/open/new"

@@ -103,7 +105,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
return

_set_project()
self._register_callbacks()

menu.install(project_settings)

@@ -7,7 +7,7 @@ class PreCopyMel(PreLaunchHook):

Hook `GlobalHostDataHook` must be executed before this hook.
"""
app_groups = {"maya"}
app_groups = {"maya", "mayapy"}
launch_types = {LaunchTypes.local}

def execute(self):
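The attributes being extended here live on a standard prelaunch hook. A minimal hedged sketch of such a hook; the class and its execute body are illustrative only:

    from openpype.lib.applications import PreLaunchHook, LaunchTypes

    class ExampleHook(PreLaunchHook):
        """Runs before Maya or mayapy is launched locally."""
        app_groups = {"maya", "mayapy"}     # application groups that trigger it
        launch_types = {LaunchTypes.local}  # only local launches

        def execute(self):
            # hooks may inspect or mutate the launch context, e.g. its env
            self.launch_context.env.setdefault("EXAMPLE_FLAG", "1")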
211
openpype/hosts/maya/plugins/create/create_multishot_layout.py
Normal file

@@ -0,0 +1,211 @@
from ayon_api import (
get_folder_by_name,
get_folder_by_path,
get_folders,
)
from maya import cmds  # noqa: F401

from openpype import AYON_SERVER_ENABLED
from openpype.client import get_assets
from openpype.hosts.maya.api import plugin
from openpype.lib import BoolDef, EnumDef, TextDef
from openpype.pipeline import (
Creator,
get_current_asset_name,
get_current_project_name,
)
from openpype.pipeline.create import CreatorError


class CreateMultishotLayout(plugin.MayaCreator):
"""Create a multi-shot layout in the Maya scene.

This creator will create a Camera Sequencer in the Maya scene based on
the shots found under the specified folder. The shots will be added to
the sequencer in the order of their clipIn and clipOut values. For each
shot a Layout will be created.
"""
identifier = "io.openpype.creators.maya.multishotlayout"
label = "Multi-shot Layout"
family = "layout"
icon = "project-diagram"

def get_pre_create_attr_defs(self):
# Present artist with a list of parents of the current context
# to choose from. This will be used to get the shots under the
# selected folder to create the Camera Sequencer.
"""
Todo: `get_folder_by_name` should be switched to `get_folder_by_path`
once the fork to pure AYON is done.

Warning: this will not work for projects where the asset name
is not unique across the project until the switch mentioned
above is done.
"""
current_folder = get_folder_by_name(
project_name=get_current_project_name(),
folder_name=get_current_asset_name(),
)
current_path_parts = current_folder["path"].split("/")

# populate the list with parents of the current folder
# this will create menu items like:
# [
# {
# "value": "",
# "label": "project (shots directly under the project)"
# }, {
# "value": "shots/shot_01", "label": "shot_01 (current)"
# }, {
# "value": "shots", "label": "shots"
# }
# ]

# add the project as the first item
items_with_label = [
{
"label": f"{self.project_name} "
"(shots directly under the project)",
"value": ""
}
]

# go through the current folder path and add each part to the list,
# but mark the current folder.
for part_idx, part in enumerate(current_path_parts):
label = part
if label == current_folder["name"]:
label = f"{label} (current)"
value = "/".join(current_path_parts[:part_idx + 1])
items_with_label.append({"label": label, "value": value})

return [
EnumDef("shotParent",
default=current_folder["name"],
label="Shot Parent Folder",
items=items_with_label,
),
BoolDef("groupLoadedAssets",
label="Group Loaded Assets",
tooltip="Enable this when you want to publish a group "
"of loaded assets",
default=False),
TextDef("taskName",
label="Associated Task Name",
tooltip=("Task name to be associated "
"with the created Layout"),
default="layout"),
]

def create(self, subset_name, instance_data, pre_create_data):
shots = list(
self.get_related_shots(folder_path=pre_create_data["shotParent"])
)
if not shots:
# There are no shot folders under the specified folder.
# We are raising an error here but in the future we might
# want to create new shot folders by publishing the layouts
# and shots defined in the sequencer. Sort of editorial publish
# inside of Maya.
raise CreatorError((
"No shots found under the specified "
f"folder: {pre_create_data['shotParent']}."))

# Get layout creator
layout_creator_id = "io.openpype.creators.maya.layout"
layout_creator: Creator = self.create_context.creators.get(
layout_creator_id)
if not layout_creator:
raise CreatorError(
f"Creator {layout_creator_id} not found.")

# Get OpenPype style asset documents for the shots
op_asset_docs = get_assets(
self.project_name, [s["id"] for s in shots])
asset_docs_by_id = {doc["_id"]: doc for doc in op_asset_docs}
for shot in shots:
# we are setting shot name to be displayed in the sequencer to
# `shot name (shot label)` if the label is set, otherwise just
# `shot name`. So far, labels are used only when the name is set
# with characters that are not allowed in the shot name.
if not shot["active"]:
continue

# get task for shot
asset_doc = asset_docs_by_id[shot["id"]]

tasks = asset_doc.get("data").get("tasks").keys()
layout_task = None
if pre_create_data["taskName"] in tasks:
layout_task = pre_create_data["taskName"]

shot_name = f"{shot['name']}%s" % (
f" ({shot['label']})" if shot["label"] else "")
cmds.shot(sequenceStartTime=shot["attrib"]["clipIn"],
sequenceEndTime=shot["attrib"]["clipOut"],
shotName=shot_name)

# Create layout instance by the layout creator

instance_data = {
"asset": shot["name"],
"variant": layout_creator.get_default_variant()
}
if layout_task:
instance_data["task"] = layout_task

layout_creator.create(
subset_name=layout_creator.get_subset_name(
layout_creator.get_default_variant(),
self.create_context.get_current_task_name(),
asset_doc,
self.project_name),
instance_data=instance_data,
pre_create_data={
"groupLoadedAssets": pre_create_data["groupLoadedAssets"]
}
)

def get_related_shots(self, folder_path: str):
"""Get all shots related to the current asset.

Get all folders of type Shot under specified folder.

Args:
folder_path (str): Path of the folder.

Returns:
list: List of dicts with folder data.
"""
# if folder_path is None, project is selected as a root
# and its name is used as a parent id
parent_id = self.project_name
if folder_path:
current_folder = get_folder_by_path(
project_name=self.project_name,
folder_path=folder_path,
)
parent_id = current_folder["id"]

# get all child folders of the current one
return get_folders(
project_name=self.project_name,
parent_ids=[parent_id],
fields=[
"attrib.clipIn", "attrib.clipOut",
"attrib.frameStart", "attrib.frameEnd",
"name", "label", "path", "folderType", "id"
]
)


# remove this creator if the AYON server is not enabled
if not AYON_SERVER_ENABLED:
del CreateMultishotLayout
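To make the menu construction in get_pre_create_attr_defs concrete, a small standalone sketch with a hypothetical folder path "shots/sq01/sh010"; only the loop logic is taken from the creator:

    project_name = "demo_project"                    # hypothetical
    current_path_parts = ["shots", "sq01", "sh010"]  # hypothetical path parts
    current_name = "sh010"

    items_with_label = [{
        "label": f"{project_name} (shots directly under the project)",
        "value": ""
    }]
    for part_idx, part in enumerate(current_path_parts):
        label = f"{part} (current)" if part == current_name else part
        items_with_label.append({
            "label": label,
            "value": "/".join(current_path_parts[:part_idx + 1]),
        })
    # resulting values: "", "shots", "shots/sq01", "shots/sq01/sh010"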
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""Maya look extractor."""
import sys
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import contextlib

@@ -176,6 +177,24 @@ class MakeRSTexBin(TextureProcessor):
source
]

# if color management is enabled we pass color space information
if color_management["enabled"]:
config_path = color_management["config"]
if not os.path.exists(config_path):
raise RuntimeError("OCIO config not found at: "
"{}".format(config_path))

if not os.getenv("OCIO"):
self.log.debug(
"OCIO environment variable not set."
"Setting it with OCIO config from Maya."
)
os.environ["OCIO"] = config_path

self.log.debug("converting colorspace {0} to redshift render "
"colorspace".format(colorspace))
subprocess_args.extend(["-cs", colorspace])

hash_args = ["rstex"]
texture_hash = source_hash(source, *hash_args)
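The OCIO fallback above is straightforward to reproduce in isolation. A hedged sketch of the same pattern; the converter binary name, paths, and colorspace are hypothetical:

    import os
    import subprocess

    config_path = "/pipeline/ocio/config.ocio"  # hypothetical OCIO config
    subprocess_args = ["texture_converter", "input.exr"]  # hypothetical tool

    # only inherit the Maya config when the caller has not set one already
    if not os.getenv("OCIO"):
        os.environ["OCIO"] = config_path

    # pass the source colorspace to the converter, mirroring the extractor
    subprocess_args.extend(["-cs", "ACEScg"])
    subprocess.run(subprocess_args, check=True)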
@@ -186,11 +205,11 @@ class MakeRSTexBin(TextureProcessor):

self.log.debug(" ".join(subprocess_args))
try:
run_subprocess(subprocess_args)
run_subprocess(subprocess_args, logger=self.log)
except Exception:
self.log.error("Texture .rstexbin conversion failed",
exc_info=True)
raise
six.reraise(*sys.exc_info())

return TextureResult(
path=destination,

@@ -472,7 +491,7 @@ class ExtractLook(publish.Extractor):
"rstex": MakeRSTexBin
}.items():
if instance.data.get(key, False):
processor = Processor()
processor = Processor(log=self.log)
processor.apply_settings(context.data["system_settings"],
context.data["project_settings"])
processors.append(processor)
117
openpype/hosts/maya/plugins/publish/validate_resolution.py
Normal file

@@ -0,0 +1,117 @@
import pyblish.api
from openpype.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin
)
from maya import cmds
from openpype.pipeline.publish import RepairAction
from openpype.hosts.maya.api import lib
from openpype.hosts.maya.api.lib import reset_scene_resolution


class ValidateResolution(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validate the render resolution setting is aligned with the DB"""

order = pyblish.api.ValidatorOrder
families = ["renderlayer"]
hosts = ["maya"]
label = "Validate Resolution"
actions = [RepairAction]
optional = True

def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid_resolution(instance)
if invalid:
raise PublishValidationError(
"Render resolution is invalid. See log for details.",
description=(
"Wrong render resolution setting. "
"Please use the repair button to fix it.\n\n"
"If current renderer is V-Ray, "
"make sure vraySettings node has been created."
)
)

@classmethod
def get_invalid_resolution(cls, instance):
width, height, pixelAspect = cls.get_db_resolution(instance)
current_renderer = instance.data["renderer"]
layer = instance.data["renderlayer"]
invalid = False
if current_renderer == "vray":
vray_node = "vraySettings"
if cmds.objExists(vray_node):
current_width = lib.get_attr_in_layer(
"{}.width".format(vray_node), layer=layer)
current_height = lib.get_attr_in_layer(
"{}.height".format(vray_node), layer=layer)
current_pixelAspect = lib.get_attr_in_layer(
"{}.pixelAspect".format(vray_node), layer=layer
)
else:
cls.log.error(
"Can't detect VRay resolution because there is no node "
"named: `{}`".format(vray_node)
)
return True
else:
current_width = lib.get_attr_in_layer(
"defaultResolution.width", layer=layer)
current_height = lib.get_attr_in_layer(
"defaultResolution.height", layer=layer)
current_pixelAspect = lib.get_attr_in_layer(
"defaultResolution.pixelAspect", layer=layer
)
if current_width != width or current_height != height:
cls.log.error(
"Render resolution {}x{} does not match "
"asset resolution {}x{}".format(
current_width, current_height,
width, height
))
invalid = True
if current_pixelAspect != pixelAspect:
cls.log.error(
"Render pixel aspect {} does not match "
"asset pixel aspect {}".format(
current_pixelAspect, pixelAspect
))
invalid = True
return invalid

@classmethod
def get_db_resolution(cls, instance):
asset_doc = instance.data["assetEntity"]
project_doc = instance.context.data["projectEntity"]
for data in [asset_doc["data"], project_doc["data"]]:
if (
"resolutionWidth" in data and
"resolutionHeight" in data and
"pixelAspect" in data
):
width = data["resolutionWidth"]
height = data["resolutionHeight"]
pixelAspect = data["pixelAspect"]
return int(width), int(height), float(pixelAspect)

# Defaults if not found in asset document or project document
return 1920, 1080, 1.0

@classmethod
def repair(cls, instance):
# Usually without renderlayer overrides the renderlayers
# all share the same resolution value - so fixing the first
# will have fixed all the others too. It's much faster to
# check whether it's invalid first instead of switching
# into all layers individually
if not cls.get_invalid_resolution(instance):
cls.log.debug(
"Nothing to repair on instance: {}".format(instance)
)
return
layer_node = instance.data['setMembers']
with lib.renderlayer(layer_node):
reset_scene_resolution()
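A condensed sketch of the default-renderer branch of the check, runnable in a Maya session; the layer name and expected values are illustrative, and lib.get_attr_in_layer is the helper used above:

    from openpype.hosts.maya.api import lib

    layer = "rs_beauty"          # illustrative render layer name
    width, height = 1920, 1080   # expected values from the asset/project doc

    current_width = lib.get_attr_in_layer(
        "defaultResolution.width", layer=layer)
    current_height = lib.get_attr_in_layer(
        "defaultResolution.height", layer=layer)
    if (current_width, current_height) != (width, height):
        print("Resolution mismatch: {}x{} != {}x{}".format(
            current_width, current_height, width, height))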
@@ -50,6 +50,11 @@ from .utils import (
get_colorspace_list
)

from .actions import (
SelectInvalidAction,
SelectInstanceNodeAction
)

__all__ = (
"file_extensions",
"has_unsaved_changes",

@@ -92,5 +97,8 @@ __all__ = (
"create_write_node",

"colorspace_exists_on_node",
"get_colorspace_list"
"get_colorspace_list",

"SelectInvalidAction",
"SelectInstanceNodeAction"
)
@@ -20,33 +20,58 @@ class SelectInvalidAction(pyblish.api.Action):

def process(self, context, plugin):

try:
import nuke
except ImportError:
raise ImportError("Current host is not Nuke")

errored_instances = get_errored_instances_from_context(context,
plugin=plugin)

# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
invalid = set()
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)

if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.append(invalid_nodes[0])
invalid.update(invalid_nodes)
else:
self.log.warning("Plug-in returned to be invalid, "
"but has no selectable nodes.")

# Ensure unique (process each node only once)
invalid = list(set(invalid))

if invalid:
self.log.info("Selecting invalid nodes: {}".format(invalid))
reset_selection()
select_nodes(invalid)
else:
self.log.info("No invalid nodes found.")


class SelectInstanceNodeAction(pyblish.api.Action):
"""Select instance node for failed plugin."""
label = "Select instance node"
on = "failed"  # This action is only available on a failed plug-in
icon = "mdi.cursor-default-click"

def process(self, context, plugin):

# Get the errored instances for the plug-in
errored_instances = get_errored_instances_from_context(
context, plugin)

# Get the invalid nodes for the plug-ins
self.log.info("Finding instance nodes..")
nodes = set()
for instance in errored_instances:
instance_node = instance.data.get("transientData", {}).get("node")
if not instance_node:
raise RuntimeError(
"No transientData['node'] found on instance: {}".format(
instance
)
)
nodes.add(instance_node)

if nodes:
self.log.info("Selecting instance nodes: {}".format(nodes))
reset_selection()
select_nodes(nodes)
else:
self.log.info("No instance nodes found.")
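Both actions assume validators follow the usual pyblish convention of exposing a get_invalid classmethod. A minimal hedged sketch of that contract; the plugin and node names are illustrative:

    import pyblish.api

    class ValidateSomething(pyblish.api.InstancePlugin):
        """Illustrative validator exposing the get_invalid contract."""
        order = pyblish.api.ValidatorOrder
        label = "Validate Something"

        @classmethod
        def get_invalid(cls, instance):
            # return the nodes a select-invalid action should highlight
            return [n for n in instance if str(n).endswith("_BAD")]

        def process(self, instance):
            invalid = self.get_invalid(instance)
            if invalid:
                raise RuntimeError("Invalid nodes: {}".format(invalid))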
@@ -48,20 +48,15 @@ from openpype.pipeline import (
get_current_asset_name,
)
from openpype.pipeline.context_tools import (
get_current_project_asset,
get_custom_workfile_template_from_session
)
from openpype.pipeline.colorspace import (
get_imageio_config
)
from openpype.pipeline.colorspace import get_imageio_config
from openpype.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST

from .workio import (
save_file,
open_file
)
from .workio import save_file
from .utils import get_node_outputs

log = Logger.get_logger(__name__)

@@ -2222,7 +2217,6 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
"""
# replace path with env var if possible
ocio_path = self._replace_ocio_path_with_env_var(config_data)
ocio_path = ocio_path.replace("\\", "/")

log.info("Setting OCIO config path to: `{}`".format(
ocio_path))
@@ -2802,16 +2796,28 @@ def find_free_space_to_paste_nodes(

@contextlib.contextmanager
def maintained_selection():
def maintained_selection(exclude_nodes=None):
"""Maintain selection during context

Maintain selection during context and unselect
all nodes after context is done.

Arguments:
exclude_nodes (list[nuke.Node]): list of nodes to be unselected
before context is done

Example:
>>> with maintained_selection():
...     node["selected"].setValue(True)
>>> print(node["selected"].value())
False
"""
if exclude_nodes:
for node in exclude_nodes:
node["selected"].setValue(False)

previous_selection = nuke.selectedNodes()

try:
yield
finally:

@@ -2823,6 +2829,51 @@ def maintained_selection():
select_nodes(previous_selection)


@contextlib.contextmanager
def swap_node_with_dependency(old_node, new_node):
""" Swap node with dependency

Swap node with dependency and reconnect all inputs and outputs.
It removes the old node.

Arguments:
old_node (nuke.Node): node to be replaced
new_node (nuke.Node): node to replace with

Example:
>>> old_node_name = old_node["name"].value()
>>> print(old_node_name)
old_node_name_01
>>> with swap_node_with_dependency(old_node, new_node) as node_name:
...     new_node["name"].setValue(node_name)
>>> print(new_node["name"].value())
old_node_name_01
"""
# preserve position
xpos, ypos = old_node.xpos(), old_node.ypos()
# preserve selection after all is done
outputs = get_node_outputs(old_node)
inputs = old_node.dependencies()
node_name = old_node["name"].value()

try:
nuke.delete(old_node)

yield node_name
finally:

# Reconnect inputs
for i, node in enumerate(inputs):
new_node.setInput(i, node)
# Reconnect outputs
if outputs:
for n, pipes in outputs.items():
for i in pipes:
n.setInput(i, new_node)
# return to original position
new_node.setXYpos(xpos, ypos)


def reset_selection():
"""Deselect all selected nodes"""
for node in nuke.selectedNodes():
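A hedged sketch of how the two helpers combine during a container update, mirroring the gizmo loaders later in this diff; node names and the script path are illustrative:

    import nuke

    old_node = nuke.toNode("GizmoGroup_01")  # illustrative container node

    with maintained_selection(exclude_nodes=[old_node]):
        nuke.nodePaste("/path/to/new_setup.nk")  # illustrative path
        new_node = nuke.selectedNode()
        # the swap keeps inputs/outputs wired and hands back the old name
        with swap_node_with_dependency(old_node, new_node) as node_name:
            new_node["name"].setValue(node_name)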
@@ -2833,9 +2884,10 @@ def select_nodes(nodes):
"""Selects all inputted nodes

Arguments:
nodes (list): nuke nodes to be selected
nodes (Union[list, tuple, set]): nuke nodes to be selected
"""
assert isinstance(nodes, (list, tuple)), "nodes has to be list or tuple"
assert isinstance(nodes, (list, tuple, set)), \
"nodes has to be list, tuple or set"

for node in nodes:
node["selected"].setValue(True)

@@ -2919,13 +2971,13 @@ def process_workfile_builder():
"workfile_builder", {})

# get settings
createfv_on = workfile_builder.get("create_first_version") or None
create_fv_on = workfile_builder.get("create_first_version") or None
builder_on = workfile_builder.get("builder_on_start") or None

last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")

# generate first version if the file does not exist and the feature is enabled
if createfv_on and not os.path.exists(last_workfile_path):
if create_fv_on and not os.path.exists(last_workfile_path):
# get custom template path if any
custom_template_path = get_custom_workfile_template_from_session(
project_settings=project_settings
@@ -17,7 +17,7 @@ class SetFrameRangeLoader(load.LoaderPlugin):
"yeticache",
"pointcache"]
representations = ["*"]
extension = {"*"}
extensions = {"*"}

label = "Set frame range"
order = 11
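This and the following hunks rename the misspelled `extension` class attribute to `extensions` across the Nuke loaders. A minimal hedged sketch of the corrected attribute set on a loader; the class itself is illustrative:

    from openpype.pipeline import load

    class ExampleLoader(load.LoaderPlugin):
        """Illustrative loader showing the corrected attribute name."""
        families = ["camera"]
        representations = ["*"]
        extensions = {"abc"}  # a set of file extensions, not `extension`
        label = "Example Loader"
        order = 0

        def load(self, context, name, namespace, data):
            # real loaders build nodes here; omitted in this sketch
            pass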
@@ -27,7 +27,7 @@ class LoadBackdropNodes(load.LoaderPlugin):

families = ["workfile", "nukenodes"]
representations = ["*"]
extension = {"nk"}
extensions = {"nk"}

label = "Import Nuke Nodes"
order = 0

@@ -26,7 +26,7 @@ class AlembicCameraLoader(load.LoaderPlugin):

families = ["camera"]
representations = ["*"]
extension = {"abc"}
extensions = {"abc"}

label = "Load Alembic Camera"
icon = "camera"

@@ -24,7 +24,7 @@ class LoadEffects(load.LoaderPlugin):

families = ["effect"]
representations = ["*"]
extension = {"json"}
extensions = {"json"}

label = "Load Effects - nodes"
order = 0

@@ -25,7 +25,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin):

families = ["effect"]
representations = ["*"]
extension = {"json"}
extensions = {"json"}

label = "Load Effects - Input Process"
order = 0

@@ -12,7 +12,8 @@ from openpype.pipeline import (
from openpype.hosts.nuke.api.lib import (
maintained_selection,
get_avalon_knob_data,
set_avalon_knob_data
set_avalon_knob_data,
swap_node_with_dependency,
)
from openpype.hosts.nuke.api import (
containerise,
@@ -26,7 +27,7 @@ class LoadGizmo(load.LoaderPlugin):

families = ["gizmo"]
representations = ["*"]
extension = {"gizmo"}
extensions = {"nk"}

label = "Load Gizmo"
order = 0

@@ -45,7 +46,7 @@ class LoadGizmo(load.LoaderPlugin):
data (dict): compulsory attribute > not used

Returns:
nuke node: containerised nuke node object
nuke node: containerized nuke node object
"""

# get main variables

@@ -83,12 +84,12 @@ class LoadGizmo(load.LoaderPlugin):
# add group from nk
nuke.nodePaste(file)

GN = nuke.selectedNode()
group_node = nuke.selectedNode()

GN["name"].setValue(object_name)
group_node["name"].setValue(object_name)

return containerise(
node=GN,
node=group_node,
name=name,
namespace=namespace,
context=context,

@@ -110,7 +111,7 @@ class LoadGizmo(load.LoaderPlugin):
version_doc = get_version_by_id(project_name, representation["parent"])

# get corresponding node
GN = nuke.toNode(container['objectName'])
group_node = nuke.toNode(container['objectName'])

file = get_representation_path(representation).replace("\\", "/")
name = container['name']

@@ -135,22 +136,24 @@ class LoadGizmo(load.LoaderPlugin):
for k in add_keys:
data_imprint.update({k: version_data[k]})

# capture pipeline metadata
avalon_data = get_avalon_knob_data(group_node)

# adding nodes to node graph
# just in case we are in group let's jump out of it
nuke.endGroup()

with maintained_selection():
xpos = GN.xpos()
ypos = GN.ypos()
avalon_data = get_avalon_knob_data(GN)
nuke.delete(GN)
# add group from nk
with maintained_selection([group_node]):
# insert nuke script to the script
nuke.nodePaste(file)

GN = nuke.selectedNode()
set_avalon_knob_data(GN, avalon_data)
GN.setXYpos(xpos, ypos)
GN["name"].setValue(object_name)
# convert imported to selected node
new_group_node = nuke.selectedNode()
# swap nodes with maintained connections
with swap_node_with_dependency(
group_node, new_group_node) as node_name:
new_group_node["name"].setValue(node_name)
# set updated pipeline metadata
set_avalon_knob_data(new_group_node, avalon_data)

last_version_doc = get_last_version_by_subset_id(
project_name, version_doc["parent"], fields=["_id"]

@@ -161,11 +164,12 @@ class LoadGizmo(load.LoaderPlugin):
color_value = self.node_color
else:
color_value = "0xd88467ff"
GN["tile_color"].setValue(int(color_value, 16))

new_group_node["tile_color"].setValue(int(color_value, 16))

self.log.info("updated to version: {}".format(version_doc.get("name")))

return update_container(GN, data_imprint)
return update_container(new_group_node, data_imprint)

def switch(self, container, representation):
self.update(container, representation)
@@ -14,7 +14,8 @@ from openpype.hosts.nuke.api.lib import (
maintained_selection,
create_backdrop,
get_avalon_knob_data,
set_avalon_knob_data
set_avalon_knob_data,
swap_node_with_dependency,
)
from openpype.hosts.nuke.api import (
containerise,

@@ -28,7 +29,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin):

families = ["gizmo"]
representations = ["*"]
extension = {"gizmo"}
extensions = {"nk"}

label = "Load Gizmo - Input Process"
order = 0

@@ -47,7 +48,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
data (dict): compulsory attribute > not used

Returns:
nuke node: containerised nuke node object
nuke node: containerized nuke node object
"""

# get main variables

@@ -85,17 +86,17 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
# add group from nk
nuke.nodePaste(file)

GN = nuke.selectedNode()
group_node = nuke.selectedNode()

GN["name"].setValue(object_name)
group_node["name"].setValue(object_name)

# try to place it under Viewer1
if not self.connect_active_viewer(GN):
nuke.delete(GN)
if not self.connect_active_viewer(group_node):
nuke.delete(group_node)
return

return containerise(
node=GN,
node=group_node,
name=name,
namespace=namespace,
context=context,

@@ -117,7 +118,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
version_doc = get_version_by_id(project_name, representation["parent"])

# get corresponding node
GN = nuke.toNode(container['objectName'])
group_node = nuke.toNode(container['objectName'])

file = get_representation_path(representation).replace("\\", "/")
name = container['name']

@@ -142,22 +143,24 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
for k in add_keys:
data_imprint.update({k: version_data[k]})

# capture pipeline metadata
avalon_data = get_avalon_knob_data(group_node)

# adding nodes to node graph
# just in case we are in group let's jump out of it
nuke.endGroup()

with maintained_selection():
xpos = GN.xpos()
ypos = GN.ypos()
avalon_data = get_avalon_knob_data(GN)
nuke.delete(GN)
# add group from nk
with maintained_selection([group_node]):
# insert nuke script to the script
nuke.nodePaste(file)

GN = nuke.selectedNode()
set_avalon_knob_data(GN, avalon_data)
GN.setXYpos(xpos, ypos)
GN["name"].setValue(object_name)
# convert imported to selected node
new_group_node = nuke.selectedNode()
# swap nodes with maintained connections
with swap_node_with_dependency(
group_node, new_group_node) as node_name:
new_group_node["name"].setValue(node_name)
# set updated pipeline metadata
set_avalon_knob_data(new_group_node, avalon_data)

last_version_doc = get_last_version_by_subset_id(
project_name, version_doc["parent"], fields=["_id"]

@@ -168,11 +171,11 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
color_value = self.node_color
else:
color_value = "0xd88467ff"
GN["tile_color"].setValue(int(color_value, 16))
new_group_node["tile_color"].setValue(int(color_value, 16))

self.log.info("updated to version: {}".format(version_doc.get("name")))

return update_container(GN, data_imprint)
return update_container(new_group_node, data_imprint)

def connect_active_viewer(self, group_node):
"""
@@ -204,8 +204,6 @@ class LoadImage(load.LoaderPlugin):
last = first = int(frame_number)

# Set the global in to the start frame of the sequence
read_name = self._get_node_name(representation)
node["name"].setValue(read_name)
node["file"].setValue(file)
node["origfirst"].setValue(first)
node["first"].setValue(first)

@@ -9,7 +9,7 @@ class MatchmoveLoader(load.LoaderPlugin):

families = ["matchmove"]
representations = ["*"]
extension = {"py"}
extensions = {"py"}

defaults = ["Camera", "Object"]

@@ -24,7 +24,7 @@ class AlembicModelLoader(load.LoaderPlugin):

families = ["model", "pointcache", "animation"]
representations = ["*"]
extension = {"abc"}
extensions = {"abc"}

label = "Load Alembic"
icon = "cube"
350
openpype/hosts/nuke/plugins/load/load_ociolook.py
Normal file

@@ -0,0 +1,350 @@
import os
import json
import secrets
import nuke
import six

from openpype.client import (
get_version_by_id,
get_last_version_by_subset_id
)
from openpype.pipeline import (
load,
get_current_project_name,
get_representation_path,
)
from openpype.hosts.nuke.api import (
containerise,
viewer_update_and_undo_stop,
update_container,
)


class LoadOcioLookNodes(load.LoaderPlugin):
"""Loading Ocio look to the nuke.Node graph"""

families = ["ociolook"]
representations = ["*"]
extensions = {"json"}

label = "Load OcioLook [nodes]"
order = 0
icon = "cc"
color = "white"
ignore_attr = ["useLifetime"]

# plugin attributes
current_node_color = "0x4ecd91ff"
old_node_color = "0xd88467ff"

# json file variables
schema_version = 1

def load(self, context, name, namespace, data):
"""
Loading function to get the soft effects to particular read node

Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used

Returns:
nuke.Node: containerized nuke.Node object
"""
namespace = namespace or context['asset']['name']
suffix = secrets.token_hex(nbytes=4)
object_name = "{}_{}_{}".format(
name, namespace, suffix)

# getting file path
filepath = self.filepath_from_context(context)

json_f = self._load_json_data(filepath)

group_node = self._create_group_node(
object_name, filepath, json_f["data"])

self._node_version_color(context["version"], group_node)

self.log.info(
"Loaded lut setup: `{}`".format(group_node["name"].value()))

return containerise(
node=group_node,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data={
"objectName": object_name,
}
)

def _create_group_node(
self,
object_name,
filepath,
data
):
"""Creates group node with all the nodes inside.

Creating mainly `OCIOFileTransform` nodes with `OCIOColorSpace` nodes
in between - in case those are needed.

Arguments:
object_name (str): name of the group node
filepath (str): path to json file
data (dict): data from json file

Returns:
nuke.Node: group node with all the nodes inside
"""
# get corresponding node

root_working_colorspace = nuke.root()["workingSpaceLUT"].value()

dir_path = os.path.dirname(filepath)
all_files = os.listdir(dir_path)

ocio_working_colorspace = _colorspace_name_by_type(
data["ocioLookWorkingSpace"])

# adding nodes to node graph
# just in case we are in group let's jump out of it
nuke.endGroup()

input_node = None
output_node = None
group_node = nuke.toNode(object_name)
if group_node:
# remove all nodes between Input and Output nodes
for node in group_node.nodes():
if node.Class() not in ["Input", "Output"]:
nuke.delete(node)
elif node.Class() == "Input":
input_node = node
elif node.Class() == "Output":
output_node = node
else:
group_node = nuke.createNode(
"Group",
"name {}_1".format(object_name),
inpanel=False
)

# adding content to the group node
with group_node:
pre_colorspace = root_working_colorspace

# reusing input node if it exists during update
if input_node:
pre_node = input_node
else:
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")

# Compare script working colorspace with ocio working colorspace
# found in json file and convert to json's if needed
if pre_colorspace != ocio_working_colorspace:
pre_node = _add_ocio_colorspace_node(
pre_node,
pre_colorspace,
ocio_working_colorspace
)
pre_colorspace = ocio_working_colorspace

for ocio_item in data["ocioLookItems"]:
input_space = _colorspace_name_by_type(
ocio_item["input_colorspace"])
output_space = _colorspace_name_by_type(
ocio_item["output_colorspace"])

# making sure we are set to correct colorspace for ocio item
if pre_colorspace != input_space:
pre_node = _add_ocio_colorspace_node(
pre_node,
pre_colorspace,
input_space
)

node = nuke.createNode("OCIOFileTransform")

# file path from lut representation
extension = ocio_item["ext"]
item_name = ocio_item["name"]

item_lut_file = next(
(
file for file in all_files
if file.endswith(extension)
),
None
)
if not item_lut_file:
raise ValueError(
"File with extension '{}' not "
"found in directory".format(extension)
)

item_lut_path = os.path.join(
dir_path, item_lut_file).replace("\\", "/")
node["file"].setValue(item_lut_path)
node["name"].setValue(item_name)
node["direction"].setValue(ocio_item["direction"])
node["interpolation"].setValue(ocio_item["interpolation"])
node["working_space"].setValue(input_space)

pre_node.autoplace()
node.setInput(0, pre_node)
node.autoplace()
# pass output space into pre_colorspace for next iteration
# or for output node comparison
pre_colorspace = output_space
pre_node = node

# making sure we are back in script working colorspace
if pre_colorspace != root_working_colorspace:
pre_node = _add_ocio_colorspace_node(
pre_node,
pre_colorspace,
root_working_colorspace
)

# reusing output node if it exists during update
if not output_node:
output = nuke.createNode("Output")
else:
output = output_node

output.setInput(0, pre_node)

return group_node

def update(self, container, representation):

project_name = get_current_project_name()
version_doc = get_version_by_id(project_name, representation["parent"])

object_name = container['objectName']

filepath = get_representation_path(representation)

json_f = self._load_json_data(filepath)

group_node = self._create_group_node(
object_name,
filepath,
json_f["data"]
)

self._node_version_color(version_doc, group_node)

self.log.info("Updated lut setup: `{}`".format(
group_node["name"].value()))

return update_container(
group_node, {"representation": str(representation["_id"])})

def _load_json_data(self, filepath):
# getting data from json file with unicode conversion
with open(filepath, "r") as _file:
json_f = {self._bytify(key): self._bytify(value)
for key, value in json.load(_file).items()}

# check if the version in json_f is the same as plugin version
if json_f["version"] != self.schema_version:
raise KeyError(
"Version of json file is not the same as plugin version")

return json_f

def _bytify(self, input):
"""
Converts unicode strings to strings
It goes through the whole dictionary

Arguments:
input (dict/str): input

Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self._bytify(key): self._bytify(value)
for key, value in input.items()}
elif isinstance(input, list):
return [self._bytify(element) for element in input]
elif isinstance(input, six.text_type):
return str(input)
else:
return input

def switch(self, container, representation):
self.update(container, representation)

def remove(self, container):
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)

def _node_version_color(self, version, node):
"""Color a node according to whether its version is the latest"""

project_name = get_current_project_name()
last_version_doc = get_last_version_by_subset_id(
project_name, version["parent"], fields=["_id"]
)

# change color of node
if version["_id"] == last_version_doc["_id"]:
color_value = self.current_node_color
else:
color_value = self.old_node_color
node["tile_color"].setValue(int(color_value, 16))


def _colorspace_name_by_type(colorspace_data):
"""
Returns colorspace name by type

Arguments:
colorspace_data (dict): colorspace data

Returns:
str: colorspace name
"""
if colorspace_data["type"] == "colorspaces":
return colorspace_data["name"]
elif colorspace_data["type"] == "roles":
return colorspace_data["colorspace"]
else:
raise KeyError("Unknown colorspace type: {}".format(
colorspace_data["type"]))


def _add_ocio_colorspace_node(pre_node, input_space, output_space):
"""
Adds OCIOColorSpace node to the node graph

Arguments:
pre_node (nuke.Node): node to connect to
input_space (str): input colorspace
output_space (str): output colorspace

Returns:
nuke.Node: node with OCIOColorSpace node
"""
node = nuke.createNode("OCIOColorSpace")
node.setInput(0, pre_node)
node["in_colorspace"].setValue(input_space)
node["out_colorspace"].setValue(output_space)

pre_node.autoplace()
node.setInput(0, pre_node)
node.autoplace()

return node
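From the reads above one can infer the rough shape of the JSON this loader consumes. A hedged reconstruction; only the keys are taken from the code, all values are invented:

    example_payload = {
        "version": 1,  # must match LoadOcioLookNodes.schema_version
        "data": {
            "ocioLookWorkingSpace": {
                "type": "colorspaces", "name": "ACEScg"},
            "ocioLookItems": [
                {
                    "name": "grade_lut",   # invented
                    "ext": ".cube",        # lut file extension
                    "direction": 0,        # forward / inverse
                    "interpolation": "linear",
                    "input_colorspace": {
                        "type": "colorspaces", "name": "ACEScg"},
                    "output_colorspace": {
                        "type": "roles", "colorspace": "color_picking"},
                }
            ],
        },
    }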
@@ -22,7 +22,7 @@ class LinkAsGroup(load.LoaderPlugin):

families = ["workfile", "nukenodes"]
representations = ["*"]
extension = {"nk"}
extensions = {"nk"}

label = "Load Precomp"
order = 0

@@ -57,4 +57,4 @@ class CollectBackdrops(pyblish.api.InstancePlugin):
if version:
instance.data['version'] = version

self.log.info("Backdrop instance collected: `{}`".format(instance))
self.log.debug("Backdrop instance collected: `{}`".format(instance))

@@ -64,4 +64,4 @@ class CollectContextData(pyblish.api.ContextPlugin):
context.data["scriptData"] = script_data
context.data.update(script_data)

self.log.info('Context from Nuke script collected')
self.log.debug('Context from Nuke script collected')

@@ -43,4 +43,4 @@ class CollectGizmo(pyblish.api.InstancePlugin):
"frameStart": first_frame,
"frameEnd": last_frame
})
self.log.info("Gizmo instance collected: `{}`".format(instance))
self.log.debug("Gizmo instance collected: `{}`".format(instance))

@@ -43,4 +43,4 @@ class CollectModel(pyblish.api.InstancePlugin):
"frameStart": first_frame,
"frameEnd": last_frame
})
self.log.info("Model instance collected: `{}`".format(instance))
self.log.debug("Model instance collected: `{}`".format(instance))

@@ -39,7 +39,7 @@ class CollectSlate(pyblish.api.InstancePlugin):
instance.data["slateNode"] = slate_node
instance.data["slate"] = True
instance.data["families"].append("slate")
self.log.info(
self.log.debug(
"Slate node is in node graph: `{}`".format(slate.name()))
self.log.debug(
"__ instance.data: `{}`".format(instance.data))

@@ -37,4 +37,6 @@ class CollectWorkfile(pyblish.api.InstancePlugin):
# adding basic script data
instance.data.update(script_data)

self.log.info("Collect script version")
self.log.debug(
"Collected current script version: {}".format(current_file)
)

@@ -56,8 +56,6 @@ class ExtractBackdropNode(publish.Extractor):
# connect output node
for n, output in connections_out.items():
opn = nuke.createNode("Output")
self.log.info(n.name())
self.log.info(output.name())
output.setInput(
next((i for i, d in enumerate(output.dependencies())
if d.name() in n.name()), 0), opn)

@@ -102,5 +100,5 @@ class ExtractBackdropNode(publish.Extractor):
}
instance.data["representations"].append(representation)

self.log.info("Extracted instance '{}' to: {}".format(
self.log.debug("Extracted instance '{}' to: {}".format(
instance.name, path))
@ -36,11 +36,8 @@ class ExtractCamera(publish.Extractor):
|
|||
step = 1
|
||||
output_range = str(nuke.FrameRange(first_frame, last_frame, step))
|
||||
|
||||
self.log.info("instance.data: `{}`".format(
|
||||
pformat(instance.data)))
|
||||
|
||||
rm_nodes = []
|
||||
self.log.info("Crating additional nodes")
|
||||
self.log.debug("Creating additional nodes for 3D Camera Extractor")
|
||||
subset = instance.data["subset"]
|
||||
staging_dir = self.staging_dir(instance)
|
||||
|
||||
|
|
@ -84,8 +81,6 @@ class ExtractCamera(publish.Extractor):
|
|||
for n in rm_nodes:
|
||||
nuke.delete(n)
|
||||
|
||||
self.log.info(file_path)
|
||||
|
||||
# create representation data
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
|
@ -112,7 +107,7 @@ class ExtractCamera(publish.Extractor):
|
|||
"frameEndHandle": last_frame,
|
||||
})
|
||||
|
||||
self.log.info("Extracted instance '{0}' to: {1}".format(
|
||||
self.log.debug("Extracted instance '{0}' to: {1}".format(
|
||||
instance.name, file_path))
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -85,8 +85,5 @@ class ExtractGizmo(publish.Extractor):
|
|||
}
|
||||
instance.data["representations"].append(representation)
|
||||
|
||||
self.log.info("Extracted instance '{}' to: {}".format(
|
||||
self.log.debug("Extracted instance '{}' to: {}".format(
|
||||
instance.name, path))
|
||||
|
||||
self.log.info("Data {}".format(
|
||||
instance.data))
|
||||
|
|
|
|||
|
|
@ -33,13 +33,13 @@ class ExtractModel(publish.Extractor):
|
|||
first_frame = int(nuke.root()["first_frame"].getValue())
|
||||
last_frame = int(nuke.root()["last_frame"].getValue())
|
||||
|
||||
self.log.info("instance.data: `{}`".format(
|
||||
self.log.debug("instance.data: `{}`".format(
|
||||
pformat(instance.data)))
|
||||
|
||||
rm_nodes = []
|
||||
model_node = instance.data["transientData"]["node"]
|
||||
|
||||
self.log.info("Crating additional nodes")
|
||||
self.log.debug("Creating additional nodes for Extract Model")
|
||||
subset = instance.data["subset"]
|
||||
staging_dir = self.staging_dir(instance)
|
||||
|
||||
|
|
@ -76,7 +76,7 @@ class ExtractModel(publish.Extractor):
|
|||
for n in rm_nodes:
|
||||
nuke.delete(n)
|
||||
|
||||
self.log.info(file_path)
|
||||
self.log.debug("Filepath: {}".format(file_path))
|
||||
|
||||
# create representation data
|
||||
if "representations" not in instance.data:
|
||||
|
|
@ -104,5 +104,5 @@ class ExtractModel(publish.Extractor):
|
|||
"frameEndHandle": last_frame,
|
||||
})
|
||||
|
||||
self.log.info("Extracted instance '{0}' to: {1}".format(
|
||||
self.log.debug("Extracted instance '{0}' to: {1}".format(
|
||||
instance.name, file_path))
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ class CreateOutputNode(pyblish.api.ContextPlugin):
|
|||
|
||||
if active_node:
|
||||
active_node = active_node.pop()
|
||||
self.log.info(active_node)
|
||||
self.log.debug("Active node: {}".format(active_node))
|
||||
active_node['selected'].setValue(True)
|
||||
|
||||
# select only instance render node
|
||||
|
|
|
|||
|
|
@ -119,7 +119,7 @@ class NukeRenderLocal(publish.Extractor,
|
|||
|
||||
instance.data["representations"].append(repre)
|
||||
|
||||
self.log.info("Extracted instance '{0}' to: {1}".format(
|
||||
self.log.debug("Extracted instance '{0}' to: {1}".format(
|
||||
instance.name,
|
||||
out_dir
|
||||
))
|
||||
|
|
@ -143,7 +143,7 @@ class NukeRenderLocal(publish.Extractor,
|
|||
instance.data["families"] = families
|
||||
|
||||
collections, remainder = clique.assemble(filenames)
|
||||
self.log.info('collections: {}'.format(str(collections)))
|
||||
self.log.debug('collections: {}'.format(str(collections)))
|
||||
|
||||
if collections:
|
||||
collection = collections[0]
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ class ExtractReviewDataLut(publish.Extractor):
|
|||
hosts = ["nuke"]
|
||||
|
||||
def process(self, instance):
|
||||
self.log.info("Creating staging dir...")
|
||||
self.log.debug("Creating staging dir...")
|
||||
if "representations" in instance.data:
|
||||
staging_dir = instance.data[
|
||||
"representations"][0]["stagingDir"].replace("\\", "/")
|
||||
|
|
@ -33,7 +33,7 @@ class ExtractReviewDataLut(publish.Extractor):
|
|||
staging_dir = os.path.normpath(os.path.dirname(render_path))
|
||||
instance.data["stagingDir"] = staging_dir
|
||||
|
||||
self.log.info(
|
||||
self.log.debug(
|
||||
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
|
||||
|
||||
# generate data
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ class ExtractReviewIntermediates(publish.Extractor):
|
|||
|
||||
task_type = instance.context.data["taskType"]
|
||||
subset = instance.data["subset"]
|
||||
self.log.info("Creating staging dir...")
|
||||
self.log.debug("Creating staging dir...")
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
|
@ -62,10 +62,10 @@ class ExtractReviewIntermediates(publish.Extractor):
|
|||
|
||||
instance.data["stagingDir"] = staging_dir
|
||||
|
||||
self.log.info(
|
||||
self.log.debug(
|
||||
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
|
||||
|
||||
self.log.info(self.outputs)
|
||||
self.log.debug("Outputs: {}".format(self.outputs))
|
||||
|
||||
# generate data
|
||||
with maintained_selection():
|
||||
|
|
@ -104,9 +104,10 @@ class ExtractReviewIntermediates(publish.Extractor):
|
|||
re.search(s, subset) for s in f_subsets):
|
||||
continue
|
||||
|
||||
self.log.info(
|
||||
self.log.debug(
|
||||
"Baking output `{}` with settings: {}".format(
|
||||
o_name, o_data))
|
||||
o_name, o_data)
|
||||
)
|
||||
|
||||
# check if settings have more then one preset
|
||||
# so we dont need to add outputName to representation
|
||||
|
|
@ -155,10 +156,10 @@ class ExtractReviewIntermediates(publish.Extractor):
|
|||
instance.data["useSequenceForReview"] = False
|
||||
else:
|
||||
instance.data["families"].remove("review")
|
||||
self.log.info((
|
||||
self.log.debug(
|
||||
"Removing `review` from families. "
|
||||
"Not available baking profile."
|
||||
))
|
||||
)
|
||||
self.log.debug(instance.data["families"])
|
||||
|
||||
self.log.debug(
|
||||
|
|
|
|||
|
|
@ -3,13 +3,12 @@ import pyblish.api
|
|||
|
||||
|
||||
class ExtractScriptSave(pyblish.api.Extractor):
|
||||
"""
|
||||
"""
|
||||
"""Save current Nuke workfile script"""
|
||||
label = 'Script Save'
|
||||
order = pyblish.api.Extractor.order - 0.1
|
||||
hosts = ['nuke']
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
self.log.info('saving script')
|
||||
self.log.debug('Saving current script')
|
||||
nuke.scriptSave()
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ class ExtractSlateFrame(publish.Extractor):
|
|||
|
||||
if instance.data.get("bakePresets"):
|
||||
for o_name, o_data in instance.data["bakePresets"].items():
|
||||
self.log.info("_ o_name: {}, o_data: {}".format(
|
||||
self.log.debug("_ o_name: {}, o_data: {}".format(
|
||||
o_name, pformat(o_data)))
|
||||
self.render_slate(
|
||||
instance,
|
||||
|
|
@ -65,14 +65,14 @@ class ExtractSlateFrame(publish.Extractor):
|
|||
|
||||
def _create_staging_dir(self, instance):
|
||||
|
||||
self.log.info("Creating staging dir...")
|
||||
self.log.debug("Creating staging dir...")
|
||||
|
||||
staging_dir = os.path.normpath(
|
||||
os.path.dirname(instance.data["path"]))
|
||||
|
||||
instance.data["stagingDir"] = staging_dir
|
||||
|
||||
self.log.info(
|
||||
self.log.debug(
|
||||
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
|
||||
|
||||
def _check_frames_exists(self, instance):
|
||||
|
|
@ -275,10 +275,10 @@ class ExtractSlateFrame(publish.Extractor):
|
|||
break
|
||||
|
||||
if not matching_repre:
|
||||
self.log.info((
|
||||
"Matching reresentaion was not found."
|
||||
self.log.info(
|
||||
"Matching reresentation was not found."
|
||||
" Representation files were not filled with slate."
|
||||
))
|
||||
)
|
||||
return
|
||||
|
||||
# Add frame to matching representation files
|
||||
|
|
@ -345,7 +345,7 @@ class ExtractSlateFrame(publish.Extractor):
|
|||
|
||||
try:
|
||||
node[key].setValue(value)
|
||||
self.log.info("Change key \"{}\" to value \"{}\"".format(
|
||||
self.log.debug("Change key \"{}\" to value \"{}\"".format(
|
||||
key, value
|
||||
))
|
||||
except NameError:
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ from openpype.hosts.nuke import api as napi
from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings


# Python 2/3 compatibility
if sys.version_info[0] >= 3:
    unicode = str

@ -45,11 +46,12 @@ class ExtractThumbnail(publish.Extractor):
            for o_name, o_data in instance.data["bakePresets"].items():
                self.render_thumbnail(instance, o_name, **o_data)
        else:
            viewer_process_swithes = {
            viewer_process_switches = {
                "bake_viewer_process": True,
                "bake_viewer_input_process": True
            }
            self.render_thumbnail(instance, None, **viewer_process_swithes)
            self.render_thumbnail(
                instance, None, **viewer_process_switches)

    def render_thumbnail(self, instance, output_name=None, **kwargs):
        first_frame = instance.data["frameStartHandle"]
@ -61,15 +63,13 @@ class ExtractThumbnail(publish.Extractor):

        # solve output name if any is set
        output_name = output_name or ""
        if output_name:
            output_name = "_" + output_name

        bake_viewer_process = kwargs["bake_viewer_process"]
        bake_viewer_input_process_node = kwargs[
            "bake_viewer_input_process"]

        node = instance.data["transientData"]["node"]  # group node
        self.log.info("Creating staging dir...")
        self.log.debug("Creating staging dir...")

        if "representations" not in instance.data:
            instance.data["representations"] = []
@ -79,7 +79,7 @@ class ExtractThumbnail(publish.Extractor):

        instance.data["stagingDir"] = staging_dir

        self.log.info(
        self.log.debug(
            "StagingDir `{0}`...".format(instance.data["stagingDir"]))

        temporary_nodes = []
@ -166,26 +166,42 @@ class ExtractThumbnail(publish.Extractor):
            previous_node = dag_node
            temporary_nodes.append(dag_node)

        thumb_name = "thumbnail"
        # only add output name if there is
        # more than one bake preset
        if (
            output_name
            and len(instance.data.get("bakePresets", {}).keys()) > 1
        ):
            thumb_name = "{}_{}".format(output_name, thumb_name)

        # create write node
        write_node = nuke.createNode("Write")
        file = fhead[:-1] + output_name + ".jpg"
        name = "thumbnail"
        path = os.path.join(staging_dir, file).replace("\\", "/")
        instance.data["thumbnail"] = path
        write_node["file"].setValue(path)
        file = fhead[:-1] + thumb_name + ".jpg"
        thumb_path = os.path.join(staging_dir, file).replace("\\", "/")

        # add thumbnail to cleanup
        instance.context.data["cleanupFullPaths"].append(thumb_path)

        # make sure only one thumbnail path is set
        # and it is an existing file
        instance_thumb_path = instance.data.get("thumbnailPath")
        if not instance_thumb_path or not os.path.isfile(instance_thumb_path):
            instance.data["thumbnailPath"] = thumb_path

        write_node["file"].setValue(thumb_path)
        write_node["file_type"].setValue("jpg")
        write_node["raw"].setValue(1)
        write_node.setInput(0, previous_node)
        temporary_nodes.append(write_node)
        tags = ["thumbnail", "publish_on_farm"]

        repre = {
            'name': name,
            'name': thumb_name,
            'ext': "jpg",
            "outputName": "thumb",
            "outputName": thumb_name,
            'files': file,
            "stagingDir": staging_dir,
            "tags": tags
            "tags": ["thumbnail", "publish_on_farm", "delete"]
        }
        instance.data["representations"].append(repre)
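The new `thumbnailPath` guard above ensures that repeated bake presets do not clobber a thumbnail that already exists on disk. The same logic as a standalone sketch (dict-based, data shape assumed):

```python
import os

def set_thumbnail_once(instance_data, thumb_path):
    # Keep the first thumbnail path that points at an existing file;
    # a later preset only wins if the stored path is missing or stale.
    current = instance_data.get("thumbnailPath")
    if not current or not os.path.isfile(current):
        instance_data["thumbnailPath"] = thumb_path
    return instance_data["thumbnailPath"]
```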
@ -0,0 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
    <error id="main">
        <title>Shot/Asset name</title>
        <description>
## Publishing to a different asset context

There are publish instances present which are publishing into a different asset than your current context.

Usually this is not what you want, but there are cases where you might want to publish into another asset/shot or task.

If that's the case, you can disable the validation on the instance to ignore it.

The wrong node's name is: \`{node_name}\`

### Correct context keys and values:

\`{correct_values}\`

### Wrong keys and values:

\`{wrong_values}\`

## How to repair?

1. Use the \"Repair\" button.
2. Hit the Reload button on the publisher.
</description>
    </error>
</root>
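The `{node_name}`, `{correct_values}` and `{wrong_values}` placeholders in this template are filled from the validator's `formatting_data`. A rough illustration of the mapping using plain `str.format` (the real substitution happens inside `PublishXmlValidationError`; the sample values are made up):

```python
description = (
    "The wrong node's name is: `{node_name}`\n"
    "Correct: `{correct_values}`\n"
    "Wrong: `{wrong_values}`"
)
print(description.format(
    node_name="renderMain",
    correct_values="asset > sh010, task > compositing",
    wrong_values="asset > sh020, task > compositing",
))
```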
@ -1,18 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
    <error id="main">
        <title>Shot/Asset name</title>
        <description>
## Invalid Shot/Asset name in subset

Following Node with name `{node_name}`:
Is in context of `{correct_name}` but Node _asset_ knob is set as `{wrong_name}`.

### How to repair?

1. Either use Repair or Select button.
2. If you chose Select then rename asset knob to correct name.
3. Hit Reload button on the publisher.
</description>
    </error>
</root>
112
openpype/hosts/nuke/plugins/publish/validate_asset_context.py
Normal file
@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
"""Validate if instance asset is the same as context asset."""
from __future__ import absolute_import

import pyblish.api

from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder,
    PublishXmlValidationError,
    OptionalPyblishPluginMixin
)
from openpype.hosts.nuke.api import SelectInstanceNodeAction


class ValidateCorrectAssetContext(
    pyblish.api.InstancePlugin,
    OptionalPyblishPluginMixin
):
    """Validator to check if instance asset context matches context asset.

    When working in per-shot style you always publish data in context of
    current asset (shot). This validator checks if this is so. It is optional
    so it can be disabled when needed.

    Checks the `asset` and `task` keys.
    """
    order = ValidateContentsOrder
    label = "Validate asset context"
    hosts = ["nuke"]
    actions = [
        RepairAction,
        SelectInstanceNodeAction
    ]
    optional = True

    @classmethod
    def apply_settings(cls, project_settings):
        """Apply deprecated settings from project settings."""
        nuke_publish = project_settings["nuke"]["publish"]
        if "ValidateCorrectAssetName" in nuke_publish:
            settings = nuke_publish["ValidateCorrectAssetName"]
        else:
            settings = nuke_publish["ValidateCorrectAssetContext"]

        cls.enabled = settings["enabled"]
        cls.optional = settings["optional"]
        cls.active = settings["active"]

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        invalid_keys = self.get_invalid(instance)

        if not invalid_keys:
            return

        message_values = {
            "node_name": instance.data["transientData"]["node"].name(),
            "correct_values": ", ".join([
                "{} > {}".format(_key, instance.context.data[_key])
                for _key in invalid_keys
            ]),
            "wrong_values": ", ".join([
                "{} > {}".format(_key, instance.data.get(_key))
                for _key in invalid_keys
            ])
        }

        msg = (
            "Instance `{node_name}` has wrong context keys:\n"
            "Correct: `{correct_values}` | Wrong: `{wrong_values}`").format(
                **message_values)

        self.log.debug(msg)

        raise PublishXmlValidationError(
            self, msg, formatting_data=message_values
        )

    @classmethod
    def get_invalid(cls, instance):
        """Get invalid keys from instance data and context data."""

        invalid_keys = []
        testing_keys = ["asset", "task"]
        for _key in testing_keys:
            if _key not in instance.data:
                invalid_keys.append(_key)
                continue
            if instance.data[_key] != instance.context.data[_key]:
                invalid_keys.append(_key)

        return invalid_keys

    @classmethod
    def repair(cls, instance):
        """Repair instance data with context data."""
        invalid_keys = cls.get_invalid(instance)

        create_context = instance.context.data["create_context"]

        instance_id = instance.data.get("instance_id")
        created_instance = create_context.get_instance_by_id(
            instance_id
        )
        for _key in invalid_keys:
            created_instance[_key] = instance.context.data[_key]

        create_context.save_changes()
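The `get_invalid` comparison above boils down to checking two keys between instance data and context data. A standalone sketch using plain dicts (the data shapes are assumed):

```python
def get_invalid_keys(instance_data, context_data, keys=("asset", "task")):
    invalid = []
    for key in keys:
        # A key missing from the instance counts as invalid, matching
        # the plugin's behavior above.
        if key not in instance_data or instance_data[key] != context_data[key]:
            invalid.append(key)
    return invalid

print(get_invalid_keys(
    {"asset": "sh020", "task": "compositing"},
    {"asset": "sh010", "task": "compositing"},
))  # ['asset']
```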
@ -1,138 +0,0 @@
# -*- coding: utf-8 -*-
"""Validate if instance asset is the same as context asset."""
from __future__ import absolute_import

import pyblish.api

import openpype.hosts.nuke.api.lib as nlib

from openpype.pipeline.publish import (
    ValidateContentsOrder,
    PublishXmlValidationError,
    OptionalPyblishPluginMixin
)


class SelectInvalidInstances(pyblish.api.Action):
    """Select invalid instances in Outliner."""

    label = "Select"
    icon = "briefcase"
    on = "failed"

    def process(self, context, plugin):
        """Process invalid validators and select invalid instances."""
        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if (
                result["error"] is None
                or result["instance"] is None
                or result["instance"] in failed
                or result["plugin"] != plugin
            ):
                continue

            failed.append(result["instance"])

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)

        if instances:
            self.deselect()
            self.log.info(
                "Selecting invalid nodes: %s" % ", ".join(
                    [str(x) for x in instances]
                )
            )
            self.select(instances)
        else:
            self.log.info("No invalid nodes found.")
            self.deselect()

    def select(self, instances):
        for inst in instances:
            if inst.data.get("transientData", {}).get("node"):
                select_node = inst.data["transientData"]["node"]
                select_node["selected"].setValue(True)

    def deselect(self):
        nlib.reset_selection()


class RepairSelectInvalidInstances(pyblish.api.Action):
    """Repair the instance asset."""

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if (
                result["error"] is None
                or result["instance"] is None
                or result["instance"] in failed
                or result["plugin"] != plugin
            ):
                continue

            failed.append(result["instance"])

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)
        self.log.debug(instances)

        context_asset = context.data["assetEntity"]["name"]
        for instance in instances:
            node = instance.data["transientData"]["node"]
            node_data = nlib.get_node_data(node, nlib.INSTANCE_DATA_KNOB)
            node_data["asset"] = context_asset
            nlib.set_node_data(node, nlib.INSTANCE_DATA_KNOB, node_data)


class ValidateCorrectAssetName(
    pyblish.api.InstancePlugin,
    OptionalPyblishPluginMixin
):
    """Validator to check if instance asset match context asset.

    When working in per-shot style you always publish data in context of
    current asset (shot). This validator checks if this is so. It is optional
    so it can be disabled when needed.

    Action on this validator will select invalid instances in Outliner.
    """
    order = ValidateContentsOrder
    label = "Validate correct asset name"
    hosts = ["nuke"]
    actions = [
        SelectInvalidInstances,
        RepairSelectInvalidInstances
    ]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        asset = instance.data.get("asset")
        context_asset = instance.context.data["assetEntity"]["name"]
        node = instance.data["transientData"]["node"]

        msg = (
            "Instance `{}` has wrong shot/asset name:\n"
            "Correct: `{}` | Wrong: `{}`").format(
                instance.name, asset, context_asset)

        self.log.debug(msg)

        if asset != context_asset:
            raise PublishXmlValidationError(
                self, msg, formatting_data={
                    "node_name": node.name(),
                    "wrong_name": asset,
                    "correct_name": context_asset
                }
            )
@ -43,8 +43,8 @@ class SelectCenterInNodeGraph(pyblish.api.Action):
            all_xC.append(xC)
            all_yC.append(yC)

        self.log.info("all_xC: `{}`".format(all_xC))
        self.log.info("all_yC: `{}`".format(all_yC))
        self.log.debug("all_xC: `{}`".format(all_xC))
        self.log.debug("all_yC: `{}`".format(all_yC))

        # zoom to nodes in node graph
        nuke.zoom(2, [min(all_xC), min(all_yC)])
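`nuke.zoom(2, [x, y])` above zooms the node graph to a point derived from the collected node centers. A hedged sketch of the whole action body, assuming it runs inside Nuke (`xpos`, `ypos`, `screenWidth` and `screenHeight` are standard node methods):

```python
import nuke

def zoom_to_nodes(nodes):
    if not nodes:
        return
    # Center point of each node in node-graph coordinates
    all_xc = [n.xpos() + n.screenWidth() / 2 for n in nodes]
    all_yc = [n.ypos() + n.screenHeight() / 2 for n in nodes]
    # Mirror the action above: zoom factor 2, anchored at the minimum
    # center, which brings the node cluster into view.
    nuke.zoom(2, [min(all_xc), min(all_yc)])
```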
@ -23,7 +23,7 @@ class ValidateOutputResolution(
    order = pyblish.api.ValidatorOrder
    optional = True
    families = ["render"]
    label = "Write resolution"
    label = "Validate Write resolution"
    hosts = ["nuke"]
    actions = [RepairAction]
@ -104,9 +104,9 @@ class ValidateOutputResolution(
        _rfn["resize"].setValue(0)
        _rfn["black_outside"].setValue(1)

        cls.log.info("I am adding reformat node")
        cls.log.info("Adding reformat node")

        if cls.resolution_msg == invalid:
            reformat = cls.get_reformat(instance)
            reformat["format"].setValue(nuke.root()["format"].value())
            cls.log.info("I am fixing reformat to root.format")
            cls.log.info("Fixing reformat to root.format")
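The repair above locks a Reformat node to the root format. A sketch of the full insertion, assuming it runs inside Nuke with an existing Write node (the helper name is illustrative, not this plugin's actual code):

```python
import nuke

def add_root_reformat(write_node):
    # Insert a Reformat between the Write node and its input so the
    # rendered resolution always matches the project root format.
    upstream = write_node.input(0)
    reformat = nuke.createNode("Reformat")
    reformat["format"].setValue(nuke.root()["format"].value())
    reformat["resize"].setValue(0)
    reformat["black_outside"].setValue(1)
    reformat.setInput(0, upstream)
    write_node.setInput(0, reformat)
    return reformat
```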
@ -76,8 +76,8 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
            return

        collections, remainder = clique.assemble(repre["files"])
        self.log.info("collections: {}".format(str(collections)))
        self.log.info("remainder: {}".format(str(remainder)))
        self.log.debug("collections: {}".format(str(collections)))
        self.log.debug("remainder: {}".format(str(remainder)))

        collection = collections[0]
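`clique.assemble` above groups rendered file names into frame sequences; missing frames show up as holes in `collection.indexes`. A small runnable sketch (the file names are made up):

```python
import clique

files = ["render.1001.exr", "render.1002.exr", "render.1004.exr"]
collections, remainder = clique.assemble(files)
collection = collections[0]

# Any index inside the expected range but absent from the collection
# is a missing frame.
expected = set(range(min(collection.indexes), max(collection.indexes) + 1))
missing = expected - set(collection.indexes)
print(sorted(missing))  # [1003]
```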
@ -103,15 +103,15 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
        coll_start = min(collection.indexes)
        coll_end = max(collection.indexes)

        self.log.info("frame_length: {}".format(frame_length))
        self.log.info("collected_frames_len: {}".format(
        self.log.debug("frame_length: {}".format(frame_length))
        self.log.debug("collected_frames_len: {}".format(
            collected_frames_len))
        self.log.info("f_start_h-f_end_h: {}-{}".format(
        self.log.debug("f_start_h-f_end_h: {}-{}".format(
            f_start_h, f_end_h))
        self.log.info(
        self.log.debug(
            "coll_start-coll_end: {}-{}".format(coll_start, coll_end))

        self.log.info(
        self.log.debug(
            "len(collection.indexes): {}".format(collected_frames_len)
        )
Some files were not shown because too many files have changed in this diff.