From 1ef840f0cb2532f362e7651f8e3d7bf5523c72a0 Mon Sep 17 00:00:00 2001
From: Petr Kalis
Date: Mon, 1 Jul 2024 11:57:32 +0200
Subject: [PATCH 01/10] AY-1232 - removed unnecessary get_site_local_overrides

It has wrong arguments and would require the SiteSync addon. It was used
only in the dirmap file, where it can be replaced directly by the
SiteSync addon's method.
---
 client/ayon_core/settings/lib.py | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/client/ayon_core/settings/lib.py b/client/ayon_core/settings/lib.py
index 3929818d31..3126bafd57 100644
--- a/client/ayon_core/settings/lib.py
+++ b/client/ayon_core/settings/lib.py
@@ -123,22 +123,6 @@ class _AyonSettingsCache:
         return cache_item.get_value()
 
 
-def get_site_local_overrides(project_name, site_name, local_settings=None):
-    """Site overrides from local settings for passet project and site name.
-
-    Deprecated:
-        This function is not implemented for AYON and will be removed.
-
-    Args:
-        project_name (str): For which project are overrides.
-        site_name (str): For which site are overrides needed.
-        local_settings (dict): Preloaded local settings. They are loaded
-            automatically if not passed.
-    """
-
-    return {}
-
-
 def get_ayon_settings(project_name=None):
     """AYON studio settings.
 
From d4e90a4eb212a347db60179168f6793ff88bbdf4 Mon Sep 17 00:00:00 2001
From: Petr Kalis
Date: Mon, 1 Jul 2024 12:03:28 +0200
Subject: [PATCH 02/10] AY-1232 - used addon's get_site_root_overrides

The addon's get_site_root_overrides should return overridden root values
for any site that is overridden by Site Settings. The 'studio' site is
currently overridden via the `Roots` tab and comes directly from the
project settings.
---
 client/ayon_core/host/dirmap.py | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/client/ayon_core/host/dirmap.py b/client/ayon_core/host/dirmap.py
index 2e24877d28..d809abaefd 100644
--- a/client/ayon_core/host/dirmap.py
+++ b/client/ayon_core/host/dirmap.py
@@ -15,7 +15,6 @@ import six
 from ayon_core.lib import Logger
 from ayon_core.addon import AddonsManager
 from ayon_core.settings import get_project_settings
-from ayon_core.settings.lib import get_site_local_overrides
 
 
 @six.add_metaclass(ABCMeta)
@@ -181,17 +180,14 @@ class HostDirmap(object):
             exclude_locals=False, cached=False)
 
-        # TODO implement
-        # Dirmap is dependent on 'get_site_local_overrides' which
-        # is not implemented in AYON. The mapping should be received
-        # from sitesync addon.
- active_overrides = get_site_local_overrides( + # overrides for roots set in `Site Settings` + active_roots = sitesync_addon.get_site_root_overrides( project_name, active_site) - remote_overrides = get_site_local_overrides( + remote_roots = sitesync_addon.get_site_root_overrides( project_name, remote_site) - self.log.debug("local overrides {}".format(active_overrides)) - self.log.debug("remote overrides {}".format(remote_overrides)) + self.log.debug("active roots overrides {}".format(active_roots)) + self.log.debug("remote roots overrides {}".format(remote_roots)) current_platform = platform.system().lower() remote_provider = sitesync_addon.get_provider_for_site( @@ -201,9 +197,9 @@ class HostDirmap(object): # won't be root on cloud or sftp provider if remote_provider != "local_drive": remote_site = "studio" - for root_name, active_site_dir in active_overrides.items(): + for root_name, active_site_dir in active_roots.items(): remote_site_dir = ( - remote_overrides.get(root_name) + remote_roots.get(root_name) or sync_settings["sites"][remote_site]["root"][root_name] ) From 1298cb5a765f871af8cf9a77ff42e9121036a2f3 Mon Sep 17 00:00:00 2001 From: Petr Kalis Date: Mon, 1 Jul 2024 12:04:00 +0200 Subject: [PATCH 03/10] AY-1232 - use faster method to check site sync enabled on project --- client/ayon_core/host/dirmap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/ayon_core/host/dirmap.py b/client/ayon_core/host/dirmap.py index d809abaefd..8766e7485d 100644 --- a/client/ayon_core/host/dirmap.py +++ b/client/ayon_core/host/dirmap.py @@ -162,7 +162,7 @@ class HostDirmap(object): if ( sitesync_addon is None or not sitesync_addon.enabled - or project_name not in sitesync_addon.get_enabled_projects() + or not sitesync_addon.is_project_enabled(project_name, True) ): return mapping From 67becfd4b5e13707f132cf0bef1a9aedd0a2217c Mon Sep 17 00:00:00 2001 From: Jakub Trllo <43494761+iLLiCiTiT@users.noreply.github.com> Date: Tue, 2 Jul 2024 13:37:28 +0200 Subject: [PATCH 04/10] removed maya addon --- server_addon/maya/LICENCE | 201 - server_addon/maya/README.md | 4 - .../maya/client/ayon_maya/__init__.py | 13 - server_addon/maya/client/ayon_maya/addon.py | 49 - .../maya/client/ayon_maya/api/__init__.py | 72 - .../maya/client/ayon_maya/api/action.py | 146 - .../maya/client/ayon_maya/api/alembic.py | 350 -- .../maya/client/ayon_maya/api/commands.py | 118 - .../maya/client/ayon_maya/api/customize.py | 179 - .../maya/client/ayon_maya/api/exitstack.py | 139 - server_addon/maya/client/ayon_maya/api/fbx.py | 210 - .../maya/client/ayon_maya/api/gltf.py | 88 - server_addon/maya/client/ayon_maya/api/lib.py | 4243 ----------------- .../ayon_maya/api/lib_renderproducts.py | 1469 ------ .../ayon_maya/api/lib_rendersettings.py | 410 -- .../client/ayon_maya/api/lib_rendersetup.py | 417 -- .../maya/client/ayon_maya/api/menu.py | 299 -- .../maya/client/ayon_maya/api/pipeline.py | 779 --- .../maya/client/ayon_maya/api/plugin.py | 1037 ---- .../ayon_maya/api/render_setup_tools.py | 127 - .../maya/client/ayon_maya/api/setdress.py | 606 --- .../api/workfile_template_builder.py | 290 -- .../maya/client/ayon_maya/api/workio.py | 66 - .../maya/client/ayon_maya/api/yeti.py | 101 - .../ayon_maya/hooks/pre_auto_load_plugins.py | 30 - .../client/ayon_maya/hooks/pre_copy_mel.py | 23 - .../pre_open_workfile_post_initialization.py | 26 - server_addon/maya/client/ayon_maya/lib.py | 25 - .../maya/client/ayon_maya/plugins/__init__.py | 0 .../plugins/create/convert_legacy.py | 190 - 
.../create/create_animation_pointcache.py | 134 - .../create/create_arnold_scene_source.py | 112 - .../plugins/create/create_assembly.py | 10 - .../ayon_maya/plugins/create/create_camera.py | 36 - .../ayon_maya/plugins/create/create_layout.py | 21 - .../ayon_maya/plugins/create/create_look.py | 47 - .../plugins/create/create_matchmove.py | 32 - .../plugins/create/create_maya_usd.py | 102 - .../plugins/create/create_mayascene.py | 11 - .../ayon_maya/plugins/create/create_model.py | 43 - .../plugins/create/create_multishot_layout.py | 223 - .../plugins/create/create_multiverse_look.py | 27 - .../plugins/create/create_multiverse_usd.py | 139 - .../create/create_multiverse_usd_comp.py | 48 - .../create/create_multiverse_usd_over.py | 59 - .../plugins/create/create_proxy_abc.py | 50 - .../plugins/create/create_redshift_proxy.py | 25 - .../ayon_maya/plugins/create/create_render.py | 115 - .../plugins/create/create_rendersetup.py | 31 - .../ayon_maya/plugins/create/create_review.py | 148 - .../ayon_maya/plugins/create/create_rig.py | 32 - .../plugins/create/create_setdress.py | 24 - .../create/create_unreal_skeletalmesh.py | 105 - .../create/create_unreal_staticmesh.py | 95 - .../plugins/create/create_unreal_yeticache.py | 39 - .../plugins/create/create_vrayproxy.py | 50 - .../plugins/create/create_vrayscene.py | 52 - .../plugins/create/create_workfile.py | 118 - .../ayon_maya/plugins/create/create_xgen.py | 10 - .../plugins/create/create_yeti_cache.py | 39 - .../plugins/create/create_yeti_rig.py | 27 - .../plugins/inventory/connect_geometry.py | 158 - .../plugins/inventory/connect_xgen.py | 174 - .../plugins/inventory/connect_yeti_rig.py | 187 - .../plugins/inventory/import_modelrender.py | 169 - .../plugins/inventory/import_reference.py | 27 - .../rig_recreate_animation_instance.py | 44 - .../plugins/inventory/select_containers.py | 46 - .../ayon_maya/plugins/load/_load_animation.py | 103 - .../client/ayon_maya/plugins/load/actions.py | 192 - .../plugins/load/load_arnold_standin.py | 237 - .../plugins/load/load_as_template.py | 33 - .../ayon_maya/plugins/load/load_assembly.py | 75 - .../ayon_maya/plugins/load/load_audio.py | 111 - .../ayon_maya/plugins/load/load_gpucache.py | 101 - .../ayon_maya/plugins/load/load_image.py | 330 -- .../plugins/load/load_image_plane.py | 271 -- .../ayon_maya/plugins/load/load_look.py | 138 - .../ayon_maya/plugins/load/load_matchmove.py | 31 - .../ayon_maya/plugins/load/load_maya_usd.py | 103 - .../plugins/load/load_multiverse_usd.py | 122 - .../plugins/load/load_multiverse_usd_over.py | 129 - .../plugins/load/load_redshift_proxy.py | 150 - .../ayon_maya/plugins/load/load_reference.py | 382 -- .../plugins/load/load_rendersetup.py | 168 - .../plugins/load/load_vdb_to_arnold.py | 138 - .../plugins/load/load_vdb_to_redshift.py | 143 - .../plugins/load/load_vdb_to_vray.py | 286 -- .../ayon_maya/plugins/load/load_vrayproxy.py | 192 - .../ayon_maya/plugins/load/load_vrayscene.py | 148 - .../ayon_maya/plugins/load/load_xgen.py | 185 - .../ayon_maya/plugins/load/load_yeti_cache.py | 397 -- .../ayon_maya/plugins/load/load_yeti_rig.py | 94 - .../ayon_maya/plugins/publish/__init__.py | 0 .../plugins/publish/collect_animation.py | 59 - .../publish/collect_arnold_scene_source.py | 58 - .../plugins/publish/collect_assembly.py | 97 - .../plugins/publish/collect_current_file.py | 14 - .../plugins/publish/collect_fbx_animation.py | 36 - .../plugins/publish/collect_fbx_camera.py | 21 - .../plugins/publish/collect_fbx_model.py | 29 - .../publish/collect_file_dependencies.py | 39 - 
.../ayon_maya/plugins/publish/collect_gltf.py | 18 - .../plugins/publish/collect_history.py | 46 - .../plugins/publish/collect_inputs.py | 213 - .../plugins/publish/collect_instances.py | 115 - .../ayon_maya/plugins/publish/collect_look.py | 674 --- .../publish/collect_maya_scene_time.py | 28 - .../plugins/publish/collect_maya_units.py | 31 - .../plugins/publish/collect_maya_workspace.py | 25 - .../plugins/publish/collect_model.py | 29 - .../publish/collect_multiverse_look.py | 422 -- .../plugins/publish/collect_pointcache.py | 46 - .../plugins/publish/collect_remove_marked.py | 25 - .../plugins/publish/collect_render.py | 331 -- .../publish/collect_render_layer_aovs.py | 95 - .../publish/collect_renderable_camera.py | 31 - .../plugins/publish/collect_review.py | 185 - .../plugins/publish/collect_rig_sets.py | 40 - .../plugins/publish/collect_skeleton_mesh.py | 44 - .../publish/collect_unreal_skeletalmesh.py | 40 - .../publish/collect_unreal_staticmesh.py | 38 - .../collect_user_defined_attributes.py | 40 - .../plugins/publish/collect_vrayproxy.py | 25 - .../plugins/publish/collect_vrayscene.py | 122 - .../plugins/publish/collect_workfile.py | 36 - .../plugins/publish/collect_workscene_fps.py | 15 - .../ayon_maya/plugins/publish/collect_xgen.py | 71 - .../plugins/publish/collect_yeti_cache.py | 92 - .../plugins/publish/collect_yeti_rig.py | 306 -- .../publish/determine_future_version.py | 36 - .../publish/extract_active_view_thumbnail.py | 59 - .../publish/extract_arnold_scene_source.py | 244 - .../plugins/publish/extract_assembly.py | 67 - .../plugins/publish/extract_camera_alembic.py | 124 - .../publish/extract_camera_mayaScene.py | 306 -- .../ayon_maya/plugins/publish/extract_fbx.py | 60 - .../plugins/publish/extract_fbx_animation.py | 72 - .../ayon_maya/plugins/publish/extract_gltf.py | 63 - .../plugins/publish/extract_gpu_cache.py | 68 - .../publish/extract_import_reference.py | 165 - .../plugins/publish/extract_layout.py | 168 - .../ayon_maya/plugins/publish/extract_look.py | 889 ---- .../plugins/publish/extract_maya_scene_raw.py | 151 - .../plugins/publish/extract_maya_usd.py | 291 -- .../plugins/publish/extract_model.py | 107 - .../publish/extract_multiverse_look.py | 156 - .../plugins/publish/extract_multiverse_usd.py | 273 -- .../publish/extract_multiverse_usd_comp.py | 177 - .../publish/extract_multiverse_usd_over.py | 155 - .../ayon_maya/plugins/publish/extract_obj.py | 76 - .../plugins/publish/extract_playblast.py | 103 - .../plugins/publish/extract_pointcache.py | 524 -- .../plugins/publish/extract_proxy_abc.py | 108 - .../plugins/publish/extract_redshift_proxy.py | 98 - .../plugins/publish/extract_rendersetup.py | 40 - .../ayon_maya/plugins/publish/extract_rig.py | 66 - .../plugins/publish/extract_skeleton_mesh.py | 53 - .../plugins/publish/extract_thumbnail.py | 119 - .../extract_unreal_skeletalmesh_abc.py | 92 - .../extract_unreal_skeletalmesh_fbx.py | 89 - .../publish/extract_unreal_staticmesh.py | 57 - .../publish/extract_unreal_yeticache.py | 59 - .../plugins/publish/extract_vrayproxy.py | 70 - .../plugins/publish/extract_vrayscene.py | 137 - .../plugins/publish/extract_workfile_xgen.py | 249 - .../ayon_maya/plugins/publish/extract_xgen.py | 154 - .../plugins/publish/extract_yeti_cache.py | 88 - .../plugins/publish/extract_yeti_rig.py | 206 - .../submit_maya_remote_publish_deadline.xml | 16 - ...ate_animation_out_set_related_node_ids.xml | 29 - .../publish/help/validate_maya_units.xml | 21 - .../help/validate_mesh_non_manifold.xml | 33 - 
.../publish/help/validate_node_ids.xml | 29 - .../help/validate_rig_out_set_node_ids.xml | 32 - .../help/validate_skeletalmesh_hierarchy.xml | 14 - .../increment_current_file_deadline.py | 39 - .../plugins/publish/reset_xgen_attributes.py | 36 - .../ayon_maya/plugins/publish/save_scene.py | 33 - .../validate_alembic_options_defaults.py | 130 - .../publish/validate_animation_content.py | 58 - ...date_animation_out_set_related_node_ids.py | 108 - .../publish/validate_arnold_scene_source.py | 127 - .../validate_arnold_scene_source_cbid.py | 83 - .../publish/validate_ass_relative_paths.py | 137 - .../plugins/publish/validate_assembly_name.py | 59 - .../publish/validate_assembly_namespaces.py | 47 - .../publish/validate_assembly_transforms.py | 118 - .../plugins/publish/validate_attributes.py | 119 - .../publish/validate_camera_attributes.py | 75 - .../publish/validate_camera_contents.py | 82 - .../plugins/publish/validate_color_sets.py | 61 - ...validate_current_renderlayer_renderable.py | 73 - .../plugins/publish/validate_cycle_error.py | 40 - .../plugins/publish/validate_frame_range.py | 202 - .../plugins/publish/validate_glsl_material.py | 209 - .../plugins/publish/validate_glsl_plugin.py | 35 - .../publish/validate_instance_has_members.py | 38 - .../publish/validate_instance_in_context.py | 83 - .../publish/validate_instance_subset.py | 53 - .../plugins/publish/validate_loaded_plugin.py | 55 - .../plugins/publish/validate_look_contents.py | 135 - ...lidate_look_default_shaders_connections.py | 76 - .../validate_look_id_reference_edits.py | 108 - .../validate_look_no_default_shaders.py | 63 - .../plugins/publish/validate_look_sets.py | 102 - .../publish/validate_look_shading_group.py | 73 - .../publish/validate_look_single_shader.py | 59 - .../plugins/publish/validate_maya_units.py | 131 - .../validate_mesh_arnold_attributes.py | 126 - .../plugins/publish/validate_mesh_empty.py | 53 - .../plugins/publish/validate_mesh_has_uv.py | 87 - .../publish/validate_mesh_lamina_faces.py | 53 - .../plugins/publish/validate_mesh_ngons.py | 67 - .../validate_mesh_no_negative_scale.py | 66 - .../publish/validate_mesh_non_manifold.py | 168 - .../publish/validate_mesh_non_zero_edge.py | 81 - .../publish/validate_mesh_normals_unlocked.py | 76 - .../publish/validate_mesh_overlapping_uvs.py | 304 -- .../validate_mesh_shader_connections.py | 128 - .../publish/validate_mesh_single_uv_set.py | 73 - .../publish/validate_mesh_uv_set_map1.py | 136 - .../validate_mesh_vertices_have_edges.py | 85 - .../plugins/publish/validate_model_content.py | 135 - .../publish/validate_mvlook_contents.py | 103 - .../plugins/publish/validate_no_animation.py | 59 - .../publish/validate_no_default_camera.py | 50 - .../plugins/publish/validate_no_namespace.py | 75 - .../publish/validate_no_null_transforms.py | 90 - .../publish/validate_no_unknown_nodes.py | 53 - .../plugins/publish/validate_no_vraymesh.py | 48 - .../plugins/publish/validate_node_ids.py | 65 - .../validate_node_ids_deformed_shapes.py | 79 - .../publish/validate_node_ids_in_database.py | 101 - .../publish/validate_node_ids_related.py | 124 - .../publish/validate_node_ids_unique.py | 79 - .../publish/validate_node_no_ghosting.py | 58 - .../validate_plugin_path_attributes.py | 79 - .../publish/validate_render_image_rule.py | 73 - .../validate_render_no_default_cameras.py | 42 - .../publish/validate_render_single_camera.py | 82 - .../publish/validate_renderlayer_aovs.py | 65 - .../publish/validate_rendersettings.py | 446 -- .../plugins/publish/validate_resolution.py | 107 - 
.../plugins/publish/validate_resources.py | 60 - .../plugins/publish/validate_review.py | 29 - .../plugins/publish/validate_rig_contents.py | 259 - .../publish/validate_rig_controllers.py | 294 -- ...idate_rig_controllers_arnold_attributes.py | 98 - .../publish/validate_rig_joints_hidden.py | 50 - .../publish/validate_rig_out_set_node_ids.py | 159 - .../publish/validate_rig_output_ids.py | 158 - .../publish/validate_scene_set_workspace.py | 51 - .../plugins/publish/validate_setdress_root.py | 27 - .../plugins/publish/validate_shader_name.py | 83 - .../publish/validate_shape_default_names.py | 94 - .../publish/validate_shape_render_stats.py | 85 - .../plugins/publish/validate_shape_zero.py | 99 - .../publish/validate_single_assembly.py | 42 - .../validate_skeletalmesh_hierarchy.py | 40 - .../validate_skeletalmesh_triangulated.py | 57 - .../validate_skeleton_top_group_hierarchy.py | 42 - .../validate_skinCluster_deformer_set.py | 81 - .../plugins/publish/validate_step_size.py | 49 - .../validate_transform_naming_suffix.py | 145 - .../publish/validate_transform_zero.py | 91 - .../plugins/publish/validate_unique_names.py | 44 - .../validate_unreal_mesh_triangulated.py | 39 - .../validate_unreal_staticmesh_naming.py | 148 - .../publish/validate_unreal_up_axis.py | 34 - .../plugins/publish/validate_visible_only.py | 57 - .../plugins/publish/validate_vray.py | 17 - .../validate_vray_distributed_rendering.py | 67 - .../publish/validate_vray_referenced_aovs.py | 98 - .../validate_vray_translator_settings.py | 104 - .../plugins/publish/validate_vrayproxy.py | 36 - .../publish/validate_vrayproxy_members.py | 42 - .../plugins/publish/validate_xgen.py | 69 - .../validate_yeti_renderscript_callbacks.py | 122 - .../publish/validate_yeti_rig_cache_state.py | 72 - .../validate_yeti_rig_input_in_instance.py | 49 - .../publish/validate_yeti_rig_settings.py | 61 - .../workfile_build/assign_look_placeholder.py | 128 - .../workfile_build/load_placeholder.py | 132 - .../workfile_build/script_placeholder.py | 201 - .../client/ayon_maya/startup/userSetup.py | 50 - .../maya/client/ayon_maya/tools/__init__.py | 27 - .../ayon_maya/tools/mayalookassigner/LICENSE | 21 - .../tools/mayalookassigner/__init__.py | 9 - .../tools/mayalookassigner/alembic.py | 97 - .../ayon_maya/tools/mayalookassigner/app.py | 317 -- .../tools/mayalookassigner/arnold_standin.py | 263 - .../tools/mayalookassigner/commands.py | 199 - .../ayon_maya/tools/mayalookassigner/lib.py | 88 - .../tools/mayalookassigner/models.py | 134 - .../ayon_maya/tools/mayalookassigner/usd.py | 38 - .../ayon_maya/tools/mayalookassigner/views.py | 47 - .../tools/mayalookassigner/vray_proxies.py | 137 - .../tools/mayalookassigner/widgets.py | 256 - .../client/ayon_maya/vendor/python/capture.py | 919 ---- server_addon/maya/client/ayon_maya/version.py | 3 - server_addon/maya/package.py | 8 - server_addon/maya/server/__init__.py | 12 - server_addon/maya/server/settings/__init__.py | 0 server_addon/maya/server/settings/creators.py | 434 -- .../settings/explicit_plugins_loading.py | 427 -- server_addon/maya/server/settings/imageio.py | 150 - .../maya/server/settings/include_handles.py | 32 - server_addon/maya/server/settings/loaders.py | 284 -- server_addon/maya/server/settings/main.py | 123 - .../maya/server/settings/maya_dirmap.py | 38 - .../maya/server/settings/publish_playblast.py | 402 -- .../maya/server/settings/publishers.py | 1640 ------- .../maya/server/settings/render_settings.py | 499 -- .../maya/server/settings/scriptsmenu.py | 89 - 
.../settings/templated_workfile_settings.py | 30 - .../settings/workfile_build_settings.py | 134 - 317 files changed, 45387 deletions(-) delete mode 100644 server_addon/maya/LICENCE delete mode 100644 server_addon/maya/README.md delete mode 100644 server_addon/maya/client/ayon_maya/__init__.py delete mode 100644 server_addon/maya/client/ayon_maya/addon.py delete mode 100644 server_addon/maya/client/ayon_maya/api/__init__.py delete mode 100644 server_addon/maya/client/ayon_maya/api/action.py delete mode 100644 server_addon/maya/client/ayon_maya/api/alembic.py delete mode 100644 server_addon/maya/client/ayon_maya/api/commands.py delete mode 100644 server_addon/maya/client/ayon_maya/api/customize.py delete mode 100644 server_addon/maya/client/ayon_maya/api/exitstack.py delete mode 100644 server_addon/maya/client/ayon_maya/api/fbx.py delete mode 100644 server_addon/maya/client/ayon_maya/api/gltf.py delete mode 100644 server_addon/maya/client/ayon_maya/api/lib.py delete mode 100644 server_addon/maya/client/ayon_maya/api/lib_renderproducts.py delete mode 100644 server_addon/maya/client/ayon_maya/api/lib_rendersettings.py delete mode 100644 server_addon/maya/client/ayon_maya/api/lib_rendersetup.py delete mode 100644 server_addon/maya/client/ayon_maya/api/menu.py delete mode 100644 server_addon/maya/client/ayon_maya/api/pipeline.py delete mode 100644 server_addon/maya/client/ayon_maya/api/plugin.py delete mode 100644 server_addon/maya/client/ayon_maya/api/render_setup_tools.py delete mode 100644 server_addon/maya/client/ayon_maya/api/setdress.py delete mode 100644 server_addon/maya/client/ayon_maya/api/workfile_template_builder.py delete mode 100644 server_addon/maya/client/ayon_maya/api/workio.py delete mode 100644 server_addon/maya/client/ayon_maya/api/yeti.py delete mode 100644 server_addon/maya/client/ayon_maya/hooks/pre_auto_load_plugins.py delete mode 100644 server_addon/maya/client/ayon_maya/hooks/pre_copy_mel.py delete mode 100644 server_addon/maya/client/ayon_maya/hooks/pre_open_workfile_post_initialization.py delete mode 100644 server_addon/maya/client/ayon_maya/lib.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/__init__.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/convert_legacy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_arnold_scene_source.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_assembly.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_camera.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_layout.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_look.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_matchmove.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_maya_usd.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_mayascene.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_model.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_multishot_layout.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_look.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_comp.py delete 
mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_over.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_proxy_abc.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_redshift_proxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_render.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_rendersetup.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_review.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_rig.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_setdress.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_unreal_skeletalmesh.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_unreal_staticmesh.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_unreal_yeticache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_vrayproxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_vrayscene.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_workfile.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_xgen.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_yeti_cache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/create/create_yeti_rig.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/inventory/connect_geometry.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/inventory/connect_xgen.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/inventory/connect_yeti_rig.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/inventory/import_modelrender.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/inventory/import_reference.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/inventory/rig_recreate_animation_instance.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/inventory/select_containers.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/_load_animation.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/actions.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_arnold_standin.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_as_template.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_assembly.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_audio.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_gpucache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_image.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_image_plane.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_look.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_matchmove.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_maya_usd.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd_over.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_redshift_proxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_reference.py 
delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_rendersetup.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_arnold.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_redshift.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_vray.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_vrayproxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_vrayscene.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_xgen.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_yeti_cache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/load/load_yeti_rig.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/__init__.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_animation.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_arnold_scene_source.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_assembly.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_current_file.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_animation.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_camera.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_model.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_file_dependencies.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_gltf.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_history.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_inputs.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_instances.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_look.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_scene_time.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_units.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_workspace.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_model.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_multiverse_look.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_pointcache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_remove_marked.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_render.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_render_layer_aovs.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_renderable_camera.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_review.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_rig_sets.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_skeleton_mesh.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_skeletalmesh.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_staticmesh.py delete mode 100644 
server_addon/maya/client/ayon_maya/plugins/publish/collect_user_defined_attributes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayproxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayscene.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_workfile.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_workscene_fps.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_xgen.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_cache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_rig.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/determine_future_version.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_active_view_thumbnail.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_arnold_scene_source.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_assembly.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_alembic.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_mayaScene.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx_animation.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_gltf.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_gpu_cache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_import_reference.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_layout.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_look.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_scene_raw.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_usd.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_model.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_look.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_comp.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_over.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_obj.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_playblast.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_pointcache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_proxy_abc.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_redshift_proxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_rendersetup.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_rig.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_skeleton_mesh.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_thumbnail.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_abc.py delete mode 100644 
server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_staticmesh.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_yeticache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayproxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayscene.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_workfile_xgen.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_xgen.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_cache.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_rig.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/help/validate_animation_out_set_related_node_ids.xml delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/help/validate_maya_units.xml delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/help/validate_mesh_non_manifold.xml delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/help/validate_node_ids.xml delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/help/validate_rig_out_set_node_ids.xml delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/increment_current_file_deadline.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/reset_xgen_attributes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/save_scene.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_alembic_options_defaults.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_content.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_out_set_related_node_ids.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source_cbid.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_ass_relative_paths.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_name.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_namespaces.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_transforms.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_attributes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_attributes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_contents.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_color_sets.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_current_renderlayer_renderable.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_cycle_error.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_frame_range.py delete mode 100644 
server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_material.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_plugin.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_has_members.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_in_context.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_subset.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_loaded_plugin.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_look_contents.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_look_default_shaders_connections.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_look_id_reference_edits.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_look_no_default_shaders.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_look_sets.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_look_shading_group.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_look_single_shader.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_maya_units.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_arnold_attributes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_empty.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_has_uv.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_lamina_faces.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_ngons.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_no_negative_scale.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_manifold.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_zero_edge.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_normals_unlocked.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_overlapping_uvs.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_shader_connections.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_single_uv_set.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_uv_set_map1.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_vertices_have_edges.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_model_content.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_mvlook_contents.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_no_animation.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_no_default_camera.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_no_namespace.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_no_null_transforms.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_no_unknown_nodes.py delete mode 100644 
server_addon/maya/client/ayon_maya/plugins/publish/validate_no_vraymesh.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_deformed_shapes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_in_database.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_related.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_unique.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_node_no_ghosting.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_plugin_path_attributes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_render_image_rule.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_render_no_default_cameras.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_render_single_camera.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_renderlayer_aovs.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_rendersettings.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_resolution.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_resources.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_review.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_contents.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers_arnold_attributes.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_joints_hidden.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_out_set_node_ids.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_output_ids.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_scene_set_workspace.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_setdress_root.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_shader_name.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_default_names.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_render_stats.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_zero.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_single_assembly.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_hierarchy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_triangulated.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_skeleton_top_group_hierarchy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_skinCluster_deformer_set.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_step_size.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_naming_suffix.py delete mode 100644 
server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_zero.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_unique_names.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_mesh_triangulated.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_staticmesh_naming.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_up_axis.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_visible_only.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_vray.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_distributed_rendering.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_referenced_aovs.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_translator_settings.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy_members.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_xgen.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_renderscript_callbacks.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_cache_state.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_input_in_instance.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_settings.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/workfile_build/assign_look_placeholder.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/workfile_build/load_placeholder.py delete mode 100644 server_addon/maya/client/ayon_maya/plugins/workfile_build/script_placeholder.py delete mode 100644 server_addon/maya/client/ayon_maya/startup/userSetup.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/__init__.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/LICENSE delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/__init__.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/alembic.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/app.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/arnold_standin.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/commands.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/lib.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/models.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/usd.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/views.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/vray_proxies.py delete mode 100644 server_addon/maya/client/ayon_maya/tools/mayalookassigner/widgets.py delete mode 100644 server_addon/maya/client/ayon_maya/vendor/python/capture.py delete mode 100644 server_addon/maya/client/ayon_maya/version.py delete mode 100644 server_addon/maya/package.py delete mode 100644 server_addon/maya/server/__init__.py delete mode 100644 server_addon/maya/server/settings/__init__.py delete mode 100644 
server_addon/maya/server/settings/creators.py delete mode 100644 server_addon/maya/server/settings/explicit_plugins_loading.py delete mode 100644 server_addon/maya/server/settings/imageio.py delete mode 100644 server_addon/maya/server/settings/include_handles.py delete mode 100644 server_addon/maya/server/settings/loaders.py delete mode 100644 server_addon/maya/server/settings/main.py delete mode 100644 server_addon/maya/server/settings/maya_dirmap.py delete mode 100644 server_addon/maya/server/settings/publish_playblast.py delete mode 100644 server_addon/maya/server/settings/publishers.py delete mode 100644 server_addon/maya/server/settings/render_settings.py delete mode 100644 server_addon/maya/server/settings/scriptsmenu.py delete mode 100644 server_addon/maya/server/settings/templated_workfile_settings.py delete mode 100644 server_addon/maya/server/settings/workfile_build_settings.py diff --git a/server_addon/maya/LICENCE b/server_addon/maya/LICENCE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/server_addon/maya/LICENCE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/server_addon/maya/README.md b/server_addon/maya/README.md deleted file mode 100644 index c65c09fba0..0000000000 --- a/server_addon/maya/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Maya Integration Addon -====================== - -WIP diff --git a/server_addon/maya/client/ayon_maya/__init__.py b/server_addon/maya/client/ayon_maya/__init__.py deleted file mode 100644 index 39d990a3c2..0000000000 --- a/server_addon/maya/client/ayon_maya/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - MayaAddon, - MAYA_ROOT_DIR, -) - - -__all__ = ( - "__version__", - - "MayaAddon", - "MAYA_ROOT_DIR", -) diff --git a/server_addon/maya/client/ayon_maya/addon.py b/server_addon/maya/client/ayon_maya/addon.py deleted file mode 100644 index 194528bda2..0000000000 --- a/server_addon/maya/client/ayon_maya/addon.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ - -MAYA_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class MayaAddon(AYONAddon, IHostAddon): - name = "maya" - version = __version__ - host_name = "maya" - - def add_implementation_envs(self, env, _app): - # Add requirements to PYTHONPATH - new_python_paths = [ - os.path.join(MAYA_ROOT_DIR, "startup") - ] - old_python_path = env.get("PYTHONPATH") or "" - for path in old_python_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_python_paths: - new_python_paths.append(norm_path) - - # add vendor path - new_python_paths.append( - os.path.join(MAYA_ROOT_DIR, "vendor", "python") - ) - env["PYTHONPATH"] = os.pathsep.join(new_python_paths) - - # Set default environments - envs = { - "AYON_LOG_NO_COLORS": "1", - } - for key, value in envs.items(): - env[key] = value - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(MAYA_ROOT_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".ma", ".mb"] diff --git a/server_addon/maya/client/ayon_maya/api/__init__.py b/server_addon/maya/client/ayon_maya/api/__init__.py deleted file mode 100644 index 8783fbeeb7..0000000000 --- a/server_addon/maya/client/ayon_maya/api/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Public API - -Anything that isn't defined here is INTERNAL and unreliable for external use. 
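-
-For example, a tool or startup script would typically rely only on this
-package root (a minimal sketch; the exact bootstrap call depends on the
-ayon_core version in use):
-
->>> from ayon_core.pipeline import install_host
->>> from ayon_maya.api import MayaHost
->>> install_host(MayaHost())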
- -""" - -from .pipeline import ( - uninstall, - - ls, - containerise, - MayaHost, -) -from .plugin import ( - Loader -) - -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root -) - -from .lib import ( - lsattr, - lsattrs, - read, - - apply_shaders, - maintained_selection, - suspended_refresh, - - unique_namespace, -) - - -__all__ = [ - "uninstall", - - "ls", - "containerise", - "MayaHost", - - "Loader", - - # Workfiles API - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - - # Utility functions - "lsattr", - "lsattrs", - "read", - - "unique_namespace", - - "apply_shaders", - "maintained_selection", - "suspended_refresh", - -] - -# Backwards API compatibility -open = open_file -save = save_file diff --git a/server_addon/maya/client/ayon_maya/api/action.py b/server_addon/maya/client/ayon_maya/api/action.py deleted file mode 100644 index d845ac6066..0000000000 --- a/server_addon/maya/client/ayon_maya/api/action.py +++ /dev/null @@ -1,146 +0,0 @@ -# absolute_import is needed to counter the `module has no cmds error` in Maya -from __future__ import absolute_import - -import pyblish.api -import ayon_api - -from ayon_core.pipeline.publish import ( - get_errored_instances_from_context, - get_errored_plugins_from_context -) - - -class GenerateUUIDsOnInvalidAction(pyblish.api.Action): - """Generate UUIDs on the invalid nodes in the instance. - - Invalid nodes are those returned by the plugin's `get_invalid` method. - As such it is the plug-in's responsibility to ensure the nodes that - receive new UUIDs are actually invalid. - - Requires: - - instance.data["folderPath"] - - """ - - label = "Regenerate UUIDs" - on = "failed" # This action is only available on a failed plug-in - icon = "wrench" # Icon from Awesome Icon - - def process(self, context, plugin): - - from maya import cmds - - self.log.info("Finding bad nodes..") - - errored_instances = get_errored_instances_from_context(context) - - # Apply pyblish logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(errored_instances, plugin) - - # Get the nodes from the all instances that ran through this plug-in - all_invalid = [] - for instance in instances: - invalid = plugin.get_invalid(instance) - - # Don't allow referenced nodes to get their ids regenerated to - # avoid loaded content getting messed up with reference edits - if invalid: - referenced = {node for node in invalid if - cmds.referenceQuery(node, isNodeReferenced=True)} - if referenced: - self.log.warning("Skipping UUID generation on referenced " - "nodes: {}".format(list(referenced))) - invalid = [node for node in invalid - if node not in referenced] - - if invalid: - - self.log.info("Fixing instance {}".format(instance.name)) - self._update_id_attribute(instance, invalid) - - all_invalid.extend(invalid) - - if not all_invalid: - self.log.info("No invalid nodes found.") - return - - all_invalid = list(set(all_invalid)) - self.log.info("Generated ids on nodes: {0}".format(all_invalid)) - - def _update_id_attribute(self, instance, nodes): - """Delete the id attribute - - Args: - instance: The instance we're fixing for - nodes (list): all nodes to regenerate ids on - """ - - from . import lib - - # Expecting this is called on validators in which case 'folderEntity' - # should be always available, but kept a way to query it by name. 
- folder_entity = instance.data.get("folderEntity") - if not folder_entity: - folder_path = instance.data["folderPath"] - project_name = instance.context.data["projectName"] - self.log.info(( - "Folder is not stored on instance." - " Querying by path \"{}\" from project \"{}\"" - ).format(folder_path, project_name)) - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path, fields={"id"} - ) - - for node, _id in lib.generate_ids( - nodes, folder_id=folder_entity["id"] - ): - lib.set_id(node, _id, overwrite=True) - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid nodes in Maya when plug-in failed. - - To retrieve the invalid nodes this assumes a static `get_invalid()` - method is available on the plugin. - - """ - label = "Select invalid" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Awesome Icon - - def process(self, context, plugin): - - try: - from maya import cmds - except ImportError: - raise ImportError("Current host is not Maya") - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes..") - invalid = list() - if issubclass(plugin, pyblish.api.ContextPlugin): - errored_plugins = get_errored_plugins_from_context(context) - if plugin in errored_plugins: - invalid = plugin.get_invalid(context) - else: - errored_instances = get_errored_instances_from_context( - context, plugin=plugin - ) - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") - - # Ensure unique (process each node only once) - invalid = list(set(invalid)) - - if invalid: - self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid)) - cmds.select(invalid, replace=True, noExpand=True) - else: - self.log.info("No invalid nodes found.") - cmds.select(deselect=True) diff --git a/server_addon/maya/client/ayon_maya/api/alembic.py b/server_addon/maya/client/ayon_maya/api/alembic.py deleted file mode 100644 index 007e3ce4f3..0000000000 --- a/server_addon/maya/client/ayon_maya/api/alembic.py +++ /dev/null @@ -1,350 +0,0 @@ -import json -import logging -import os - -from maya import cmds # noqa - -from ayon_maya.api.lib import evaluation - -log = logging.getLogger(__name__) - -# The maya alembic export types -ALEMBIC_ARGS = { - "attr": (list, tuple), - "attrPrefix": (list, tuple), - "autoSubd": bool, - "dataFormat": str, - "endFrame": float, - "eulerFilter": bool, - "frameRange": str, # "start end"; overrides startFrame & endFrame - "frameRelativeSample": float, - "melPerFrameCallback": str, - "melPostJobCallback": str, - "noNormals": bool, - "preRoll": bool, - "pythonPerFrameCallback": str, - "pythonPostJobCallback": str, - "renderableOnly": bool, - "root": (list, tuple), - "selection": bool, - "startFrame": float, - "step": float, - "stripNamespaces": bool, - "userAttr": (list, tuple), - "userAttrPrefix": (list, tuple), - "uvWrite": bool, - "uvsOnly": bool, - "verbose": bool, - "wholeFrameGeo": bool, - "worldSpace": bool, - "writeColorSets": bool, - "writeCreases": bool, # Maya 2015 Ext1+ - "writeFaceSets": bool, - "writeUVSets": bool, # Maya 2017+ - "writeVisibility": bool, -} - - -def extract_alembic( - file, - attr=None, - attrPrefix=None, - dataFormat="ogawa", - endFrame=None, - eulerFilter=True, - frameRange="", - melPerFrameCallback=None, - melPostJobCallback=None, - noNormals=False, 
-    preRoll=False,
-    preRollStartFrame=0,
-    pythonPerFrameCallback=None,
-    pythonPostJobCallback=None,
-    renderableOnly=False,
-    root=None,
-    selection=True,
-    startFrame=None,
-    step=1.0,
-    stripNamespaces=True,
-    userAttr=None,
-    userAttrPrefix=None,
-    uvsOnly=False,
-    uvWrite=True,
-    verbose=False,
-    wholeFrameGeo=False,
-    worldSpace=False,
-    writeColorSets=False,
-    writeCreases=False,
-    writeFaceSets=False,
-    writeUVSets=False,
-    writeVisibility=False
-):
-    """Extract a single Alembic Cache.
-
-    This extracts an Alembic cache using the `-selection` flag to minimize
-    the extracted content to solely what was collected into the instance.
-
-    Arguments:
-        file (str): The filepath to write the alembic file to.
-
-        attr (list of str, optional): A specific geometric attribute to write
-            out. Defaults to [].
-
-        attrPrefix (list of str, optional): Prefix filter for determining which
-            geometric attributes to write out. Defaults to [].
-
-        dataFormat (str): The data format to use for the cache,
-            defaults to "ogawa"
-
-        endFrame (float): End frame of output. Ignored if `frameRange`
-            provided.
-
-        eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with
-            an Euler filter. Euler filtering helps resolve irregularities in
-            rotations especially if X, Y, and Z rotations exceed 360 degrees.
-            Defaults to True.
-
-        frameRange (tuple or str): Two-tuple with start and end frame or a
-            string formatted as: "startFrame endFrame". This argument
-            overrides `startFrame` and `endFrame` arguments.
-
-        melPerFrameCallback (Optional[str]): MEL callback run per frame.
-
-        melPostJobCallback (Optional[str]): MEL callback after last frame is
-            written.
-
-        noNormals (bool): When on, normal data from the original polygon
-            objects is not included in the exported Alembic cache file.
-
-        preRoll (bool): This frame range will not be sampled.
-            Defaults to False.
-
-        preRollStartFrame (float): The frame to start scene
-            evaluation at. This is used to set the starting frame for time
-            dependent translations and can be used to evaluate run-up that
-            isn't actually translated. Defaults to 0.
-
-        pythonPerFrameCallback (Optional[str]): Python callback run per frame.
-
-        pythonPostJobCallback (Optional[str]): Python callback after last frame
-            is written.
-
-        renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
-            such as hidden objects, are not included in the Alembic file.
-            Defaults to False.
-
-        root (list of str): Maya dag paths which will be parented to
-            the root of the Alembic file. Defaults to [], which means the
-            entire scene will be written out.
-
-        selection (bool): Write out all selected nodes from the
-            active selection list that are descendants of the roots specified
-            with -root. Defaults to True.
-
-        startFrame (float): Start frame of output. Ignored if `frameRange`
-            provided.
-
-        step (float): The time interval (expressed in frames) at
-            which the frame range is sampled. Additional samples around each
-            frame can be specified with -frs. Defaults to 1.0.
-
-        stripNamespaces (bool): When on, any namespaces associated with the
-            exported objects are removed from the Alembic file. For example, an
-            object with the namespace taco:foo:bar appears as bar in the
-            Alembic file.
-
-        userAttr (list of str, optional): A specific user defined attribute to
-            write out. Defaults to [].
-
-        userAttrPrefix (list of str, optional): Prefix filter for determining
-            which user defined attributes to write out. Defaults to [].
- - uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes - will be written to the Alembic file. - - uvWrite (bool): When on, UV data from polygon meshes and subdivision - objects are written to the Alembic file. Only the current UV map is - included. - - verbose (bool): When on, outputs frame number information to the - Script Editor or output window during extraction. - - wholeFrameGeo (bool): Data for geometry will only be written - out on whole frames. Defaults to False. - - worldSpace (bool): When on, the top node in the node hierarchy is - stored as world space. By default, these nodes are stored as local - space. Defaults to False. - - writeColorSets (bool): Write all color sets on MFnMeshes as - color 3 or color 4 indexed geometry parameters with face varying - scope. Defaults to False. - - writeCreases (bool): If the mesh has crease edges or crease - vertices, the mesh (OPolyMesh) would now be written out as an OSubD - and crease info will be stored in the Alembic file. Otherwise, - creases info won't be preserved in Alembic file unless a custom - Boolean attribute SubDivisionMesh has been added to mesh node and - its value is true. Defaults to False. - - writeFaceSets (bool): Write all Face sets on MFnMeshes. - Defaults to False. - - writeUVSets (bool): Write all uv sets on MFnMeshes as vector - 2 indexed geometry parameters with face varying scope. Defaults to - False. - - writeVisibility (bool): Visibility state will be stored in - the Alembic file. Otherwise everything written out is treated as - visible. Defaults to False. - """ - - # Ensure alembic exporter is loaded - cmds.loadPlugin('AbcExport', quiet=True) - - # Alembic Exporter requires forward slashes - file = file.replace('\\', '/') - - # Ensure list arguments are valid. - attr = attr or [] - attrPrefix = attrPrefix or [] - userAttr = userAttr or [] - userAttrPrefix = userAttrPrefix or [] - root = root or [] - - # Pass the start and end frame on as `frameRange` so that it - # never conflicts with that argument - if not frameRange: - # Fallback to maya timeline if no start or end frame provided. 
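-        # e.g. a timeline of 1001-1100 (hypothetical values) ends up as the
-        # single string argument frameRange = "1001.0 1100.0"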
- if startFrame is None: - startFrame = cmds.playbackOptions(query=True, - animationStartTime=True) - if endFrame is None: - endFrame = cmds.playbackOptions(query=True, - animationEndTime=True) - - # Ensure valid types are converted to frame range - assert isinstance(startFrame, ALEMBIC_ARGS["startFrame"]) - assert isinstance(endFrame, ALEMBIC_ARGS["endFrame"]) - frameRange = "{0} {1}".format(startFrame, endFrame) - else: - # Allow conversion from tuple for `frameRange` - if isinstance(frameRange, (list, tuple)): - assert len(frameRange) == 2 - frameRange = "{0} {1}".format(frameRange[0], frameRange[1]) - - # Assemble options - options = { - "selection": selection, - "frameRange": frameRange, - "eulerFilter": eulerFilter, - "noNormals": noNormals, - "preRoll": preRoll, - "root": root, - "renderableOnly": renderableOnly, - "uvWrite": uvWrite, - "uvsOnly": uvsOnly, - "writeColorSets": writeColorSets, - "writeFaceSets": writeFaceSets, - "wholeFrameGeo": wholeFrameGeo, - "worldSpace": worldSpace, - "writeVisibility": writeVisibility, - "writeUVSets": writeUVSets, - "writeCreases": writeCreases, - "dataFormat": dataFormat, - "step": step, - "attr": attr, - "attrPrefix": attrPrefix, - "userAttr": userAttr, - "userAttrPrefix": userAttrPrefix, - "stripNamespaces": stripNamespaces, - "verbose": verbose - } - - # Validate options - for key, value in options.copy().items(): - - # Discard unknown options - if key not in ALEMBIC_ARGS: - log.warning("extract_alembic() does not support option '%s'. " - "Flag will be ignored..", key) - options.pop(key) - continue - - # Validate value type - valid_types = ALEMBIC_ARGS[key] - if not isinstance(value, valid_types): - raise TypeError("Alembic option unsupported type: " - "{0} (expected {1})".format(value, valid_types)) - - # Ignore empty values, like an empty string, since they mess up how - # job arguments are built - if isinstance(value, (list, tuple)): - value = [x for x in value if x.strip()] - - # Ignore option completely if no values remaining - if not value: - options.pop(key) - continue - - options[key] = value - - # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ - maya_version = int(cmds.about(version=True)) - if maya_version >= 2018: - options['autoSubd'] = options.pop('writeCreases', False) - - # Only add callbacks if they are set so that we're not passing `None` - callbacks = { - "melPerFrameCallback": melPerFrameCallback, - "melPostJobCallback": melPostJobCallback, - "pythonPerFrameCallback": pythonPerFrameCallback, - "pythonPostJobCallback": pythonPostJobCallback, - } - for key, callback in callbacks.items(): - if callback: - options[key] = str(callback) - - # Format the job string from options - job_args = list() - for key, value in options.items(): - if isinstance(value, (list, tuple)): - for entry in value: - job_args.append("-{} {}".format(key, entry)) - elif isinstance(value, bool): - # Add only when state is set to True - if value: - job_args.append("-{0}".format(key)) - else: - job_args.append("-{0} {1}".format(key, value)) - - job_str = " ".join(job_args) - job_str += ' -file "%s"' % file - - # Ensure output directory exists - parent_dir = os.path.dirname(file) - if not os.path.exists(parent_dir): - os.makedirs(parent_dir) - - if verbose: - log.debug("Preparing Alembic export with options: %s", - json.dumps(options, indent=4)) - log.debug("Extracting Alembic with job arguments: %s", job_str) - - # Perform extraction - print("Alembic Job Arguments : {}".format(job_str)) - - # Disable the parallel evaluation 
temporarily to ensure no buggy - # exports are made. (PLN-31) - # TODO: Make sure this actually fixes the issues - with evaluation("off"): - cmds.AbcExport( - j=job_str, - verbose=verbose, - preRollStartFrame=preRollStartFrame - ) - - if verbose: - log.debug("Extracted Alembic to: %s", file) - - return file diff --git a/server_addon/maya/client/ayon_maya/api/commands.py b/server_addon/maya/client/ayon_maya/api/commands.py deleted file mode 100644 index 22cf0871e2..0000000000 --- a/server_addon/maya/client/ayon_maya/api/commands.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -"""AYON script commands to be used directly in Maya.""" -from maya import cmds - -from ayon_api import get_project, get_folder_by_path - -from ayon_core.pipeline import get_current_project_name, get_current_folder_path - - -class ToolWindows: - - _windows = {} - - @classmethod - def get_window(cls, tool): - """Get widget for specific tool. - - Args: - tool (str): Name of the tool. - - Returns: - Stored widget. - - """ - try: - return cls._windows[tool] - except KeyError: - return None - - @classmethod - def set_window(cls, tool, window): - """Set widget for the tool. - - Args: - tool (str): Name of the tool. - window (QtWidgets.QWidget): Widget - - """ - cls._windows[tool] = window - - -def _resolution_from_entity(entity): - if not entity: - print("Entered entity is not valid. \"{}\"".format( - str(entity) - )) - return None - - attributes = entity.get("attrib") - if attributes is None: - attributes = entity.get("data", {}) - - resolution_width = attributes.get("resolutionWidth") - resolution_height = attributes.get("resolutionHeight") - # Backwards compatibility - if resolution_width is None or resolution_height is None: - resolution_width = attributes.get("resolution_width") - resolution_height = attributes.get("resolution_height") - - # Make sure both width and height are set - if resolution_width is None or resolution_height is None: - cmds.warning( - "No resolution information found for \"{}\"".format( - entity["name"] - ) - ) - return None - - return int(resolution_width), int(resolution_height) - - -def reset_resolution(): - # Default values - resolution_width = 1920 - resolution_height = 1080 - - # Get resolution from folder - project_name = get_current_project_name() - folder_path = get_current_folder_path() - folder_entity = get_folder_by_path(project_name, folder_path) - resolution = _resolution_from_entity(folder_entity) - # Try get resolution from project - if resolution is None: - # TODO go through visualParents - print(( - "Folder '{}' does not have set resolution." 
- " Trying to get resolution from project" - ).format(folder_path)) - project_entity = get_project(project_name) - resolution = _resolution_from_entity(project_entity) - - if resolution is None: - msg = "Using default resolution {}x{}" - else: - resolution_width, resolution_height = resolution - msg = "Setting resolution to {}x{}" - - print(msg.format(resolution_width, resolution_height)) - - # set for different renderers - # arnold, vray, redshift, renderman - - renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer").lower() - # handle various renderman names - if renderer.startswith("renderman"): - renderer = "renderman" - - # default attributes are usable for Arnold, Renderman and Redshift - width_attr_name = "defaultResolution.width" - height_attr_name = "defaultResolution.height" - - # Vray has its own way - if renderer == "vray": - width_attr_name = "vraySettings.width" - height_attr_name = "vraySettings.height" - - cmds.setAttr(width_attr_name, resolution_width) - cmds.setAttr(height_attr_name, resolution_height) diff --git a/server_addon/maya/client/ayon_maya/api/customize.py b/server_addon/maya/client/ayon_maya/api/customize.py deleted file mode 100644 index 16255f69ba..0000000000 --- a/server_addon/maya/client/ayon_maya/api/customize.py +++ /dev/null @@ -1,179 +0,0 @@ -"""A set of commands that install overrides to Maya's UI""" - -import os -import logging - -from functools import partial - -import maya.cmds as cmds -import maya.mel as mel - -from ayon_core import resources -from ayon_core.tools.utils import host_tools -from .lib import get_main_window -from ..tools import show_look_assigner - -log = logging.getLogger(__name__) - -COMPONENT_MASK_ORIGINAL = {} - - -def override_component_mask_commands(): - """Override component mask ctrl+click behavior. - - This implements special behavior for Maya's component - mask menu items where a ctrl+click will instantly make - it an isolated behavior disabling all others. - - Tested in Maya 2016 and 2018 - - """ - log.info("Installing override_component_mask_commands..") - - # Get all object mask buttons - buttons = cmds.formLayout("objectMaskIcons", - query=True, - childArray=True) - # Skip the triangle list item - buttons = [btn for btn in buttons if btn != "objPickMenuLayout"] - - def on_changed_callback(raw_command, state): - """New callback""" - - # If "control" is held force the toggled one to on and - # toggle the others based on whether any of the buttons - # was remaining active after the toggle, if not then - # enable all - if cmds.getModifiers() == 4: # = CTRL - state = True - active = [cmds.iconTextCheckBox(btn, query=True, value=True) - for btn in buttons] - if any(active): - cmds.selectType(allObjects=False) - else: - cmds.selectType(allObjects=True) - - # Replace #1 with the current button state - cmd = raw_command.replace(" #1", " {}".format(int(state))) - mel.eval(cmd) - - for btn in buttons: - - # Store a reference to the original command so that if - # we rerun this override command it doesn't recursively - # try to implement the fix. 
(This also allows us to
-        # "uninstall" the behavior later)
-        if btn not in COMPONENT_MASK_ORIGINAL:
-            original = cmds.iconTextCheckBox(btn, query=True, cc=True)
-            COMPONENT_MASK_ORIGINAL[btn] = original
-
-        # Assign the special callback
-        original = COMPONENT_MASK_ORIGINAL[btn]
-        new_fn = partial(on_changed_callback, original)
-        cmds.iconTextCheckBox(btn, edit=True, cc=new_fn)
-
-
-def override_toolbox_ui():
-    """Add custom buttons in Toolbox as replacement for Maya web help icon."""
-    icons = resources.get_resource("icons")
-    parent_widget = get_main_window()
-
-    # Ensure the maya web icon on toolbox exists
-    button_names = [
-        # Maya 2022.1+ with maya.cmds.iconTextStaticLabel
-        "ToolBox|MainToolboxLayout|mayaHomeToolboxButton",
-        # Older with maya.cmds.iconTextButton
-        "ToolBox|MainToolboxLayout|mayaWebButton"
-    ]
-    for name in button_names:
-        if cmds.control(name, query=True, exists=True):
-            web_button = name
-            break
-    else:
-        # Button does not exist
-        log.warning("Can't find Maya Home/Web button to override toolbox ui..")
-        return
-
-    cmds.control(web_button, edit=True, visible=False)
-
-    # real = 32, but 36 with padding - according to toolbox mel script
-    icon_size = 36
-    parent = web_button.rsplit("|", 1)[0]
-
-    # Ensure the parent is a formLayout
-    if not cmds.objectTypeUI(parent) == "formLayout":
-        return
-
-    # Create our controls
-    controls = []
-
-    controls.append(
-        cmds.iconTextButton(
-            "ayon_toolbox_lookmanager",
-            annotation="Look Manager",
-            label="Look Manager",
-            image=os.path.join(icons, "lookmanager.png"),
-            command=lambda: show_look_assigner(
-                parent=parent_widget
-            ),
-            width=icon_size,
-            height=icon_size,
-            parent=parent
-        )
-    )
-
-    controls.append(
-        cmds.iconTextButton(
-            "ayon_toolbox_workfiles",
-            annotation="Work Files",
-            label="Work Files",
-            image=os.path.join(icons, "workfiles.png"),
-            command=lambda: host_tools.show_workfiles(
-                parent=parent_widget
-            ),
-            width=icon_size,
-            height=icon_size,
-            parent=parent
-        )
-    )
-
-    controls.append(
-        cmds.iconTextButton(
-            "ayon_toolbox_loader",
-            annotation="Loader",
-            label="Loader",
-            image=os.path.join(icons, "loader.png"),
-            command=lambda: host_tools.show_loader(
-                parent=parent_widget, use_context=True
-            ),
-            width=icon_size,
-            height=icon_size,
-            parent=parent
-        )
-    )
-
-    controls.append(
-        cmds.iconTextButton(
-            "ayon_toolbox_manager",
-            annotation="Inventory",
-            label="Inventory",
-            image=os.path.join(icons, "inventory.png"),
-            command=lambda: host_tools.show_scene_inventory(
-                parent=parent_widget
-            ),
-            width=icon_size,
-            height=icon_size,
-            parent=parent
-        )
-    )
-
-    # Add the buttons on the bottom and stack
-    # them above each other with side padding
-    controls.reverse()
-    for i, control in enumerate(controls):
-        previous = controls[i - 1] if i > 0 else web_button
-
-        cmds.formLayout(parent, edit=True,
-                        attachControl=[control, "bottom", 0, previous],
-                        attachForm=([control, "left", 1],
-                                    [control, "right", 1]))
diff --git a/server_addon/maya/client/ayon_maya/api/exitstack.py b/server_addon/maya/client/ayon_maya/api/exitstack.py
deleted file mode 100644
index c35724e889..0000000000
--- a/server_addon/maya/client/ayon_maya/api/exitstack.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""Backwards compatible implementation of ExitStack for Python 2.
-
-The ExitStack context manager was introduced in Python 3.3.
-As long as we support Python 2 hosts we can use this backwards
-compatible implementation to support both Python 2 and Python 3.
-
-Instead of using ExitStack from contextlib, use it from this module:
-
->>> from ayon_maya.api.exitstack import ExitStack
-
-It will provide the appropriate ExitStack implementation for the current
-running Python version.
-
-"""
-# TODO: Remove the entire script once dropping Python 2 support.
-import contextlib
-if hasattr(contextlib, "ExitStack"):
-    # Python 3.3+ ships ExitStack in the standard library.
-    from contextlib import ExitStack  # noqa
-else:
-    # Python 2: fall back to the bundled backport below.
-    import sys
-    from collections import deque
-
-    class ExitStack(object):
-
-        """Context manager for dynamic management of a stack of exit callbacks
-
-        For example:
-
-            with ExitStack() as stack:
-                files = [stack.enter_context(open(fname))
-                         for fname in filenames]
-                # All opened files will automatically be closed at the end of
-                # the with statement, even if attempts to open files later
-                # in the list raise an exception
-
-        """
-        def __init__(self):
-            self._exit_callbacks = deque()
-
-        def pop_all(self):
-            """Preserve the context stack by transferring
-                it to a new instance"""
-            new_stack = type(self)()
-            new_stack._exit_callbacks = self._exit_callbacks
-            self._exit_callbacks = deque()
-            return new_stack
-
-        def _push_cm_exit(self, cm, cm_exit):
-            """Helper to correctly register callbacks
-                to __exit__ methods"""
-            def _exit_wrapper(*exc_details):
-                return cm_exit(cm, *exc_details)
-            _exit_wrapper.__self__ = cm
-            self.push(_exit_wrapper)
-
-        def push(self, exit):
-            """Registers a callback with the standard __exit__ method signature
-
-            Can suppress exceptions the same way __exit__ methods can.
-
-            Also accepts any object with an __exit__ method (registering a call
-            to the method instead of the object itself)
-            """
-            # We use an unbound method rather than a bound method to follow
-            # the standard lookup behaviour for special methods
-            _cb_type = type(exit)
-            try:
-                exit_method = _cb_type.__exit__
-            except AttributeError:
-                # Not a context manager, so assume it's a callable
-                self._exit_callbacks.append(exit)
-            else:
-                self._push_cm_exit(exit, exit_method)
-            return exit  # Allow use as a decorator
-
-        def callback(self, callback, *args, **kwds):
-            """Registers an arbitrary callback and arguments.
-
-            Cannot suppress exceptions.
-            """
-            def _exit_wrapper(exc_type, exc, tb):
-                callback(*args, **kwds)
-            # We changed the signature, so using @wraps is not appropriate, but
-            # setting __wrapped__ may still help with introspection
-            _exit_wrapper.__wrapped__ = callback
-            self.push(_exit_wrapper)
-            return callback  # Allow use as a decorator
-
-        def enter_context(self, cm):
-            """Enters the supplied context manager
-
-            If successful, also pushes its __exit__ method as a callback and
-            returns the result of the __enter__ method.
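-
-            For example (a minimal sketch; any context manager works here):
-
-                with ExitStack() as stack:
-                    f = stack.enter_context(open(fname))
-                    # 'f' is open here and is closed when the stack unwinds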
- """ - # We look up the special methods on the type to - # match the with statement - _cm_type = type(cm) - _exit = _cm_type.__exit__ - result = _cm_type.__enter__(cm) - self._push_cm_exit(cm, _exit) - return result - - def close(self): - """Immediately unwind the context stack""" - self.__exit__(None, None, None) - - def __enter__(self): - return self - - def __exit__(self, *exc_details): - # We manipulate the exception state so it behaves as though - # we were actually nesting multiple with statements - frame_exc = sys.exc_info()[1] - - def _fix_exception_context(new_exc, old_exc): - while 1: - exc_context = new_exc.__context__ - if exc_context in (None, frame_exc): - break - new_exc = exc_context - new_exc.__context__ = old_exc - - # Callbacks are invoked in LIFO order to match the behaviour of - # nested context managers - suppressed_exc = False - while self._exit_callbacks: - cb = self._exit_callbacks.pop() - try: - if cb(*exc_details): - suppressed_exc = True - exc_details = (None, None, None) - except Exception: - new_exc_details = sys.exc_info() - # simulate the stack of exceptions by setting the context - _fix_exception_context(new_exc_details[1], exc_details[1]) - if not self._exit_callbacks: - raise - exc_details = new_exc_details - return suppressed_exc diff --git a/server_addon/maya/client/ayon_maya/api/fbx.py b/server_addon/maya/client/ayon_maya/api/fbx.py deleted file mode 100644 index 28a4058551..0000000000 --- a/server_addon/maya/client/ayon_maya/api/fbx.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -"""Tools to work with FBX.""" -import logging - -from maya import cmds # noqa -import maya.mel as mel # noqa -from ayon_maya.api.lib import maintained_selection - - -class FBXExtractor: - """Extract FBX from Maya. - - This extracts reproducible FBX exports ignoring any of the settings set - on the local machine in the FBX export options window. - - All export settings are applied with the `FBXExport*` commands prior - to the `FBXExport` call itself. The options can be overridden with - their - nice names as seen in the "options" property on this class. - - For more information on FBX exports see: - - https://knowledge.autodesk.com/support/maya/learn-explore/caas - /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4 - -9CB19C28F4E0-htm.html - - http://forums.cgsociety.org/archive/index.php?t-1032853.html - - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE - /LKs9hakE28kJ - - """ - @property - def options(self): - """Overridable options for FBX Export - - Given in the following format - - {NAME: EXPECTED TYPE} - - If the overridden option's type does not match, - the option is not included and a warning is logged. - - """ - - return { - "cameras": bool, - "smoothingGroups": bool, - "hardEdges": bool, - "tangents": bool, - "smoothMesh": bool, - "instances": bool, - # "referencedContainersContent": bool, # deprecated in Maya 2016+ - "bakeComplexAnimation": bool, - "bakeComplexStart": int, - "bakeComplexEnd": int, - "bakeComplexStep": int, - "bakeResampleAnimation": bool, - "useSceneName": bool, - "quaternion": str, # "euler" - "shapes": bool, - "skins": bool, - "constraints": bool, - "lights": bool, - "embeddedTextures": bool, - "includeChildren": bool, - "inputConnections": bool, - "upAxis": str, # x, y or z, - "triangulate": bool, - "fileVersion": str, - "skeletonDefinitions": bool, - "referencedAssetsContent": bool - } - - @property - def default_options(self): - """The default options for FBX extraction. 
- - This includes shapes, skins, constraints, lights and incoming - connections and exports with the Y-axis as up-axis. - - By default this uses the time sliders start and end time. - - """ - - start_frame = int(cmds.playbackOptions(query=True, - animationStartTime=True)) - end_frame = int(cmds.playbackOptions(query=True, - animationEndTime=True)) - - return { - "cameras": False, - "smoothingGroups": True, - "hardEdges": False, - "tangents": False, - "smoothMesh": True, - "instances": False, - "bakeComplexAnimation": True, - "bakeComplexStart": start_frame, - "bakeComplexEnd": end_frame, - "bakeComplexStep": 1, - "bakeResampleAnimation": True, - "useSceneName": False, - "quaternion": "euler", - "shapes": True, - "skins": True, - "constraints": False, - "lights": True, - "embeddedTextures": False, - "includeChildren": True, - "inputConnections": True, - "upAxis": "y", - "triangulate": False, - "fileVersion": "FBX202000", - "skeletonDefinitions": False, - "referencedAssetsContent": False - } - - def __init__(self, log=None): - # Ensure FBX plug-in is loaded - self.log = log or logging.getLogger(self.__class__.__name__) - cmds.loadPlugin("fbxmaya", quiet=True) - - def parse_overrides(self, instance, options): - """Inspect data of instance to determine overridden options - - An instance may supply any of the overridable options - as data, the option is then added to the extraction. - - """ - - for key in instance.data: - if key not in self.options: - continue - - # Ensure the data is of correct type - value = instance.data[key] - if not isinstance(value, self.options[key]): - self.log.warning( - "Overridden attribute {key} was of " - "the wrong type: {invalid_type} " - "- should have been {valid_type}".format( - key=key, - invalid_type=type(value).__name__, - valid_type=self.options[key].__name__)) - continue - - options[key] = value - - return options - - def set_options_from_instance(self, instance): - """Sets FBX export options from data in the instance. - - Args: - instance (Instance): Instance data. - - """ - # Parse export options - options = self.default_options - options = self.parse_overrides(instance, options) - self.log.debug("Export options: {0}".format(options)) - - # Collect the start and end including handles - start = instance.data.get("frameStartHandle") or \ - instance.context.data.get("frameStartHandle") - end = instance.data.get("frameEndHandle") or \ - instance.context.data.get("frameEndHandle") - - options['bakeComplexStart'] = start - options['bakeComplexEnd'] = end - - # First apply the default export settings to be fully consistent - # each time for successive publishes - mel.eval("FBXResetExport") - - # Apply the FBX overrides through MEL since the commands - # only work correctly in MEL according to online - # available discussions on the topic - _iteritems = getattr(options, "iteritems", options.items) - for option, value in _iteritems(): - key = option[0].upper() + option[1:] # uppercase first letter - - # Boolean must be passed as lower-case strings - # as to MEL standards - if isinstance(value, bool): - value = str(value).lower() - - template = "FBXExport{0} {1}" if key == "UpAxis" else \ - "FBXExport{0} -v {1}" # noqa - cmd = template.format(key, value) - self.log.debug(cmd) - mel.eval(cmd) - - # Never show the UI or generate a log - mel.eval("FBXExportShowUI -v false") - mel.eval("FBXExportGenerateLog -v false") - - @staticmethod - def export(members, path): - # type: (list, str) -> None - """Export members as FBX with given path. 
-
-        Args:
-            members (list): List of members to export.
-            path (str): Path to use for export.
-
-        """
-        # The export requires forward slashes because we need
-        # to format it into a string in a mel expression
-        path = path.replace("\\", "/")
-        with maintained_selection():
-            cmds.select(members, r=True, noExpand=True)
-            mel.eval('FBXExport -f "{}" -s'.format(path))
diff --git a/server_addon/maya/client/ayon_maya/api/gltf.py b/server_addon/maya/client/ayon_maya/api/gltf.py
deleted file mode 100644
index 9aa4bf37ef..0000000000
--- a/server_addon/maya/client/ayon_maya/api/gltf.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Tools to work with GLTF."""
-import logging
-
-from maya import cmds, mel  # noqa
-
-log = logging.getLogger(__name__)
-
-_gltf_options = {
-    "of": str,  # outputFolder
-    "cpr": str,  # copyright
-    "sno": bool,  # selectedNodeOnly
-    "sn": str,  # sceneName
-    "glb": bool,  # binary
-    "nbu": bool,  # niceBufferURIs
-    "hbu": bool,  # hashBufferURI
-    "ext": bool,  # externalTextures
-    "ivt": int,  # initialValuesTime
-    "acn": str,  # animationClipName  # codespell:ignore acn
-    "ast": int,  # animationClipStartTime
-    "aet": int,  # animationClipEndTime
-    "afr": float,  # animationClipFrameRate
-    "dsa": int,  # detectStepAnimations
-    "mpa": str,  # meshPrimitiveAttributes
-    "bpa": str,  # blendPrimitiveAttributes
-    "i32": bool,  # force32bitIndices
-    "ssm": bool,  # skipStandardMaterials
-    "eut": bool,  # excludeUnusedTexcoord
-    "dm": bool,  # defaultMaterial
-    "cm": bool,  # colorizeMaterials
-    "dmy": str,  # dumpMaya
-    "dgl": str,  # dumpGLTF
-    "imd": str,  # ignoreMeshDeformers
-    "ssc": bool,  # skipSkinClusters
-    "sbs": bool,  # skipBlendShapes
-    "rvp": bool,  # redrawViewport
-    "vno": bool  # visibleNodesOnly
-}
-
-
-def extract_gltf(parent_dir,
-                 filename,
-                 **kwargs):
-
-    """Export glTF with the maya2glTF plug-in using the given options.
-
-    """
-
-    cmds.loadPlugin('maya2glTF', quiet=True)
-    # load the UI to run mel command
-    mel.eval("maya2glTF_UI()")
-
-    parent_dir = parent_dir.replace('\\', '/')
-    options = {
-        "dsa": 1,
-        "glb": True
-    }
-    options.update(kwargs)
-
-    for key, value in options.copy().items():
-        if key not in _gltf_options:
-            log.warning("extract_gltf() does not support option '%s'. "
-                        "Flag will be ignored..", key)
-            # Drop the unsupported flag so it is not passed along.
-            options.pop(key)
-            continue
-
-    job_args = list()
-    default_opt = "maya2glTF -of \"{0}\" -sn \"{1}\"".format(parent_dir, filename)  # noqa
-    job_args.append(default_opt)
-
-    for key, value in options.items():
-        if isinstance(value, str):
-            job_args.append("-{0} \"{1}\"".format(key, value))
-        elif isinstance(value, bool):
-            if value:
-                job_args.append("-{0}".format(key))
-        else:
-            job_args.append("-{0} {1}".format(key, value))
-
-    job_str = " ".join(job_args)
-    log.info("{}".format(job_str))
-    mel.eval(job_str)
-
-    # close the glTF exporter window after the export finishes
-    gltf_UI = "maya2glTF_exporter_window"
-    if cmds.window(gltf_UI, q=True, exists=True):
-        cmds.deleteUI(gltf_UI)
diff --git a/server_addon/maya/client/ayon_maya/api/lib.py b/server_addon/maya/client/ayon_maya/api/lib.py
deleted file mode 100644
index 0242dafc0b..0000000000
--- a/server_addon/maya/client/ayon_maya/api/lib.py
+++ /dev/null
@@ -1,4243 +0,0 @@
-"""Standalone helper functions"""
-
-import os
-import copy
-from pprint import pformat
-import sys
-import uuid
-import re
-
-import json
-import logging
-import contextlib
-import capture
-from .exitstack import ExitStack
-from collections import OrderedDict, defaultdict
-from math import ceil
-from six import string_types
-
-from maya import cmds, mel
-from maya.api import OpenMaya
-
-import ayon_api
-
-from ayon_core.settings import get_project_settings
-from ayon_core.pipeline import (
-    get_current_project_name,
-    get_current_folder_path,
-    get_current_task_name,
-    discover_loader_plugins,
-    loaders_from_representation,
-    get_representation_path,
-    load_container,
-    registered_host,
-    AVALON_CONTAINER_ID,
-    AVALON_INSTANCE_ID,
-    AYON_INSTANCE_ID,
-    AYON_CONTAINER_ID,
-)
-from ayon_core.lib import NumberDef
-from ayon_core.pipeline.context_tools import get_current_task_entity
-from ayon_core.pipeline.create import CreateContext
-from ayon_core.lib.profiles_filtering import filter_profiles
-
-
-self = sys.modules[__name__]
-self._parent = None
-
-log = logging.getLogger(__name__)
-
-IS_HEADLESS = not hasattr(cmds, "about") or cmds.about(batch=True)
-ATTRIBUTE_DICT = {"int": {"attributeType": "long"},
-                  "str": {"dataType": "string"},
-                  "unicode": {"dataType": "string"},
-                  "float": {"attributeType": "double"},
-                  "bool": {"attributeType": "bool"}}
-
-SHAPE_ATTRS = {"castsShadows",
-               "receiveShadows",
-               "motionBlur",
-               "primaryVisibility",
-               "smoothShading",
-               "visibleInReflections",
-               "visibleInRefractions",
-               "doubleSided",
-               "opposite"}
-
-
-DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0,
-                  0.0, 1.0, 0.0, 0.0,
-                  0.0, 0.0, 1.0, 0.0,
-                  0.0, 0.0, 0.0, 1.0]
-
-INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
-FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
-
-
-DISPLAY_LIGHTS_ENUM = [
-    {"label": "Use Project Settings", "value": "project_settings"},
-    {"label": "Default Lighting", "value": "default"},
-    {"label": "All Lights", "value": "all"},
-    {"label": "Selected Lights", "value": "selected"},
-    {"label": "Flat Lighting", "value": "flat"},
-    {"label": "No Lights", "value": "none"}
-]
-
-
-def get_main_window():
-    """Acquire Maya's main window"""
-    from qtpy import QtWidgets
-
-    if self._parent is None:
-        self._parent = {
-            widget.objectName(): widget
-            for widget in QtWidgets.QApplication.topLevelWidgets()
-        }["MayaWindow"]
-    return self._parent
-
-
-@contextlib.contextmanager
-def suspended_refresh(suspend=True):
-    """Suspend viewport refreshes
-
-    cmds.ogs(pause=True) is a toggle so we can't pass False.
- """ - if IS_HEADLESS: - yield - return - - original_state = cmds.ogs(query=True, pause=True) - try: - if suspend and not original_state: - cmds.ogs(pause=True) - yield - finally: - if suspend and not original_state: - cmds.ogs(pause=True) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context - - Example: - >>> scene = cmds.file(new=True, force=True) - >>> node = cmds.createNode("transform", name="Test") - >>> cmds.select("persp") - >>> with maintained_selection(): - ... cmds.select("Test", replace=True) - >>> "Test" in cmds.ls(selection=True) - False - - """ - - previous_selection = cmds.ls(selection=True) - try: - yield - finally: - if previous_selection: - cmds.select(previous_selection, - replace=True, - noExpand=True) - else: - cmds.select(clear=True) - - -def reload_all_udim_tile_previews(): - """Regenerate all UDIM tile preview in texture file""" - for texture_file in cmds.ls(type="file"): - if cmds.getAttr("{}.uvTilingMode".format(texture_file)) > 0: - cmds.ogs(regenerateUVTilePreview=texture_file) - - -@contextlib.contextmanager -def panel_camera(panel, camera): - """Set modelPanel's camera during the context. - - Arguments: - panel (str): modelPanel name. - camera (str): camera name. - - """ - original_camera = cmds.modelPanel(panel, query=True, camera=True) - try: - cmds.modelPanel(panel, edit=True, camera=camera) - yield - finally: - cmds.modelPanel(panel, edit=True, camera=original_camera) - - -def render_capture_preset(preset): - """Capture playblast with a preset. - - To generate the preset use `generate_capture_preset`. - - Args: - preset (dict): preset options - - Returns: - str: Output path of `capture.capture` - """ - - # Force a refresh at the start of the timeline - # TODO (Question): Why do we need to do this? What bug does it solve? - # Is this for simulations? 
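-    # The two currentTime() calls below step one frame back and forward
-    # again so the scene is re-evaluated at the first frame of the range.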
- cmds.refresh(force=True) - refresh_frame_int = int(cmds.playbackOptions(query=True, minTime=True)) - cmds.currentTime(refresh_frame_int - 1, edit=True) - cmds.currentTime(refresh_frame_int, edit=True) - log.debug( - "Using preset: {}".format( - json.dumps(preset, indent=4, sort_keys=True) - ) - ) - preset = copy.deepcopy(preset) - # not supported by `capture` so we pop it off of the preset - reload_textures = preset["viewport_options"].pop("loadTextures", False) - panel = preset.pop("panel") - with ExitStack() as stack: - stack.enter_context(maintained_time()) - stack.enter_context(panel_camera(panel, preset["camera"])) - stack.enter_context(viewport_default_options(panel, preset)) - if reload_textures: - # Force immediate texture loading when to ensure - # all textures have loaded before the playblast starts - stack.enter_context(material_loading_mode(mode="immediate")) - # Regenerate all UDIM tiles previews - reload_all_udim_tile_previews() - path = capture.capture(log=self.log, **preset) - - return path - - -def generate_capture_preset(instance, camera, path, - start=None, end=None, capture_preset=None): - """Function for getting all the data of preset options for - playblast capturing - - Args: - instance (pyblish.api.Instance): instance - camera (str): review camera - path (str): filepath - start (int): frameStart - end (int): frameEnd - capture_preset (dict): capture preset - - Returns: - dict: Resulting preset - """ - preset = load_capture_preset(data=capture_preset) - - preset["camera"] = camera - preset["start_frame"] = start - preset["end_frame"] = end - preset["filename"] = path - preset["overwrite"] = True - preset["panel"] = instance.data["panel"] - - # Disable viewer since we use the rendering logic for publishing - # We don't want to open the generated playblast in a viewer directly. - preset["viewer"] = False - - # "isolate_view" will already have been applied at creation, so we'll - # ignore it here. - preset.pop("isolate_view") - - # Set resolution variables from capture presets - width_preset = capture_preset["Resolution"]["width"] - height_preset = capture_preset["Resolution"]["height"] - - # Set resolution variables from folder values - folder_attributes = instance.data["folderEntity"]["attrib"] - folder_width = folder_attributes.get("resolutionWidth") - folder_height = folder_attributes.get("resolutionHeight") - review_instance_width = instance.data.get("review_width") - review_instance_height = instance.data.get("review_height") - - # Use resolution from instance if review width/height is set - # Otherwise use the resolution from preset if it has non-zero values - # Otherwise fall back to folder width x height - # Else define no width, then `capture.capture` will use render resolution - if review_instance_width and review_instance_height: - preset["width"] = review_instance_width - preset["height"] = review_instance_height - elif width_preset and height_preset: - preset["width"] = width_preset - preset["height"] = height_preset - elif folder_width and folder_height: - preset["width"] = folder_width - preset["height"] = folder_height - - # Isolate view is requested by having objects in the set besides a - # camera. If there is only 1 member it'll be the camera because we - # validate to have 1 camera only. 
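-    # e.g. setMembers = ["reviewCamera", "model_GRP"] (hypothetical names)
-    # means only those two nodes stay visible in the playblast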
- if instance.data["isolate"] and len(instance.data["setMembers"]) > 1: - preset["isolate"] = instance.data["setMembers"] - - # Override camera options - # Enforce persisting camera depth of field - camera_options = preset.setdefault("camera_options", {}) - camera_options["depthOfField"] = cmds.getAttr( - "{0}.depthOfField".format(camera) - ) - - # Use Pan/Zoom from instance data instead of from preset - preset.pop("pan_zoom", None) - camera_options["panZoomEnabled"] = instance.data["panZoom"] - - # Override viewport options by instance data - viewport_options = preset.setdefault("viewport_options", {}) - viewport_options["displayLights"] = instance.data["displayLights"] - viewport_options["imagePlane"] = instance.data.get("imagePlane", True) - - # Override transparency if requested. - transparency = instance.data.get("transparency", 0) - if transparency != 0: - preset["viewport2_options"]["transparencyAlgorithm"] = transparency - - # Update preset with current panel setting - # if override_viewport_options is turned off - if not capture_preset["ViewportOptions"]["override_viewport_options"]: - panel_preset = capture.parse_view(preset["panel"]) - panel_preset.pop("camera") - preset.update(panel_preset) - - return preset - - -@contextlib.contextmanager -def viewport_default_options(panel, preset): - """Context manager used by `render_capture_preset`. - - We need to explicitly enable some viewport changes so the viewport is - refreshed ahead of playblasting. - - """ - # TODO: Clarify in the docstring WHY we need to set it ahead of - # playblasting. What issues does it solve? - viewport_defaults = {} - try: - keys = [ - "useDefaultMaterial", - "wireframeOnShaded", - "xray", - "jointXray", - "backfaceCulling", - "textures" - ] - for key in keys: - viewport_defaults[key] = cmds.modelEditor( - panel, query=True, **{key: True} - ) - if preset["viewport_options"].get(key): - cmds.modelEditor( - panel, edit=True, **{key: True} - ) - yield - finally: - # Restoring viewport options. - if viewport_defaults: - cmds.modelEditor( - panel, edit=True, **viewport_defaults - ) - - -@contextlib.contextmanager -def material_loading_mode(mode="immediate"): - """Set material loading mode during context""" - original = cmds.displayPref(query=True, materialLoadingMode=True) - cmds.displayPref(materialLoadingMode=mode) - try: - yield - finally: - cmds.displayPref(materialLoadingMode=original) - - -def get_namespace(node): - """Return namespace of given node""" - node_name = node.rsplit("|", 1)[-1] - if ":" in node_name: - return node_name.rsplit(":", 1)[0] - else: - return "" - - -def strip_namespace(node, namespace): - """Strip given namespace from node path. - - The namespace will only be stripped from names - if it starts with that namespace. If the namespace - occurs within another namespace it's not removed. - - Examples: - >>> strip_namespace("namespace:node", namespace="namespace:") - "node" - >>> strip_namespace("hello:world:node", namespace="hello:world") - "node" - >>> strip_namespace("hello:world:node", namespace="hello") - "world:node" - >>> strip_namespace("hello:world:node", namespace="world") - "hello:world:node" - >>> strip_namespace("ns:group|ns:node", namespace="ns") - "group|node" - - Returns: - str: Node name without given starting namespace. 
- - """ - - # Ensure namespace ends with `:` - if not namespace.endswith(":"): - namespace = "{}:".format(namespace) - - # The long path for a node can also have the namespace - # in its parents so we need to remove it from each - return "|".join( - name[len(namespace):] if name.startswith(namespace) else name - for name in node.split("|") - ) - - -def get_custom_namespace(custom_namespace): - """Return unique namespace. - - The input namespace can contain a single group - of '#' number tokens to indicate where the namespace's - unique index should go. The amount of tokens defines - the zero padding of the number, e.g ### turns into 001. - - Warning: Note that a namespace will always be - prefixed with a _ if it starts with a digit - - Example: - >>> get_custom_namespace("myspace_##_") - # myspace_01_ - >>> get_custom_namespace("##_myspace") - # _01_myspace - >>> get_custom_namespace("myspace##") - # myspace01 - - """ - split = re.split("([#]+)", custom_namespace, 1) - - if len(split) == 3: - base, padding, suffix = split - padding = "%0{}d".format(len(padding)) - else: - base = split[0] - padding = "%02d" # default padding - suffix = "" - - return unique_namespace( - base, - format=padding, - prefix="_" if not base or base[0].isdigit() else "", - suffix=suffix - ) - - -def unique_namespace(namespace, format="%02d", prefix="", suffix=""): - """Return unique namespace - - Arguments: - namespace (str): Name of namespace to consider - format (str, optional): Formatting of the given iteration number - suffix (str, optional): Only consider namespaces with this suffix. - - >>> unique_namespace("bar") - # bar01 - >>> unique_namespace(":hello") - # :hello01 - >>> unique_namespace("bar:", suffix="_NS") - # bar01_NS: - - """ - - def current_namespace(): - current = cmds.namespaceInfo(currentNamespace=True, - absoluteName=True) - # When inside a namespace Maya adds no trailing : - if not current.endswith(":"): - current += ":" - return current - - # Always check against the absolute namespace root - # There's no clash with :x if we're defining namespace :a:x - ROOT = ":" if namespace.startswith(":") else current_namespace() - - # Strip trailing `:` tokens since we might want to add a suffix - start = ":" if namespace.startswith(":") else "" - end = ":" if namespace.endswith(":") else "" - namespace = namespace.strip(":") - if ":" in namespace: - # Split off any nesting that we don't uniqify anyway. - parents, namespace = namespace.rsplit(":", 1) - start += parents + ":" - ROOT += start - - def exists(n): - # Check for clash with nodes and namespaces - fullpath = ROOT + n - return cmds.objExists(fullpath) or cmds.namespace(exists=fullpath) - - iteration = 1 - while True: - nr_namespace = namespace + format % iteration - unique = prefix + nr_namespace + suffix - - if not exists(unique): - return start + unique + end - - iteration += 1 - - -def read(node): - """Return user-defined attributes from `node`""" - - data = dict() - - for attr in cmds.listAttr(node, userDefined=True) or list(): - try: - value = cmds.getAttr(node + "." + attr, asString=True) - - except RuntimeError: - # For Message type attribute or others that have connections, - # take source node name as value. - source = cmds.listConnections(node + "." + attr, - source=True, - destination=False) - source = cmds.ls(source, long=True) or [None] - value = source[0] - - except ValueError: - # Some attributes cannot be read directly, - # such as mesh and color attributes. 
These - # are considered non-essential to this - # particular publishing pipeline. - value = None - - data[attr] = value - - return data - - -def matrix_equals(a, b, tolerance=1e-10): - """ - Compares two matrices with an imperfection tolerance - - Args: - a (list, tuple): the matrix to check - b (list, tuple): the matrix to check against - tolerance (float): the precision of the differences - - Returns: - bool : True or False - - """ - if not all(abs(x - y) < tolerance for x, y in zip(a, b)): - return False - return True - - -def float_round(num, places=0, direction=ceil): - return direction(num * (10**places)) / float(10**places) - - -def pairwise(iterable): - """s -> (s0,s1), (s2,s3), (s4, s5), ...""" - from six.moves import zip - - a = iter(iterable) - return zip(a, a) - - -def collect_animation_defs(fps=False): - """Get the basic animation attribute definitions for the publisher. - - Returns: - OrderedDict - - """ - - # get scene values as defaults - frame_start = cmds.playbackOptions(query=True, minTime=True) - frame_end = cmds.playbackOptions(query=True, maxTime=True) - frame_start_handle = cmds.playbackOptions( - query=True, animationStartTime=True - ) - frame_end_handle = cmds.playbackOptions(query=True, animationEndTime=True) - - handle_start = frame_start - frame_start_handle - handle_end = frame_end_handle - frame_end - - # build attributes - defs = [ - NumberDef("frameStart", - label="Frame Start", - default=frame_start, - decimals=0), - NumberDef("frameEnd", - label="Frame End", - default=frame_end, - decimals=0), - NumberDef("handleStart", - label="Handle Start", - default=handle_start, - decimals=0), - NumberDef("handleEnd", - label="Handle End", - default=handle_end, - decimals=0), - NumberDef("step", - label="Step size", - tooltip="A smaller step size means more samples and larger " - "output files.\n" - "A 1.0 step size is a single sample every frame.\n" - "A 0.5 step size is two samples per frame.\n" - "A 0.2 step size is five samples per frame.", - default=1.0, - decimals=3), - ] - - if fps: - current_fps = mel.eval('currentTimeUnitToFPS()') - fps_def = NumberDef( - "fps", label="FPS", default=current_fps, decimals=5 - ) - defs.append(fps_def) - - return defs - - -def imprint(node, data): - """Write `data` to `node` as userDefined attributes - - Arguments: - node (str): Long name of node - data (dict): Dictionary of key/value pairs - - Example: - >>> from maya import cmds - >>> def compute(): - ... return 6 - ... - >>> cube, generator = cmds.polyCube() - >>> imprint(cube, { - ... "regularString": "myFamily", - ... "computedValue": lambda: compute() - ... }) - ... 
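-     >>> # The imprinted string value can be read back the same way:
-     >>> cmds.getAttr(cube + ".regularString")
-     'myFamily'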
- >>> cmds.getAttr(cube + ".computedValue") - 6 - - """ - - for key, value in data.items(): - - if callable(value): - # Support values evaluated at imprint - value = value() - - if isinstance(value, bool): - add_type = {"attributeType": "bool"} - set_type = {"keyable": False, "channelBox": True} - elif isinstance(value, string_types): - add_type = {"dataType": "string"} - set_type = {"type": "string"} - elif isinstance(value, int): - add_type = {"attributeType": "long"} - set_type = {"keyable": False, "channelBox": True} - elif isinstance(value, float): - add_type = {"attributeType": "double"} - set_type = {"keyable": False, "channelBox": True} - elif isinstance(value, (list, tuple)): - add_type = {"attributeType": "enum", "enumName": ":".join(value)} - set_type = {"keyable": False, "channelBox": True} - value = 0 # enum default - else: - raise TypeError("Unsupported type: %r" % type(value)) - - cmds.addAttr(node, longName=key, **add_type) - cmds.setAttr(node + "." + key, value, **set_type) - - -def lsattr(attr, value=None): - """Return nodes matching `key` and `value` - - Arguments: - attr (str): Name of Maya attribute - value (object, optional): Value of attribute. If none - is provided, return all nodes with this attribute. - - Example: - >> lsattr("id", "myId") - ["myNode"] - >> lsattr("id") - ["myNode", "myOtherNode"] - - """ - - if value is None: - return cmds.ls("*.%s" % attr, - recursive=True, - objectsOnly=True, - long=True) - return lsattrs({attr: value}) - - -def lsattrs(attrs): - """Return nodes with the given attribute(s). - - Arguments: - attrs (dict): Name and value pairs of expected matches - - Example: - >>> # Return nodes with an `age` of five. - >>> lsattrs({"age": "five"}) - >>> # Return nodes with both `age` and `color` of five and blue. - >>> lsattrs({"age": "five", "color": "blue"}) - - Return: - list: matching nodes. - - """ - - dep_fn = OpenMaya.MFnDependencyNode() - dag_fn = OpenMaya.MFnDagNode() - selection_list = OpenMaya.MSelectionList() - - first_attr = next(iter(attrs)) - - try: - selection_list.add("*.{0}".format(first_attr), - searchChildNamespaces=True) - except RuntimeError as exc: - if str(exc).endswith("Object does not exist"): - return [] - - matches = set() - for i in range(selection_list.length()): - node = selection_list.getDependNode(i) - if node.hasFn(OpenMaya.MFn.kDagNode): - fn_node = dag_fn.setObject(node) - full_path_names = [path.fullPathName() - for path in fn_node.getAllPaths()] - else: - fn_node = dep_fn.setObject(node) - full_path_names = [fn_node.name()] - - for attr in attrs: - try: - plug = fn_node.findPlug(attr, True) - if plug.asString() != attrs[attr]: - break - except RuntimeError: - break - else: - matches.update(full_path_names) - - return list(matches) - - -@contextlib.contextmanager -def attribute_values(attr_values): - """Remaps node attributes to values during context. - - Arguments: - attr_values (dict): Dictionary with (attr, value) - - """ - - original = [(attr, cmds.getAttr(attr)) for attr in attr_values] - try: - for attr, value in attr_values.items(): - if isinstance(value, string_types): - cmds.setAttr(attr, value, type="string") - else: - cmds.setAttr(attr, value) - yield - finally: - for attr, value in original: - if isinstance(value, string_types): - cmds.setAttr(attr, value, type="string") - elif value is None and cmds.getAttr(attr, type=True) == "string": - # In some cases the maya.cmds.getAttr command returns None - # for string attributes but this value cannot assigned. 
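-                 # Passing None to `cmds.setAttr` would raise an error,
-                 # so an empty string is written back instead.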
- # Note: After setting it once to "" it will then return "" - # instead of None. So this would only happen once. - cmds.setAttr(attr, "", type="string") - else: - cmds.setAttr(attr, value) - - -@contextlib.contextmanager -def keytangent_default(in_tangent_type='auto', - out_tangent_type='auto'): - """Set the default keyTangent for new keys during this context""" - - original_itt = cmds.keyTangent(query=True, g=True, itt=True)[0] - original_ott = cmds.keyTangent(query=True, g=True, ott=True)[0] - cmds.keyTangent(g=True, itt=in_tangent_type) - cmds.keyTangent(g=True, ott=out_tangent_type) - try: - yield - finally: - cmds.keyTangent(g=True, itt=original_itt) - cmds.keyTangent(g=True, ott=original_ott) - - -@contextlib.contextmanager -def undo_chunk(): - """Open a undo chunk during context.""" - - try: - cmds.undoInfo(openChunk=True) - yield - finally: - cmds.undoInfo(closeChunk=True) - - -@contextlib.contextmanager -def evaluation(mode="off"): - """Set the evaluation manager during context. - - Arguments: - mode (str): The mode to apply during context. - "off": The standard DG evaluation (stable) - "serial": A serial DG evaluation - "parallel": The Maya 2016+ parallel evaluation - - """ - - original = cmds.evaluationManager(query=True, mode=1)[0] - try: - cmds.evaluationManager(mode=mode) - yield - finally: - cmds.evaluationManager(mode=original) - - -@contextlib.contextmanager -def empty_sets(sets, force=False): - """Remove all members of the sets during the context""" - - assert isinstance(sets, (list, tuple)) - - original = dict() - original_connections = [] - - # Store original state - for obj_set in sets: - members = cmds.sets(obj_set, query=True) - original[obj_set] = members - - try: - for obj_set in sets: - cmds.sets(clear=obj_set) - if force: - # Break all connections if force is enabled, this way we - # prevent Maya from exporting any reference nodes which are - # connected with placeHolder[x] attributes - plug = "%s.dagSetMembers" % obj_set - connections = cmds.listConnections(plug, - source=True, - destination=False, - plugs=True, - connections=True) or [] - original_connections.extend(connections) - for dest, src in pairwise(connections): - cmds.disconnectAttr(src, dest) - yield - finally: - - for dest, src in pairwise(original_connections): - cmds.connectAttr(src, dest) - - # Restore original members - _iteritems = getattr(original, "iteritems", original.items) - for origin_set, members in _iteritems(): - cmds.sets(members, forceElement=origin_set) - - -@contextlib.contextmanager -def renderlayer(layer): - """Set the renderlayer during the context - - Arguments: - layer (str): Name of layer to switch to. - - """ - - original = cmds.editRenderLayerGlobals(query=True, - currentRenderLayer=True) - - try: - cmds.editRenderLayerGlobals(currentRenderLayer=layer) - yield - finally: - cmds.editRenderLayerGlobals(currentRenderLayer=original) - - -class delete_after(object): - """Context Manager that will delete collected nodes after exit. - - This allows to ensure the nodes added to the context are deleted - afterwards. This is useful if you want to ensure nodes are deleted - even if an error is raised. 
- - Examples: - with delete_after() as delete_bin: - cube = maya.cmds.polyCube() - delete_bin.extend(cube) - # cube exists - # cube deleted - - """ - - def __init__(self, nodes=None): - - self._nodes = list() - - if nodes: - self.extend(nodes) - - def append(self, node): - self._nodes.append(node) - - def extend(self, nodes): - self._nodes.extend(nodes) - - def __iter__(self): - return iter(self._nodes) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if self._nodes: - cmds.delete(self._nodes) - - -def get_current_renderlayer(): - return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) - - -def get_renderer(layer): - with renderlayer(layer): - return cmds.getAttr("defaultRenderGlobals.currentRenderer") - - -@contextlib.contextmanager -def no_undo(flush=False): - """Disable the undo queue during the context - - Arguments: - flush (bool): When True the undo queue will be emptied when returning - from the context losing all undo history. Defaults to False. - - """ - original = cmds.undoInfo(query=True, state=True) - keyword = 'state' if flush else 'stateWithoutFlush' - - try: - cmds.undoInfo(**{keyword: False}) - yield - finally: - cmds.undoInfo(**{keyword: original}) - - -def get_shader_assignments_from_shapes(shapes, components=True): - """Return the shape assignment per related shading engines. - - Returns a dictionary where the keys are shadingGroups and the values are - lists of assigned shapes or shape-components. - - Since `maya.cmds.sets` returns shader members on the shapes as components - on the transform we correct that in this method too. - - For the 'shapes' this will return a dictionary like: - { - "shadingEngineX": ["nodeX", "nodeY"], - "shadingEngineY": ["nodeA", "nodeB"] - } - - Args: - shapes (list): The shapes to collect the assignments for. - components (bool): Whether to include the component assignments. - - Returns: - dict: The {shadingEngine: shapes} relationships - - """ - - shapes = cmds.ls(shapes, - long=True, - shapes=True, - objectsOnly=True) - if not shapes: - return {} - - # Collect shading engines and their shapes - assignments = defaultdict(list) - for shape in shapes: - - # Get unique shading groups for the shape - shading_groups = cmds.listConnections(shape, - source=False, - destination=True, - plugs=False, - connections=False, - type="shadingEngine") or [] - shading_groups = list(set(shading_groups)) - for shading_group in shading_groups: - assignments[shading_group].append(shape) - - if components: - # Note: Components returned from maya.cmds.sets are "listed" as if - # being assigned to the transform like: pCube1.f[0] as opposed - # to pCubeShape1.f[0] so we correct that here too. - - # Build a mapping from parent to shapes to include in lookup. - transforms = {shape.rsplit("|", 1)[0]: shape for shape in shapes} - lookup = set(shapes) | set(transforms.keys()) - - component_assignments = defaultdict(list) - for shading_group in assignments.keys(): - members = cmds.ls(cmds.sets(shading_group, query=True), long=True) - for member in members: - - node = member.split(".", 1)[0] - if node not in lookup: - continue - - # Component - if "." 
in member: - - # Fix transform to shape as shaders are assigned to shapes - if node in transforms: - shape = transforms[node] - component = member.split(".", 1)[1] - member = "{0}.{1}".format(shape, component) - - component_assignments[shading_group].append(member) - assignments = component_assignments - - return dict(assignments) - - -@contextlib.contextmanager -def shader(nodes, shadingEngine="initialShadingGroup"): - """Assign a shader to nodes during the context""" - - shapes = cmds.ls(nodes, dag=1, objectsOnly=1, shapes=1, long=1) - original = get_shader_assignments_from_shapes(shapes) - - try: - # Assign override shader - if shapes: - cmds.sets(shapes, edit=True, forceElement=shadingEngine) - yield - finally: - - # Assign original shaders - for sg, members in original.items(): - if members: - cmds.sets(members, edit=True, forceElement=sg) - - -@contextlib.contextmanager -def displaySmoothness(nodes, - divisionsU=0, - divisionsV=0, - pointsWire=4, - pointsShaded=1, - polygonObject=1): - """Set the displaySmoothness during the context""" - - # Ensure only non-intermediate shapes - nodes = cmds.ls(nodes, - dag=1, - shapes=1, - long=1, - noIntermediate=True) - - def parse(node): - """Parse the current state of a node""" - state = {} - for key in ["divisionsU", - "divisionsV", - "pointsWire", - "pointsShaded", - "polygonObject"]: - value = cmds.displaySmoothness(node, query=1, **{key: True}) - if value is not None: - state[key] = value[0] - return state - - originals = dict((node, parse(node)) for node in nodes) - - try: - # Apply current state - cmds.displaySmoothness(nodes, - divisionsU=divisionsU, - divisionsV=divisionsV, - pointsWire=pointsWire, - pointsShaded=pointsShaded, - polygonObject=polygonObject) - yield - finally: - # Revert state - _iteritems = getattr(originals, "iteritems", originals.items) - for node, state in _iteritems(): - if state: - cmds.displaySmoothness(node, **state) - - -@contextlib.contextmanager -def no_display_layers(nodes): - """Ensure nodes are not in a displayLayer during context. - - Arguments: - nodes (list): The nodes to remove from any display layer. - - """ - - # Ensure long names - nodes = cmds.ls(nodes, long=True) - - # Get the original state - lookup = set(nodes) - original = {} - for layer in cmds.ls(type='displayLayer'): - - # Skip default layer - if layer == "defaultLayer": - continue - - members = cmds.editDisplayLayerMembers(layer, - query=True, - fullNames=True) - if not members: - continue - members = set(members) - - included = lookup.intersection(members) - if included: - original[layer] = list(included) - - try: - # Add all nodes to default layer - cmds.editDisplayLayerMembers("defaultLayer", nodes, noRecurse=True) - yield - finally: - # Restore original members - _iteritems = getattr(original, "iteritems", original.items) - for layer, members in _iteritems(): - cmds.editDisplayLayerMembers(layer, members, noRecurse=True) - - -@contextlib.contextmanager -def namespaced(namespace, new=True, relative_names=None): - """Work inside namespace during context - - Args: - new (bool): When enabled this will rename the namespace to a unique - namespace if the input namespace already exists. 
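-         namespace (str): The namespace to enter during the context; when
-             `new` is enabled a unique variation of this name is used.
-         relative_names (bool, optional): When not None, enable or disable
-             relative namespace naming for the duration of the context.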
- - Yields: - str: The namespace that is used during the context - - """ - original = cmds.namespaceInfo(cur=True, absoluteName=True) - original_relative_names = cmds.namespace(query=True, relativeNames=True) - if new: - namespace = unique_namespace(namespace) - cmds.namespace(add=namespace) - if relative_names is not None: - cmds.namespace(relativeNames=relative_names) - try: - cmds.namespace(set=namespace) - yield namespace - finally: - cmds.namespace(set=original) - if relative_names is not None: - cmds.namespace(relativeNames=original_relative_names) - - -@contextlib.contextmanager -def maintained_selection_api(): - """Maintain selection using the Maya Python API. - - Warning: This is *not* added to the undo stack. - - """ - original = OpenMaya.MGlobal.getActiveSelectionList() - try: - yield - finally: - OpenMaya.MGlobal.setActiveSelectionList(original) - - -@contextlib.contextmanager -def tool(context): - """Set a tool context during the context manager. - - """ - original = cmds.currentCtx() - try: - cmds.setToolTo(context) - yield - finally: - cmds.setToolTo(original) - - -def polyConstraint(components, *args, **kwargs): - """Return the list of *components* with the constraints applied. - - A wrapper around Maya's `polySelectConstraint` to retrieve its results as - a list without altering selections. For a list of possible constraints - see `maya.cmds.polySelectConstraint` documentation. - - Arguments: - components (list): List of components of polygon meshes - - Returns: - list: The list of components filtered by the given constraints. - - """ - - kwargs.pop('mode', None) - - with no_undo(flush=False): - # Reverting selection to the original selection using - # `maya.cmds.select` can be slow in rare cases where previously - # `maya.cmds.polySelectConstraint` had set constrain to "All and Next" - # and the "Random" setting was activated. To work around this we - # revert to the original selection using the Maya API. This is safe - # since we're not generating any undo change anyway. - with tool("selectSuperContext"): - # Selection can be very slow when in a manipulator mode. - # So we force the selection context which is fast. - with maintained_selection_api(): - # Apply constraint using mode=2 (current and next) so - # it applies to the selection made before it; because just - # a `maya.cmds.select()` call will not trigger the constraint. - with reset_polySelectConstraint(): - cmds.select(components, r=1, noExpand=True) - cmds.polySelectConstraint(*args, mode=2, **kwargs) - result = cmds.ls(selection=True) - cmds.select(clear=True) - return result - - -@contextlib.contextmanager -def reset_polySelectConstraint(reset=True): - """Context during which the given polyConstraint settings are disabled. - - The original settings are restored after the context. - - """ - - original = cmds.polySelectConstraint(query=True, stateString=True) - - try: - if reset: - # Ensure command is available in mel - # This can happen when running standalone - if not mel.eval("exists resetPolySelectConstraint"): - mel.eval("source polygonConstraint") - - # Reset all parameters - mel.eval("resetPolySelectConstraint;") - cmds.polySelectConstraint(disable=True) - yield - finally: - mel.eval(original) - - -def is_visible(node, - displayLayer=True, - intermediateObject=True, - parentHidden=True, - visibility=True): - """Is `node` visible? 
- - Returns whether a node is hidden by one of the following methods: - - The node exists (always checked) - - The node must be a dagNode (always checked) - - The node's visibility is off. - - The node is set as intermediate Object. - - The node is in a disabled displayLayer. - - Whether any of its parent nodes is hidden. - - Roughly based on: http://ewertb.soundlinker.com/mel/mel.098.php - - Returns: - bool: Whether the node is visible in the scene - - """ - - # Only existing objects can be visible - if not cmds.objExists(node): - return False - - # Only dagNodes can be visible - if not cmds.objectType(node, isAType='dagNode'): - return False - - if visibility: - if not cmds.getAttr('{0}.visibility'.format(node)): - return False - - if intermediateObject and cmds.objectType(node, isAType='shape'): - if cmds.getAttr('{0}.intermediateObject'.format(node)): - return False - - if displayLayer: - # Display layers set overrideEnabled and overrideVisibility on members - if cmds.attributeQuery('overrideEnabled', node=node, exists=True): - override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node)) - override_visibility = cmds.getAttr('{}.overrideVisibility'.format( - node)) - if override_enabled and not override_visibility: - return False - - if parentHidden: - parents = cmds.listRelatives(node, parent=True, fullPath=True) - if parents: - parent = parents[0] - if not is_visible(parent, - displayLayer=displayLayer, - intermediateObject=False, - parentHidden=parentHidden, - visibility=visibility): - return False - - return True - -# region ID -def get_id_required_nodes(referenced_nodes=False, - nodes=None, - existing_ids=True): - """Return nodes that should receive a `cbId` attribute. - - This includes only mesh and curve nodes, parent transforms of the shape - nodes, file texture nodes and object sets (including shading engines). - - This filters out any node which is locked, referenced, read-only, - intermediate object. 
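-     Parent transforms of matching shapes are included even when the shape
-     itself ends up excluded (e.g. when it is locked).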
- - Args: - referenced_nodes (bool): set True to include referenced nodes - nodes (list, Optional): nodes to consider - existing_ids (bool): set True to include nodes with `cbId` attribute - - Returns: - nodes (set): list of filtered nodes - """ - - if nodes is not None and not nodes: - # User supplied an empty `nodes` list to check so all we can - # do is return the empty result - return set() - - def _node_type_exists(node_type): - try: - cmds.nodeType(node_type, isTypeName=True) - return True - except RuntimeError: - return False - - def iterate(maya_iterator): - while not maya_iterator.isDone(): - yield maya_iterator.thisNode() - maya_iterator.next() - - # `readOnly` flag is obsolete as of Maya 2016 therefore we explicitly - # remove default nodes and reference nodes - default_camera_shapes = { - "frontShape", "sideShape", "topShape", "perspShape" - } - - # The filtered types do not include transforms because we only want the - # parent transforms that have a child shape that we filtered to, so we - # include the parents here - types = ["mesh", "nurbsCurve", "nurbsSurface", "file", "objectSet"] - - # Check if plugin nodes are available for Maya by checking if the plugin - # is loaded - if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True): - types.append("pgYetiMaya") - - iterator_type = OpenMaya.MIteratorType() - # This tries to be closest matching API equivalents of `types` variable - iterator_type.filterList = [ - OpenMaya.MFn.kMesh, # mesh - OpenMaya.MFn.kNurbsSurface, # nurbsSurface - OpenMaya.MFn.kNurbsCurve, # nurbsCurve - OpenMaya.MFn.kFileTexture, # file - OpenMaya.MFn.kSet, # objectSet - OpenMaya.MFn.kPluginShape # pgYetiMaya - ] - it = OpenMaya.MItDependencyNodes(iterator_type) - - fn_dep = OpenMaya.MFnDependencyNode() - fn_dag = OpenMaya.MFnDagNode() - result = set() - - def _should_include_parents(obj): - """Whether to include parents of obj in output""" - if not obj.hasFn(OpenMaya.MFn.kShape): - return False - - fn_dag.setObject(obj) - if fn_dag.isIntermediateObject: - return False - - # Skip default cameras - if ( - obj.hasFn(OpenMaya.MFn.kCamera) and - fn_dag.name() in default_camera_shapes - ): - return False - - return True - - def _add_to_result_if_valid(obj): - """Add to `result` if the object should be included""" - fn_dep.setObject(obj) - if not existing_ids and fn_dep.hasAttribute("cbId"): - return - - if not referenced_nodes and fn_dep.isFromReferencedFile: - return - - if fn_dep.isDefaultNode: - return - - if fn_dep.isLocked: - return - - # Skip default cameras - if ( - obj.hasFn(OpenMaya.MFn.kCamera) and - fn_dep.name() in default_camera_shapes - ): - return - - if obj.hasFn(OpenMaya.MFn.kDagNode): - # DAG nodes - fn_dag.setObject(obj) - - # Skip intermediate objects - if fn_dag.isIntermediateObject: - return - - # DAG nodes can be instanced and thus may have multiple paths. - # We need to identify each path - paths = OpenMaya.MDagPath.getAllPathsTo(obj) - for dag in paths: - path = dag.fullPathName() - result.add(path) - else: - # Dependency node - path = fn_dep.name() - result.add(path) - - for obj in iterate(it): - # For any non-intermediate shape node always include the parent - # even if we exclude the shape itself (e.g. 
when locked, default) - if _should_include_parents(obj): - fn_dag.setObject(obj) - parents = [ - fn_dag.parent(index) for index in range(fn_dag.parentCount()) - ] - for parent_obj in parents: - _add_to_result_if_valid(parent_obj) - - _add_to_result_if_valid(obj) - - if not result: - return result - - # Exclude some additional types - exclude_types = [] - if _node_type_exists("ilrBakeLayer"): - # Remove Turtle from the result if Turtle is loaded - exclude_types.append("ilrBakeLayer") - - if exclude_types: - exclude_nodes = set(cmds.ls(nodes, long=True, type=exclude_types)) - if exclude_nodes: - result -= exclude_nodes - - # Filter to explicit input nodes if provided - if nodes is not None: - # The amount of input nodes to filter to can be large and querying - # many nodes can be slow in Maya. As such we want to try and reduce - # it as much as possible, so we include the type filter to try and - # reduce the result of `maya.cmds.ls` here. - nodes = set(cmds.ls(nodes, long=True, type=types + ["dagNode"])) - if nodes: - result &= nodes - else: - return set() - - return result - - -def get_id(node): - """Get the `cbId` attribute of the given node. - - Args: - node (str): the name of the node to retrieve the attribute from - Returns: - str - - """ - if node is None: - return - - sel = OpenMaya.MSelectionList() - sel.add(node) - - api_node = sel.getDependNode(0) - fn = OpenMaya.MFnDependencyNode(api_node) - - if not fn.hasAttribute("cbId"): - return - - try: - return fn.findPlug("cbId", False).asString() - except RuntimeError: - log.warning("Failed to retrieve cbId on %s", node) - return - - -def generate_ids(nodes, folder_id=None): - """Returns new unique ids for the given nodes. - - Note: This does not assign the new ids, it only generates the values. - - To assign new ids using this method: - >>> nodes = ["a", "b", "c"] - >>> for node, id in generate_ids(nodes): - >>> set_id(node, id) - - To also override any existing values (and assign regenerated ids): - >>> nodes = ["a", "b", "c"] - >>> for node, id in generate_ids(nodes): - >>> set_id(node, id, overwrite=True) - - Args: - nodes (list): List of nodes. - folder_id (Optional[str]): Folder id to generate id for. When None - provided current folder is used. - - Returns: - list: A list of (node, id) tuples. - - """ - - if folder_id is None: - # Get the folder id based on current context folder - project_name = get_current_project_name() - folder_path = get_current_folder_path() - if not folder_path: - raise ValueError("Current folder path is not set") - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path, fields=["id"] - ) - if not folder_entity: - raise ValueError(( - "Current folder '{}' was not found on the server" - ).format(folder_path)) - folder_id = folder_entity["id"] - - node_ids = [] - for node in nodes: - _, uid = str(uuid.uuid4()).rsplit("-", 1) - unique_id = "{}:{}".format(folder_id, uid) - node_ids.append((node, unique_id)) - - return node_ids - - -def set_id(node, unique_id, overwrite=False): - """Add cbId to `node` unless one already exists. - - Args: - node (str): the node to add the "cbId" on - unique_id (str): The unique node id to assign. - This should be generated by `generate_ids`. - overwrite (bool, optional): When True overrides the current value even - if `node` already has an id. Defaults to False. 
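-
-     Example:
-         >>> # Illustrative usage together with `generate_ids`; the node
-         >>> # name is hypothetical:
-         >>> for node, node_id in generate_ids(["|pCube1"]):
-         ...     set_id(node, node_id)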
- - Returns: - None - - """ - - exists = cmds.attributeQuery("cbId", node=node, exists=True) - - # Add the attribute if it does not exist yet - if not exists: - cmds.addAttr(node, longName="cbId", dataType="string") - - # Set the value - if not exists or overwrite: - attr = "{0}.cbId".format(node) - cmds.setAttr(attr, unique_id, type="string") - - -def get_attribute(plug, - asString=False, - expandEnvironmentVariables=False, - **kwargs): - """Maya getAttr with some fixes based on `pymel.core.general.getAttr()`. - - Like Pymel getAttr this applies some changes to `maya.cmds.getAttr` - - maya pointlessly returned vector results as a tuple wrapped in a list - (ex. '[(1,2,3)]'). This command unpacks the vector for you. - - when getting a multi-attr, maya would raise an error, but this will - return a list of values for the multi-attr - - added support for getting message attributes by returning the - connections instead - - Note that the asString + expandEnvironmentVariables argument naming - convention matches the `maya.cmds.getAttr` arguments so that it can - act as a direct replacement for it. - - Args: - plug (str): Node's attribute plug as `node.attribute` - asString (bool): Return string value for enum attributes instead - of the index. Note that the return value can be dependent on the - UI language Maya is running in. - expandEnvironmentVariables (bool): Expand any environment variable and - (tilde characters on UNIX) found in string attributes which are - returned. - - Kwargs: - Supports the keyword arguments of `maya.cmds.getAttr` - - Returns: - object: The value of the maya attribute. - - """ - attr_type = cmds.getAttr(plug, type=True) - if asString: - kwargs["asString"] = True - if expandEnvironmentVariables: - kwargs["expandEnvironmentVariables"] = True - try: - res = cmds.getAttr(plug, **kwargs) - except RuntimeError: - if attr_type == "message": - return cmds.listConnections(plug) - - node, attr = plug.split(".", 1) - children = cmds.attributeQuery(attr, node=node, listChildren=True) - if children: - return [ - get_attribute("{}.{}".format(node, child)) - for child in children - ] - - raise - - # Convert vector result wrapped in tuple - if isinstance(res, list) and len(res): - if isinstance(res[0], tuple) and len(res): - if attr_type in {'pointArray', 'vectorArray'}: - return res - return res[0] - - return res - - -def set_attribute(attribute, value, node): - """Adjust attributes based on the value from the attribute data - - If an attribute does not exists on the target it will be added with - the dataType being controlled by the value type. 
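-     When the target is an existing enum attribute and the value is a
-     string, the value is translated to the index of the matching enum
-     label before it is set.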
- - Args: - attribute (str): name of the attribute to change - value: the value to change to attribute to - node (str): name of the node - - Returns: - None - """ - - value_type = type(value).__name__ - kwargs = ATTRIBUTE_DICT[value_type] - if not cmds.attributeQuery(attribute, node=node, exists=True): - log.debug("Creating attribute '{}' on " - "'{}'".format(attribute, node)) - cmds.addAttr(node, longName=attribute, **kwargs) - - node_attr = "{}.{}".format(node, attribute) - enum_type = cmds.attributeQuery(attribute, node=node, enum=True) - if enum_type and value_type == "str": - enum_string_values = cmds.attributeQuery( - attribute, node=node, listEnum=True - )[0].split(":") - cmds.setAttr( - "{}.{}".format(node, attribute), enum_string_values.index(value) - ) - elif "dataType" in kwargs: - attr_type = kwargs["dataType"] - cmds.setAttr(node_attr, value, type=attr_type) - else: - cmds.setAttr(node_attr, value) - - -def apply_attributes(attributes, nodes_by_id): - """Alter the attributes to match the state when publishing - - Apply attribute settings from the publish to the node in the scene based - on the UUID which is stored in the cbId attribute. - - Args: - attributes (list): list of dictionaries - nodes_by_id (dict): collection of nodes based on UUID - {uuid: [node, node]} - - """ - - for attr_data in attributes: - nodes = nodes_by_id[attr_data["uuid"]] - attr_value = attr_data["attributes"] - for node in nodes: - for attr, value in attr_value.items(): - set_attribute(attr, value, node) - - -def is_valid_reference_node(reference_node): - """Return whether Maya considers the reference node a valid reference. - - Maya might report an error when using `maya.cmds.referenceQuery`: - Reference node 'reference_node' is not associated with a reference file. - - Note that this does *not* check whether the reference node points to an - existing file. Instead, it only returns whether maya considers it valid - and thus is not an unassociated reference node - - Arguments: - reference_node (str): Reference node name - - Returns: - bool: Whether reference node is a valid reference - - """ - # maya 2022 is missing `isValidReference` so the check needs to be - # done in different way. - if int(cmds.about(version=True)) < 2023: - try: - cmds.referenceQuery(reference_node, filename=True) - return True - except RuntimeError: - return False - sel = OpenMaya.MSelectionList() - sel.add(reference_node) - depend_node = sel.getDependNode(0) - - return OpenMaya.MFnReference(depend_node).isValidReference() - - -def get_container_members(container): - """Returns the members of a container. - This includes the nodes from any loaded references in the container. - """ - if isinstance(container, dict): - # Assume it's a container dictionary - container = container["objectName"] - - members = cmds.sets(container, query=True) or [] - members = cmds.ls(members, long=True, objectsOnly=True) or [] - all_members = set(members) - - # Include any referenced nodes from any reference in the container - # This is required since we've removed adding ALL nodes of a reference - # into the container set and only add the reference node now. 
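-     # Expand every reference node among the members to the nodes
-     # it references.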
- for ref in cmds.ls(members, exactType="reference", objectsOnly=True): - - # Ignore any `:sharedReferenceNode` - if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"): - continue - - # Ignore _UNKNOWN_REF_NODE_ (PLN-160) - if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"): - continue - - try: - reference_members = cmds.referenceQuery(ref, - nodes=True, - dagPath=True) - except RuntimeError: - # Ignore reference nodes that are not associated with a - # referenced file on which `referenceQuery` command fails - if not is_valid_reference_node(ref): - continue - raise - reference_members = cmds.ls(reference_members, - long=True, - objectsOnly=True) - all_members.update(reference_members) - - return list(all_members) - - -# region LOOKDEV -def list_looks(project_name, folder_id): - """Return all look products for the given folder. - - This assumes all look products start with "look*" in their names. - - Returns: - list[dict[str, Any]]: List of look products. - - """ - return list(ayon_api.get_products( - project_name, folder_ids=[folder_id], product_types={"look"} - )) - - -def assign_look_by_version(nodes, version_id): - """Assign nodes a specific published look version by id. - - This assumes the nodes correspond with the asset. - - Args: - nodes(list): nodes to assign look to - version_id (bson.ObjectId): database id of the version - - Returns: - None - """ - - project_name = get_current_project_name() - - # Get representations of shader file and relationships - representations = list(ayon_api.get_representations( - project_name=project_name, - representation_names={"ma", "json"}, - version_ids=[version_id] - )) - look_representation = next( - repre for repre in representations if repre["name"] == "ma") - json_representation = next( - repre for repre in representations if repre["name"] == "json") - - # See if representation is already loaded, if so reuse it. - host = registered_host() - representation_id = look_representation["id"] - for container in host.ls(): - if (container['loader'] == "LookLoader" and - container['representation'] == representation_id): - log.info("Reusing loaded look ..") - container_node = container['objectName'] - break - else: - log.info("Using look for the first time ..") - - # Load file - _loaders = discover_loader_plugins() - loaders = loaders_from_representation(_loaders, representation_id) - Loader = next((i for i in loaders if i.__name__ == "LookLoader"), None) - if Loader is None: - raise RuntimeError("Could not find LookLoader, this is a bug") - - # Reference the look file - with maintained_selection(): - container_node = load_container(Loader, look_representation) - - # Get container members - shader_nodes = get_container_members(container_node) - - # Load relationships - shader_relation = get_representation_path(json_representation) - with open(shader_relation, "r") as f: - relationships = json.load(f) - - # Assign relationships - apply_shaders(relationships, shader_nodes, nodes) - - -def assign_look(nodes, product_name="lookMain"): - """Assigns a look to a node. - - Optimizes the nodes by grouping by folder id and finding - related product by name. 
- 
-     Args:
-         nodes (list): all nodes to assign the look to
-         product_name (str): name of the product to find
-     """
-
-     # Group all nodes per folder id
-     grouped = defaultdict(list)
-     for node in nodes:
-         hash_id = get_id(node)
-         if not hash_id:
-             continue
-
-         parts = hash_id.split(":", 1)
-         grouped[parts[0]].append(node)
-
-     project_name = get_current_project_name()
-     product_entities = ayon_api.get_products(
-         project_name, product_names=[product_name], folder_ids=grouped.keys()
-     )
-     product_entities_by_folder_id = {
-         product_entity["folderId"]: product_entity
-         for product_entity in product_entities
-     }
-     product_ids = {
-         product_entity["id"]
-         for product_entity in product_entities_by_folder_id.values()
-     }
-     last_version_entities_by_product_id = ayon_api.get_last_versions(
-         project_name,
-         product_ids
-     )
-
-     for folder_id, asset_nodes in grouped.items():
-         product_entity = product_entities_by_folder_id.get(folder_id)
-         if not product_entity:
-             log.warning((
-                 "No product '{}' found for {}"
-             ).format(product_name, folder_id))
-             continue
-
-         product_id = product_entity["id"]
-         last_version = last_version_entities_by_product_id.get(product_id)
-         if not last_version:
-             log.warning((
-                 "No last version found for product '{}' on folder with id {}"
-             ).format(product_name, folder_id))
-             continue
-
-         families = last_version.get("attrib", {}).get("families") or []
-         if "look" not in families:
-             log.warning((
-                 "Last version for product '{}' on folder with id {}"
-                 " does not have look product type"
-             ).format(product_name, folder_id))
-             continue
-
-         log.debug("Assigning look '{}' (version {})".format(
-             product_name, last_version["version"]))
-
-         assign_look_by_version(asset_nodes, last_version["id"])
-
-
- def apply_shaders(relationships, shadernodes, nodes):
-     """Link shadingEngine to the right nodes based on relationship data
-
-     Relationship data consists of a collection of `sets` and `attributes`.
-     `sets` corresponds with the shadingEngines found in the lookdev.
-     Each set has the keys `name`, `members` and `uuid`; each member holds
-     the node information `name` and `uuid`.
-
-     Args:
-         relationships (dict): relationship data
-         shadernodes (list): list of nodes of the shading objectSets (includes
-             VRayObjectProperties and shadingEngines)
-         nodes (list): list of nodes to apply shader to
-
-     Returns:
-         None
-     """
-
-     attributes = relationships.get("attributes", [])
-     shader_data = relationships.get("relationships", {})
-
-     shading_engines = cmds.ls(shadernodes, type="objectSet", long=True)
-     assert shading_engines, "Error in retrieving objectSets from reference"
-
-     # region compute lookup
-     nodes_by_id = defaultdict(list)
-     for node in nodes:
-         nodes_by_id[get_id(node)].append(node)
-
-     shading_engines_by_id = defaultdict(list)
-     for shad in shading_engines:
-         shading_engines_by_id[get_id(shad)].append(shad)
-     # endregion
-
-     # region assign shading engines and other sets
-     for data in shader_data.values():
-         # collect all unique IDs of the set members
-         shader_uuid = data["uuid"]
-         member_uuids = [member["uuid"] for member in data["members"]]
-
-         filtered_nodes = list()
-         for m_uuid in member_uuids:
-             filtered_nodes.extend(nodes_by_id[m_uuid])
-
-         id_shading_engines = shading_engines_by_id[shader_uuid]
-         if not id_shading_engines:
-             log.error("No shader found with cbId "
-                       "'{}'".format(shader_uuid))
-             continue
-         elif len(id_shading_engines) > 1:
-             log.error("Skipping shader assignment. "
-                       "More than one shader found with cbId "
-                       "'{}'. 
(found: {})".format(shader_uuid, - id_shading_engines)) - continue - - if not filtered_nodes: - log.warning("No nodes found for shading engine " - "'{0}'".format(id_shading_engines[0])) - continue - try: - cmds.sets(filtered_nodes, forceElement=id_shading_engines[0]) - except RuntimeError as rte: - log.error("Error during shader assignment: {}".format(rte)) - - # endregion - - apply_attributes(attributes, nodes_by_id) - - -# endregion LOOKDEV -def get_isolate_view_sets(): - """Return isolate view sets of all modelPanels. - - Returns: - list: all sets related to isolate view - - """ - - view_sets = set() - for panel in cmds.getPanel(type="modelPanel") or []: - view_set = cmds.modelEditor(panel, query=True, viewObjects=True) - if view_set: - view_sets.add(view_set) - - return view_sets - - -def get_related_sets(node): - """Return objectSets that are relationships for a look for `node`. - - Filters out based on: - - id attribute is NOT `AVALON_CONTAINER_ID` - - shapes and deformer shapes (alembic creates meshShapeDeformed) - - set name ends with any from a predefined list - - set in not in viewport set (isolate selected for example) - - Args: - node (str): name of the current node to check - - Returns: - list: The related sets - - """ - - sets = cmds.listSets(object=node, extendToShape=False) - if not sets: - return [] - - # Fix 'no object matches name' errors on nodes returned by listSets. - # In rare cases it can happen that a node is added to an internal maya - # set inaccessible by maya commands, for example check some nodes - # returned by `cmds.listSets(allSets=True)` - sets = cmds.ls(sets) - - # Ids to ignore - ignored = { - AVALON_INSTANCE_ID, - AVALON_CONTAINER_ID, - AYON_INSTANCE_ID, - AYON_CONTAINER_ID, - } - - # Ignore `avalon.container` - sets = [ - s for s in sets - if ( - not cmds.attributeQuery("id", node=s, exists=True) - or cmds.getAttr(f"{s}.id") not in ignored - ) - ] - if not sets: - return sets - - # Exclude deformer sets (`type=2` for `maya.cmds.listSets`) - exclude_sets = cmds.listSets(object=node, - extendToShape=False, - type=2) or [] - exclude_sets = set(exclude_sets) # optimize lookup - - # Default nodes to ignore - exclude_sets.update({"defaultLightSet", "defaultObjectSet"}) - - # Filter out the sets to exclude - sets = [s for s in sets if s not in exclude_sets] - - # Ignore when the set has a specific suffix - ignore_suffices = ("out_SET", "controls_SET", "_INST", "_CON") - sets = [s for s in sets if not s.endswith(ignore_suffices)] - if not sets: - return sets - - # Ignore viewport filter view sets (from isolate select and - # viewports) - view_sets = get_isolate_view_sets() - sets = [s for s in sets if s not in view_sets] - - return sets - - -def get_container_transforms(container, members=None, root=False): - """Retrieve the root node of the container content - - When a container is created through a Loader the content - of the file will be grouped under a transform. 
The name of the root - transform is stored in the container information - - Args: - container (dict): the container - members (list): optional and convenience argument - root (bool): return highest node in hierarchy if True - - Returns: - root (list / str): - """ - - if not members: - members = get_container_members(container) - - results = cmds.ls(members, type="transform", long=True) - if root: - root = get_highest_in_hierarchy(results) - if root: - results = root[0] - - return results - - -def get_highest_in_hierarchy(nodes): - """Return highest nodes in the hierarchy that are in the `nodes` list. - - The "highest in hierarchy" are the nodes closest to world: top-most level. - - Args: - nodes (list): The nodes in which find the highest in hierarchies. - - Returns: - list: The highest nodes from the input nodes. - - """ - - # Ensure we use long names - nodes = cmds.ls(nodes, long=True) - lookup = set(nodes) - - highest = [] - for node in nodes: - # If no parents are within the nodes input list - # then this is a highest node - if not any(n in lookup for n in iter_parents(node)): - highest.append(node) - - return highest - - -def iter_parents(node): - """Iter parents of node from its long name. - - Note: The `node` *must* be the long node name. - - Args: - node (str): Node long name. - - Yields: - str: All parent node names (long names) - - """ - while True: - split = node.rsplit("|", 1) - if len(split) == 1 or not split[0]: - return - - node = split[0] - yield node - - -def remove_other_uv_sets(mesh): - """Remove all other UV sets than the current UV set. - - Keep only current UV set and ensure it's the renamed to default 'map1'. - - """ - - uvSets = cmds.polyUVSet(mesh, query=True, allUVSets=True) - current = cmds.polyUVSet(mesh, query=True, currentUVSet=True)[0] - - # Copy over to map1 - if current != 'map1': - cmds.polyUVSet(mesh, uvSet=current, newUVSet='map1', copy=True) - cmds.polyUVSet(mesh, currentUVSet=True, uvSet='map1') - current = 'map1' - - # Delete all non-current UV sets - deleteUVSets = [uvSet for uvSet in uvSets if uvSet != current] - uvSet = None - - # Maya Bug (tested in 2015/2016): - # In some cases the API's MFnMesh will report less UV sets than - # maya.cmds.polyUVSet. This seems to happen when the deletion of UV sets - # has not triggered a cleanup of the UVSet array attribute on the mesh - # node. It will still have extra entries in the attribute, though it will - # not show up in API or UI. Nevertheless it does show up in - # maya.cmds.polyUVSet. To ensure we clean up the array we'll force delete - # the extra remaining 'indices' that we don't want. - - # TODO: Implement a better fix - # The best way to fix would be to get the UVSet indices from api with - # MFnMesh (to ensure we keep correct ones) and then only force delete the - # other entries in the array attribute on the node. But for now we're - # deleting all entries except first one. Note that the first entry could - # never be removed (the default 'map1' always exists and is supposed to - # be undeletable.) 
- try: - for uvSet in deleteUVSets: - cmds.polyUVSet(mesh, delete=True, uvSet=uvSet) - except RuntimeError as exc: - log.warning('Error uvSet: %s - %s', uvSet, exc) - indices = cmds.getAttr('{0}.uvSet'.format(mesh), - multiIndices=True) - if not indices: - log.warning("No uv set found indices for: %s", mesh) - return - - # Delete from end to avoid shifting indices - # and remove the indices in the attribute - indices = reversed(indices[1:]) - for i in indices: - attr = '{0}.uvSet[{1}]'.format(mesh, i) - cmds.removeMultiInstance(attr, b=True) - - -def get_node_parent(node): - """Return full path name for parent of node""" - parents = cmds.listRelatives(node, parent=True, fullPath=True) - return parents[0] if parents else None - - -def get_id_from_sibling(node, history_only=True): - """Return first node id in the history chain that matches this node. - - The nodes in history must be of the exact same node type and must be - parented under the same parent. - - Optionally, if no matching node is found from the history, all the - siblings of the node that are of the same type are checked. - Additionally to having the same parent, the sibling must be marked as - 'intermediate object'. - - Args: - node (str): node to retrieve the history from - history_only (bool): if True and if nothing found in history, - look for an 'intermediate object' in all the node's siblings - of same type - - Returns: - str or None: The id from the sibling node or None when no id found - on any valid nodes in the history or siblings. - - """ - - node = cmds.ls(node, long=True)[0] - - # Find all similar nodes in history - history = cmds.listHistory(node) - node_type = cmds.nodeType(node) - similar_nodes = cmds.ls(history, exactType=node_type, long=True) - - # Exclude itself - similar_nodes = [x for x in similar_nodes if x != node] - - # The node *must be* under the same parent - parent = get_node_parent(node) - similar_nodes = [i for i in similar_nodes if get_node_parent(i) == parent] - - # Check all of the remaining similar nodes and take the first one - # with an id and assume it's the original. - for similar_node in similar_nodes: - _id = get_id(similar_node) - if _id: - return _id - - if not history_only: - # Get siblings of same type - similar_nodes = cmds.listRelatives(parent, - type=node_type, - fullPath=True) - similar_nodes = cmds.ls(similar_nodes, exactType=node_type, long=True) - - # Exclude itself - similar_nodes = [x for x in similar_nodes if x != node] - - # Get all unique ids from siblings in order since - # we consistently take the first one found - sibling_ids = OrderedDict() - for similar_node in similar_nodes: - # Check if "intermediate object" - if not cmds.getAttr(similar_node + ".intermediateObject"): - continue - - _id = get_id(similar_node) - if not _id: - continue - - if _id in sibling_ids: - sibling_ids[_id].append(similar_node) - else: - sibling_ids[_id] = [similar_node] - - if sibling_ids: - first_id, found_nodes = next(iter(sibling_ids.items())) - - # Log a warning if we've found multiple unique ids - if len(sibling_ids) > 1: - log.warning(("Found more than 1 intermediate shape with" - " unique id for '{}'. 
Using id of first" - " found: '{}'".format(node, found_nodes[0]))) - - return first_id - - -def set_scene_fps(fps, update=True): - """Set FPS from project configuration - - Args: - fps (int, float): desired FPS - update(bool): toggle update animation, default is True - - Returns: - None - - """ - - fps_mapping = { - '2': '2fps', - '3': '3fps', - '4': '4fps', - '5': '5fps', - '6': '6fps', - '8': '8fps', - '10': '10fps', - '12': '12fps', - '15': 'game', - '16': '16fps', - '24': 'film', - '25': 'pal', - '30': 'ntsc', - '48': 'show', - '50': 'palf', - '60': 'ntscf', - '23.976023976023978': '23.976fps', - '29.97002997002997': '29.97fps', - '47.952047952047955': '47.952fps', - '59.94005994005994': '59.94fps', - '44100': '44100fps', - '48000': '48000fps' - } - - unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None) - if unit is None: - raise ValueError("Unsupported FPS value: `%s`" % fps) - - # Get time slider current state - start_frame = cmds.playbackOptions(query=True, minTime=True) - end_frame = cmds.playbackOptions(query=True, maxTime=True) - - # Get animation data - animation_start = cmds.playbackOptions(query=True, animationStartTime=True) - animation_end = cmds.playbackOptions(query=True, animationEndTime=True) - - current_frame = cmds.currentTime(query=True) - - log.info("Setting scene FPS to: '{}'".format(unit)) - cmds.currentUnit(time=unit, updateAnimation=update) - - # Set time slider data back to previous state - cmds.playbackOptions(minTime=start_frame, - maxTime=end_frame, - animationStartTime=animation_start, - animationEndTime=animation_end) - - cmds.currentTime(current_frame, edit=True, update=True) - - # Force file stated to 'modified' - cmds.file(modified=True) - - -def set_scene_resolution(width, height, pixelAspect): - """Set the render resolution - - Args: - width(int): value of the width - height(int): value of the height - - Returns: - None - - """ - - control_node = "defaultResolution" - current_renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer") - aspect_ratio_attr = "deviceAspectRatio" - - # Give VRay a helping hand as it is slightly different from the rest - if current_renderer == "vray": - aspect_ratio_attr = "aspectRatio" - vray_node = "vraySettings" - if cmds.objExists(vray_node): - control_node = vray_node - else: - log.error("Can't set VRay resolution because there is no node " - "named: `%s`" % vray_node) - - log.info("Setting scene resolution to: %s x %s" % (width, height)) - cmds.setAttr("%s.width" % control_node, width) - cmds.setAttr("%s.height" % control_node, height) - - deviceAspectRatio = ((float(width) / float(height)) * float(pixelAspect)) - cmds.setAttr( - "{}.{}".format(control_node, aspect_ratio_attr), deviceAspectRatio) - cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect) - - -def get_fps_for_current_context(): - """Get fps that should be set for current context. - - Todos: - - Skip project value. - - Merge logic with 'get_frame_range' and 'reset_scene_resolution' -> - all the values in the functions can be collected at one place as - they have same requirements. - - Returns: - Union[int, float]: FPS value. 
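-
-     The value is resolved from the task attributes first, then the folder
-     and project attributes, and falls back to 25 when none of them define
-     an fps value.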
- """ - task_entity = get_current_task_entity(fields={"attrib"}) - fps = task_entity.get("attrib", {}).get("fps") - if not fps: - project_name = get_current_project_name() - folder_path = get_current_folder_path() - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path, fields={"attrib.fps"} - ) or {} - - fps = folder_entity.get("attrib", {}).get("fps") - if not fps: - project_entity = ayon_api.get_project( - project_name, fields=["attrib.fps"] - ) or {} - fps = project_entity.get("attrib", {}).get("fps") - - if not fps: - fps = 25 - - return convert_to_maya_fps(fps) - - -def get_frame_range(include_animation_range=False): - """Get the current task frame range and handles. - - Args: - include_animation_range (bool, optional): Whether to include - `animationStart` and `animationEnd` keys to define the outer - range of the timeline. It is excluded by default. - - Returns: - dict: Task's expected frame range values. - - """ - - # Set frame start/end - project_name = get_current_project_name() - folder_path = get_current_folder_path() - task_name = get_current_task_name() - - folder_entity = ayon_api.get_folder_by_path( - project_name, - folder_path, - fields={"id"}) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - - task_attributes = task_entity["attrib"] - - frame_start = task_attributes.get("frameStart") - frame_end = task_attributes.get("frameEnd") - - if frame_start is None or frame_end is None: - cmds.warning("No edit information found for '{}'".format(folder_path)) - return - - handle_start = task_attributes.get("handleStart") or 0 - handle_end = task_attributes.get("handleEnd") or 0 - - frame_range = { - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": handle_start, - "handleEnd": handle_end - } - if include_animation_range: - # The animation range values are only included to define whether - # the Maya time slider should include the handles or not. - # Some usages of this function use the full dictionary to define - # instance attributes for which we want to exclude the animation - # keys. That is why these are excluded by default. - - settings = get_project_settings(project_name) - - task_type = task_entity["taskType"] - - include_handles_settings = settings["maya"]["include_handles"] - - animation_start = frame_start - animation_end = frame_end - - include_handles = include_handles_settings["include_handles_default"] - for item in include_handles_settings["per_task_type"]: - if task_type in item["task_type"]: - include_handles = item["include_handles"] - break - if include_handles: - animation_start -= int(handle_start) - animation_end += int(handle_end) - - frame_range["animationStart"] = animation_start - frame_range["animationEnd"] = animation_end - - return frame_range - - -def reset_frame_range(playback=True, render=True, fps=True): - """Set frame range to current folder. - - Args: - playback (bool, Optional): Whether to set the maya timeline playback - frame range. Defaults to True. - render (bool, Optional): Whether to set the maya render frame range. - Defaults to True. - fps (bool, Optional): Whether to set scene FPS. Defaults to True. 
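-
-     Example:
-         >>> # Illustrative call: reset only the playback range.
-         >>> reset_frame_range(render=False, fps=False)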
- """ - if fps: - set_scene_fps(get_fps_for_current_context()) - - frame_range = get_frame_range(include_animation_range=True) - if not frame_range: - # No frame range data found for folder - return - - frame_start = frame_range["frameStart"] - frame_end = frame_range["frameEnd"] - animation_start = frame_range["animationStart"] - animation_end = frame_range["animationEnd"] - - if playback: - cmds.playbackOptions( - minTime=frame_start, - maxTime=frame_end, - animationStartTime=animation_start, - animationEndTime=animation_end - ) - cmds.currentTime(frame_start) - - if render: - cmds.setAttr("defaultRenderGlobals.startFrame", animation_start) - cmds.setAttr("defaultRenderGlobals.endFrame", animation_end) - - -def reset_scene_resolution(): - """Apply the scene resolution from the project definition - - The scene resolution will be retrieved from the current task entity's - attributes. - - Returns: - None - """ - - task_attributes = get_current_task_entity(fields={"attrib"})["attrib"] - - # Set resolution - width = task_attributes.get("resolutionWidth", 1920) - height = task_attributes.get("resolutionHeight", 1080) - pixel_aspect = task_attributes.get("pixelAspect", 1) - - set_scene_resolution(width, height, pixel_aspect) - - -def set_context_settings( - fps=True, - resolution=True, - frame_range=True, - colorspace=True -): - """Apply the project settings from the project definition - - Settings can be overwritten by an asset if the asset.data contains - any information regarding those settings. - - Args: - fps (bool): Whether to set the scene FPS. - resolution (bool): Whether to set the render resolution. - frame_range (bool): Whether to reset the time slide frame ranges. - colorspace (bool): Whether to reset the colorspace. - - Returns: - None - - """ - if fps: - # Set project fps - set_scene_fps(get_fps_for_current_context()) - - if resolution: - reset_scene_resolution() - - # Set frame range. - if frame_range: - reset_frame_range(fps=False) - - # Set colorspace - if colorspace: - set_colorspace() - - -def prompt_reset_context(): - """Prompt the user what context settings to reset. - This prompt is used on saving to a different task to allow the scene to - get matched to the new context. - """ - # TODO: Cleanup this prototyped mess of imports and odd dialog - from ayon_core.tools.attribute_defs.dialog import ( - AttributeDefinitionsDialog - ) - from ayon_core.style import load_stylesheet - from ayon_core.lib import BoolDef, UILabelDef - - definitions = [ - UILabelDef( - label=( - "You are saving your workfile into a different folder or task." 
- "\n\n" - "Would you like to update some settings to the new context?\n" - ) - ), - BoolDef( - "fps", - label="FPS", - tooltip="Reset workfile FPS", - default=True - ), - BoolDef( - "frame_range", - label="Frame Range", - tooltip="Reset workfile start and end frame ranges", - default=True - ), - BoolDef( - "resolution", - label="Resolution", - tooltip="Reset workfile resolution", - default=True - ), - BoolDef( - "colorspace", - label="Colorspace", - tooltip="Reset workfile resolution", - default=True - ), - BoolDef( - "instances", - label="Publish instances", - tooltip="Update all publish instance's folder and task to match " - "the new folder and task", - default=True - ), - ] - - dialog = AttributeDefinitionsDialog(definitions) - dialog.setWindowTitle("Saving to different context.") - dialog.setStyleSheet(load_stylesheet()) - if not dialog.exec_(): - return None - - options = dialog.get_values() - with suspended_refresh(): - set_context_settings( - fps=options["fps"], - resolution=options["resolution"], - frame_range=options["frame_range"], - colorspace=options["colorspace"] - ) - if options["instances"]: - update_content_on_context_change() - - dialog.deleteLater() - - -# Valid FPS -def validate_fps(): - """Validate current scene FPS and show pop-up when it is incorrect - - Returns: - bool - - """ - - expected_fps = get_fps_for_current_context() - current_fps = mel.eval("currentTimeUnitToFPS()") - - fps_match = current_fps == expected_fps - if not fps_match and not IS_HEADLESS: - from ayon_core.tools.utils import PopupUpdateKeys - - parent = get_main_window() - - dialog = PopupUpdateKeys(parent=parent) - dialog.setModal(True) - dialog.setWindowTitle("Maya scene does not match project FPS") - dialog.set_message( - "Scene {} FPS does not match project {} FPS".format( - current_fps, expected_fps - ) - ) - dialog.set_button_text("Fix") - - # Set new text for button (add optional argument for the popup?) - def on_click(update): - set_scene_fps(expected_fps, update) - - dialog.on_clicked_state.connect(on_click) - dialog.show() - - return False - - return fps_match - - -def bake(nodes, - frame_range=None, - step=1.0, - simulation=True, - preserve_outside_keys=False, - disable_implicit_control=True, - shape=True): - """Bake the given nodes over the time range. - - This will bake all attributes of the node, including custom attributes. - - Args: - nodes (list): Names of transform nodes, eg. camera, light. - frame_range (list): frame range with start and end frame. - or if None then takes timeSliderRange - simulation (bool): Whether to perform a full simulation of the - attributes over time. - preserve_outside_keys (bool): Keep keys that are outside of the baked - range. - disable_implicit_control (bool): When True will disable any - constraints to the object. - shape (bool): When True also bake attributes on the children shapes. - step (float): The step size to sample by. 
- - Returns: - None - - """ - - # Parse inputs - if not nodes: - return - - assert isinstance(nodes, (list, tuple)), "Nodes must be a list or tuple" - - # If frame range is None fall back to time slider playback time range - if frame_range is None: - frame_range = [cmds.playbackOptions(query=True, minTime=True), - cmds.playbackOptions(query=True, maxTime=True)] - - # If frame range is single frame bake one frame more, - # otherwise maya.cmds.bakeResults gets confused - if frame_range[1] == frame_range[0]: - frame_range[1] += 1 - - # Bake it - with keytangent_default(in_tangent_type='auto', - out_tangent_type='auto'): - cmds.bakeResults(nodes, - simulation=simulation, - preserveOutsideKeys=preserve_outside_keys, - disableImplicitControl=disable_implicit_control, - shape=shape, - sampleBy=step, - time=(frame_range[0], frame_range[1])) - - -def bake_to_world_space(nodes, - frame_range=None, - simulation=True, - preserve_outside_keys=False, - disable_implicit_control=True, - shape=True, - step=1.0): - """Bake the nodes to world space transformation (incl. other attributes) - - Bakes the transforms to world space (while maintaining all its animated - attributes and settings) by duplicating the node. Then parents it to world - and constrains to the original. - - Other attributes are also baked by connecting all attributes directly. - Baking is then done using Maya's bakeResults command. - - See `bake` for the argument documentation. - - Returns: - list: The newly created and baked node names. - - """ - @contextlib.contextmanager - def _unlock_attr(attr): - """Unlock attribute during context if it is locked""" - if not cmds.getAttr(attr, lock=True): - # If not locked, do nothing - yield - return - try: - cmds.setAttr(attr, lock=False) - yield - finally: - cmds.setAttr(attr, lock=True) - - def _get_attrs(node): - """Workaround for buggy shape attribute listing with listAttr - - This will only return keyable settable attributes that have an - incoming connections (those that have a reason to be baked). - - Technically this *may* fail to return attributes driven by complex - expressions for which maya makes no connections, e.g. doing actual - `setAttr` calls in expressions. - - Arguments: - node (str): The node to list attributes for. - - Returns: - list: Keyable attributes with incoming connections. - The attribute may be locked. - - """ - attrs = cmds.listAttr(node, - write=True, - scalar=True, - settable=True, - connectable=True, - keyable=True, - shortNames=True) or [] - valid_attrs = [] - for attr in attrs: - node_attr = '{0}.{1}'.format(node, attr) - - # Sometimes Maya returns 'non-existent' attributes for shapes - # so we filter those out - if not cmds.attributeQuery(attr, node=node, exists=True): - continue - - # We only need those that have a connection, just to be safe - # that it's actually keyable/connectable anyway. 
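-            # Note: `cmds.connectionInfo(plug, isDestination=True)` returns
-            # True only when the plug is the destination side of an existing
-            # connection, which is what qualifies the attribute for baking.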
- if cmds.connectionInfo(node_attr, - isDestination=True): - valid_attrs.append(attr) - - return valid_attrs - - transform_attrs = {"t", "r", "s", - "tx", "ty", "tz", - "rx", "ry", "rz", - "sx", "sy", "sz"} - - world_space_nodes = [] - with ExitStack() as stack: - delete_bin = stack.enter_context(delete_after()) - # Create the duplicate nodes that are in world-space connected to - # the originals - for node in nodes: - - # Duplicate the node - short_name = node.rsplit("|", 1)[-1] - new_name = "{0}_baked".format(short_name) - new_node = cmds.duplicate(node, - name=new_name, - renameChildren=True)[0] # noqa - - # Parent new node to world - if cmds.listRelatives(new_node, parent=True): - new_node = cmds.parent(new_node, world=True)[0] - - # Temporarily unlock and passthrough connect all attributes - # so we can bake them over time - # Skip transform attributes because we will constrain them later - attrs = set(_get_attrs(node)) - transform_attrs - for attr in attrs: - orig_node_attr = "{}.{}".format(node, attr) - new_node_attr = "{}.{}".format(new_node, attr) - - # unlock during context to avoid connection errors - stack.enter_context(_unlock_attr(new_node_attr)) - cmds.connectAttr(orig_node_attr, - new_node_attr, - force=True) - - # If shapes are also baked then also temporarily unlock and - # passthrough connect all shape attributes for baking - if shape: - children_shapes = cmds.listRelatives(new_node, - children=True, - fullPath=True, - shapes=True) - if children_shapes: - orig_children_shapes = cmds.listRelatives(node, - children=True, - fullPath=True, - shapes=True) - for orig_shape, new_shape in zip(orig_children_shapes, - children_shapes): - attrs = _get_attrs(orig_shape) - for attr in attrs: - orig_node_attr = "{}.{}".format(orig_shape, attr) - new_node_attr = "{}.{}".format(new_shape, attr) - - # unlock during context to avoid connection errors - stack.enter_context(_unlock_attr(new_node_attr)) - cmds.connectAttr(orig_node_attr, - new_node_attr, - force=True) - - # Constraint transforms - for attr in transform_attrs: - transform_attr = "{}.{}".format(new_node, attr) - stack.enter_context(_unlock_attr(transform_attr)) - delete_bin.extend(cmds.parentConstraint(node, new_node, mo=False)) - delete_bin.extend(cmds.scaleConstraint(node, new_node, mo=False)) - - world_space_nodes.append(new_node) - - bake(world_space_nodes, - frame_range=frame_range, - step=step, - simulation=simulation, - preserve_outside_keys=preserve_outside_keys, - disable_implicit_control=disable_implicit_control, - shape=shape) - - return world_space_nodes - - -def load_capture_preset(data): - """Convert AYON Extract Playblast settings to `capture` arguments - - Input data is the settings from: - `project_settings/maya/publish/ExtractPlayblast/capture_preset` - - Args: - data (dict): Capture preset settings from AYON settings - - Returns: - dict: `capture.capture` compatible keyword arguments - - """ - - options = dict() - viewport_options = dict() - viewport2_options = dict() - camera_options = dict() - - # Straight key-value match from settings to capture arguments - options.update(data["Codec"]) - options.update(data["Generic"]) - options.update(data["Resolution"]) - - camera_options.update(data["CameraOptions"]) - viewport_options.update(data["Renderer"]) - - # DISPLAY OPTIONS - disp_options = {} - for key, value in data["DisplayOptions"].items(): - if key.startswith("background"): - # Convert background, backgroundTop, backgroundBottom colors - - if len(value) == 4: - # Ignore alpha + convert RGB to float - 
value = [
-                    float(value[0]) / 255,
-                    float(value[1]) / 255,
-                    float(value[2]) / 255
-                ]
-            disp_options[key] = value
-        elif key == "displayGradient":
-            disp_options[key] = value
-
-    options["display_options"] = disp_options
-
-    # Viewport Options has a mixture of Viewport2 Options and Viewport Options
-    # to pass along to capture. So we'll need to differentiate between the two
-    VIEWPORT2_OPTIONS = {
-        "textureMaxResolution",
-        "renderDepthOfField",
-        "ssaoEnable",
-        "ssaoSamples",
-        "ssaoAmount",
-        "ssaoRadius",
-        "ssaoFilterRadius",
-        "hwFogStart",
-        "hwFogEnd",
-        "hwFogAlpha",
-        "hwFogFalloff",
-        "hwFogColorR",
-        "hwFogColorG",
-        "hwFogColorB",
-        "hwFogDensity",
-        "motionBlurEnable",
-        "motionBlurSampleCount",
-        "motionBlurShutterOpenFraction",
-        "lineAAEnable"
-    }
-    for key, value in data["ViewportOptions"].items():
-
-        # There are some keys we want to ignore
-        if key in {"override_viewport_options", "high_quality"}:
-            continue
-
-        # First handle special cases where we do value conversion to
-        # separate option values
-        if key == 'textureMaxResolution':
-            viewport2_options['textureMaxResolution'] = value
-            if value > 0:
-                viewport2_options['enableTextureMaxRes'] = True
-                viewport2_options['textureMaxResMode'] = 1
-            else:
-                viewport2_options['enableTextureMaxRes'] = False
-                viewport2_options['textureMaxResMode'] = 0
-
-        elif key == 'multiSample':
-            viewport2_options['multiSampleEnable'] = value > 0
-            viewport2_options['multiSampleCount'] = value
-
-        elif key == 'alphaCut':
-            viewport2_options['transparencyAlgorithm'] = 5
-            viewport2_options['transparencyQuality'] = 1
-
-        elif key == 'hwFogFalloff':
-            # Setting enum value string to integer
-            viewport2_options['hwFogFalloff'] = int(value)
-
-        # Then handle Viewport 2.0 Options
-        elif key in VIEWPORT2_OPTIONS:
-            viewport2_options[key] = value
-
-        # Then assume remainder is Viewport Options
-        else:
-            viewport_options[key] = value
-
-    options['viewport_options'] = viewport_options
-    options['viewport2_options'] = viewport2_options
-    options['camera_options'] = camera_options
-
-    # use active sound track
-    scene = capture.parse_active_scene()
-    options['sound'] = scene['sound']
-
-    return options
-
-
-def get_attr_in_layer(attr, layer, as_string=True):
-    """Return attribute value in specified renderlayer.
-
-    Same as cmds.getAttr but this gets the attribute's value in a
-    given render layer without having to switch to it.
-
-    Warning for parent attribute overrides:
-        Attributes that have render layer overrides to their parent attribute
-        are not captured correctly since they do not have a direct connection.
-        For example, an override to sphere.rotate when querying sphere.rotateX
-        will not return correctly!
-
-    Note: This is much faster for Maya's renderLayer system, yet the code
-        does no optimized query for render setup.
-
-    Args:
-        attr (str): attribute name, ex. "node.attribute"
-        layer (str): layer name
-        as_string (bool): whether attribute should convert to a string value
-
-    Returns:
-        The return value from `maya.cmds.getAttr`
-
-    """
-
-    try:
-        if cmds.mayaHasRenderSetup():
-            from . import lib_rendersetup
-            return lib_rendersetup.get_attr_in_layer(
-                attr, layer, as_string=as_string)
-    except AttributeError:
-        pass
-
-    # Ignore complex query if we're in the layer anyway
-    current_layer = cmds.editRenderLayerGlobals(query=True,
-                                                currentRenderLayer=True)
-    if layer == current_layer:
-        return cmds.getAttr(attr, asString=as_string)
-
-    connections = cmds.listConnections(attr,
-                                       plugs=True,
-                                       source=False,
-                                       destination=True,
-                                       type="renderLayer") or []
-    # Use a list (not a `filter` object) because the connections are
-    # tested for emptiness and iterated more than once below.
-    connections = [conn for conn in connections if conn.endswith(".plug")]
-    if not connections:
-        return cmds.getAttr(attr)
-
-    # Some value types perform a conversion when assigning
-    # TODO: See if there's a maya method to allow this conversion
-    #       instead of computing it ourselves.
-    attr_type = cmds.getAttr(attr, type=True)
-    conversion = None
-    if attr_type == "time":
-        conversion = mel.eval('currentTimeUnitToFPS()')  # returns float
-    elif attr_type == "doubleAngle":
-        # Radians to Degrees: 180 / pi
-        # TODO: This will likely only be correct when Maya units are set
-        #       to degrees
-        conversion = 57.2957795131
-    elif attr_type == "doubleLinear":
-        raise NotImplementedError("doubleLinear conversion not implemented.")
-
-    for connection in connections:
-        if connection.startswith(layer + "."):
-            attr_split = connection.split(".")
-            if attr_split[0] == layer:
-                attr = ".".join(attr_split[0:-1])
-                value = cmds.getAttr("%s.value" % attr)
-                if conversion:
-                    value *= conversion
-                return value
-
-    else:
-        # When connections are present, but none to the specific
-        # renderlayer, then the layer should have the
-        # "defaultRenderLayer"'s value
-        layer = "defaultRenderLayer"
-        for connection in connections:
-            if connection.startswith(layer):
-                attr_split = connection.split(".")
-                if attr_split[0] == "defaultRenderLayer":
-                    attr = ".".join(attr_split[0:-1])
-                    value = cmds.getAttr("%s.value" % attr)
-                    if conversion:
-                        value *= conversion
-                    return value
-
-    return cmds.getAttr(attr, asString=as_string)
-
-
-def fix_incompatible_containers():
-    """Backwards compatibility: old containers to use new ReferenceLoader"""
-    old_loaders = {
-        "MayaAsciiLoader",
-        "AbcLoader",
-        "ModelLoader",
-        "CameraLoader",
-        "RigLoader",
-        "FBXLoader"
-    }
-    host = registered_host()
-    for container in host.ls():
-        loader = container['loader']
-        if loader in old_loaders:
-            log.info(
-                "Converting legacy container loader {} to "
-                "ReferenceLoader: {}".format(loader, container["objectName"])
-            )
-            cmds.setAttr(container["objectName"] + ".loader",
-                         "ReferenceLoader", type="string")
-
-
-def update_content_on_context_change():
-    """Update scene content to match the new folder and task on context
-    change.
-    """
-
-    host = registered_host()
-    create_context = CreateContext(host)
-    task_entity = get_current_task_entity(fields={"attrib"})
-
-    instance_values = {
-        "folderPath": create_context.get_current_folder_path(),
-        "task": create_context.get_current_task_name(),
-    }
-    creator_attribute_values = {
-        "frameStart": task_entity["attrib"]["frameStart"],
-        "frameEnd": task_entity["attrib"]["frameEnd"],
-    }
-
-    has_changes = False
-    for instance in create_context.instances:
-        for key, value in instance_values.items():
-            if key not in instance or instance[key] == value:
-                continue
-
-            # Update instance value
-            print(f"Updating {instance.product_name} {key} to: {value}")
-            instance[key] = value
-            has_changes = True
-
-        creator_attributes = instance.creator_attributes
-        for key, value in creator_attribute_values.items():
-            if (
-                key not in creator_attributes
-                or creator_attributes[key] == value
-            ):
-                continue
-
-            # Update instance creator attribute value
-            print(f"Updating {instance.product_name} {key} to: {value}")
-            creator_attributes[key] = value
-            has_changes = True
-
-    if has_changes:
-        create_context.save_changes()
-
-
-def show_message(title, msg):
-    from qtpy import QtWidgets
-    from ayon_core.tools.utils import show_message_dialog
-
-    # Find maya main window
-    top_level_widgets = {w.objectName(): w for w in
-                         QtWidgets.QApplication.topLevelWidgets()}
-
-    parent = top_level_widgets.get("MayaWindow", None)
-    if parent is not None:
-        show_message_dialog(title=title, message=msg, parent=parent)
-
-
-def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None):
-    """Yield edits as a set of actions."""
-
-    attributes = relationships.get("attributes", [])
-    shader_data = relationships.get("relationships", {})
-
-    shading_engines = cmds.ls(shader_nodes, type="objectSet", long=True)
-    assert shading_engines, "Error in retrieving objectSets from reference"
-
-    # region compute lookup
-    shading_engines_by_id = defaultdict(list)
-    for shad in shading_engines:
-        shading_engines_by_id[get_id(shad)].append(shad)
-    # endregion
-
-    # region assign shading engines and other sets
-    for data in shader_data.values():
-        # collect all unique IDs of the set members
-        shader_uuid = data["uuid"]
-        member_uuids = [
-            (member["uuid"], member.get("components"))
-            for member in data["members"]]
-
-        filtered_nodes = list()
-        for _uuid, components in member_uuids:
-            nodes = nodes_by_id.get(_uuid, None)
-            if nodes is None:
-                continue
-
-            if components:
-                # Assign to the components
-                nodes = [".".join([node, components]) for node in nodes]
-
-            filtered_nodes.extend(nodes)
-
-        id_shading_engines = shading_engines_by_id[shader_uuid]
-        if not id_shading_engines:
-            log.error("{} - No shader found with cbId "
-                      "'{}'".format(label, shader_uuid))
-            continue
-        elif len(id_shading_engines) > 1:
-            log.error("{} - Skipping shader assignment. "
-                      "More than one shader found with cbId "
-                      "'{}'. (found: {})".format(label, shader_uuid,
-                                                 id_shading_engines))
-            continue
-
-        if not filtered_nodes:
-            log.warning("{} - No nodes found for shading engine "
-                        "'{}'".format(label, id_shading_engines[0]))
-            continue
-
-        yield {"action": "assign",
-               "uuid": data["uuid"],
-               "nodes": filtered_nodes,
-               "shader": id_shading_engines[0]}
-
-    for data in attributes:
-        nodes = nodes_by_id.get(data["uuid"], [])
-        attr_value = data["attributes"]
-        yield {"action": "setattr",
-               "uuid": data["uuid"],
-               "nodes": nodes,
-               "attributes": attr_value}
-
-
-def set_colorspace():
-    """Set colorspace from project configuration."""
-
-    project_name = get_current_project_name()
-    imageio = get_project_settings(project_name)["maya"]["imageio"]
-
-    # ocio compatibility variables
-    ocio_v2_maya_version = 2022
-    maya_version = int(cmds.about(version=True))
-    ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version
-    is_ocio_set = bool(os.environ.get("OCIO"))
-
-    use_workfile_settings = imageio.get("workfile", {}).get("enabled")
-    if use_workfile_settings:
-        root_dict = imageio["workfile"]
-    else:
-        # TODO: deprecated code from 3.15.5 - remove
-        # Maya 2022+ introduces new OCIO v2 color management settings that
-        # can override the old color management preferences. AYON has
-        # separate settings for both so we fall back when necessary.
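-        # Illustrative settings shape (simplified) for the fallback below:
-        #   {"colorManagementPreference_v2": {"enabled": True, ...},
-        #    "colorManagementPreference": {...}}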
- use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"] - if use_ocio_v2 and not ocio_v2_support: - # Fallback to legacy behavior with a warning - log.warning( - "Color Management Preference v2 is enabled but not " - "supported by current Maya version: {} (< {}). Falling " - "back to legacy settings.".format( - maya_version, ocio_v2_maya_version) - ) - - if use_ocio_v2: - root_dict = imageio["colorManagementPreference_v2"] - else: - root_dict = imageio["colorManagementPreference"] - - if not isinstance(root_dict, dict): - msg = "set_colorspace(): argument should be dictionary" - log.error(msg) - return - - # backward compatibility - # TODO: deprecated code from 3.15.5 - remove with deprecated code above - view_name = root_dict.get("viewTransform") - if view_name is None: - view_name = root_dict.get("viewName") - - log.debug(">> root_dict: {}".format(pformat(root_dict))) - if not root_dict: - return - - # set color spaces for rendering space and view transforms - def _colormanage(**kwargs): - """Wrapper around `cmds.colorManagementPrefs`. - - This logs errors instead of raising an error so color management - settings get applied as much as possible. - - """ - assert len(kwargs) == 1, "Must receive one keyword argument" - try: - cmds.colorManagementPrefs(edit=True, **kwargs) - log.debug("Setting Color Management Preference: {}".format(kwargs)) - except RuntimeError as exc: - log.error(exc) - - # enable color management - cmds.colorManagementPrefs(edit=True, cmEnabled=True) - cmds.colorManagementPrefs(edit=True, ocioRulesEnabled=True) - - if use_ocio_v2: - log.info("Using Maya OCIO v2") - if not is_ocio_set: - # Set the Maya 2022+ default OCIO v2 config file path - log.info("Setting default Maya OCIO v2 config") - # Note: Setting "" as value also sets this default however - # introduces a bug where launching a file on startup will prompt - # to save the empty scene before it, so we set using the path. 
- # This value has been the same for 2022, 2023 and 2024 - path = "/OCIO-configs/Maya2022-default/config.ocio" - cmds.colorManagementPrefs(edit=True, configFilePath=path) - - # set rendering space and view transform - _colormanage(renderingSpaceName=root_dict["renderSpace"]) - _colormanage(viewName=view_name) - _colormanage(displayName=root_dict["displayName"]) - else: - log.info("Using Maya OCIO v1 (legacy)") - if not is_ocio_set: - # Set the Maya default config file path - log.info("Setting default Maya OCIO v1 legacy config") - cmds.colorManagementPrefs(edit=True, configFilePath="legacy") - - # set rendering space and view transform - _colormanage(renderingSpaceName=root_dict["renderSpace"]) - _colormanage(viewTransformName=view_name) - - -@contextlib.contextmanager -def parent_nodes(nodes, parent=None): - # type: (list, str) -> list - """Context manager to un-parent provided nodes and return them back.""" - - def _as_mdagpath(node): - """Return MDagPath for node path.""" - if not node: - return - sel = OpenMaya.MSelectionList() - sel.add(node) - return sel.getDagPath(0) - - # We can only parent dag nodes so we ensure input contains only dag nodes - nodes = cmds.ls(nodes, type="dagNode", long=True) - if not nodes: - # opt-out early - yield - return - - parent_node_path = None - delete_parent = False - if parent: - if not cmds.objExists(parent): - parent_node = cmds.createNode("transform", - name=parent, - skipSelect=False) - delete_parent = True - else: - parent_node = parent - parent_node_path = cmds.ls(parent_node, long=True)[0] - - # Store original parents - node_parents = [] - for node in nodes: - node_parent = get_node_parent(node) - node_parents.append((_as_mdagpath(node), _as_mdagpath(node_parent))) - - try: - for node, node_parent in node_parents: - node_parent_path = node_parent.fullPathName() if node_parent else None # noqa - if node_parent_path == parent_node_path: - # Already a child - continue - - if parent_node_path: - cmds.parent(node.fullPathName(), parent_node_path) - else: - cmds.parent(node.fullPathName(), world=True) - - yield - finally: - # Reparent to original parents - for node, original_parent in node_parents: - node_path = node.fullPathName() - if not node_path: - # Node must have been deleted - continue - - node_parent_path = get_node_parent(node_path) - - original_parent_path = None - if original_parent: - original_parent_path = original_parent.fullPathName() - if not original_parent_path: - # Original parent node must have been deleted - continue - - if node_parent_path != original_parent_path: - if not original_parent_path: - cmds.parent(node_path, world=True) - else: - cmds.parent(node_path, original_parent_path) - - if delete_parent: - cmds.delete(parent_node_path) - - -@contextlib.contextmanager -def maintained_time(): - ct = cmds.currentTime(query=True) - try: - yield - finally: - cmds.currentTime(ct, edit=True) - - -def iter_visible_nodes_in_range(nodes, start, end): - """Yield nodes that are visible in start-end frame range. - - - Ignores intermediateObjects completely. - - Considers animated visibility attributes + upstream visibilities. - - This is optimized for large scenes where some nodes in the parent - hierarchy might have some input connections to the visibilities, - e.g. key, driven keys, connections to other attributes, etc. 
- - This only does a single time step to `start` if current frame is - not inside frame range since the assumption is made that changing - a frame isn't so slow that it beats querying all visibility - plugs through MDGContext on another frame. - - Args: - nodes (list): List of node names to consider. - start (int, float): Start frame. - end (int, float): End frame. - - Returns: - list: List of node names. These will be long full path names so - might have a longer name than the input nodes. - - """ - # States we consider per node - VISIBLE = 1 # always visible - INVISIBLE = 0 # always invisible - ANIMATED = -1 # animated visibility - - # Ensure integers - start = int(start) - end = int(end) - - # Consider only non-intermediate dag nodes and use the "long" names. - nodes = cmds.ls(nodes, long=True, noIntermediate=True, type="dagNode") - if not nodes: - return - - with maintained_time(): - # Go to first frame of the range if the current time is outside - # the queried range so can directly query all visible nodes on - # that frame. - current_time = cmds.currentTime(query=True) - if not (start <= current_time <= end): - cmds.currentTime(start) - - visible = cmds.ls(nodes, long=True, visible=True) - for node in visible: - yield node - if len(visible) == len(nodes) or start == end: - # All are visible on frame one, so they are at least visible once - # inside the frame range. - return - - # For the invisible ones check whether its visibility and/or - # any of its parents visibility attributes are animated. If so, it might - # get visible on other frames in the range. - def memodict(f): - """Memoization decorator for a function taking a single argument. - - See: http://code.activestate.com/recipes/ - 578231-probably-the-fastest-memoization-decorator-in-the-/ - """ - - class memodict(dict): - def __missing__(self, key): - ret = self[key] = f(key) - return ret - - return memodict().__getitem__ - - @memodict - def get_state(node): - plug = node + ".visibility" - connections = cmds.listConnections(plug, - source=True, - destination=False) - if connections: - return ANIMATED - else: - return VISIBLE if cmds.getAttr(plug) else INVISIBLE - - visible = set(visible) - invisible = [node for node in nodes if node not in visible] - always_invisible = set() - # Iterate over the nodes by short to long names to iterate the highest - # in hierarchy nodes first. So the collected data can be used from the - # cache for parent queries in next iterations. - node_dependencies = dict() - for node in sorted(invisible, key=len): - - state = get_state(node) - if state == INVISIBLE: - always_invisible.add(node) - continue - - # If not always invisible by itself we should go through and check - # the parents to see if any of them are always invisible. For those - # that are "ANIMATED" we consider that this node is dependent on - # that attribute, we store them as dependency. 
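-        # For example: if "|grp" has animated visibility, then "|grp|child"
-        # is stored with "|grp" as one of its dependencies.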
-
-        dependencies = set()
-        if state == ANIMATED:
-            dependencies.add(node)
-
-        traversed_parents = list()
-        for parent in iter_parents(node):
-
-            if parent in always_invisible or get_state(parent) == INVISIBLE:
-                # When parent is always invisible then consider this parent,
-                # this node we started from and any of the parents we
-                # have traversed in-between to be *always invisible*
-                always_invisible.add(parent)
-                always_invisible.add(node)
-                always_invisible.update(traversed_parents)
-                break
-
-            # If we have traversed the parent before and its visibility
-            # was dependent on animated visibilities then we can just extend
-            # its dependencies to those of this node and break further
-            # iteration upwards.
-            parent_dependencies = node_dependencies.get(parent, None)
-            if parent_dependencies is not None:
-                dependencies.update(parent_dependencies)
-                break
-
-            state = get_state(parent)
-            if state == ANIMATED:
-                dependencies.add(parent)
-
-            traversed_parents.append(parent)
-
-        if node not in always_invisible and dependencies:
-            node_dependencies[node] = dependencies
-
-    if not node_dependencies:
-        return
-
-    # Now we only have to check the visibilities for nodes that have animated
-    # visibility dependencies upstream. The fastest way to check these
-    # visibility attributes across different frames is with Python api 2.0
-    # so we do that.
-    @memodict
-    def get_visibility_mplug(node):
-        """Return api 2.0 MPlug with cached memoize decorator"""
-        sel = OpenMaya.MSelectionList()
-        sel.add(node)
-        dag = sel.getDagPath(0)
-        return OpenMaya.MFnDagNode(dag).findPlug("visibility", True)
-
-    @contextlib.contextmanager
-    def dgcontext(mtime):
-        """MDGContext context manager"""
-        context = OpenMaya.MDGContext(mtime)
-        try:
-            previous = context.makeCurrent()
-            yield context
-        finally:
-            previous.makeCurrent()
-
-    # We skip the first frame as we already used that frame to check for
-    # overall visibilities. And end+1 to include the end frame.
-    scene_units = OpenMaya.MTime.uiUnit()
-    for frame in range(start + 1, end + 1):
-        mtime = OpenMaya.MTime(frame, unit=scene_units)
-
-        # Build little cache so we don't query the same MPlug's value
-        # again if it was checked on this frame and also is a dependency
-        # for another node
-        frame_visibilities = {}
-        with dgcontext(mtime) as context:
-            for node, dependencies in list(node_dependencies.items()):
-                for dependency in dependencies:
-                    dependency_visible = frame_visibilities.get(dependency,
-                                                                None)
-                    if dependency_visible is None:
-                        mplug = get_visibility_mplug(dependency)
-                        dependency_visible = mplug.asBool(context)
-                        frame_visibilities[dependency] = dependency_visible
-
-                    if not dependency_visible:
-                        # One dependency is not visible, thus the
-                        # node is not visible.
-                        break
-
-                else:
-                    # All dependencies are visible.
-                    yield node
-                    # Remove node with dependencies for next frame iterations
-                    # because it was visible at least once.
-                    node_dependencies.pop(node)
-
-        # If there are no more nodes to process, break the frame iterations.
-        if not node_dependencies:
-            break
-
-
-def get_attribute_input(attr):
-    connections = cmds.listConnections(attr, plugs=True, destination=False)
-    return connections[0] if connections else None
-
-
-def convert_to_maya_fps(fps):
-    """Convert any fps to supported Maya framerates."""
-    float_framerates = [
-        23.976023976023978,
-        # Note: 29.97 is ambiguous here (drop-frame vs non-drop-frame).
-        29.97002997002997,
-        47.952047952047955,
-        59.94005994005994
-    ]
-    # 44100 fps evaluates as 41000.0. Why? Omitting for now.
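-    # Illustrative behavior of this function:
-    #   convert_to_maya_fps(24)     -> 24
-    #   convert_to_maya_fps(23.976) -> 23.976023976023978
-    #   convert_to_maya_fps(23.5)   -> ValueError (not close to any
-    #                                  supported framerate)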
-    int_framerates = [
-        2,
-        3,
-        4,
-        5,
-        6,
-        8,
-        10,
-        12,
-        15,
-        16,
-        20,
-        24,
-        25,
-        30,
-        40,
-        48,
-        50,
-        60,
-        75,
-        80,
-        90,
-        100,
-        120,
-        125,
-        150,
-        200,
-        240,
-        250,
-        300,
-        375,
-        400,
-        500,
-        600,
-        750,
-        1200,
-        1500,
-        2000,
-        3000,
-        6000,
-        48000
-    ]
-
-    # If the input fps is a whole number we'll return it as an int.
-    if float(fps).is_integer():
-        # Validate fps is part of Maya's fps selection.
-        if int(fps) not in int_framerates:
-            raise ValueError(
-                "Framerate \"{}\" is not supported in Maya".format(fps)
-            )
-        return int(fps)
-    else:
-        # Differences to supported float frame rates.
-        differences = []
-        for i in float_framerates:
-            differences.append(abs(i - fps))
-
-        # Validate the difference does not stray too far from the
-        # supported framerates.
-        min_difference = min(differences)
-        min_index = differences.index(min_difference)
-        supported_framerate = float_framerates[min_index]
-        if min_difference > 0.1:
-            raise ValueError(
-                "Framerate \"{}\" strays too far from any supported framerate"
-                " in Maya. Closest supported framerate is \"{}\"".format(
-                    fps, supported_framerate
-                )
-            )
-
-        return supported_framerate
-
-
-def write_xgen_file(data, filepath):
-    """Overwrite data in .xgen files.
-
-    Quite naive approach to mainly overwrite "xgDataPath" and "xgProjectPath".
-
-    Args:
-        data (dict): Dictionary of key, value. Key matches with xgen file.
-            For example:
-                {"xgDataPath": "some/path"}
-        filepath (string): Absolute path of .xgen file.
-    """
-    # Generate regex lookup for line to key; basically
-    # match any of the keys in `\t{key}\t\t`
-    keys = "|".join(re.escape(key) for key in data.keys())
-    re_keys = re.compile("^\t({})\t\t".format(keys))
-
-    lines = []
-    with open(filepath, "r") as f:
-        for line in f:
-            match = re_keys.match(line)
-            if match:
-                key = match.group(1)
-                value = data[key]
-                line = "\t{}\t\t{}\n".format(key, value)
-
-            lines.append(line)
-
-    with open(filepath, "w") as f:
-        f.writelines(lines)
-
-
-def get_color_management_preferences():
-    """Get and resolve OCIO preferences."""
-    data = {
-        # Is color management enabled.
-        "enabled": cmds.colorManagementPrefs(
-            query=True, cmEnabled=True
-        ),
-        "rendering_space": cmds.colorManagementPrefs(
-            query=True, renderingSpaceName=True
-        ),
-        "output_transform": cmds.colorManagementPrefs(
-            query=True, outputTransformName=True
-        ),
-        "output_transform_enabled": cmds.colorManagementPrefs(
-            query=True, outputTransformEnabled=True
-        ),
-        "view_transform": cmds.colorManagementPrefs(
-            query=True, viewTransformName=True
-        )
-    }
-
-    # Split view and display from view_transform. view_transform comes in
-    # format of "{view} ({display})".
-    regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
-    if int(cmds.about(version=True)) <= 2020:
-        # view_transform comes in format of "{view} {display}" in 2020.
-        regex = re.compile(r"^(?P<view>.+) (?P<display>.+)$")
-
-    match = regex.match(data["view_transform"])
-    if not match:
-        raise ValueError(
-            "Unable to parse view and display from Maya view transform: '{}' "
-            "using regex '{}'".format(data["view_transform"], regex.pattern)
-        )
-
-    data.update({
-        "display": match.group("display"),
-        "view": match.group("view")
-    })
-
-    # Get config absolute path.
-    path = cmds.colorManagementPrefs(
-        query=True, configFilePath=True
-    )
-
-    # The OCIO config path supports the custom <MAYA_RESOURCES> token.
-    maya_resources_token = "<MAYA_RESOURCES>"
-    maya_resources_path = OpenMaya.MGlobal.getAbsolutePathToResources()
-    path = path.replace(maya_resources_token, maya_resources_path)
-
-    data["config"] = path
-
-    return data
-
-
-def get_color_management_output_transform():
-    preferences = get_color_management_preferences()
-    colorspace = preferences["rendering_space"]
-    if preferences["output_transform_enabled"]:
-        colorspace = preferences["output_transform"]
-    return colorspace
-
-
-def image_info(file_path):
-    # type: (str) -> dict
-    """Based on the texture path, get its bit depth and format information.
-
-    Takes reference from makeTx.py in Arnold:
-        ImageInfo(filename): Get Image Information for colorspace
-        AiTextureGetFormat(filename): Get Texture Format
-        AiTextureGetBitDepth(filename): Get Texture bit depth
-
-    Args:
-        file_path (str): Path to the texture file.
-
-    Returns:
-        dict: Dictionary with the information about the texture file.
-    """
-    from arnold import (
-        AiTextureGetBitDepth,
-        AiTextureGetFormat
-    )
-    # Get Texture Information
-    img_info = {'filename': file_path}
-    if os.path.isfile(file_path):
-        img_info['bit_depth'] = AiTextureGetBitDepth(file_path)  # noqa
-        img_info['format'] = AiTextureGetFormat(file_path)  # noqa
-    else:
-        img_info['bit_depth'] = 8
-        img_info['format'] = "unknown"
-    return img_info
-
-
-def guess_colorspace(img_info):
-    # type: (dict) -> str
-    """Guess the colorspace of the input image filename.
-
-    Note:
-        Reference from makeTx.py
-
-    Args:
-        img_info (dict): Image info generated by :func:`image_info`
-
-    Returns:
-        str: color space name used in the `--colorconvert`
-            option of maketx.
-    """
-    from arnold import (
-        AiTextureInvalidate,
-        # types
-        AI_TYPE_BYTE,
-        AI_TYPE_INT,
-        AI_TYPE_UINT
-    )
-    try:
-        if img_info['bit_depth'] <= 16:
-            if img_info['format'] in (AI_TYPE_BYTE, AI_TYPE_INT, AI_TYPE_UINT):  # noqa
-                return 'sRGB'
-            else:
-                return 'linear'
-        # now discard the image file as AiTextureGetFormat has loaded it
-        AiTextureInvalidate(img_info['filename'])  # noqa
-    except ValueError:
-        print(("[maketx] Error: Could not guess "
-               "colorspace for {}").format(img_info["filename"]))
-    return "linear"
-
-
-def len_flattened(components):
-    """Return the length of the list as if it was flattened.
-
-    Maya will return consecutive components as a single entry
-    when requesting with `maya.cmds.ls` without the `flatten`
-    flag. Though enabling `flatten` on a large list (e.g. millions)
-    will result in a slow result. This command will return the amount
-    of entries in a non-flattened list by parsing the result with
-    regex.
-
-    Args:
-        components (list): The non-flattened components.
-
-    Returns:
-        int: The amount of entries.
-
-    """
-    assert isinstance(components, (list, tuple))
-    n = 0
-
-    pattern = re.compile(r"\[(\d+):(\d+)\]")
-    for c in components:
-        match = pattern.search(c)
-        if match:
-            start, end = match.groups()
-            n += int(end) - int(start) + 1
-        else:
-            n += 1
-    return n
-
-
-def get_all_children(nodes, ignore_intermediate_objects=False):
-    """Return all children of `nodes` including each instanced child.
-
-    Using maya.cmds.listRelatives(allDescendents=True) includes only the first
-    instance. As such, this function acts as an optimal replacement with a
-    focus on a fast query.
-
-    Args:
-        nodes (iterable): List of nodes to get children for.
-        ignore_intermediate_objects (bool): Ignore any children that
-            are intermediate objects.
-
-    Returns:
-        set: Children of input nodes.
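-
-    Example:
-        Hypothetical scene with a group containing a single sphere:
-
-        >>> get_all_children(["|grp"])  # doctest: +SKIP
-        {'|grp|pSphere1', '|grp|pSphere1|pSphereShape1'}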
- - """ - - sel = OpenMaya.MSelectionList() - traversed = set() - iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst) - fn_dag = OpenMaya.MFnDagNode() - for node in nodes: - - if node in traversed: - # Ignore if already processed as a child - # before - continue - - sel.clear() - sel.add(node) - dag = sel.getDagPath(0) - - iterator.reset(dag) - # ignore self - iterator.next() # noqa: B305 - while not iterator.isDone(): - - if ignore_intermediate_objects: - fn_dag.setObject(iterator.currentItem()) - if fn_dag.isIntermediateObject: - iterator.prune() - iterator.next() # noqa: B305 - continue - - path = iterator.fullPathName() - - if path in traversed: - iterator.prune() - iterator.next() # noqa: B305 - continue - - traversed.add(path) - iterator.next() # noqa: B305 - - return traversed - - -def get_capture_preset( - task_name, task_type, product_name, project_settings, log -): - """Get capture preset for playblasting. - - Logic for transitioning from old style capture preset to new capture preset - profiles. - - Args: - task_name (str): Task name. - task_type (str): Task type. - product_name (str): Product name. - project_settings (dict): Project settings. - log (logging.Logger): Logging object. - """ - capture_preset = None - filtering_criteria = { - "task_names": task_name, - "task_types": task_type, - "product_names": product_name - } - - plugin_settings = project_settings["maya"]["publish"]["ExtractPlayblast"] - if plugin_settings["profiles"]: - profile = filter_profiles( - plugin_settings["profiles"], - filtering_criteria, - logger=log - ) - capture_preset = profile.get("capture_preset") - else: - log.warning("No profiles present for Extract Playblast") - - # Backward compatibility for deprecated Extract Playblast settings - # without profiles. - if capture_preset is None: - log.debug( - "Falling back to deprecated Extract Playblast capture preset " - "because no new style playblast profiles are defined." - ) - capture_preset = plugin_settings.get("capture_preset") - - if capture_preset: - # Create deepcopy of preset as we'll change the values - capture_preset = copy.deepcopy(capture_preset) - - viewport_options = capture_preset["ViewportOptions"] - # Change 'list' to 'dict' for 'capture.py' - viewport_options["pluginObjects"] = { - item["name"]: item["value"] - for item in viewport_options["pluginObjects"] - } - return capture_preset or {} - - -def get_reference_node(members, log=None): - """Get the reference node from the container members - Args: - members: list of node names - - Returns: - str: Reference node name. - - """ - - # Collect the references without .placeHolderList[] attributes as - # unique entries (objects only) and skipping the sharedReferenceNode. 
- references = set() - for ref in cmds.ls(members, exactType="reference", objectsOnly=True): - - # Ignore any `:sharedReferenceNode` - if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"): - continue - - # Ignore _UNKNOWN_REF_NODE_ (PLN-160) - if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"): - continue - - if not is_valid_reference_node(ref): - continue - - references.add(ref) - - assert references, "No reference node found in container" - - # Get highest reference node (least parents) - highest = min(references, - key=lambda x: len(get_reference_node_parents(x))) - - # Warn the user when we're taking the highest reference node - if len(references) > 1: - if not log: - log = logging.getLogger(__name__) - - log.warning("More than one reference node found in " - "container, using highest reference node: " - "%s (in: %s)", highest, list(references)) - - return highest - - -def get_reference_node_parents(ref): - """Return all parent reference nodes of reference node - - Args: - ref (str): reference node. - - Returns: - list: The upstream parent reference nodes. - - """ - def _get_parent(reference_node): - """Return parent reference node, but ignore invalid reference nodes""" - if not is_valid_reference_node(reference_node): - return - return cmds.referenceQuery(reference_node, - referenceNode=True, - parent=True) - - parent = _get_parent(ref) - parents = [] - while parent: - parents.append(parent) - parent = _get_parent(parent) - return parents - - -def create_rig_animation_instance( - nodes, context, namespace, options=None, log=None -): - """Create an animation publish instance for loaded rigs. - - See the RecreateRigAnimationInstance inventory action on how to use this - for loaded rig containers. - - Arguments: - nodes (list): Member nodes of the rig instance. - context (dict): Representation context of the rig container - namespace (str): Namespace of the rig container - options (dict, optional): Additional loader data - log (logging.Logger, optional): Logger to log to if provided - - Returns: - None - - """ - if options is None: - options = {} - name = context["representation"]["name"] - output = next((node for node in nodes if - node.endswith("out_SET")), None) - controls = next((node for node in nodes if - node.endswith("controls_SET")), None) - if name != "fbx": - assert output, "No out_SET in rig, this is a bug." - assert controls, "No controls_SET in rig, this is a bug." - - anim_skeleton = next((node for node in nodes if - node.endswith("skeletonAnim_SET")), None) - skeleton_mesh = next((node for node in nodes if - node.endswith("skeletonMesh_SET")), None) - - # Find the roots amongst the loaded nodes - roots = ( - cmds.ls(nodes, assemblies=True, long=True) or - get_highest_in_hierarchy(nodes) - ) - assert roots, "No root nodes in rig, this is a bug." 
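-    # Note: `roots` now holds the rig's top-level transforms (e.g. a single
-    # "|rig" group); they are selected together with the rig sets below when
-    # creating the animation instance.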
- - folder_entity = context["folder"] - product_entity = context["product"] - product_type = product_entity["productType"] - product_name = product_entity["name"] - - custom_product_name = options.get("animationProductName") - if custom_product_name: - formatting_data = { - "folder": { - "name": folder_entity["name"] - }, - "product": { - "type": product_type, - "name": product_name, - }, - "asset": folder_entity["name"], - "subset": product_name, - "family": product_type - } - namespace = get_custom_namespace( - custom_product_name.format(**formatting_data) - ) - - if log: - log.info("Creating product: {}".format(namespace)) - - # Fill creator identifier - creator_identifier = "io.openpype.creators.maya.animation" - - host = registered_host() - create_context = CreateContext(host) - # Create the animation instance - rig_sets = [output, controls, anim_skeleton, skeleton_mesh] - # Remove sets that this particular rig does not have - rig_sets = [s for s in rig_sets if s is not None] - with maintained_selection(): - cmds.select(rig_sets + roots, noExpand=True) - create_context.create( - creator_identifier=creator_identifier, - variant=namespace, - pre_create_data={"use_selection": True} - ) - - -def get_node_index_under_parent(node: str) -> int: - """Return the index of a DAG node under its parent. - - Arguments: - node (str): A DAG Node path. - - Returns: - int: The DAG node's index under its parents or world - - """ - node = cmds.ls(node, long=True)[0] # enforce long names - parent = node.rsplit("|", 1)[0] - if not parent: - return cmds.ls(assemblies=True, long=True).index(node) - else: - return cmds.listRelatives(parent, - children=True, - fullPath=True).index(node) diff --git a/server_addon/maya/client/ayon_maya/api/lib_renderproducts.py b/server_addon/maya/client/ayon_maya/api/lib_renderproducts.py deleted file mode 100644 index 52c282c6de..0000000000 --- a/server_addon/maya/client/ayon_maya/api/lib_renderproducts.py +++ /dev/null @@ -1,1469 +0,0 @@ -# -*- coding: utf-8 -*- -"""Module handling expected render output from Maya. - -This module is used in :mod:`collect_render` and :mod:`collect_vray_scene`. - -Note: - To implement new renderer, just create new class inheriting from - :class:`ARenderProducts` and add it to :func:`RenderProducts.get()`. - -Attributes: - R_SINGLE_FRAME (:class:`re.Pattern`): Find single frame number. - R_FRAME_RANGE (:class:`re.Pattern`): Find frame range. - R_FRAME_NUMBER (:class:`re.Pattern`): Find frame number in string. - R_LAYER_TOKEN (:class:`re.Pattern`): Find layer token in image prefixes. - R_AOV_TOKEN (:class:`re.Pattern`): Find AOV token in image prefixes. - R_SUBSTITUTE_AOV_TOKEN (:class:`re.Pattern`): Find and substitute AOV token - in image prefixes. - R_REMOVE_AOV_TOKEN (:class:`re.Pattern`): Find and remove AOV token in - image prefixes. - R_CLEAN_FRAME_TOKEN (:class:`re.Pattern`): Find and remove unfilled - Renderman frame token in image prefix. - R_CLEAN_EXT_TOKEN (:class:`re.Pattern`): Find and remove unfilled Renderman - extension token in image prefix. - R_SUBSTITUTE_LAYER_TOKEN (:class:`re.Pattern`): Find and substitute render - layer token in image prefixes. - R_SUBSTITUTE_SCENE_TOKEN (:class:`re.Pattern`): Find and substitute scene - token in image prefixes. - R_SUBSTITUTE_CAMERA_TOKEN (:class:`re.Pattern`): Find and substitute camera - token in image prefixes. - IMAGE_PREFIXES (dict): Mapping between renderers and their respective - image prefix attribute names. 
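-
-Example:
-    Hypothetical entry point for a scene with a Render Setup layer named
-    "rs_beauty":
-
-    >>> layer_products = get("rs_beauty")  # doctest: +SKIP
-    >>> layer_products.layer_data.products  # doctest: +SKIP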
-
-Thanks:
-    Roy Nieterau (BigRoy) / Colorbleed for overhaul of original
-    *expected_files*.
-
-"""
-
-import logging
-import re
-import os
-from abc import ABCMeta, abstractmethod
-
-import six
-import attr
-
-from . import lib
-from . import lib_rendersetup
-from ayon_core.pipeline.colorspace import get_ocio_config_views
-
-from maya import cmds, mel
-
-log = logging.getLogger(__name__)
-
-R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
-R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
-R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
-R_LAYER_TOKEN = re.compile(
-    r".*((?:%l)|(?:<layer>)|(?:<renderlayer>)).*", re.IGNORECASE
-)
-R_AOV_TOKEN = re.compile(
-    r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
-R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
-R_REMOVE_AOV_TOKEN = re.compile(
-    r"_%a|\.%a|_<aov>|\.<aov>|_<renderpass>|\.<renderpass>", re.IGNORECASE)
-# to remove unused renderman tokens
-R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d+>\.?", re.IGNORECASE)
-R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
-
-R_SUBSTITUTE_LAYER_TOKEN = re.compile(
-    r"%l|<layer>|<renderlayer>", re.IGNORECASE
-)
-R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
-R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
-
-# not sure about the renderman image prefix
-IMAGE_PREFIXES = {
-    "vray": "vraySettings.fileNamePrefix",
-    "arnold": "defaultRenderGlobals.imageFilePrefix",
-    "renderman": "rmanGlobals.imageFileFormat",
-    "redshift": "defaultRenderGlobals.imageFilePrefix",
-    "mayahardware2": "defaultRenderGlobals.imageFilePrefix"
-}
-
-RENDERMAN_IMAGE_DIR = "<scene>/<layer>"
-
-
-def has_tokens(string, tokens):
-    """Return whether any of tokens is in input string (case-insensitive)"""
-    pattern = "({})".format("|".join(re.escape(token) for token in tokens))
-    match = re.search(pattern, string, re.IGNORECASE)
-    return bool(match)
-
-
-@attr.s
-class LayerMetadata(object):
-    """Data class for Render Layer metadata."""
-    frameStart = attr.ib()
-    frameEnd = attr.ib()
-    cameras = attr.ib()
-    sceneName = attr.ib()
-    layerName = attr.ib()
-    renderer = attr.ib()
-    defaultExt = attr.ib()
-    filePrefix = attr.ib()
-    frameStep = attr.ib(default=1)
-    padding = attr.ib(default=4)
-
-    # Render Products
-    products = attr.ib(init=False, default=attr.Factory(list))
-
-    # The AOV separator token. Note that not all renderers define an explicit
-    # render separator but allow to put the AOV/RenderPass token anywhere in
-    # the file path prefix. For those renderers we'll fall back to whatever
-    # is between the last occurrences of <renderlayer> and <renderpass>
-    # tokens.
-    aov_separator = attr.ib(default="_")
-
-
-@attr.s
-class RenderProduct(object):
-    """Describes an image or other file-like artifact produced by a render.
-
-    Warning:
-        This currently does NOT return a product PER render camera.
-        A single Render Product will generate files per camera. E.g. with two
-        cameras each render product generates two sequences on disk assuming
-        the file path prefix correctly uses the <camera> tokens.
-
-    """
-    productName = attr.ib()
-    ext = attr.ib()  # extension
-    colorspace = attr.ib()  # colorspace
-    aov = attr.ib(default=None)  # source aov
-    driver = attr.ib(default=None)  # source driver
-    multipart = attr.ib(default=False)  # multichannel file
-    camera = attr.ib(default=None)  # used only when rendering
-    #                                 from multiple cameras
-
-
-def get(layer, render_instance=None):
-    # type: (str, object) -> ARenderProducts
-    """Get render details and products for given renderer and render layer.
-
-    Args:
-        layer (str): Name of render layer
-        render_instance (pyblish.api.Instance): Publish instance.
-            If not provided an empty mock instance is used.
-
-    Returns:
-        ARenderProducts: The correct RenderProducts instance for that
-            renderlayer.
-
-    Raises:
-        :exc:`UnsupportedRendererException`: If requested renderer
-            is not supported. It needs to be implemented by extending
-            :class:`ARenderProducts` and added to this method's ``if``
-            statement.
-
-    """
-
-    if render_instance is None:
-        # For now produce a mock instance
-        class Instance(object):
-            data = {}
-        render_instance = Instance()
-
-    renderer_name = lib.get_attr_in_layer(
-        "defaultRenderGlobals.currentRenderer",
-        layer=layer
-    )
-
-    renderer = {
-        "arnold": RenderProductsArnold,
-        "vray": RenderProductsVray,
-        "redshift": RenderProductsRedshift,
-        "renderman": RenderProductsRenderman,
-        "mayahardware2": RenderProductsMayaHardware
-    }.get(renderer_name.lower(), None)
-    if renderer is None:
-        raise UnsupportedRendererException(
-            "Unsupported renderer: {}".format(renderer_name)
-        )
-
-    return renderer(layer, render_instance)
-
-
-@six.add_metaclass(ABCMeta)
-class ARenderProducts:
-    """Abstract class with common code for all renderers.
-
-    Attributes:
-        renderer (str): name of renderer.
-
-    """
-
-    renderer = None
-
-    def __init__(self, layer, render_instance):
-        """Constructor."""
-        self.layer = layer
-        self.render_instance = render_instance
-        self.multipart = self.get_multipart()
-
-        # Initialize
-        self.layer_data = self._get_layer_data()
-        self.layer_data.products = self.get_render_products()
-
-    def get_multipart(self):
-        raise NotImplementedError(
-            "The render product implementation does not have a "
-            "\"get_multipart\" method."
-        )
-
-    def has_camera_token(self):
-        # type: () -> bool
-        """Check if the <camera> token is in the image prefix.
-
-        Returns:
-            bool: True/False if camera token is present.
-
-        """
-        return "<camera>" in self.layer_data.filePrefix.lower()
-
-    @abstractmethod
-    def get_render_products(self):
-        """To be implemented by renderer class.
-
-        This should return a list of RenderProducts.
-
-        Returns:
-            list: List of RenderProduct
-
-        """
-
-    @staticmethod
-    def sanitize_camera_name(camera):
-        # type: (str) -> str
-        """Sanitize camera name.
-
-        Remove Maya illegal characters from camera name.
-
-        Args:
-            camera (str): Maya camera name.
-
-        Returns:
-            (str): sanitized camera name
-
-        Example:
-            >>> ARenderProducts.sanitize_camera_name('test:camera_01')
-            test_camera_01
-
-        """
-        return re.sub('[^0-9a-zA-Z_]+', '_', camera)
-
-    def get_renderer_prefix(self):
-        # type: () -> str
-        """Return image prefix for specific renderer.
-
-        This is for most renderers the same and can be overridden if needed.
-
-        Returns:
-            str: String with image prefix containing tokens
-
-        Raises:
-            :exc:`UnsupportedRendererException`: If we requested image
-                prefix for renderer we know nothing about.
-                See :data:`IMAGE_PREFIXES` for mapping of renderers and
-                image prefixes.
-
-        """
-        try:
-            prefix_attr = IMAGE_PREFIXES[self.renderer]
-        except KeyError:
-            raise UnsupportedRendererException(
-                "Unsupported renderer {}".format(self.renderer)
-            )
-
-        # Note: When this attribute is never set (e.g. on maya launch) then
-        # this can return None even though it is a string attribute
-        prefix = self._get_attr(prefix_attr)
-
-        if not prefix:
-            # Fall back to scene name by default
-            log.warning("Image prefix not set, using <Scene>")
-            prefix = "<Scene>"
-
-        return prefix
-
-    def get_render_attribute(self, attribute):
-        """Get attribute from render options.
-
-        Args:
-            attribute (str): name of attribute to be looked up.
-
-        Returns:
-            Attribute value
-
-        """
-        return self._get_attr("defaultRenderGlobals", attribute)
-
-    def _get_attr(self, node_attr, attribute=None, as_string=True):
-        """Return the value of the attribute in the renderlayer
-
-        For readability this allows passing in the attribute in two ways.
-
-        As a single argument:
-            _get_attr("node.attr")
-        Or as two arguments:
-            _get_attr("node", "attr")
-
-        Returns:
-            Value of the attribute inside the layer this instance is set to.
-
-        """
-
-        if attribute is None:
-            plug = node_attr
-        else:
-            plug = "{}.{}".format(node_attr, attribute)
-
-        return lib.get_attr_in_layer(plug, layer=self.layer,
-                                     as_string=as_string)
-
-    @staticmethod
-    def extract_separator(file_prefix):
-        """Extract AOV separator character from the prefix.
-
-        Default behavior extracts the part between the
-        last occurrences of <RenderLayer> and <RenderPass>
-
-        Todo:
-            This code also triggers for V-Ray which overrides it explicitly
-            so this code will invalidly debug log it couldn't extract the
-            AOV separator even though it does set it in RenderProductsVray.
-
-        Args:
-            file_prefix (str): File prefix with tokens.
-
-        Returns:
-            str or None: prefix character if it can be extracted.
-        """
-        layer_tokens = ["<renderlayer>", "<layer>"]
-        aov_tokens = ["<aov>", "<renderpass>"]
-
-        def match_last(tokens, text):
-            """regex match the last occurrence from a list of tokens"""
-            pattern = "(?:.*)({})".format("|".join(tokens))
-            return re.search(pattern, text, re.IGNORECASE)
-
-        layer_match = match_last(layer_tokens, file_prefix)
-        aov_match = match_last(aov_tokens, file_prefix)
-        separator = None
-        if layer_match and aov_match:
-            matches = sorted((layer_match, aov_match),
-                             key=lambda match: match.end(1))
-            separator = file_prefix[matches[0].end(1):matches[1].start(1)]
-        return separator
-
-    def _get_layer_data(self):
-        # type: () -> LayerMetadata
-        #                      ______________________________________________
-        # ____________________/ ____________________________________________/
-        # 1 - get scene name  /__________________/
-        # ____________________/
-        _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
-        scene_name, _ = os.path.splitext(scene_basename)
-        kwargs = {}
-        file_prefix = self.get_renderer_prefix()
-
-        # If the Render Layer belongs to a Render Setup layer then the
-        # output name is based on the Render Setup Layer name without
-        # the `rs_` prefix.
-        layer_name = self.layer
-        rs_layer = lib_rendersetup.get_rendersetup_layer(layer_name)
-        if rs_layer:
-            layer_name = rs_layer
-
-        if self.layer == "defaultRenderLayer":
-            # defaultRenderLayer renders as masterLayer
-            layer_name = "masterLayer"
-
-        separator = self.extract_separator(file_prefix)
-        if separator:
-            kwargs["aov_separator"] = separator
-        else:
-            log.debug("Couldn't extract aov separator from "
-                      "file prefix: {}".format(file_prefix))
-
-        # todo: Support Custom Frames sequences 0,5-10,100-120
-        #       Deadline allows submitting renders with a custom frame list;
-        #       to support those cases we might want to allow 'custom frames'
-        #       to be overridden to `ExpectFiles` class?
-        return LayerMetadata(
-            frameStart=int(self.get_render_attribute("startFrame")),
-            frameEnd=int(self.get_render_attribute("endFrame")),
-            frameStep=int(self.get_render_attribute("byFrameStep")),
-            padding=int(self.get_render_attribute("extensionPadding")),
-            # if we have a <camera> token in prefix path we'll expect output
-            # for every renderable camera in layer.
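-            # (Illustrative: a prefix such as
-            #  "maya/<Scene>/<RenderLayer>/<RenderLayer>_<Camera>" yields
-            #  one file sequence per renderable camera.)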
-    def _get_layer_data(self):
-        # type: () -> LayerMetadata
-        #                      ______________________________________________
-        # ____________________/ ____________________________________________/
-        # 1 - get scene name  /__________________/
-        # ____________________/
-        _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
-        scene_name, _ = os.path.splitext(scene_basename)
-        kwargs = {}
-        file_prefix = self.get_renderer_prefix()
-
-        # If the Render Layer belongs to a Render Setup layer then the
-        # output name is based on the Render Setup Layer name without
-        # the `rs_` prefix.
-        layer_name = self.layer
-        rs_layer = lib_rendersetup.get_rendersetup_layer(layer_name)
-        if rs_layer:
-            layer_name = rs_layer
-
-        if self.layer == "defaultRenderLayer":
-            # defaultRenderLayer renders as masterLayer
-            layer_name = "masterLayer"
-
-        separator = self.extract_separator(file_prefix)
-        if separator:
-            kwargs["aov_separator"] = separator
-        else:
-            log.debug("Couldn't extract aov separator from "
-                      "file prefix: {}".format(file_prefix))
-
-        # todo: Support Custom Frames sequences 0,5-10,100-120
-        #       Deadline allows submitting renders with a custom frame list
-        #       to support those cases we might want to allow 'custom frames'
-        #       to be overridden to `ExpectFiles` class?
-        return LayerMetadata(
-            frameStart=int(self.get_render_attribute("startFrame")),
-            frameEnd=int(self.get_render_attribute("endFrame")),
-            frameStep=int(self.get_render_attribute("byFrameStep")),
-            padding=int(self.get_render_attribute("extensionPadding")),
-            # if we have <camera> token in prefix path we'll expect output for
-            # every renderable camera in layer.
-            cameras=self.get_renderable_cameras(),
-            sceneName=scene_name,
-            layerName=layer_name,
-            renderer=self.renderer,
-            defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"),
-            filePrefix=file_prefix,
-            **kwargs
-        )
-
-    def _generate_file_sequence(
-            self, layer_data,
-            force_aov_name=None,
-            force_ext=None,
-            force_cameras=None):
-        # type: (LayerMetadata, str, str, list) -> list
-        expected_files = []
-        cameras = force_cameras or layer_data.cameras
-        ext = force_ext or layer_data.defaultExt
-        for cam in cameras:
-            file_prefix = layer_data.filePrefix
-            mappings = (
-                (R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
-                (R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
-                (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)),
-                # this is required to remove unfilled aov token, for example
-                # in Redshift
-                (R_REMOVE_AOV_TOKEN, "") if not force_aov_name \
-                else (R_SUBSTITUTE_AOV_TOKEN, force_aov_name),
-
-                (R_CLEAN_FRAME_TOKEN, ""),
-                (R_CLEAN_EXT_TOKEN, ""),
-            )
-
-            for regex, value in mappings:
-                file_prefix = re.sub(regex, value, file_prefix)
-
-            for frame in range(
-                    int(layer_data.frameStart),
-                    int(layer_data.frameEnd) + 1,
-                    int(layer_data.frameStep),
-            ):
-                frame_str = str(frame).rjust(layer_data.padding, "0")
-                expected_files.append(
-                    "{}.{}.{}".format(file_prefix, frame_str, ext)
-                )
-        return expected_files
-
-    def get_files(self, product):
-        # type: (RenderProduct) -> list
-        """Return list of expected files.
-
-        It will translate render token strings ('<RenderPass>', etc.) to
-        their values. This task is tricky as every renderer deals with this
-        differently. That's why we expose `get_files` as a method on the
-        Renderer class so it can be overridden for complex cases.
-
-        Args:
-            product (RenderProduct): Render product to be used for file
-                generation.
-
-        Returns:
-            List of files
-
-        """
-        return self._generate_file_sequence(
-            self.layer_data,
-            force_aov_name=product.productName,
-            force_ext=product.ext,
-            force_cameras=[product.camera]
-        )
-
-    def get_renderable_cameras(self):
-        # type: () -> list
-        """Get all renderable camera transforms.
-
-        Returns:
-            list: list of renderable cameras.
-
-        """
-
-        renderable_cameras = [
-            cam for cam in cmds.ls(cameras=True)
-            if self._get_attr(cam, "renderable")
-        ]
-
-        # The output produces a sanitized name for <camera> using its
-        # shortest unique path of the transform so we'll return
-        # at least that unique path. This could include a parent
-        # name too when two cameras have the same name but are
-        # in a different hierarchy, e.g. "group1|cam" and "group2|cam"
-        def get_name(camera):
-            return cmds.ls(cmds.listRelatives(camera,
-                                              parent=True,
-                                              fullPath=True))[0]
-
-        return [get_name(cam) for cam in renderable_cameras]
-
-
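Put concretely: for a layer covering frames 1-3 with a frame padding of 4
and an "exr" extension, the sequence generation above boils down to the
following sketch (values are hypothetical; in practice they come from the
LayerMetadata of the layer being published):

    prefix = "shot010/CHAR/CHAR_beauty"  # tokens already substituted
    files = [
        "{}.{}.{}".format(prefix, str(frame).rjust(4, "0"), "exr")
        for frame in range(1, 3 + 1)
    ]
    # ['shot010/CHAR/CHAR_beauty.0001.exr',
    #  'shot010/CHAR/CHAR_beauty.0002.exr',
    #  'shot010/CHAR/CHAR_beauty.0003.exr']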
- - """ - renderer = "arnold" - aiDriverExtension = { - "jpeg": "jpg", - "exr": "exr", - "deepexr": "exr", - "png": "png", - "tiff": "tif", - "mtoa_shaders": "ass", # TODO: research what those last two should be - "maya": "", - } - - def get_renderer_prefix(self): - - prefix = super(RenderProductsArnold, self).get_renderer_prefix() - merge_aovs = self._get_attr("defaultArnoldDriver.mergeAOVs") - if not merge_aovs and "" not in prefix.lower(): - # When Merge AOVs is disabled and token not present - # then Arnold prepends / to the output path. - # todo: It's untested what happens if AOV driver has an - # an explicit override path prefix. - prefix = "/" + prefix - - return prefix - - def get_multipart(self): - multipart = False - multilayer = bool(self._get_attr("defaultArnoldDriver.multipart")) - merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs")) - if multilayer or merge_AOVs: - multipart = True - - return multipart - - def _get_aov_render_products(self, aov, cameras=None): - """Return all render products for the AOV""" - - products = [] - aov_name = self._get_attr(aov, "name") - ai_drivers = cmds.listConnections("{}.outputs".format(aov), - source=True, - destination=False, - type="aiAOVDriver") or [] - if not cameras: - cameras = [ - self.sanitize_camera_name( - self.get_renderable_cameras()[0] - ) - ] - - for ai_driver in ai_drivers: - colorspace = self._get_colorspace( - ai_driver + ".colorManagement" - ) - # todo: check aiAOVDriver.prefix as it could have - # a custom path prefix set for this driver - - # Skip Drivers set only for GUI - # 0: GUI, 1: Batch, 2: GUI and Batch - output_mode = self._get_attr(ai_driver, "outputMode") - if output_mode == 0: # GUI only - log.warning("%s has Output Mode set to GUI, " - "skipping...", ai_driver) - continue - - ai_translator = self._get_attr(ai_driver, "aiTranslator") - try: - ext = self.aiDriverExtension[ai_translator] - except KeyError: - raise AOVError( - "Unrecognized arnold driver format " - "for AOV - {}".format(aov_name) - ) - - # If aov RGBA is selected, arnold will translate it to `beauty` - name = aov_name - if name == "RGBA": - name = "beauty" - - # Support Arnold light groups for AOVs - # Global AOV: When disabled the main layer is - # not written: `{pass}` - # All Light Groups: When enabled, a `{pass}_lgroups` file is - # written and is always merged into a - # single file - # Light Groups List: When set, a product per light - # group is written - # e.g. {pass}_front, {pass}_rim - global_aov = self._get_attr(aov, "globalAov") - if global_aov: - for camera in cameras: - product = RenderProduct( - productName=name, - ext=ext, - aov=aov_name, - driver=ai_driver, - multipart=self.multipart, - camera=camera, - colorspace=colorspace - ) - products.append(product) - - all_light_groups = self._get_attr(aov, "lightGroups") - if all_light_groups: - # All light groups is enabled. 
-    def _get_aov_render_products(self, aov, cameras=None):
-        """Return all render products for the AOV"""
-
-        products = []
-        aov_name = self._get_attr(aov, "name")
-        ai_drivers = cmds.listConnections("{}.outputs".format(aov),
-                                          source=True,
-                                          destination=False,
-                                          type="aiAOVDriver") or []
-        if not cameras:
-            cameras = [
-                self.sanitize_camera_name(
-                    self.get_renderable_cameras()[0]
-                )
-            ]
-
-        for ai_driver in ai_drivers:
-            colorspace = self._get_colorspace(
-                ai_driver + ".colorManagement"
-            )
-            # todo: check aiAOVDriver.prefix as it could have
-            #       a custom path prefix set for this driver
-
-            # Skip Drivers set only for GUI
-            # 0: GUI, 1: Batch, 2: GUI and Batch
-            output_mode = self._get_attr(ai_driver, "outputMode")
-            if output_mode == 0:  # GUI only
-                log.warning("%s has Output Mode set to GUI, "
-                            "skipping...", ai_driver)
-                continue
-
-            ai_translator = self._get_attr(ai_driver, "aiTranslator")
-            try:
-                ext = self.aiDriverExtension[ai_translator]
-            except KeyError:
-                raise AOVError(
-                    "Unrecognized arnold driver format "
-                    "for AOV - {}".format(aov_name)
-                )
-
-            # If aov RGBA is selected, arnold will translate it to `beauty`
-            name = aov_name
-            if name == "RGBA":
-                name = "beauty"
-
-            # Support Arnold light groups for AOVs
-            # Global AOV: When disabled the main layer is
-            #             not written: `{pass}`
-            # All Light Groups: When enabled, a `{pass}_lgroups` file is
-            #                   written and is always merged into a
-            #                   single file
-            # Light Groups List: When set, a product per light
-            #                    group is written
-            #                    e.g. {pass}_front, {pass}_rim
-            global_aov = self._get_attr(aov, "globalAov")
-            if global_aov:
-                for camera in cameras:
-                    product = RenderProduct(
-                        productName=name,
-                        ext=ext,
-                        aov=aov_name,
-                        driver=ai_driver,
-                        multipart=self.multipart,
-                        camera=camera,
-                        colorspace=colorspace
-                    )
-                    products.append(product)
-
-            all_light_groups = self._get_attr(aov, "lightGroups")
-            if all_light_groups:
-                # All light groups is enabled. A single multipart
-                # Render Product
-                for camera in cameras:
-                    product = RenderProduct(
-                        productName=name + "_lgroups",
-                        ext=ext,
-                        aov=aov_name,
-                        driver=ai_driver,
-                        # Always multichannel output
-                        multipart=True,
-                        camera=camera,
-                        colorspace=colorspace
-                    )
-                    products.append(product)
-            else:
-                value = self._get_attr(aov, "lightGroupsList")
-                if not value:
-                    continue
-                selected_light_groups = value.strip().split()
-                for light_group in selected_light_groups:
-                    # Render Product per selected light group
-                    aov_light_group_name = "{}_{}".format(name, light_group)
-                    for camera in cameras:
-                        product = RenderProduct(
-                            productName=aov_light_group_name,
-                            aov=aov_name,
-                            driver=ai_driver,
-                            ext=ext,
-                            camera=camera,
-                            colorspace=colorspace
-                        )
-                        products.append(product)
-
-        return products
-
-    def _get_colorspace(self, attribute):
-        """Resolve colorspace from Arnold settings."""
-
-        def _view_transform():
-            preferences = lib.get_color_management_preferences()
-            views_data = get_ocio_config_views(preferences["config"])
-            view_data = views_data[
-                "{}/{}".format(preferences["display"], preferences["view"])
-            ]
-            return view_data["colorspace"]
-
-        def _raw():
-            preferences = lib.get_color_management_preferences()
-            return preferences["rendering_space"]
-
-        resolved_values = {
-            "Raw": _raw,
-            "Use View Transform": _view_transform,
-            # Default. Same as Maya Preferences.
-            "Use Output Transform": lib.get_color_management_output_transform
-        }
-        return resolved_values[self._get_attr(attribute)]()
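Summarized, the light-group branches above yield product names along these
lines (sketch only; the AOV name and group labels are hypothetical):

    name = "diffuse"

    # globalAov enabled: the plain AOV product is written.
    products = [name]

    # Either all light groups merged into one "_lgroups" file...
    all_light_groups = True
    if all_light_groups:
        products.append(name + "_lgroups")
    else:
        # ...or one product per selected group, e.g. "front rim".
        products.extend(
            "{}_{}".format(name, grp) for grp in "front rim".split())

    # With all_light_groups: ['diffuse', 'diffuse_lgroups']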
-    def get_render_products(self):
-        """Get all AOVs.
-
-        See Also:
-            :func:`ARenderProducts.get_render_products()`
-
-        Raises:
-            :class:`AOVError`: If AOV cannot be determined.
-
-        """
-
-        if not cmds.ls("defaultArnoldRenderOptions", type="aiOptions"):
-            # this occurs when Render Setting windows was not opened yet. In
-            # such case there are no Arnold options created so query for AOVs
-            # will fail. We terminate here as there are no AOVs specified then.
-            # This state will most probably fail later on some Validator
-            # anyway.
-            return []
-
-        # check if camera token is in prefix. If so, and we have list of
-        # renderable cameras, generate render product for each and every
-        # of them.
-        cameras = [
-            self.sanitize_camera_name(c)
-            for c in self.get_renderable_cameras()
-        ]
-
-        default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
-        colorspace = self._get_colorspace(
-            "defaultArnoldDriver.colorManagement"
-        )
-        beauty_products = [
-            RenderProduct(
-                productName="beauty",
-                ext=default_ext,
-                driver="defaultArnoldDriver",
-                camera=camera,
-                colorspace=colorspace
-            ) for camera in cameras
-        ]
-
-        # AOVs > Legacy > Maya Render View > Mode
-        aovs_enabled = bool(
-            self._get_attr(
-                "defaultArnoldRenderOptions.aovMode", as_string=False)
-        )
-        if not aovs_enabled:
-            return beauty_products
-
-        # Common > File Output > Merge AOVs or <RenderPass>
-        # We don't need to check for Merge AOVs due to overridden
-        # `get_renderer_prefix()` behavior which forces <renderpass>
-        has_renderpass_token = (
-            "<renderpass>" in self.layer_data.filePrefix.lower()
-        )
-        if not has_renderpass_token:
-            for product in beauty_products:
-                product.multipart = True
-            return beauty_products
-
-        # AOVs are set to be rendered separately. We should expect
-        # <renderpass> token in path.
-
-        # handle aovs from references
-        use_ref_aovs = self.render_instance.data.get(
-            "useReferencedAovs", False) or False
-
-        aovs = cmds.ls(type="aiAOV")
-        if not use_ref_aovs:
-            ref_aovs = cmds.ls(type="aiAOV", referencedNodes=True)
-            aovs = list(set(aovs) - set(ref_aovs))
-
-        products = []
-
-        # Append the AOV products
-        for aov in aovs:
-            enabled = self._get_attr(aov, "enabled")
-            if not enabled:
-                continue
-
-            # For now stick to the legacy output format.
-            aov_products = self._get_aov_render_products(aov, cameras)
-            products.extend(aov_products)
-
-        if all(product.aov != "RGBA" for product in products):
-            # Append default 'beauty' as this is arnolds default.
-            # However, it is excluded whenever a RGBA pass is enabled.
-            # For legibility add the beauty layer as first entry
-            products += beauty_products
-
-        # TODO: Output Denoising AOVs?
-
-        return products
-
-
-class RenderProductsVray(ARenderProducts):
-    """Expected files for V-Ray renderer.
-
-    Notes:
-        - "Disabled" animation incorrectly returns frames in filename
-        - "Renumber Frames" is not supported
-
-    Reference:
-        vrayAddRenderElementImpl() in vrayCreateRenderElementsTab.mel
-
-    """
-    # todo: detect whether rendering with V-Ray GPU + whether AOV is supported
-
-    renderer = "vray"
-
-    def get_multipart(self):
-        multipart = False
-        image_format = self._get_attr("vraySettings.imageFormatStr")
-        if image_format == "exr (multichannel)":
-            multipart = True
-
-        return multipart
-
-    def get_renderer_prefix(self):
-        # type: () -> str
-        """Get image prefix for V-Ray.
-
-        This overrides :func:`ARenderProducts.get_renderer_prefix()` as
-        we must add `<aov>` token manually. This is done only for
-        non-multipart outputs, where `<aov>` token doesn't make sense.
-
-        See also:
-            :func:`ARenderProducts.get_renderer_prefix()`
-
-        """
-        prefix = super(RenderProductsVray, self).get_renderer_prefix()
-        if self.multipart:
-            return prefix
-        aov_separator = self._get_aov_separator()
-        prefix = "{}{}<aov>".format(prefix, aov_separator)
-        return prefix
-
-    def _get_aov_separator(self):
-        # type: () -> str
-        """Return the V-Ray AOV/Render Elements separator"""
-        return self._get_attr(
-            "vraySettings.fileNameRenderElementSeparator"
-        )
-
-    def _get_layer_data(self):
-        # type: () -> LayerMetadata
-        """Override to get vray specific extension."""
-        layer_data = super(RenderProductsVray, self)._get_layer_data()
-
-        default_ext = self._get_attr("vraySettings.imageFormatStr")
-        if default_ext in ["exr (multichannel)", "exr (deep)"]:
-            default_ext = "exr"
-        layer_data.defaultExt = default_ext
-        layer_data.padding = self._get_attr("vraySettings.fileNamePadding")
-
-        layer_data.aov_separator = self._get_aov_separator()
-
-        return layer_data
-
-    def get_render_products(self):
-        """Get all AOVs.
-
-        See Also:
-            :func:`ARenderProducts.get_render_products()`
-
-        """
-        if not cmds.ls("vraySettings", type="VRaySettingsNode"):
-            # this occurs when Render Setting windows was not opened yet. In
-            # such case there are no VRay options created so query for AOVs
-            # will fail. We terminate here as there are no AOVs specified then.
-            # This state will most probably fail later on some Validator
-            # anyway.
- return [] - - cameras = [ - self.sanitize_camera_name(c) - for c in self.get_renderable_cameras() - ] - - image_format_str = self._get_attr("vraySettings.imageFormatStr") - default_ext = image_format_str - if default_ext in {"exr (multichannel)", "exr (deep)"}: - default_ext = "exr" - - colorspace = lib.get_color_management_output_transform() - products = [] - - # add beauty as default when not disabled - dont_save_rgb = self._get_attr("vraySettings.dontSaveRgbChannel") - if not dont_save_rgb: - for camera in cameras: - products.append( - RenderProduct( - productName="", - ext=default_ext, - camera=camera, - colorspace=colorspace, - multipart=self.multipart - ) - ) - - # separate alpha file - separate_alpha = self._get_attr("vraySettings.separateAlpha") - if separate_alpha: - for camera in cameras: - products.append( - RenderProduct( - productName="Alpha", - ext=default_ext, - camera=camera, - colorspace=colorspace, - multipart=self.multipart - ) - ) - if self.multipart: - # AOVs are merged in m-channel file, only main layer is rendered - return products - - # handle aovs from references - use_ref_aovs = self.render_instance.data.get( - "useReferencedAovs", False) or False - - # this will have list of all aovs no matter if they are coming from - # reference or not. - aov_types = ["VRayRenderElement", "VRayRenderElementSet"] - aovs = cmds.ls(type=aov_types) - if not use_ref_aovs: - ref_aovs = cmds.ls(type=aov_types, referencedNodes=True) or [] - aovs = list(set(aovs) - set(ref_aovs)) - - for aov in aovs: - enabled = self._get_attr(aov, "enabled") - if not enabled: - continue - - class_type = self._get_attr(aov + ".vrayClassType") - if class_type == "LightMixElement": - # Special case which doesn't define a name by itself but - # instead seems to output multiple Render Products, - # specifically "Self_Illumination" and "Environment" - product_names = ["Self_Illumination", "Environment"] - for camera in cameras: - for name in product_names: - product = RenderProduct(productName=name, - ext=default_ext, - aov=aov, - camera=camera, - colorspace=colorspace) - products.append(product) - # Continue as we've processed this special case AOV - continue - - aov_name = self._get_vray_aov_name(aov) - for camera in cameras: - product = RenderProduct( - productName=aov_name, - ext=default_ext, - aov=aov, - camera=camera, - colorspace=colorspace - ) - products.append(product) - - return products - - def _get_vray_aov_attr(self, node, prefix): - """Get value for attribute that starts with key in name - - V-Ray AOVs have attribute names that include the type - of AOV in the attribute name, for example: - - vray_filename_rawdiffuse - - vray_filename_velocity - - vray_name_gi - - vray_explicit_name_extratex - - To simplify querying the "vray_filename" or "vray_name" - attributes we just find the first attribute that has - that particular "{prefix}_" in the attribute name. - - Args: - node (str): AOV node name - prefix (str): Prefix of the attribute name. - - Returns: - Value of the attribute if it exists, else None - - """ - attrs = cmds.listAttr(node, string="{}_*".format(prefix)) - if not attrs: - return None - - assert len(attrs) == 1, "Found more than one attribute: %s" % attrs - attr = attrs[0] - - return self._get_attr(node, attr) - - def _get_vray_aov_name(self, node): - """Get AOVs name from Vray. - - Args: - node (str): aov node name. - - Returns: - str: aov name. 
- - """ - - vray_explicit_name = self._get_vray_aov_attr(node, - "vray_explicit_name") - vray_filename = self._get_vray_aov_attr(node, "vray_filename") - vray_name = self._get_vray_aov_attr(node, "vray_name") - final_name = vray_explicit_name or vray_filename or vray_name or None - - class_type = self._get_attr(node, "vrayClassType") - if not vray_explicit_name: - # Explicit name takes precedence and overrides completely - # otherwise add the connected node names to the special cases - # Any namespace colon ':' gets replaced to underscore '_' - # so we sanitize using `sanitize_camera_name` - def _get_source_name(node, attr): - """Return sanitized name of input connection to attribute""" - plug = "{}.{}".format(node, attr) - connections = cmds.listConnections(plug, - source=True, - destination=False) - if connections: - return self.sanitize_camera_name(connections[0]) - - if class_type == "MaterialSelectElement": - # Name suffix is based on the connected material or set - attrs = [ - "vray_mtllist_mtlselect", - "vray_mtl_mtlselect" - ] - for attribute in attrs: - name = _get_source_name(node, attribute) - if name: - final_name += '_{}'.format(name) - break - else: - log.warning("Material Select Element has no " - "selected materials: %s", node) - - elif class_type == "ExtraTexElement": - # Name suffix is based on the connected textures - extratex_type = self._get_attr(node, "vray_type_extratex") - attr = { - 0: "vray_texture_extratex", - 1: "vray_float_texture_extratex", - 2: "vray_int_texture_extratex", - }.get(extratex_type) - name = _get_source_name(node, attr) - if name: - final_name += '_{}'.format(name) - else: - log.warning("Extratex Element has no incoming texture") - - assert final_name, "Output filename not defined for AOV: %s" % node - - return final_name - - -class RenderProductsRedshift(ARenderProducts): - """Expected files for Redshift renderer. - - Notes: - - `get_files()` only supports rendering with frames, like "animation" - - Attributes: - - unmerged_aovs (list): Name of aovs that are not merged into resulting - exr and we need them specified in Render Products output. - - """ - - renderer = "redshift" - unmerged_aovs = {"Cryptomatte"} - - def get_files(self, product): - # When outputting AOVs we need to replace Redshift specific AOV tokens - # with Maya render tokens for generating file sequences. We validate to - # a specific AOV fileprefix so we only need to account for one - # replacement. - if not product.multipart and product.driver: - file_prefix = self._get_attr(product.driver + ".filePrefix") - self.layer_data.filePrefix = file_prefix.replace( - "/", - "//" - ) - - return super(RenderProductsRedshift, self).get_files(product) - - def get_multipart(self): - # For Redshift we don't directly return upon forcing multilayer - # due to some AOVs still being written into separate files, - # like Cryptomatte. - # AOVs are merged in multi-channel file - multipart = False - force_layer = bool( - self._get_attr("redshiftOptions.exrForceMultilayer") - ) - if force_layer: - multipart = True - - return multipart - - def get_renderer_prefix(self): - """Get image prefix for Redshift. - - This overrides :func:`ARenderProducts.get_renderer_prefix()` as - we must add `` token manually. This is done only for - non-multipart outputs, where `` token doesn't make sense. 
-    def get_multipart(self):
-        # For Redshift we don't directly return upon forcing multilayer
-        # due to some AOVs still being written into separate files,
-        # like Cryptomatte.
-        # AOVs are merged in multi-channel file
-        multipart = False
-        force_layer = bool(
-            self._get_attr("redshiftOptions.exrForceMultilayer")
-        )
-        if force_layer:
-            multipart = True
-
-        return multipart
-
-    def get_renderer_prefix(self):
-        """Get image prefix for Redshift.
-
-        This overrides :func:`ARenderProducts.get_renderer_prefix()` as
-        we must add `<aov>` token manually. This is done only for
-        non-multipart outputs, where `<aov>` token doesn't make sense.
-
-        See also:
-            :func:`ARenderProducts.get_renderer_prefix()`
-
-        """
-        prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
-        if self.multipart:
-            return prefix
-        separator = self.extract_separator(prefix)
-        prefix = "{}{}<aov>".format(prefix, separator or "_")
-        return prefix
-
-    def get_render_products(self):
-        """Get all AOVs.
-
-        See Also:
-            :func:`ARenderProducts.get_render_products()`
-
-        """
-
-        if not cmds.ls("redshiftOptions", type="RedshiftOptions"):
-            # this occurs when Render Setting windows was not opened yet. In
-            # such case there are no Redshift options created so query for AOVs
-            # will fail. We terminate here as there are no AOVs specified then.
-            # This state will most probably fail later on some Validator
-            # anyway.
-            return []
-
-        cameras = [
-            self.sanitize_camera_name(c)
-            for c in self.get_renderable_cameras()
-        ]
-
-        # Get Redshift Extension from image format
-        image_format = self._get_attr("redshiftOptions.imageFormat")  # integer
-        ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
-
-        use_ref_aovs = self.render_instance.data.get(
-            "useReferencedAovs", False) or False
-
-        aovs = cmds.ls(type="RedshiftAOV")
-        if not use_ref_aovs:
-            ref_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=True)
-            aovs = list(set(aovs) - set(ref_aovs))
-
-        products = []
-        global_aov_enabled = bool(
-            self._get_attr("redshiftOptions.aovGlobalEnableMode", as_string=False)
-        )
-        colorspace = lib.get_color_management_output_transform()
-        if not global_aov_enabled:
-            # only beauty output
-            for camera in cameras:
-                products.insert(0,
-                                RenderProduct(productName="",
-                                              ext=ext,
-                                              multipart=self.multipart,
-                                              camera=camera,
-                                              colorspace=colorspace))
-            return products
-
-        light_groups_enabled = False
-        has_beauty_aov = False
-
-        for aov in aovs:
-            enabled = self._get_attr(aov, "enabled")
-            if not enabled:
-                continue
-
-            aov_type = self._get_attr(aov, "aovType")
-            if self.multipart and aov_type not in self.unmerged_aovs:
-                continue
-
-            # Any AOVs that still get processed, like Cryptomatte
-            # by themselves are not multipart files.
-
-            # Redshift skips rendering of masterlayer without AOV suffix
-            # when a Beauty AOV is rendered. It overrides the main layer.
- if aov_type == "Beauty": - has_beauty_aov = True - - aov_name = self._get_attr(aov, "name") - - # Support light Groups - light_groups = [] - if self._get_attr(aov, "supportsLightGroups"): - all_light_groups = self._get_attr(aov, "allLightGroups") - if all_light_groups: - # All light groups is enabled - light_groups = self._get_redshift_light_groups() - else: - value = self._get_attr(aov, "lightGroupList") - # note: string value can return None when never set - if value: - selected_light_groups = value.strip().split() - light_groups = selected_light_groups - - for light_group in light_groups: - aov_light_group_name = "{}_{}".format(aov_name, - light_group) - for camera in cameras: - product = RenderProduct( - productName=aov_light_group_name, - aov=aov_name, - ext=ext, - multipart=False, - camera=camera, - driver=aov, - colorspace=colorspace) - products.append(product) - - if light_groups: - light_groups_enabled = True - - # Redshift AOV Light Select always renders the global AOV - # even when light groups are present so we don't need to - # exclude it when light groups are active - for camera in cameras: - product = RenderProduct(productName=aov_name, - aov=aov_name, - ext=ext, - multipart=False, - camera=camera, - driver=aov, - colorspace=colorspace) - products.append(product) - - # When a Beauty AOV is added manually, it will be rendered as - # 'Beauty_other' in file name and "standard" beauty will have - # 'Beauty' in its name. When disabled, standard output will be - # without `Beauty`. Except when using light groups. - if light_groups_enabled: - return products - - beauty_name = "BeautyAux" if has_beauty_aov else "" - for camera in cameras: - products.insert(0, - RenderProduct(productName=beauty_name, - ext=ext, - multipart=self.multipart, - camera=camera, - colorspace=colorspace)) - - return products - - @staticmethod - def _get_redshift_light_groups(): - return sorted(mel.eval("redshiftAllAovLightGroups")) - - -class RenderProductsRenderman(ARenderProducts): - """Expected files for Renderman renderer. - - Warning: - This is very rudimentary and needs more love and testing. - """ - - renderer = "renderman" - unmerged_aovs = {"PxrCryptomatte"} - - def get_multipart(self): - # Implemented as display specific in "get_render_products". - return False - - def get_render_products(self): - """Get all AOVs. - - See Also: - :func:`ARenderProducts.get_render_products()` - - """ - from rfm2.api.displays import get_displays # noqa - - colorspace = lib.get_color_management_output_transform() - - cameras = [ - self.sanitize_camera_name(c) - for c in self.get_renderable_cameras() - ] - - if not cameras: - cameras = [ - self.sanitize_camera_name( - self.get_renderable_cameras()[0]) - ] - products = [] - - # NOTE: This is guessing extensions from renderman display types. - # Some of them are just framebuffers, d_texture format can be - # set in display setting. We set those now to None, but it - # should be handled more gracefully. - display_types = { - "d_deepexr": "exr", - "d_it": None, - "d_null": None, - "d_openexr": "exr", - "d_png": "png", - "d_pointcloud": "ptc", - "d_targa": "tga", - "d_texture": None, - "d_tiff": "tif" - } - - displays = get_displays(override_dst="render")["displays"] - for name, display in displays.items(): - enabled = display["params"]["enable"]["value"] - if not enabled: - continue - - # Skip display types not producing any file output. - # Is there a better way to do it? 
-            if not display_types.get(display["driverNode"]["type"]):
-                continue
-
-            has_cryptomatte = cmds.ls(type=self.unmerged_aovs)
-            matte_enabled = False
-            if has_cryptomatte:
-                for cryptomatte in has_cryptomatte:
-                    cryptomatte_aov = cryptomatte
-                    matte_name = "cryptomatte"
-                    rman_globals = cmds.listConnections(cryptomatte +
-                                                        ".message")
-                    if rman_globals:
-                        matte_enabled = True
-
-            aov_name = name
-            if aov_name == "rmanDefaultDisplay":
-                aov_name = "beauty"
-
-            extensions = display_types.get(
-                display["driverNode"]["type"], "exr")
-
-            for camera in cameras:
-                # Create render product and set it as multipart only on
-                # display types supporting it. In all other cases, Renderman
-                # will create separate output per channel.
-                if display["driverNode"]["type"] in ["d_openexr", "d_deepexr", "d_tiff"]:  # noqa
-                    product = RenderProduct(
-                        productName=aov_name,
-                        ext=extensions,
-                        camera=camera,
-                        multipart=True,
-                        colorspace=colorspace
-                    )
-
-                    if has_cryptomatte and matte_enabled:
-                        cryptomatte = RenderProduct(
-                            productName=matte_name,
-                            aov=cryptomatte_aov,
-                            ext=extensions,
-                            camera=camera,
-                            multipart=True,
-                            colorspace=colorspace
-                        )
-                else:
-                    # this code should handle the case where no multipart
-                    # capable format is selected. But since it involves
-                    # shady logic to determine what channel become what
-                    # lets not do that as all productions will use exr anyway.
-                    """
-                    for channel in display['params']['displayChannels']['value']:  # noqa
-                        product = RenderProduct(
-                            productName="{}_{}".format(aov_name, channel),
-                            ext=extensions,
-                            camera=camera,
-                            multipart=False
-                        )
-                    """
-                    raise UnsupportedImageFormatException(
-                        "Only exr, deep exr and tiff formats are supported.")
-
-                products.append(product)
-
-        if has_cryptomatte and matte_enabled:
-            products.append(cryptomatte)
-
-        return products
-
-    def get_files(self, product):
-        """Get expected files.
-
-        """
-        files = super(RenderProductsRenderman, self).get_files(product)
-
-        layer_data = self.layer_data
-        new_files = []
-
-        resolved_image_dir = re.sub("<scene>", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE)  # noqa: E501
-        resolved_image_dir = re.sub("<layer>", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE)  # noqa: E501
-        for file in files:
-            new_file = "{}/{}".format(resolved_image_dir, file)
-            new_files.append(new_file)
-
-        return new_files
-
-
-class RenderProductsMayaHardware(ARenderProducts):
-    """Expected files for MayaHardware renderer."""
-
-    renderer = "mayahardware2"
-
-    extensions = [
-        {"label": "JPEG", "index": 8, "extension": "jpg"},
-        {"label": "PNG", "index": 32, "extension": "png"},
-        {"label": "EXR(exr)", "index": 40, "extension": "exr"}
-    ]
-
-    def get_multipart(self):
-        # MayaHardware does not support multipart EXRs.
-        return False
-
-    def _get_extension(self, value):
-        result = None
-        if isinstance(value, int):
-            extensions = {
-                extension["index"]: extension["extension"]
-                for extension in self.extensions
-            }
-            try:
-                result = extensions[value]
-            except KeyError:
-                raise NotImplementedError(
-                    "Could not find extension for {}".format(value)
-                )
-
-        if isinstance(value, six.string_types):
-            extensions = {
-                extension["label"]: extension["extension"]
-                for extension in self.extensions
-            }
-            try:
-                result = extensions[value]
-            except KeyError:
-                raise NotImplementedError(
-                    "Could not find extension for {}".format(value)
-                )
-
-        if not result:
-            raise NotImplementedError(
-                "Could not find extension for {}".format(value)
-            )
-
-        return result
-
-    def get_render_products(self):
-        """Get all AOVs.
- See Also: - :func:`ARenderProducts.get_render_products()` - """ - ext = self._get_extension( - self._get_attr("defaultRenderGlobals.imageFormat") - ) - - products = [] - for cam in self.get_renderable_cameras(): - product = RenderProduct( - productName="beauty", - ext=ext, - camera=cam, - colorspace=lib.get_color_management_output_transform() - ) - products.append(product) - - return products - - -class AOVError(Exception): - """Custom exception for determining AOVs.""" - - -class UnsupportedRendererException(Exception): - """Custom exception. - - Raised when requesting data from unsupported renderer. - """ - - -class UnsupportedImageFormatException(Exception): - """Custom exception to report unsupported output image format.""" diff --git a/server_addon/maya/client/ayon_maya/api/lib_rendersettings.py b/server_addon/maya/client/ayon_maya/api/lib_rendersettings.py deleted file mode 100644 index f7f3f1d746..0000000000 --- a/server_addon/maya/client/ayon_maya/api/lib_rendersettings.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -"""Class for handling Render Settings.""" -import six -import sys - -from ayon_core.lib import Logger -from ayon_core.settings import get_project_settings - -from ayon_core.pipeline import CreatorError, get_current_project_name -from ayon_core.pipeline.context_tools import get_current_folder_entity -from ayon_maya.api.lib import reset_frame_range - - -class RenderSettings(object): - - _image_prefix_nodes = { - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'rmanGlobals.imageFileFormat', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix' - } - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - - log = Logger.get_logger("RenderSettings") - - @classmethod - def get_image_prefix_attr(cls, renderer): - return cls._image_prefix_nodes[renderer] - - @staticmethod - def get_padding_attr(renderer): - """Return attribute for renderer that defines frame padding amount""" - if renderer == "vray": - return "vraySettings.fileNamePadding" - else: - return "defaultRenderGlobals.extensionPadding" - - def __init__(self, project_settings=None): - if not project_settings: - project_settings = get_project_settings( - get_current_project_name() - ) - render_settings = project_settings["maya"]["render_settings"] - image_prefixes = { - "vray": render_settings["vray_renderer"]["image_prefix"], - "arnold": render_settings["arnold_renderer"]["image_prefix"], - "renderman": render_settings["renderman_renderer"]["image_prefix"], - "redshift": render_settings["redshift_renderer"]["image_prefix"] - } - - # TODO probably should be stored to more explicit attribute - # Renderman only - renderman_settings = render_settings["renderman_renderer"] - _image_dir = { - "renderman": renderman_settings["image_dir"], - "cryptomatte": renderman_settings["cryptomatte_dir"], - "imageDisplay": renderman_settings["imageDisplay_dir"], - "watermark": renderman_settings["watermark_dir"] - } - self._image_prefixes = image_prefixes - self._image_dir = _image_dir - self._project_settings = project_settings - - def set_default_renderer_settings(self, renderer=None): - """Set basic settings based on renderer.""" - # Not all hosts can import this module. 
- from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - if not renderer: - renderer = cmds.getAttr( - 'defaultRenderGlobals.currentRenderer').lower() - - folder_entity = get_current_folder_entity() - folder_attributes = folder_entity["attrib"] - # project_settings/maya/create/CreateRender/aov_separator - try: - aov_separator = self._aov_chars[( - self._project_settings["maya"] - ["render_settings"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - reset_frame = self._project_settings["maya"]["render_settings"]["reset_current_frame"] # noqa - - if reset_frame: - start_frame = cmds.getAttr("defaultRenderGlobals.startFrame") - cmds.currentTime(start_frame, edit=True) - - if renderer in self._image_prefix_nodes: - prefix = self._image_prefixes[renderer] - prefix = prefix.replace("{aov_separator}", aov_separator) - cmds.setAttr(self._image_prefix_nodes[renderer], - prefix, type="string") # noqa - else: - print("{0} isn't a supported renderer to autoset settings.".format(renderer)) # noqa - # TODO: handle not having res values in the doc - width = folder_attributes.get("resolutionWidth") - height = folder_attributes.get("resolutionHeight") - - if renderer == "arnold": - # set renderer settings for Arnold from project settings - self._set_arnold_settings(width, height) - - if renderer == "vray": - self._set_vray_settings(aov_separator, width, height) - - if renderer == "redshift": - self._set_redshift_settings(width, height) - mel.eval("redshiftUpdateActiveAovList") - - if renderer == "renderman": - image_dir = self._image_dir["renderman"] - cmds.setAttr("rmanGlobals.imageOutputDir", - image_dir, type="string") - self._set_renderman_settings(width, height, - aov_separator) - - def _set_arnold_settings(self, width, height): - """Sets settings for Arnold.""" - from mtoa.core import createOptions # noqa - from mtoa.aovs import AOVInterface # noqa - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - createOptions() - render_settings = self._project_settings["maya"]["render_settings"] - arnold_render_presets = render_settings["arnold_renderer"] # noqa - # Force resetting settings and AOV list to avoid having to deal with - # AOV checking logic, for now. - # This is a work around because the standard - # function to revert render settings does not reset AOVs list in MtoA - # Fetch current aovs in case there's any. 
- current_aovs = AOVInterface().getAOVs() - remove_aovs = render_settings["remove_aovs"] - if remove_aovs: - # Remove fetched AOVs - AOVInterface().removeAOVs(current_aovs) - mel.eval("unifiedRenderGlobalsRevertToDefault") - img_ext = arnold_render_presets["image_format"] - img_prefix = arnold_render_presets["image_prefix"] - aovs = arnold_render_presets["aov_list"] - img_tiled = arnold_render_presets["tiled"] - multi_exr = arnold_render_presets["multilayer_exr"] - additional_options = arnold_render_presets["additional_options"] - for aov in aovs: - if aov in current_aovs and not remove_aovs: - continue - AOVInterface('defaultArnoldRenderOptions').addAOV(aov) - - cmds.setAttr("defaultResolution.width", width) - cmds.setAttr("defaultResolution.height", height) - - self._set_global_output_settings() - - cmds.setAttr( - "defaultRenderGlobals.imageFilePrefix", img_prefix, type="string") - - cmds.setAttr( - "defaultArnoldDriver.ai_translator", img_ext, type="string") - - cmds.setAttr( - "defaultArnoldDriver.exrTiled", img_tiled) - - cmds.setAttr( - "defaultArnoldDriver.mergeAOVs", multi_exr) - self._additional_attribs_setter(additional_options) - reset_frame_range(playback=False, fps=False, render=True) - - def _set_redshift_settings(self, width, height): - """Sets settings for Redshift.""" - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - render_settings = self._project_settings["maya"]["render_settings"] - redshift_render_presets = render_settings["redshift_renderer"] - - remove_aovs = render_settings["remove_aovs"] - all_rs_aovs = cmds.ls(type='RedshiftAOV') - if remove_aovs: - for aov in all_rs_aovs: - enabled = cmds.getAttr("{}.enabled".format(aov)) - if enabled: - cmds.delete(aov) - - redshift_aovs = redshift_render_presets["aov_list"] - # list all the aovs - all_rs_aovs = cmds.ls(type='RedshiftAOV') - for rs_aov in redshift_aovs: - rs_layername = "rsAov_{}".format(rs_aov.replace(" ", "")) - if rs_layername in all_rs_aovs: - continue - cmds.rsCreateAov(type=rs_aov) - # update the AOV list - mel.eval("redshiftUpdateActiveAovList") - - rs_p_engine = redshift_render_presets["primary_gi_engine"] - rs_s_engine = redshift_render_presets["secondary_gi_engine"] - - if int(rs_p_engine) or int(rs_s_engine) != 0: - cmds.setAttr("redshiftOptions.GIEnabled", 1) - if int(rs_p_engine) == 0: - # reset the primary GI Engine as default - cmds.setAttr("redshiftOptions.primaryGIEngine", 4) - if int(rs_s_engine) == 0: - # reset the secondary GI Engine as default - cmds.setAttr("redshiftOptions.secondaryGIEngine", 2) - else: - cmds.setAttr("redshiftOptions.GIEnabled", 0) - - cmds.setAttr("redshiftOptions.primaryGIEngine", int(rs_p_engine)) - cmds.setAttr("redshiftOptions.secondaryGIEngine", int(rs_s_engine)) - - additional_options = redshift_render_presets["additional_options"] - ext = redshift_render_presets["image_format"] - img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"] - img_ext = img_exts.index(ext) - - self._set_global_output_settings() - cmds.setAttr("redshiftOptions.imageFormat", img_ext) - cmds.setAttr("defaultResolution.width", width) - cmds.setAttr("defaultResolution.height", height) - self._additional_attribs_setter(additional_options) - - def _set_renderman_settings(self, width, height, aov_separator): - """Sets settings for Renderman""" - # Not all hosts can import this module. 
- from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - rman_render_presets = ( - self._project_settings - ["maya"] - ["render_settings"] - ["renderman_renderer"] - ) - display_filters = rman_render_presets["display_filters"] - d_filters_number = len(display_filters) - for i in range(d_filters_number): - d_node = cmds.ls(typ=display_filters[i]) - if len(d_node) > 0: - filter_nodes = d_node[0] - else: - filter_nodes = cmds.createNode(display_filters[i]) - - cmds.connectAttr(filter_nodes + ".message", - "rmanGlobals.displayFilters[%i]" % i, - force=True) - if filter_nodes.startswith("PxrImageDisplayFilter"): - imageDisplay_dir = self._image_dir["imageDisplay"] - imageDisplay_dir = imageDisplay_dir.replace("{aov_separator}", - aov_separator) - cmds.setAttr(filter_nodes + ".filename", - imageDisplay_dir, type="string") - - sample_filters = rman_render_presets["sample_filters"] - s_filters_number = len(sample_filters) - for n in range(s_filters_number): - s_node = cmds.ls(typ=sample_filters[n]) - if len(s_node) > 0: - filter_nodes = s_node[0] - else: - filter_nodes = cmds.createNode(sample_filters[n]) - - cmds.connectAttr(filter_nodes + ".message", - "rmanGlobals.sampleFilters[%i]" % n, - force=True) - - if filter_nodes.startswith("PxrCryptomatte"): - matte_dir = self._image_dir["cryptomatte"] - matte_dir = matte_dir.replace("{aov_separator}", - aov_separator) - cmds.setAttr(filter_nodes + ".filename", - matte_dir, type="string") - elif filter_nodes.startswith("PxrWatermarkFilter"): - watermark_dir = self._image_dir["watermark"] - watermark_dir = watermark_dir.replace("{aov_separator}", - aov_separator) - cmds.setAttr(filter_nodes + ".filename", - watermark_dir, type="string") - - additional_options = rman_render_presets["additional_options"] - - self._set_global_output_settings() - cmds.setAttr("defaultResolution.width", width) - cmds.setAttr("defaultResolution.height", height) - self._additional_attribs_setter(additional_options) - - def _set_vray_settings(self, aov_separator, width, height): - # type: (str, int, int) -> None - """Sets important settings for Vray.""" - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - - settings = cmds.ls(type="VRaySettingsNode") - node = settings[0] if settings else cmds.createNode("VRaySettingsNode") - render_settings = self._project_settings["maya"]["render_settings"] - vray_render_presets = render_settings["vray_renderer"] - # vrayRenderElement - remove_aovs = render_settings["remove_aovs"] - all_vray_aovs = cmds.ls(type='VRayRenderElement') - lightSelect_aovs = cmds.ls(type='VRayRenderElementSet') - if remove_aovs: - for aov in all_vray_aovs: - # remove all aovs except LightSelect - enabled = cmds.getAttr("{}.enabled".format(aov)) - if enabled: - cmds.delete(aov) - # remove LightSelect - for light_aovs in lightSelect_aovs: - light_enabled = cmds.getAttr("{}.enabled".format(light_aovs)) - if light_enabled: - cmds.delete(lightSelect_aovs) - - vray_aovs = vray_render_presets["aov_list"] - for renderlayer in vray_aovs: - renderElement = "vrayAddRenderElement {}".format(renderlayer) - RE_name = mel.eval(renderElement) - # if there is more than one same render element - if RE_name.endswith("1"): - cmds.delete(RE_name) - # Set aov separator - # First we need to explicitly set the UI items in Render Settings - # because that is also what V-Ray updates to when that Render Settings - # UI did initialize before and refreshes again. 
- MENU = "vrayRenderElementSeparator" - if cmds.optionMenuGrp(MENU, query=True, exists=True): - items = cmds.optionMenuGrp(MENU, query=True, ill=True) - separators = [cmds.menuItem(i, query=True, label=True) for i in items] # noqa: E501 - try: - sep_idx = separators.index(aov_separator) - except ValueError: - six.reraise( - CreatorError, - CreatorError( - "AOV character {} not in {}".format( - aov_separator, separators)), - sys.exc_info()[2]) - - cmds.optionMenuGrp(MENU, edit=True, select=sep_idx + 1) - - # Set the render element attribute as string. This is also what V-Ray - # sets whenever the `vrayRenderElementSeparator` menu items switch - cmds.setAttr( - "{}.fileNameRenderElementSeparator".format(node), - aov_separator, - type="string" - ) - - # Set render file format to exr - ext = vray_render_presets["image_format"] - cmds.setAttr("{}.imageFormatStr".format(node), ext, type="string") - - # animType - cmds.setAttr("{}.animType".format(node), 1) - - # resolution - cmds.setAttr("{}.width".format(node), width) - cmds.setAttr("{}.height".format(node), height) - - additional_options = vray_render_presets["additional_options"] - - self._additional_attribs_setter(additional_options) - - @staticmethod - def _set_global_output_settings(): - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - # enable animation - cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) - cmds.setAttr("defaultRenderGlobals.animation", 1) - cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) - cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) - - def _additional_attribs_setter(self, additional_attribs): - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - for item in additional_attribs: - attribute = item["attribute"] - value = item["value"] - attribute = str(attribute) # ensure str conversion from settings - attribute_type = cmds.getAttr(attribute, type=True) - if attribute_type in {"long", "bool"}: - cmds.setAttr(attribute, int(value)) - elif attribute_type == "string": - cmds.setAttr(attribute, str(value), type="string") - elif attribute_type in {"double", "doubleAngle", "doubleLinear"}: - cmds.setAttr(attribute, float(value)) - else: - self.log.error( - "Attribute {attribute} can not be set due to unsupported " - "type: {attribute_type}".format( - attribute=attribute, - attribute_type=attribute_type) - ) diff --git a/server_addon/maya/client/ayon_maya/api/lib_rendersetup.py b/server_addon/maya/client/ayon_maya/api/lib_rendersetup.py deleted file mode 100644 index d93e6af0e2..0000000000 --- a/server_addon/maya/client/ayon_maya/api/lib_rendersetup.py +++ /dev/null @@ -1,417 +0,0 @@ -# -*- coding: utf-8 -*- -"""Code to get attributes from render layer without switching to it. 
- -https://github.com/Colorbleed/colorbleed-config/blob/acre/colorbleed/maya/lib_rendersetup.py -Credits: Roy Nieterau (BigRoy) / Colorbleed -Modified for use in AYON - -""" - -from maya import cmds -import maya.api.OpenMaya as om -import logging - -import maya.app.renderSetup.model.utils as utils -from maya.app.renderSetup.model import renderSetup -from maya.app.renderSetup.model.override import ( - AbsOverride, - RelOverride, - UniqueOverride -) - -from ayon_maya.api.lib import get_attribute - -EXACT_MATCH = 0 -PARENT_MATCH = 1 -CLIENT_MATCH = 2 - -DEFAULT_RENDER_LAYER = "defaultRenderLayer" - -log = logging.getLogger(__name__) - - -def get_rendersetup_layer(layer): - """Return render setup layer name. - - This also converts names from legacy renderLayer node name to render setup - name. - - Note: `defaultRenderLayer` is not a renderSetupLayer node but it is however - the valid layer name for Render Setup - so we return that as is. - - Example: - >>> for legacy_layer in cmds.ls(type="renderLayer"): - >>> layer = get_rendersetup_layer(legacy_layer) - - Returns: - str or None: Returns renderSetupLayer node name if `layer` is a valid - layer name in legacy renderlayers or render setup layers. - Returns None if the layer can't be found or Render Setup is - currently disabled. - - - """ - if layer == DEFAULT_RENDER_LAYER: - # defaultRenderLayer doesn't have a `renderSetupLayer` - return layer - - if not cmds.mayaHasRenderSetup(): - return None - - if not cmds.objExists(layer): - return None - - if cmds.nodeType(layer) == "renderSetupLayer": - return layer - - # By default Render Setup renames the legacy renderlayer - # to `rs_` but lets not rely on that as the - # layer node can be renamed manually - connections = cmds.listConnections(layer + ".message", - type="renderSetupLayer", - exactType=True, - source=False, - destination=True, - plugs=True) or [] - return next((conn.split(".", 1)[0] for conn in connections - if conn.endswith(".legacyRenderLayer")), None) - - -def get_attr_in_layer(node_attr, layer, as_string=True): - """Return attribute value in Render Setup layer. - - This will only work for attributes which can be - retrieved with `maya.cmds.getAttr` and for which - Relative and Absolute overrides are applicable. - - Examples: - >>> get_attr_in_layer("defaultResolution.width", layer="layer1") - >>> get_attr_in_layer("defaultRenderGlobals.startFrame", layer="layer") - >>> get_attr_in_layer("transform.translate", layer="layer3") - - Args: - attr (str): attribute name as 'node.attribute' - layer (str): layer name - - Returns: - object: attribute value in layer - - """ - - def _layer_needs_update(layer): - """Return whether layer needs updating.""" - # Use `getattr` as e.g. 
DEFAULT_RENDER_LAYER does not have - # the attribute - return getattr(layer, "needsMembershipUpdate", False) or \ - getattr(layer, "needsApplyUpdate", False) - - def get_default_layer_value(node_attr_): - """Return attribute value in `DEFAULT_RENDER_LAYER`.""" - inputs = cmds.listConnections(node_attr_, - source=True, - destination=False, - # We want to skip conversion nodes since - # an override to `endFrame` could have - # a `unitToTimeConversion` node - # in-between - skipConversionNodes=True, - type="applyOverride") or [] - if inputs: - override = inputs[0] - history_overrides = cmds.ls(cmds.listHistory(override, - pruneDagObjects=True), - type="applyOverride") - node = history_overrides[-1] if history_overrides else override - node_attr_ = node + ".original" - - return get_attribute(node_attr_, asString=as_string) - - layer = get_rendersetup_layer(layer) - rs = renderSetup.instance() - current_layer = rs.getVisibleRenderLayer() - if current_layer.name() == layer: - - # Ensure layer is up-to-date - if _layer_needs_update(current_layer): - try: - rs.switchToLayer(current_layer) - except RuntimeError: - # Some cases can cause errors on switching - # the first time with Render Setup layers - # e.g. different overrides to compounds - # and its children plugs. So we just force - # it another time. If it then still fails - # we will let it error out. - rs.switchToLayer(current_layer) - - return get_attribute(node_attr, asString=as_string) - - overrides = get_attr_overrides(node_attr, layer) - default_layer_value = get_default_layer_value(node_attr) - if not overrides: - return default_layer_value - - value = default_layer_value - for match, layer_override, index in overrides: - if isinstance(layer_override, AbsOverride): - # Absolute override - value = get_attribute(layer_override.name() + ".attrValue") - if match == EXACT_MATCH: - # value = value - pass - elif match == PARENT_MATCH: - value = value[index] - elif match == CLIENT_MATCH: - value[index] = value - - elif isinstance(layer_override, RelOverride): - # Relative override - # Value = Original * Multiply + Offset - multiply = get_attribute(layer_override.name() + ".multiply") - offset = get_attribute(layer_override.name() + ".offset") - - if match == EXACT_MATCH: - value = value * multiply + offset - elif match == PARENT_MATCH: - value = value * multiply[index] + offset[index] - elif match == CLIENT_MATCH: - value[index] = value[index] * multiply + offset - - else: - raise TypeError("Unsupported override: %s" % layer_override) - - return value - - -def get_attr_overrides(node_attr, layer, - skip_disabled=True, - skip_local_render=True, - stop_at_absolute_override=True): - """Return all Overrides applicable to the attribute. - - Overrides are returned as a 3-tuple: - (Match, Override, Index) - - Match: - This is any of EXACT_MATCH, PARENT_MATCH, CLIENT_MATCH - and defines whether the override is exactly on the - plug, on the parent or on a child plug. - - Override: - This is the RenderSetup Override instance. - - Index: - This is the Plug index under the parent or for - the child that matches. The EXACT_MATCH index will - always be None. For PARENT_MATCH the index is which - index the plug is under the parent plug. For CLIENT_MATCH - the index is which child index matches the plug. - - Args: - node_attr (str): attribute name as 'node.attribute' - layer (str): layer name - skip_disabled (bool): exclude disabled overrides - skip_local_render (bool): exclude overrides marked - as local render. 
- stop_at_absolute_override: exclude overrides prior - to the last absolute override as they have - no influence on the resulting value. - - Returns: - list: Ordered Overrides in order of strength - - """ - - def get_mplug_children(plug): - """Return children MPlugs of compound `MPlug`.""" - children = [] - if plug.isCompound: - for i in range(plug.numChildren()): - children.append(plug.child(i)) - return children - - def get_mplug_names(mplug): - """Return long and short name of `MPlug`.""" - long_name = mplug.partialName(useLongNames=True) - short_name = mplug.partialName(useLongNames=False) - return {long_name, short_name} - - def iter_override_targets(override): - try: - for target in override._targets(): - yield target - except AssertionError: - # Workaround: There is a bug where the private `_targets()` method - # fails on some attribute plugs. For example overrides - # to the defaultRenderGlobals.endFrame - # (Tested in Maya 2020.2) - log.debug("Workaround for %s" % override) - from maya.app.renderSetup.common.utils import findPlug - - attr = override.attributeName() - if isinstance(override, UniqueOverride): - node = override.targetNodeName() - yield findPlug(node, attr) - else: - nodes = override.parent().selector().nodes() - for node in nodes: - if cmds.attributeQuery(attr, node=node, exists=True): - yield findPlug(node, attr) - - # Get the MPlug for the node.attr - sel = om.MSelectionList() - sel.add(node_attr) - plug = sel.getPlug(0) - - layer = get_rendersetup_layer(layer) - if layer == DEFAULT_RENDER_LAYER: - # DEFAULT_RENDER_LAYER will never have overrides - # since it's the default layer - return [] - - rs_layer = renderSetup.instance().getRenderLayer(layer) - if rs_layer is None: - # Renderlayer does not exist - return - - # Get any parent or children plugs as we also - # want to include them in the attribute match - # for overrides - parent = plug.parent() if plug.isChild else None - parent_index = None - if parent: - parent_index = get_mplug_children(parent).index(plug) - - children = get_mplug_children(plug) - - # Create lookup for the attribute by both long - # and short names - attr_names = get_mplug_names(plug) - for child in children: - attr_names.update(get_mplug_names(child)) - if parent: - attr_names.update(get_mplug_names(parent)) - - # Get all overrides of the layer - # And find those that are relevant to the attribute - plug_overrides = [] - - # Iterate over the overrides in reverse so we get the last - # overrides first and can "break" whenever an absolute - # override is reached - layer_overrides = list(utils.getOverridesRecursive(rs_layer)) - for layer_override in reversed(layer_overrides): - - if skip_disabled and not layer_override.isEnabled(): - # Ignore disabled overrides - continue - - if skip_local_render and layer_override.isLocalRender(): - continue - - # The targets list can be very large so we'll do - # a quick filter by attribute name to detect whether - # it matches the attribute name, or its parent or child - if layer_override.attributeName() not in attr_names: - continue - - override_match = None - for override_plug in iter_override_targets(layer_override): - - override_match = None - if plug == override_plug: - override_match = (EXACT_MATCH, layer_override, None) - - elif parent and override_plug == parent: - override_match = (PARENT_MATCH, layer_override, parent_index) - - elif children and override_plug in children: - child_index = children.index(override_plug) - override_match = (CLIENT_MATCH, layer_override, child_index) - - if 
override_match: - plug_overrides.append(override_match) - break - - if ( - override_match and - stop_at_absolute_override and - isinstance(layer_override, AbsOverride) and - # When the override is only on a child plug then it doesn't - # override the entire value so we not stop at this override - not override_match[0] == CLIENT_MATCH - ): - # If override is absolute override, then BREAK out - # of parent loop we don't need to look any further as - # this is the absolute override - break - - return reversed(plug_overrides) - - -def get_shader_in_layer(node, layer): - """Return the assigned shader in a renderlayer without switching layers. - - This has been developed and tested for Legacy Renderlayers and *not* for - Render Setup. - - Note: This will also return the shader for any face assignments, however - it will *not* return the components they are assigned to. This could - be implemented, but since Maya's renderlayers are famous for breaking - with face assignments there has been no need for this function to - support that. - - Returns: - list: The list of assigned shaders in the given layer. - - """ - - def _get_connected_shader(plug): - """Return current shader""" - return cmds.listConnections(plug, - source=False, - destination=True, - plugs=False, - connections=False, - type="shadingEngine") or [] - - # We check the instObjGroups (shader connection) for layer overrides. - plug = node + ".instObjGroups" - - # Ignore complex query if we're in the layer anyway (optimization) - current_layer = cmds.editRenderLayerGlobals(query=True, - currentRenderLayer=True) - if layer == current_layer: - return _get_connected_shader(plug) - - connections = cmds.listConnections(plug, - plugs=True, - source=False, - destination=True, - type="renderLayer") or [] - connections = filter(lambda x: x.endswith(".outPlug"), connections) - if not connections: - # If no overrides anywhere on the shader, just get the current shader - return _get_connected_shader(plug) - - def _get_override(connections, layer): - """Return the overridden connection for that layer in connections""" - # If there's an override on that layer, return that. 
- for connection in connections: - if (connection.startswith(layer + ".outAdjustments") and - connection.endswith(".outPlug")): - - # This is a shader override on that layer so get the shader - # connected to .outValue of the .outAdjustment[i] - out_adjustment = connection.rsplit(".", 1)[0] - connection_attr = out_adjustment + ".outValue" - override = cmds.listConnections(connection_attr) or [] - - return override - - override_shader = _get_override(connections, layer) - if override_shader is not None: - return override_shader - else: - # Get the override for "defaultRenderLayer" (=masterLayer) - return _get_override(connections, layer="defaultRenderLayer") diff --git a/server_addon/maya/client/ayon_maya/api/menu.py b/server_addon/maya/client/ayon_maya/api/menu.py deleted file mode 100644 index 4693d0e131..0000000000 --- a/server_addon/maya/client/ayon_maya/api/menu.py +++ /dev/null @@ -1,299 +0,0 @@ -import os -import json -import logging -from functools import partial - -from qtpy import QtWidgets, QtGui - -import maya.utils -import maya.cmds as cmds - -from ayon_core.pipeline import ( - get_current_folder_path, - get_current_task_name, - registered_host -) -from ayon_core.pipeline.workfile import BuildWorkfile -from ayon_core.tools.utils import host_tools -from ayon_maya.api import lib, lib_rendersettings -from .lib import get_main_window, IS_HEADLESS -from ..tools import show_look_assigner - -from .workfile_template_builder import ( - create_placeholder, - update_placeholder, - build_workfile_template, - update_workfile_template -) -from ayon_core.pipeline.context_tools import version_up_current_workfile -from ayon_core.tools.workfile_template_build import open_template_ui -from .workfile_template_builder import MayaTemplateBuilder - -log = logging.getLogger(__name__) - -MENU_NAME = "op_maya_menu" - - -def _get_menu(menu_name=None): - """Return the menu instance if it currently exists in Maya""" - if menu_name is None: - menu_name = MENU_NAME - - widgets = {w.objectName(): w for w in QtWidgets.QApplication.allWidgets()} - return widgets.get(menu_name) - - -def get_context_label(): - return "{}, {}".format( - get_current_folder_path(), - get_current_task_name() - ) - - -def install(project_settings): - if cmds.about(batch=True): - log.info("Skipping AYON menu initialization in batch mode..") - return - - def add_menu(): - pyblish_icon = host_tools.get_pyblish_icon() - parent_widget = get_main_window() - cmds.menu( - MENU_NAME, - label=os.environ.get("AYON_MENU_LABEL") or "AYON", - tearOff=True, - parent="MayaWindow" - ) - - # Create context menu - cmds.menuItem( - "currentContext", - label=get_context_label(), - parent=MENU_NAME, - enable=False - ) - - cmds.setParent("..", menu=True) - - try: - if project_settings["core"]["tools"]["ayon_menu"].get( - "version_up_current_workfile"): - cmds.menuItem(divider=True) - cmds.menuItem( - "Version Up Workfile", - command=lambda *args: version_up_current_workfile() - ) - except KeyError: - print("Version Up Workfile setting not found in " - "Core Settings. 
Please update Core Addon") - - cmds.menuItem(divider=True) - - cmds.menuItem( - "Create...", - command=lambda *args: host_tools.show_publisher( - parent=parent_widget, - tab="create" - ) - ) - - cmds.menuItem( - "Load...", - command=lambda *args: host_tools.show_loader( - parent=parent_widget, - use_context=True - ) - ) - - cmds.menuItem( - "Publish...", - command=lambda *args: host_tools.show_publisher( - parent=parent_widget, - tab="publish" - ), - image=pyblish_icon - ) - - cmds.menuItem( - "Manage...", - command=lambda *args: host_tools.show_scene_inventory( - parent=parent_widget - ) - ) - - cmds.menuItem( - "Library...", - command=lambda *args: host_tools.show_library_loader( - parent=parent_widget - ) - ) - - cmds.menuItem(divider=True) - - cmds.menuItem( - "Work Files...", - command=lambda *args: host_tools.show_workfiles( - parent=parent_widget - ), - ) - - cmds.menuItem( - "Set Frame Range", - command=lambda *args: lib.reset_frame_range() - ) - - cmds.menuItem( - "Set Resolution", - command=lambda *args: lib.reset_scene_resolution() - ) - - cmds.menuItem( - "Set Colorspace", - command=lambda *args: lib.set_colorspace(), - ) - - cmds.menuItem( - "Set Render Settings", - command=lambda *args: lib_rendersettings.RenderSettings().set_default_renderer_settings() # noqa - ) - - cmds.menuItem(divider=True, parent=MENU_NAME) - cmds.menuItem( - "Build First Workfile", - parent=MENU_NAME, - command=lambda *args: BuildWorkfile().process() - ) - - cmds.menuItem( - "Look assigner...", - command=lambda *args: show_look_assigner( - parent_widget - ) - ) - - cmds.menuItem( - "Experimental tools...", - command=lambda *args: host_tools.show_experimental_tools_dialog( - parent_widget - ) - ) - - builder_menu = cmds.menuItem( - "Template Builder", - subMenu=True, - tearOff=True, - parent=MENU_NAME - ) - cmds.menuItem( - "Build Workfile from template", - parent=builder_menu, - command=build_workfile_template - ) - cmds.menuItem( - "Update Workfile from template", - parent=builder_menu, - command=update_workfile_template - ) - cmds.menuItem( - divider=True, - parent=builder_menu - ) - cmds.menuItem( - "Open Template", - parent=builder_menu, - command=lambda *args: open_template_ui( - MayaTemplateBuilder(registered_host()), get_main_window() - ), - ) - cmds.menuItem( - "Create Placeholder", - parent=builder_menu, - command=create_placeholder - ) - cmds.menuItem( - "Update Placeholder", - parent=builder_menu, - command=update_placeholder - ) - - cmds.setParent(MENU_NAME, menu=True) - - def add_scripts_menu(project_settings): - try: - import scriptsmenu.launchformaya as launchformaya - except ImportError: - log.warning( - "Skipping studio.menu install, because " - "'scriptsmenu' module seems unavailable." - ) - return - - menu_settings = project_settings["maya"]["scriptsmenu"] - menu_name = menu_settings["name"] - config = menu_settings["definition"] - - if menu_settings.get("definition_type") == "definition_json": - data = menu_settings["definition_json"] - try: - config = json.loads(data) - except json.JSONDecodeError as exc: - print("Skipping studio menu, error decoding JSON definition.") - log.error(exc) - return - - if not config: - log.warning("Skipping studio menu, no definition found.") - return - - # run the launcher for Maya menu - studio_menu = launchformaya.main( - title=menu_name.title(), - objectName=menu_name.title().lower().replace(" ", "_") - ) - - # apply configuration - studio_menu.build_from_configuration(studio_menu, config) - - # Allow time for uninstallation to finish. 
- # We use Maya's executeDeferred instead of QTimer.singleShot - # so that it only gets called after Maya UI has initialized too. - # This is crucial with Maya 2020+ which initializes without UI - # first as a QCoreApplication - maya.utils.executeDeferred(add_menu) - cmds.evalDeferred(partial(add_scripts_menu, project_settings), - lowestPriority=True) - - -def uninstall(): - menu = _get_menu() - if menu: - log.info("Attempting to uninstall ...") - - try: - menu.deleteLater() - del menu - except Exception as e: - log.error(e) - - -def popup(): - """Pop-up the existing menu near the mouse cursor.""" - menu = _get_menu() - cursor = QtGui.QCursor() - point = cursor.pos() - menu.exec_(point) - - -def update_menu_task_label(): - """Update the task label in AYON menu to current session""" - - if IS_HEADLESS: - return - - object_name = "{}|currentContext".format(MENU_NAME) - if not cmds.menuItem(object_name, query=True, exists=True): - log.warning("Can't find menuItem: {}".format(object_name)) - return - - label = get_context_label() - cmds.menuItem(object_name, edit=True, label=label) diff --git a/server_addon/maya/client/ayon_maya/api/pipeline.py b/server_addon/maya/client/ayon_maya/api/pipeline.py deleted file mode 100644 index 84268cc6f1..0000000000 --- a/server_addon/maya/client/ayon_maya/api/pipeline.py +++ /dev/null @@ -1,779 +0,0 @@ -import json -import base64 -import os -import errno -import logging -import contextlib -import shutil - -from maya import utils, cmds, OpenMaya -import maya.api.OpenMaya as om - -import pyblish.api - -from ayon_core.settings import get_project_settings -from ayon_core.host import ( - HostBase, - IWorkfileHost, - ILoadHost, - IPublishHost, - HostDirmap, -) -from ayon_core.tools.utils import host_tools -from ayon_core.tools.workfiles.lock_dialog import WorkfileLockDialog -from ayon_core.lib import ( - register_event_callback, - emit_event -) -from ayon_core.pipeline import ( - get_current_project_name, - register_loader_plugin_path, - register_inventory_action_path, - register_creator_plugin_path, - register_workfile_build_plugin_path, - deregister_loader_plugin_path, - deregister_inventory_action_path, - deregister_creator_plugin_path, - deregister_workfile_build_plugin_path, - AYON_CONTAINER_ID, - AVALON_CONTAINER_ID, -) -from ayon_core.pipeline.load import any_outdated_containers -from ayon_core.pipeline.workfile.lock_workfile import ( - create_workfile_lock, - remove_workfile_lock, - is_workfile_locked, - is_workfile_lock_enabled -) -from ayon_maya import MAYA_ROOT_DIR -from ayon_maya.lib import create_workspace_mel - -from . 
import menu, lib -from .workio import ( - open_file, - save_file, - file_extensions, - has_unsaved_changes, - work_root, - current_file -) - -log = logging.getLogger("ayon_maya") - -PLUGINS_DIR = os.path.join(MAYA_ROOT_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") -WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") - -AVALON_CONTAINERS = ":AVALON_CONTAINERS" - -# Track whether the workfile tool is about to save -_about_to_save = False - - -class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "maya" - - def __init__(self): - super(MayaHost, self).__init__() - self._op_events = {} - - def install(self): - project_name = get_current_project_name() - project_settings = get_project_settings(project_name) - # process path mapping - dirmap_processor = MayaDirmap("maya", project_name, project_settings) - dirmap_processor.process_dirmap() - - pyblish.api.register_plugin_path(PUBLISH_PATH) - pyblish.api.register_host("mayabatch") - pyblish.api.register_host("mayapy") - pyblish.api.register_host("maya") - - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) - - self.log.info("Installing callbacks ... ") - register_event_callback("init", on_init) - - _set_project() - - if lib.IS_HEADLESS: - self.log.info(( - "Running in headless mode, skipping Maya save/open/new" - " callback installation.." - )) - - return - - self._register_callbacks() - - menu.install(project_settings) - - register_event_callback("save", on_save) - register_event_callback("open", on_open) - register_event_callback("new", on_new) - register_event_callback("before.save", on_before_save) - register_event_callback("after.save", on_after_save) - register_event_callback("before.close", on_before_close) - register_event_callback("before.file.open", before_file_open) - register_event_callback("taskChanged", on_task_changed) - register_event_callback("workfile.open.before", before_workfile_open) - register_event_callback("workfile.save.before", before_workfile_save) - register_event_callback( - "workfile.save.before", workfile_save_before_xgen - ) - register_event_callback("workfile.save.after", after_workfile_save) - - def open_workfile(self, filepath): - return open_file(filepath) - - def save_workfile(self, filepath=None): - return save_file(filepath) - - def work_root(self, session): - return work_root(session) - - def get_current_workfile(self): - return current_file() - - def workfile_has_unsaved_changes(self): - return has_unsaved_changes() - - def get_workfile_extensions(self): - return file_extensions() - - def get_containers(self): - return ls() - - @contextlib.contextmanager - def maintained_selection(self): - with lib.maintained_selection(): - yield - - def get_context_data(self): - data = cmds.fileInfo("OpenPypeContext", query=True) - if not data: - return {} - - data = data[0] # Maya seems to return a list - decoded = base64.b64decode(data).decode("utf-8") - return json.loads(decoded) - - def update_context_data(self, data, changes): - json_str = json.dumps(data) - encoded = base64.b64encode(json_str.encode("utf-8")) - return cmds.fileInfo("OpenPypeContext", encoded) - - def _register_callbacks(self): - for handler, event in self._op_events.copy().items(): - if event is None: - 
continue - - try: - OpenMaya.MMessage.removeCallback(event) - self._op_events[handler] = None - except RuntimeError as exc: - self.log.info(exc) - - self._op_events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save - ) - - self._op_events[_after_scene_save] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterSave, - _after_scene_save - ) - ) - - self._op_events[_before_scene_save] = ( - OpenMaya.MSceneMessage.addCheckCallback( - OpenMaya.MSceneMessage.kBeforeSaveCheck, - _before_scene_save - ) - ) - - self._op_events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterNew, _on_scene_new - ) - - self._op_events[_on_maya_initialized] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kMayaInitialized, - _on_maya_initialized - ) - ) - - self._op_events[_on_scene_open] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterOpen, - _on_scene_open - ) - ) - - self._op_events[_before_scene_open] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kBeforeOpen, - _before_scene_open - ) - ) - - self._op_events[_before_close_maya] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kMayaExiting, - _before_close_maya - ) - ) - - self.log.info("Installed event handler _on_scene_save..") - self.log.info("Installed event handler _before_scene_save..") - self.log.info("Installed event handler _on_after_save..") - self.log.info("Installed event handler _on_scene_new..") - self.log.info("Installed event handler _on_maya_initialized..") - self.log.info("Installed event handler _on_scene_open..") - self.log.info("Installed event handler _check_lock_file..") - self.log.info("Installed event handler _before_close_maya..") - - -def _set_project(): - """Sets the maya project to the current Session's work directory. - - Returns: - None - - """ - workdir = os.getenv("AYON_WORKDIR") - - try: - os.makedirs(workdir) - except OSError as e: - # An already existing working directory is fine. - if e.errno == errno.EEXIST: - pass - else: - raise - - cmds.workspace(workdir, openWorkspace=True) - - -def _on_maya_initialized(*args): - emit_event("init") - - if cmds.about(batch=True): - log.warning("Running batch mode ...") - return - - # Keep reference to the main Window, once a main window exists. - lib.get_main_window() - - -def _on_scene_new(*args): - emit_event("new") - - -def _after_scene_save(*arg): - emit_event("after.save") - - -def _on_scene_save(*args): - emit_event("save") - - -def _on_scene_open(*args): - emit_event("open") - - -def _before_close_maya(*args): - emit_event("before.close") - - -def _before_scene_open(*args): - emit_event("before.file.open") - - -def _before_scene_save(return_code, client_data): - - # Default to allowing the action. Registered - # callbacks can optionally set this to False - # in order to block the operation. 
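As a sketch of a subscriber using this veto mechanism: the handler name and the FPS guard are hypothetical, but `register_event_callback` and the forwarded `return_code` pointer are the ones used in this module:

    from maya import OpenMaya
    from ayon_core.lib import register_event_callback
    from ayon_maya.api import lib

    def _veto_save_on_bad_fps(event):
        # Hypothetical guard: refuse to save while the scene FPS is invalid.
        if not lib.validate_fps():
            OpenMaya.MScriptUtil.setBool(event["return_code"], False)

    register_event_callback("before.save", _veto_save_on_bad_fps)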
- OpenMaya.MScriptUtil.setBool(return_code, True) - - emit_event( - "before.save", - {"return_code": return_code} - ) - - -def _remove_workfile_lock(): - """Remove workfile lock on current file""" - if not handle_workfile_locks(): - return - filepath = current_file() - log.info("Removing lock on current file {}...".format(filepath)) - if filepath: - remove_workfile_lock(filepath) - - -def handle_workfile_locks(): - if lib.IS_HEADLESS: - return False - project_name = get_current_project_name() - return is_workfile_lock_enabled(MayaHost.name, project_name) - - -def uninstall(): - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - pyblish.api.deregister_host("mayabatch") - pyblish.api.deregister_host("mayapy") - pyblish.api.deregister_host("maya") - - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - deregister_inventory_action_path(INVENTORY_PATH) - deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH) - - menu.uninstall() - - -def parse_container(container): - """Return the container node's full container data. - - Args: - container (str): A container node name. - - Returns: - dict: The container schema data for this container node. - - """ - data = lib.read(container) - - # Backwards compatibility pre-schemas for containers - data["schema"] = data.get("schema", "openpype:container-1.0") - - # Append transient data - data["objectName"] = container - - return data - - -def _ls(): - """Yields AYON container node names. - - Used by `ls()` to retrieve the nodes and then query the full container's - data. - - Yields: - str: AYON container node name (objectSet) - - """ - - def _maya_iterate(iterator): - """Helper to iterate a maya iterator""" - while not iterator.isDone(): - yield iterator.thisNode() - iterator.next() - - ids = { - AYON_CONTAINER_ID, - # Backwards compatibility - AVALON_CONTAINER_ID - } - - # Iterate over all 'set' nodes in the scene to detect whether - # they have the ayon container ".id" attribute. - fn_dep = om.MFnDependencyNode() - iterator = om.MItDependencyNodes(om.MFn.kSet) - for mobject in _maya_iterate(iterator): - if mobject.apiTypeStr != "kSet": - # Only match by exact type - continue - - fn_dep.setObject(mobject) - if not fn_dep.hasAttribute("id"): - continue - - plug = fn_dep.findPlug("id", True) - value = plug.asString() - if value in ids: - yield fn_dep.name() - - -def ls(): - """Yields containers from active Maya scene - - This is the host-equivalent of api.ls(), but instead of listing - assets on disk, it lists assets already loaded in Maya; once loaded - they are called 'containers' - - Yields: - dict: container - - """ - container_names = _ls() - for container in sorted(container_names): - yield parse_container(container) - - -def containerise(name, - namespace, - nodes, - context, - loader=None, - suffix="CON"): - """Bundle `nodes` into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - nodes (list): Long names of nodes to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. 
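As a usage sketch (product name and namespace are hypothetical): a loader that has just created nodes would typically finish with a call like the one below, so the scene inventory can find and update the result later:

    nodes = cmds.ls("hero_01:*", long=True)  # hypothetical loaded nodes
    container = containerise(
        name="modelMain",          # hypothetical product name
        namespace="hero_01",
        nodes=nodes,
        context=context,           # the load context passed to the loader
        loader="ReferenceLoader",
    )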
- - Returns: - container (str): Name of container assembly - - """ - container = cmds.sets(nodes, name="%s_%s_%s" % (namespace, name, suffix)) - - data = [ - ("schema", "openpype:container-2.0"), - ("id", AVALON_CONTAINER_ID), - ("name", name), - ("namespace", namespace), - ("loader", loader), - ("representation", context["representation"]["id"]), - ] - - for key, value in data: - cmds.addAttr(container, longName=key, dataType="string") - cmds.setAttr(container + "." + key, str(value), type="string") - - main_container = cmds.ls(AVALON_CONTAINERS, type="objectSet") - if not main_container: - main_container = cmds.sets(empty=True, name=AVALON_CONTAINERS) - - # Implement #399: Maya 2019+ hide AVALON_CONTAINERS on creation.. - if cmds.attributeQuery("hiddenInOutliner", - node=main_container, - exists=True): - cmds.setAttr(main_container + ".hiddenInOutliner", True) - else: - main_container = main_container[0] - - cmds.sets(container, addElement=main_container) - - # Implement #399: Maya 2019+ hide containers in outliner - if cmds.attributeQuery("hiddenInOutliner", - node=container, - exists=True): - cmds.setAttr(container + ".hiddenInOutliner", True) - - return container - - -def on_init(): - log.info("Running callback on init..") - - def safe_deferred(fn): - """Execute deferred the function in a try-except""" - - def _fn(): - """safely call in deferred callback""" - try: - fn() - except Exception as exc: - print(exc) - - try: - utils.executeDeferred(_fn) - except Exception as exc: - print(exc) - - # Force load Alembic so referenced alembics - # work correctly on scene open - cmds.loadPlugin("AbcImport", quiet=True) - cmds.loadPlugin("AbcExport", quiet=True) - - # Force load objExport plug-in (requested by artists) - cmds.loadPlugin("objExport", quiet=True) - - if not lib.IS_HEADLESS: - launch_workfiles = os.environ.get("WORKFILES_STARTUP") - if launch_workfiles: - safe_deferred(host_tools.show_workfiles) - - from .customize import ( - override_component_mask_commands, - override_toolbox_ui - ) - safe_deferred(override_component_mask_commands) - safe_deferred(override_toolbox_ui) - - -def on_before_save(): - """Run validation for scene's FPS prior to saving""" - return lib.validate_fps() - - -def on_after_save(): - """Check if there is a lockfile after save""" - check_lock_on_current_file() - - -def check_lock_on_current_file(): - - """Check if there is a user opening the file""" - if not handle_workfile_locks(): - return - log.info("Running callback on checking the lock file...") - - # add the lock file when opening the file - filepath = current_file() - # Skip if current file is 'untitled' - if not filepath: - return - - if is_workfile_locked(filepath): - # add lockfile dialog - workfile_dialog = WorkfileLockDialog(filepath) - if not workfile_dialog.exec_(): - cmds.file(new=True) - return - - create_workfile_lock(filepath) - - -def on_before_close(): - """Delete the lock file after user quitting the Maya Scene""" - log.info("Closing Maya...") - # delete the lock file - filepath = current_file() - if handle_workfile_locks(): - remove_workfile_lock(filepath) - - -def before_file_open(): - """check lock file when the file changed""" - # delete the lock file - _remove_workfile_lock() - - -def on_save(): - """Automatically add IDs to new nodes - - Any transform of a mesh, without an existing ID, is given one - automatically on file save. 
- """ - log.info("Running callback on save..") - # remove lockfile if users jumps over from one scene to another - _remove_workfile_lock() - - # Generate ids of the current context on nodes in the scene - nodes = lib.get_id_required_nodes(referenced_nodes=False, - existing_ids=False) - for node, new_id in lib.generate_ids(nodes): - lib.set_id(node, new_id, overwrite=False) - - # We are now starting the actual save directly - global _about_to_save - _about_to_save = False - - -def on_open(): - """On scene open let's assume the containers have changed.""" - - from ayon_core.tools.utils import SimplePopup - - # Validate FPS after update_task_from_path to - # ensure it is using correct FPS for the folder - lib.validate_fps() - lib.fix_incompatible_containers() - - if any_outdated_containers(): - log.warning("Scene has outdated content.") - - # Find maya main window - parent = lib.get_main_window() - if parent is None: - log.info("Skipping outdated content pop-up " - "because Maya window can't be found.") - else: - - # Show outdated pop-up - def _on_show_inventory(): - host_tools.show_scene_inventory(parent=parent) - - dialog = SimplePopup(parent=parent) - dialog.setWindowTitle("Maya scene has outdated content") - dialog.set_message("There are outdated containers in " - "your Maya scene.") - dialog.on_clicked.connect(_on_show_inventory) - dialog.show() - - # create lock file for the maya scene - check_lock_on_current_file() - - -def on_new(): - """Set project resolution and fps when create a new file""" - log.info("Running callback on new..") - with lib.suspended_refresh(): - lib.set_context_settings() - - _remove_workfile_lock() - - -def on_task_changed(): - """Wrapped function of app initialize and maya's on task changed""" - # Run - menu.update_menu_task_label() - - workdir = os.getenv("AYON_WORKDIR") - if os.path.exists(workdir): - log.info("Updating Maya workspace for task change to %s", workdir) - _set_project() - - # Set Maya fileDialog's start-dir to /scenes - frule_scene = cmds.workspace(fileRuleEntry="scene") - cmds.optionVar(stringValue=("browserLocationmayaBinaryscene", - workdir + "/" + frule_scene)) - - else: - log.warning(( - "Can't set project for new context because path does not exist: {}" - ).format(workdir)) - - global _about_to_save - if not lib.IS_HEADLESS and _about_to_save: - # Let's prompt the user to update the context settings or not - lib.prompt_reset_context() - - -def before_workfile_open(): - if handle_workfile_locks(): - _remove_workfile_lock() - - -def before_workfile_save(event): - project_name = get_current_project_name() - if handle_workfile_locks(): - _remove_workfile_lock() - workdir_path = event["workdir_path"] - if workdir_path: - create_workspace_mel(workdir_path, project_name) - - global _about_to_save - _about_to_save = True - - -def workfile_save_before_xgen(event): - """Manage Xgen external files when switching context. - - Xgen has various external files that needs to be unique and relative to the - workfile, so we need to copy and potentially overwrite these files when - switching context. 
- - Args: - event (Event) - ayon_core/lib/events.py - """ - if not cmds.pluginInfo("xgenToolkit", query=True, loaded=True): - return - - import xgenm - - current_work_dir = os.getenv("AYON_WORKDIR").replace("\\", "/") - expected_work_dir = event.data["workdir_path"].replace("\\", "/") - if current_work_dir == expected_work_dir: - return - - palettes = cmds.ls(type="xgmPalette", long=True) - if not palettes: - return - - transfers = [] - overwrites = [] - attribute_changes = {} - attrs = ["xgFileName", "xgBaseFile"] - for palette in palettes: - sanitized_palette = palette.replace("|", "") - project_path = xgenm.getAttr("xgProjectPath", sanitized_palette) - _, maya_extension = os.path.splitext(event.data["filename"]) - - for attr in attrs: - node_attr = "{}.{}".format(palette, attr) - attr_value = cmds.getAttr(node_attr) - - if not attr_value: - continue - - source = os.path.join(project_path, attr_value) - - attr_value = event.data["filename"].replace( - maya_extension, - "__{}{}".format( - sanitized_palette.replace(":", "__"), - os.path.splitext(attr_value)[1] - ) - ) - target = os.path.join(expected_work_dir, attr_value) - - transfers.append((source, target)) - attribute_changes[node_attr] = attr_value - - relative_path = xgenm.getAttr( - "xgDataPath", sanitized_palette - ).split(os.pathsep)[0] - absolute_path = relative_path.replace("${PROJECT}", project_path) - for root, _, files in os.walk(absolute_path): - for f in files: - source = os.path.join(root, f).replace("\\", "/") - target = source.replace(project_path, expected_work_dir + "/") - transfers.append((source, target)) - if os.path.exists(target): - overwrites.append(target) - - # Ask user about overwriting files. - if overwrites: - log.warning( - "WARNING! Potential loss of data.\n\n" - "Found duplicate Xgen files in new context.\n{}".format( - "\n".join(overwrites) - ) - ) - return - - for source, destination in transfers: - if not os.path.exists(os.path.dirname(destination)): - os.makedirs(os.path.dirname(destination)) - shutil.copy(source, destination) - - for attribute, value in attribute_changes.items(): - cmds.setAttr(attribute, value, type="string") - - -def after_workfile_save(event): - workfile_name = event["filename"] - if ( - handle_workfile_locks() - and workfile_name - and not is_workfile_locked(workfile_name) - ): - create_workfile_lock(workfile_name) - - -class MayaDirmap(HostDirmap): - def on_enable_dirmap(self): - cmds.dirmap(en=True) - - def dirmap_routine(self, source_path, destination_path): - cmds.dirmap(m=(source_path, destination_path)) - cmds.dirmap(m=(destination_path, source_path)) diff --git a/server_addon/maya/client/ayon_maya/api/plugin.py b/server_addon/maya/client/ayon_maya/api/plugin.py deleted file mode 100644 index d2678e2100..0000000000 --- a/server_addon/maya/client/ayon_maya/api/plugin.py +++ /dev/null @@ -1,1037 +0,0 @@ -import json -import os -from abc import ABCMeta - -import ayon_api -import qargparse -import six - -from ayon_core.lib import BoolDef, Logger -from ayon_core.pipeline import ( - AVALON_CONTAINER_ID, - AVALON_INSTANCE_ID, - AYON_CONTAINER_ID, - AYON_INSTANCE_ID, - Anatomy, - AutoCreator, - CreatedInstance, - Creator, - CreatorError, - HiddenCreator, - LoaderPlugin, - get_current_project_name, - get_representation_path, - publish, -) -from ayon_core.pipeline.create import get_product_name -from ayon_core.pipeline.load import LoadError -from ayon_core.settings import get_project_settings -from maya import cmds -from maya.app.renderSetup.model import renderSetup -from pyblish.api 
import ContextPlugin, InstancePlugin
-
-from . import lib
-from .lib import imprint, read
-from .pipeline import containerise
-
-log = Logger.get_logger()
-SETTINGS_CATEGORY = "maya"
-
-
-def _get_attr(node, attr, default=None):
- """Helper to get attribute which allows attribute to not exist."""
- if not cmds.attributeQuery(attr, node=node, exists=True):
- return default
- return cmds.getAttr("{}.{}".format(node, attr))
-
-
-# Backwards compatibility: these functions have been moved to lib.
-def get_reference_node(*args, **kwargs):
- """Get the reference node from the container members
-
- Deprecated:
- This function was moved and will be removed in 3.16.x.
- """
- msg = "Function 'get_reference_node' has been moved."
- log.warning(msg)
- cmds.warning(msg)
- return lib.get_reference_node(*args, **kwargs)
-
-
-def get_reference_node_parents(*args, **kwargs):
- """
- Deprecated:
- This function was moved and will be removed in 3.16.x.
- """
- msg = "Function 'get_reference_node_parents' has been moved."
- log.warning(msg)
- cmds.warning(msg)
- return lib.get_reference_node_parents(*args, **kwargs)
-
-
-@six.add_metaclass(ABCMeta)
-class MayaCreatorBase(object):
-
- @staticmethod
- def cache_instance_data(shared_data):
- """Cache instances for Creators to shared data.
-
- Create `maya_cached_instance_data` key when needed in shared data and
- fill it with all collected instances from the scene under its
- respective creator identifiers.
-
- If legacy instances are detected in the scene, create
- `maya_cached_legacy_instances` there and fill it with
- all legacy products keyed by product type.
-
- Args:
- shared_data (Dict[str, Any]): Shared data.
-
- """
- if shared_data.get("maya_cached_instance_data") is None:
- cache = dict()
- cache_legacy = dict()
-
- for node in cmds.ls(type="objectSet"):
-
- if _get_attr(node, attr="id") not in {
- AYON_INSTANCE_ID, AVALON_INSTANCE_ID
- }:
- continue
-
- creator_id = _get_attr(node, attr="creator_identifier")
- if creator_id is not None:
- # creator instance
- cache.setdefault(creator_id, []).append(node)
- else:
- # legacy instance
- family = _get_attr(node, attr="family")
- if family is None:
- # must be a broken instance
- continue
-
- cache_legacy.setdefault(family, []).append(node)
-
- shared_data["maya_cached_instance_data"] = cache
- shared_data["maya_cached_legacy_instances"] = cache_legacy
- return shared_data
-
- def get_publish_families(self):
- """Return families for the instances of this creator.
-
- Allow a Creator to define multiple families so that a creator can
- e.g. specify `usd` and `usdMaya` and another USD creator can also
- specify `usd` but apply different extractors like `usdMultiverse`.
-
- There is no need to override this method if you only have the
- 'product_type' required for publish filtering.
-
- Returns:
- list: families for instances of this creator
-
- """
- return []
-
- def imprint_instance_node(self, node, data):
-
- # We never store the instance_node as value on the node since
- # it's the node name itself
- data.pop("instance_node", None)
- data.pop("instance_id", None)
-
- # Don't store `families` since it's up to the creator itself
- # to define the initial publish families - not a stored attribute of
- # `families`
- data.pop("families", None)
-
- # We store creator attributes at the root level and assume they
- # will not clash in names with `product`, `task`, etc. and other
- # default names. This is just so these attributes in many cases
- # are still editable in the maya UI by artists.
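A sketch of the resulting layout on the objectSet, with hypothetical attribute names, may help here:

    # data before imprinting (hypothetical):
    #   {"productType": "model",
    #    "creator_attributes": {"writeColorSets": True,
    #                           "defaultCams": ["persp"]},
    #    "publish_attributes": {"ValidateMeshUVs": {"active": True}}}
    #
    # attributes written to the objectSet:
    #   .writeColorSets             bool attr (flattened, artist editable)
    #   .creator_attributes        JSON string: {"defaultCams": ["persp"]}
    #   .publish_attributes        JSON string of the publish settings
    #   .__creator_attributes_keys "writeColorSets"
    #
    # read_instance_node() later folds .writeColorSets back into
    # data["creator_attributes"] using __creator_attributes_keys.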
- # note: pop to move to end of dict to sort attributes last on the node - creator_attributes = data.pop("creator_attributes", {}) - - # We only flatten value types which `imprint` function supports - json_creator_attributes = {} - for key, value in dict(creator_attributes).items(): - if isinstance(value, (list, tuple, dict)): - creator_attributes.pop(key) - json_creator_attributes[key] = value - - # Flatten remaining creator attributes to the node itself - data.update(creator_attributes) - - # We know the "publish_attributes" will be complex data of - # settings per plugins, we'll store this as a flattened json structure - # pop to move to end of dict to sort attributes last on the node - data["publish_attributes"] = json.dumps( - data.pop("publish_attributes", {}) - ) - - # Persist the non-flattened creator attributes (special value types, - # like multiselection EnumDef) - data["creator_attributes"] = json.dumps(json_creator_attributes) - - # Since we flattened the data structure for creator attributes we want - # to correctly detect which flattened attributes should end back in the - # creator attributes when reading the data from the node, so we store - # the relevant keys as a string - data["__creator_attributes_keys"] = ",".join(creator_attributes.keys()) - - # Kill any existing attributes just so we can imprint cleanly again - for attr in data.keys(): - if cmds.attributeQuery(attr, node=node, exists=True): - cmds.deleteAttr("{}.{}".format(node, attr)) - - return imprint(node, data) - - def read_instance_node(self, node): - node_data = read(node) - - # Never care about a cbId attribute on the object set - # being read as 'data' - node_data.pop("cbId", None) - - # Make sure we convert any creator attributes from the json string - creator_attributes = node_data.get("creator_attributes") - if creator_attributes: - node_data["creator_attributes"] = json.loads(creator_attributes) - else: - node_data["creator_attributes"] = {} - - # Move the relevant attributes into "creator_attributes" that - # we flattened originally - creator_attribute_keys = node_data.pop("__creator_attributes_keys", - "").split(",") - for key in creator_attribute_keys: - if key in node_data: - node_data["creator_attributes"][key] = node_data.pop(key) - - # Make sure we convert any publish attributes from the json string - publish_attributes = node_data.get("publish_attributes") - if publish_attributes: - node_data["publish_attributes"] = json.loads(publish_attributes) - - # Explicitly re-parse the node name - node_data["instance_node"] = node - node_data["instance_id"] = node - - # If the creator plug-in specifies - families = self.get_publish_families() - if families: - node_data["families"] = families - - return node_data - - def _default_collect_instances(self): - self.cache_instance_data(self.collection_shared_data) - cached_instances = ( - self.collection_shared_data["maya_cached_instance_data"] - ) - for node in cached_instances.get(self.identifier, []): - node_data = self.read_instance_node(node) - - created_instance = CreatedInstance.from_existing(node_data, self) - self._add_instance_to_context(created_instance) - - def _default_update_instances(self, update_list): - for created_inst, _changes in update_list: - data = created_inst.data_to_store() - node = data.get("instance_node") - - self.imprint_instance_node(node, data) - - def _default_remove_instances(self, instances): - """Remove specified instance from the scene. 
-
- This only removes the `id` parameter so the instance is no longer
- an instance, because it might contain valuable data for the artist.
-
- """
- for instance in instances:
- node = instance.data.get("instance_node")
- if node:
- cmds.delete(node)
-
- self._remove_instance_from_context(instance)
-
-
-@six.add_metaclass(ABCMeta)
-class MayaCreator(Creator, MayaCreatorBase):
-
- settings_category = "maya"
-
- def create(self, product_name, instance_data, pre_create_data):
-
- members = list()
- if pre_create_data.get("use_selection"):
- members = cmds.ls(selection=True)
-
- # Allow a Creator to define multiple families
- publish_families = self.get_publish_families()
- if publish_families:
- families = instance_data.setdefault("families", [])
- for family in self.get_publish_families():
- if family not in families:
- families.append(family)
-
- with lib.undo_chunk():
- instance_node = cmds.sets(members, name=product_name)
- instance_data["instance_node"] = instance_node
- instance = CreatedInstance(
- self.product_type,
- product_name,
- instance_data,
- self)
- self._add_instance_to_context(instance)
-
- self.imprint_instance_node(instance_node,
- data=instance.data_to_store())
- return instance
-
- def collect_instances(self):
- return self._default_collect_instances()
-
- def update_instances(self, update_list):
- return self._default_update_instances(update_list)
-
- def remove_instances(self, instances):
- return self._default_remove_instances(instances)
-
- def get_pre_create_attr_defs(self):
- return [
- BoolDef("use_selection",
- label="Use selection",
- default=True)
- ]
-
-
-class MayaAutoCreator(AutoCreator, MayaCreatorBase):
- """Automatically triggered creator for Maya.
-
- The plugin is not visible in the UI, and its 'create' method does not
- expect any arguments.
- """
-
- settings_category = "maya"
-
- def collect_instances(self):
- return self._default_collect_instances()
-
- def update_instances(self, update_list):
- return self._default_update_instances(update_list)
-
- def remove_instances(self, instances):
- return self._default_remove_instances(instances)
-
-
-class MayaHiddenCreator(HiddenCreator, MayaCreatorBase):
- """Hidden creator for Maya.
-
- The plugin is not visible in the UI, and it does not have strictly
- defined arguments for the 'create' method.
- """
-
- settings_category = "maya"
-
- def create(self, *args, **kwargs):
- return MayaCreator.create(self, *args, **kwargs)
-
- def collect_instances(self):
- return self._default_collect_instances()
-
- def update_instances(self, update_list):
- return self._default_update_instances(update_list)
-
- def remove_instances(self, instances):
- return self._default_remove_instances(instances)
-
-
-def ensure_namespace(namespace):
- """Make sure the namespace exists.
-
- Args:
- namespace (str): The preferred namespace name.
-
- Returns:
- str: The generated or existing namespace
-
- """
- exists = cmds.namespace(exists=namespace)
- if exists:
- return namespace
- else:
- return cmds.namespace(add=namespace)
-
-
-class RenderlayerCreator(Creator, MayaCreatorBase):
- """Creator which creates an instance per renderlayer in the workfile.
-
- Creates and manages a renderlayer product per renderLayer in the workfile.
- This generates a singleton node in the scene which, if it exists, tells the
- Creator to collect Maya rendersetup renderlayers as individual instances.
- As such, triggering create doesn't actually create the instance node per
- layer but only the node which tells the Creator it may now collect
- an instance per renderlayer.
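The opt-in check is a plain attribute lookup via the lib helper; roughly (the identifier value is hypothetical):

    # Sketch: renderlayers are only collected when the singleton set exists.
    nodes = lib.lsattr("pre_creator_identifier",
                       "io.ayon.creators.maya.renderlayer")  # hypothetical id
    collect_renderlayers = bool(nodes)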
- - """ - - # These are required to be overridden in subclass - singleton_node_name = "" - - # These are optional to be overridden in subclass - layer_instance_prefix = None - - def _get_singleton_node(self, return_all=False): - nodes = lib.lsattr("pre_creator_identifier", self.identifier) - if nodes: - return nodes if return_all else nodes[0] - - def create(self, product_name, instance_data, pre_create_data): - # A Renderlayer is never explicitly created using the create method. - # Instead, renderlayers from the scene are collected. Thus "create" - # would only ever be called to say, 'hey, please refresh collect' - self.create_singleton_node() - - # if no render layers are present, create default one with - # asterisk selector - rs = renderSetup.instance() - if not rs.getRenderLayers(): - render_layer = rs.createRenderLayer("Main") - collection = render_layer.createCollection("defaultCollection") - collection.getSelector().setPattern('*') - - # By RenderLayerCreator.create we make it so that the renderlayer - # instances directly appear even though it just collects scene - # renderlayers. This doesn't actually 'create' any scene contents. - self.collect_instances() - - def create_singleton_node(self): - if self._get_singleton_node(): - raise CreatorError("A Render instance already exists - only " - "one can be configured.") - - with lib.undo_chunk(): - node = cmds.sets(empty=True, name=self.singleton_node_name) - lib.imprint(node, data={ - "pre_creator_identifier": self.identifier - }) - - return node - - def collect_instances(self): - - # We only collect if the global render instance exists - if not self._get_singleton_node(): - return - - host_name = self.create_context.host_name - rs = renderSetup.instance() - layers = rs.getRenderLayers() - for layer in layers: - layer_instance_node = self.find_layer_instance_node(layer) - if layer_instance_node: - data = self.read_instance_node(layer_instance_node) - instance = CreatedInstance.from_existing(data, creator=self) - else: - # No existing scene instance node for this layer. Note that - # this instance will not have the `instance_node` data yet - # until it's been saved/persisted at least once. 
- project_name = self.create_context.get_current_project_name() - folder_path = self.create_context.get_current_folder_path() - task_name = self.create_context.get_current_task_name() - instance_data = { - "folderPath": folder_path, - "task": task_name, - "variant": layer.name(), - } - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - layer.name(), - host_name, - ) - - instance = CreatedInstance( - product_type=self.product_type, - product_name=product_name, - data=instance_data, - creator=self - ) - - instance.transient_data["layer"] = layer - self._add_instance_to_context(instance) - - def find_layer_instance_node(self, layer): - connected_sets = cmds.listConnections( - "{}.message".format(layer.name()), - source=False, - destination=True, - type="objectSet" - ) or [] - - for node in connected_sets: - if not cmds.attributeQuery("creator_identifier", - node=node, - exists=True): - continue - - creator_identifier = cmds.getAttr(node + ".creator_identifier") - if creator_identifier == self.identifier: - self.log.info("Found node: {}".format(node)) - return node - - def _create_layer_instance_node(self, layer): - - # We only collect if a CreateRender instance exists - create_render_set = self._get_singleton_node() - if not create_render_set: - raise CreatorError("Creating a renderlayer instance node is not " - "allowed if no 'CreateRender' instance exists") - - namespace = "_{}".format(self.singleton_node_name) - namespace = ensure_namespace(namespace) - - name = "{}:{}".format(namespace, layer.name()) - render_set = cmds.sets(name=name, empty=True) - - # Keep an active link with the renderlayer so we can retrieve it - # later by a physical maya connection instead of relying on the layer - # name - cmds.addAttr(render_set, longName="renderlayer", at="message") - cmds.connectAttr("{}.message".format(layer.name()), - "{}.renderlayer".format(render_set), force=True) - - # Add the set to the 'CreateRender' set. - cmds.sets(render_set, forceElement=create_render_set) - - return render_set - - def update_instances(self, update_list): - # We only generate the persisting layer data into the scene once - # we save with the UI on e.g. validate or publish - for instance, _changes in update_list: - instance_node = instance.data.get("instance_node") - - # Ensure a node exists to persist the data to - if not instance_node: - layer = instance.transient_data["layer"] - instance_node = self._create_layer_instance_node(layer) - instance.data["instance_node"] = instance_node - - self.imprint_instance_node(instance_node, - data=instance.data_to_store()) - - def imprint_instance_node(self, node, data): - # Do not ever try to update the `renderlayer` since it'll try - # to remove the attribute and recreate it but fail to keep it a - # message attribute link. We only ever imprint that on the initial - # node creation. - # TODO: Improve how this is handled - data.pop("renderlayer", None) - data.get("creator_attributes", {}).pop("renderlayer", None) - - return super(RenderlayerCreator, self).imprint_instance_node(node, - data=data) - - def remove_instances(self, instances): - """Remove specified instances from the scene. - - This is only removing `id` parameter so instance is no longer - instance, because it might contain valuable data for artist. 
-
- """
- # Instead of removing the single instance or renderlayers we instead
- # remove the CreateRender node this creator relies on to decide whether
- # it should collect anything at all.
- nodes = self._get_singleton_node(return_all=True)
- if nodes:
- cmds.delete(nodes)
-
- # Remove ALL the instances even if only one gets deleted
- for instance in list(self.create_context.instances):
- if instance.get("creator_identifier") == self.identifier:
- self._remove_instance_from_context(instance)
-
- # Remove the stored settings per renderlayer too
- node = instance.data.get("instance_node")
- if node and cmds.objExists(node):
- cmds.delete(node)
-
- def get_product_name(
- self,
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name=None,
- instance=None
- ):
- if host_name is None:
- host_name = self.create_context.host_name
- dynamic_data = self.get_dynamic_data(
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name,
- instance
- )
- task_name = task_type = None
- if task_entity:
- task_name = task_entity["name"]
- task_type = task_entity["taskType"]
- # creator.product_type != 'render' as expected
- return get_product_name(
- project_name,
- task_name,
- task_type,
- host_name,
- self.layer_instance_prefix or self.product_type,
- variant,
- dynamic_data=dynamic_data,
- project_settings=self.project_settings
- )
-
-
-def get_load_color_for_product_type(product_type, settings=None):
- """Get color for product type from settings.
-
- Args:
- product_type (str): Family name.
- settings (Optional[dict]): Settings dictionary.
-
- Returns:
- Union[tuple[float, float, float], None]: RGB color.
-
- """
- if settings is None:
- settings = get_project_settings(get_current_project_name())
-
- colors = settings["maya"]["load"]["colors"]
- color = colors.get(product_type)
- if not color:
- return None
-
- if len(color) == 3:
- red, green, blue = color
- elif len(color) == 4:
- red, green, blue, _ = color
- else:
- raise ValueError("Invalid color definition {}".format(str(color)))
-
- if isinstance(red, int):
- red = red / 255.0
- green = green / 255.0
- blue = blue / 255.0
- return red, green, blue
-
-
-class Loader(LoaderPlugin):
- hosts = ["maya"]
- settings_category = SETTINGS_CATEGORY
- load_settings = {} # defined in settings
-
- @classmethod
- def apply_settings(cls, project_settings):
- super(Loader, cls).apply_settings(project_settings)
- cls.load_settings = project_settings['maya']['load']
-
- def get_custom_namespace_and_group(self, context, options, loader_key):
- """Queries Settings to get custom template for namespace and group.
-
- Group template might be empty; this forces imported items to not be
- wrapped in a separate group.
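For example, with hypothetical Settings values the templates resolve through plain `str.format` against the data assembled below:

    # Hypothetical Settings:
    #   namespace:  "{folder[name]}_{product[name]}"
    #   group_name: "_GRP"
    #
    # With folder "hero" and product "modelMain" this resolves to
    # namespace "hero_modelMain" and group name "_GRP"; an empty
    # group_name template would instead disable group creation.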
- - Args: - context (dict) - options (dict): artist modifiable options from dialog - loader_key (str): key to get separate configuration from Settings - ('reference_loader'|'import_loader') - """ - - options["attach_to_root"] = True - custom_naming = self.load_settings[loader_key] - - if not custom_naming["namespace"]: - raise LoadError("No namespace specified in " - "Maya ReferenceLoader settings") - elif not custom_naming["group_name"]: - self.log.debug("No custom group_name, no group will be created.") - options["attach_to_root"] = False - - folder_entity = context["folder"] - product_entity = context["product"] - product_name = product_entity["name"] - product_type = product_entity["productType"] - formatting_data = { - "asset_name": folder_entity["name"], - "asset_type": "asset", - "folder": { - "name": folder_entity["name"], - }, - "subset": product_name, - "product": { - "name": product_name, - "type": product_type, - }, - "family": product_type - } - - custom_namespace = custom_naming["namespace"].format( - **formatting_data - ) - - custom_group_name = custom_naming["group_name"].format( - **formatting_data - ) - - return custom_group_name, custom_namespace, options - - -class ReferenceLoader(Loader): - """A basic ReferenceLoader for Maya - - This will implement the basic behavior for a loader to inherit from that - will containerize the reference and will implement the `remove` and - `update` logic. - - """ - - options = [ - qargparse.Integer( - "count", - label="Count", - default=1, - min=1, - help="How many times to load?" - ), - qargparse.Double3( - "offset", - label="Position Offset", - help="Offset loaded models for easier selection." - ), - qargparse.Boolean( - "attach_to_root", - label="Group imported asset", - default=True, - help="Should a group be created to encapsulate" - " imported representation ?" - ) - ] - - def load( - self, - context, - name=None, - namespace=None, - options=None - ): - path = self.filepath_from_context(context) - assert os.path.exists(path), "%s does not exist." 
% path - - custom_group_name, custom_namespace, options = \ - self.get_custom_namespace_and_group(context, options, - "reference_loader") - - count = options.get("count") or 1 - - loaded_containers = [] - for c in range(0, count): - namespace = lib.get_custom_namespace(custom_namespace) - group_name = "{}:{}".format( - namespace, - custom_group_name - ) - - options['group_name'] = group_name - - # Offset loaded product - if "offset" in options: - offset = [i * c for i in options["offset"]] - options["translate"] = offset - - self.log.info(options) - - self.process_reference( - context=context, - name=name, - namespace=namespace, - options=options - ) - - # Only containerize if any nodes were loaded by the Loader - nodes = self[:] - if not nodes: - return - - ref_node = lib.get_reference_node(nodes, self.log) - container = containerise( - name=name, - namespace=namespace, - nodes=[ref_node], - context=context, - loader=self.__class__.__name__ - ) - loaded_containers.append(container) - self._organize_containers(nodes, container) - c += 1 - - return loaded_containers - - def process_reference(self, context, name, namespace, options): - """To be implemented by subclass""" - raise NotImplementedError("Must be implemented by subclass") - - def update(self, container, context): - from ayon_maya.api.lib import get_container_members - from maya import cmds - - node = container["objectName"] - - project_name = context["project"]["name"] - repre_entity = context["representation"] - - path = get_representation_path(repre_entity) - - # Get reference node from container members - members = get_container_members(node) - reference_node = lib.get_reference_node(members, self.log) - namespace = cmds.referenceQuery(reference_node, namespace=True) - - file_type = { - "ma": "mayaAscii", - "mb": "mayaBinary", - "abc": "Alembic", - "fbx": "FBX", - "usd": "USD Import" - }.get(repre_entity["name"]) - - assert file_type, "Unsupported representation: %s" % repre_entity - - assert os.path.exists(path), "%s does not exist." % path - - # Need to save alembic settings and reapply, cause referencing resets - # them to incoming data. - alembic_attrs = ["speed", "offset", "cycleType", "time"] - alembic_data = {} - if repre_entity["name"] == "abc": - alembic_nodes = cmds.ls( - "{}:*".format(namespace), type="AlembicNode" - ) - if alembic_nodes: - for attr in alembic_attrs: - node_attr = "{}.{}".format(alembic_nodes[0], attr) - data = { - "input": lib.get_attribute_input(node_attr), - "value": cmds.getAttr(node_attr) - } - - alembic_data[attr] = data - else: - self.log.debug("No alembic nodes found in {}".format(members)) - - try: - path = self.prepare_root_value(path, project_name) - content = cmds.file(path, - loadReference=reference_node, - type=file_type, - returnNewNodes=True) - except RuntimeError as exc: - # When changing a reference to a file that has load errors the - # command will raise an error even if the file is still loaded - # correctly (e.g. when raising errors on Arnold attributes) - # When the file is loaded and has content, we consider it's fine. - if not cmds.referenceQuery(reference_node, isLoaded=True): - raise - - content = cmds.referenceQuery(reference_node, - nodes=True, - dagPath=True) - if not content: - raise - - self.log.warning("Ignoring file read error:\n%s", exc) - - self._organize_containers(content, container["objectName"]) - - # Reapply alembic settings. 
- if repre_entity["name"] == "abc" and alembic_data:
- alembic_nodes = cmds.ls(
- "{}:*".format(namespace), type="AlembicNode"
- )
- if alembic_nodes:
- alembic_node = alembic_nodes[0] # assume single AlembicNode
- for attr, data in alembic_data.items():
- node_attr = "{}.{}".format(alembic_node, attr)
- input = lib.get_attribute_input(node_attr)
- if data["input"]:
- if data["input"] != input:
- cmds.connectAttr(
- data["input"], node_attr, force=True
- )
- else:
- if input:
- cmds.disconnectAttr(input, node_attr)
- cmds.setAttr(node_attr, data["value"])
-
- # Fix PLN-40 for older containers created with AYON that had the
- # `.verticesOnlySet` set to True.
- if cmds.getAttr("{}.verticesOnlySet".format(node)):
- self.log.info("Setting %s.verticesOnlySet to False", node)
- cmds.setAttr("{}.verticesOnlySet".format(node), False)
-
- # Remove any placeHolderList attribute entries from the set that
- # are remaining from nodes being removed from the referenced file.
- members = cmds.sets(node, query=True)
- invalid = [x for x in members if ".placeHolderList" in x]
- if invalid:
- cmds.sets(invalid, remove=node)
-
- # Update metadata
- cmds.setAttr("{}.representation".format(node),
- repre_entity["id"],
- type="string")
-
- # When an animation or pointcache gets connected to an Xgen container,
- # the compound attribute "xgenContainers" gets created. When animation
- # containers get updated we also need to update the cacheFileName on
- # the Xgen collection.
- compound_name = "xgenContainers"
- if cmds.objExists("{}.{}".format(node, compound_name)):
- import xgenm
- container_amount = cmds.getAttr(
- "{}.{}".format(node, compound_name), size=True
- )
- # loop through all compound children
- for i in range(container_amount):
- attr = "{}.{}[{}].container".format(node, compound_name, i)
- objectset = cmds.listConnections(attr)[0]
- reference_node = cmds.sets(objectset, query=True)[0]
- palettes = cmds.ls(
- cmds.referenceQuery(reference_node, nodes=True),
- type="xgmPalette"
- )
- for palette in palettes:
- for description in xgenm.descriptions(palette):
- xgenm.setAttr(
- "cacheFileName",
- path.replace("\\", "/"),
- palette,
- description,
- "SplinePrimitive"
- )
-
- # Refresh UI and viewport.
- de = xgenm.xgGlobal.DescriptionEditor
- de.refresh("Full")
-
- def remove(self, container):
- """Remove an existing `container` from Maya scene
-
- Deprecated; this functionality is replaced by `api.remove()`
-
- Arguments:
- container (openpype:container-1.0): Which container
- to remove from scene.
-
- """
- from maya import cmds
-
- node = container["objectName"]
-
- # Assume asset has been referenced
- members = cmds.sets(node, query=True)
- reference_node = lib.get_reference_node(members, self.log)
-
- assert reference_node, ("Imported container not supported; "
- "container must be referenced.")
-
- self.log.info("Removing '%s' from Maya.." % container["name"])
-
- namespace = cmds.referenceQuery(reference_node, namespace=True)
- fname = cmds.referenceQuery(reference_node, filename=True)
- cmds.file(fname, removeReference=True)
-
- try:
- cmds.delete(node)
- except ValueError:
- # Already implicitly deleted by Maya upon removing reference
- pass
-
- try:
- # If container is not automatically cleaned up by Maya (issue #118)
- cmds.namespace(removeNamespace=namespace,
- deleteNamespaceContent=True)
- except RuntimeError:
- pass
-
- def prepare_root_value(self, file_url, project_name):
- """Replace root value with env var placeholder.
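Concretely, a sketch with hypothetical paths:

    # "P:/projects/ep101/work/hero_rig.ma" is stored in the workfile as
    # "${AYON_PROJECT_ROOT_WORK}/ep101/work/hero_rig.ma", so a site with a
    # different root mapping can still resolve the reference on open.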
-
- Use ${AYON_PROJECT_ROOT_WORK} (or any other root) instead of proper
- root value when storing referenced url into a workfile.
- Useful for remote workflows with SiteSync.
-
- Args:
- file_url (str)
- project_name (str)
- Returns:
- (str)
- """
- settings = get_project_settings(project_name)
- use_env_var_as_root = (settings["maya"]
- ["maya_dirmap"]
- ["use_env_var_as_root"])
- if use_env_var_as_root:
- anatomy = Anatomy(project_name)
- file_url = anatomy.replace_root_with_env_key(file_url, '${{{}}}')
-
- return file_url
-
- @staticmethod
- def _organize_containers(nodes, container):
- # type: (list, str) -> None
- """Put containers in loaded data to correct hierarchy."""
- for node in nodes:
- id_attr = "{}.id".format(node)
- if not cmds.attributeQuery("id", node=node, exists=True):
- continue
- if cmds.getAttr(id_attr) not in {
- AYON_CONTAINER_ID, AVALON_CONTAINER_ID
- }:
- cmds.sets(node, forceElement=container)
-
-
-class MayaLoader(LoaderPlugin):
- """Base class for loader plugins."""
-
- settings_category = SETTINGS_CATEGORY
-
-
-class MayaInstancePlugin(InstancePlugin):
- """Base class for instance publish plugins."""
-
- settings_category = SETTINGS_CATEGORY
- hosts = ["maya"]
-
-
-class MayaContextPlugin(ContextPlugin):
- """Base class for context publish plugins."""
-
- settings_category = SETTINGS_CATEGORY
- hosts = ["maya"]
-
-
-class MayaExtractorPlugin(publish.Extractor):
- """Base class for extract plugins."""
-
- settings_category = SETTINGS_CATEGORY
- hosts = ["maya"]
diff --git a/server_addon/maya/client/ayon_maya/api/render_setup_tools.py b/server_addon/maya/client/ayon_maya/api/render_setup_tools.py
deleted file mode 100644
index 9b00b53eee..0000000000
--- a/server_addon/maya/client/ayon_maya/api/render_setup_tools.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Export stuff in render setup layer context.
-
-Export Maya nodes from Render Setup layer as if flattened in that layer instead
-of exporting the defaultRenderLayer as Maya forces by default
-
-Credits: Roy Nieterau (BigRoy) / Colorbleed
-Modified for use in AYON
-
-"""
-
-import os
-import contextlib
-
-from maya import cmds
-from maya.app.renderSetup.model import renderSetup
-
-from .lib import pairwise
-
-
-@contextlib.contextmanager
-def allow_export_from_render_setup_layer():
- """Context manager to override Maya settings to allow RS layer export"""
- try:
-
- rs = renderSetup.instance()
-
- # Exclude Render Setup nodes from the export
- rs._setAllRSNodesDoNotWrite(True)
-
- # Disable Render Setup forcing the switch to master layer
- os.environ["MAYA_BATCH_RENDER_EXPORT"] = "1"
-
- yield
-
- finally:
- # Reset original state
- rs._setAllRSNodesDoNotWrite(False)
- os.environ.pop("MAYA_BATCH_RENDER_EXPORT", None)
-
-
-def export_in_rs_layer(path, nodes, export=None):
- """Export nodes from Render Setup layer.
-
- When exporting from a Render Setup layer Maya by default
- forces a switch to the defaultRenderLayer, as such making
- it impossible to export the contents of a Render Setup
- layer. Maya presents this warning message:
- # Warning: Exporting Render Setup master layer content #
-
- This function however avoids the renderlayer switch and
- exports from the Render Setup layer as if the edits were
- 'flattened' in the master layer.
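A usage sketch (paths and node names hypothetical), including the optional `export` callback for non-Maya-ASCII output:

    nodes = cmds.ls("char_GRP", long=True)
    export_in_rs_layer("/tmp/char_layer.ma", nodes)

    # Or hand over a custom exporter, e.g. an Alembic job:
    export_in_rs_layer(
        "/tmp/char_layer.abc",
        nodes,
        export=lambda: cmds.AbcExport(
            j="-root |char_GRP -file /tmp/char_layer.abc"
        )
    )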
- - It does so by: - - Allowing export from Render Setup Layer - - Enforce Render Setup nodes to NOT be written on export - - Disconnect connections from any `applyOverride` nodes - to flatten the values (so they are written correctly)* - *Connection overrides like Shader Override and Material - Overrides export correctly out of the box since they don't - create an intermediate connection to an 'applyOverride' node. - However, any scalar override (absolute or relative override) - will get input connections in the layer so we'll break those - to 'store' the values on the attribute itself and write value - out instead. - - Args: - path (str): File path to export to. - nodes (list): Maya nodes to export. - export (callable, optional): Callback to be used for exporting. If - not specified, default export to `.ma` will be called. - - Returns: - None - - Raises: - AssertionError: When not in a Render Setup layer an - AssertionError is raised. This command assumes - you are currently in a Render Setup layer. - - """ - rs = renderSetup.instance() - assert rs.getVisibleRenderLayer().name() != "defaultRenderLayer", \ - ("Export in Render Setup layer is only supported when in " - "Render Setup layer") - - # Break connection to any value overrides - history = cmds.listHistory(nodes) or [] - nodes_all = list( - set(cmds.ls(nodes + history, long=True, objectsOnly=True))) - overrides = cmds.listConnections(nodes_all, - source=True, - destination=False, - type="applyOverride", - plugs=True, - connections=True) or [] - for dest, src in pairwise(overrides): - # Even after disconnecting the values - # should be preserved as they were - # Note: animated overrides would be lost for export - cmds.disconnectAttr(src, dest) - - # Export Selected - with allow_export_from_render_setup_layer(): - cmds.select(nodes, noExpand=True) - if export: - export() - else: - cmds.file(path, - force=True, - typ="mayaAscii", - exportSelected=True, - preserveReferences=False, - channels=True, - constraints=True, - expressions=True, - constructionHistory=True) - - if overrides: - # If we have broken override connections then Maya - # is unaware that the Render Setup layer is in an - # invalid state. So let's 'hard reset' the state - # by going to default render layer and switching back - layer = rs.getVisibleRenderLayer() - rs.switchToLayer(None) - rs.switchToLayer(layer) diff --git a/server_addon/maya/client/ayon_maya/api/setdress.py b/server_addon/maya/client/ayon_maya/api/setdress.py deleted file mode 100644 index a130b93f4f..0000000000 --- a/server_addon/maya/client/ayon_maya/api/setdress.py +++ /dev/null @@ -1,606 +0,0 @@ -import logging -import json -import os - -import contextlib -import copy - -import six -import ayon_api - -from maya import cmds - -from ayon_core.pipeline import ( - schema, - discover_loader_plugins, - loaders_from_representation, - load_container, - update_container, - remove_container, - get_representation_path, - get_current_project_name, -) -from ayon_maya.api.lib import ( - matrix_equals, - unique_namespace, - get_container_transforms, - DEFAULT_MATRIX -) - -log = logging.getLogger("PackageLoader") - - -def to_namespace(node, namespace): - """Return node name as if it's inside the namespace. - - Args: - node (str): Node name - namespace (str): Namespace - - Returns: - str: The node in the namespace. 
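-
-    Example (illustrative; node and namespace names are arbitrary, the
-    result follows directly from the implementation below):
-        >>> to_namespace("|grp|node", "foo")
-        '|foo:grp|foo:node'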
-
-    """
-    namespace_prefix = "|{}:".format(namespace)
-    node = namespace_prefix.join(node.split("|"))
-    return node
-
-
-@contextlib.contextmanager
-def namespaced(namespace, new=True):
-    """Work inside namespace during context
-
-    Args:
-        namespace (str): Namespace to use during the context.
-        new (bool): When enabled this will rename the namespace to a unique
-            namespace if the input namespace already exists.
-
-    Yields:
-        str: The namespace that is used during the context
-
-    """
-    original = cmds.namespaceInfo(cur=True)
-    if new:
-        namespace = unique_namespace(namespace)
-        cmds.namespace(add=namespace)
-
-    try:
-        cmds.namespace(set=namespace)
-        yield namespace
-    finally:
-        cmds.namespace(set=original)
-
-
-@contextlib.contextmanager
-def unlocked(nodes):
-
-    # Get node state by Maya's uuid
-    nodes = cmds.ls(nodes, long=True)
-    uuids = cmds.ls(nodes, uuid=True)
-    states = cmds.lockNode(nodes, query=True, lock=True)
-    states = {uuid: state for uuid, state in zip(uuids, states)}
-    originals = {uuid: node for uuid, node in zip(uuids, nodes)}
-
-    try:
-        cmds.lockNode(nodes, lock=False)
-        yield
-    finally:
-        # Reapply original states
-        _iteritems = getattr(states, "iteritems", states.items)
-        for uuid, state in _iteritems():
-            nodes_from_id = cmds.ls(uuid, long=True)
-            if nodes_from_id:
-                node = nodes_from_id[0]
-            else:
-                node = originals[uuid]
-                log.debug("Falling back to node name: %s", node)
-            if not cmds.objExists(node):
-                log.warning("Unable to find: %s", node)
-                continue
-            cmds.lockNode(node, lock=state)
-
-
-def load_package(filepath, name, namespace=None):
-    """Load a package that was gathered elsewhere.
-
-    A package is a group of published instances, possibly with additional data
-    in a hierarchy.
-
-    """
-
-    if namespace is None:
-        # Define a unique namespace for the package
-        namespace = os.path.basename(filepath).split(".")[0]
-        namespace = unique_namespace(namespace)
-    assert isinstance(namespace, six.string_types)
-
-    # Load the setdress package data
-    with open(filepath, "r") as fp:
-        data = json.load(fp)
-
-    # Load the setdress alembic hierarchy
-    # We import this into the namespace in which we'll load the package's
-    # instances afterwards.
-    alembic = filepath.replace(".json", ".abc")
-    hierarchy = cmds.file(alembic,
-                          reference=True,
-                          namespace=namespace,
-                          returnNewNodes=True,
-                          groupReference=True,
-                          groupName="{}:{}".format(namespace, name),
-                          typ="Alembic")
-
-    # Get the top root node (the reference group)
-    root = "{}:{}".format(namespace, name)
-
-    containers = []
-    all_loaders = discover_loader_plugins()
-    for representation_id, instances in data.items():
-
-        # Find the compatible loaders
-        loaders = loaders_from_representation(
-            all_loaders, representation_id
-        )
-
-        for instance in instances:
-            container = _add(instance=instance,
-                             representation_id=representation_id,
-                             loaders=loaders,
-                             namespace=namespace,
-                             root=root)
-            containers.append(container)
-
-    # TODO: Do we want to cripple? Or do we want to add a 'parent' parameter?
- # Cripple the original AYON containers so they don't show up in the - # manager - # for container in containers: - # cmds.setAttr("%s.id" % container, - # "setdress.container", - # type="string") - - # TODO: Lock all loaded nodes - # This is to ensure the hierarchy remains unaltered by the artists - # for node in nodes: - # cmds.lockNode(node, lock=True) - - return containers + hierarchy - - -def _add(instance, representation_id, loaders, namespace, root="|"): - """Add an item from the package - - Args: - instance (dict): - representation_id (str): - loaders (list): - namespace (str): - - Returns: - str: The created AYON container. - - """ - - # Process within the namespace - with namespaced(namespace, new=False) as namespace: - - # Get the used loader - Loader = next((x for x in loaders if - x.__name__ == instance['loader']), - None) - - if Loader is None: - log.warning("Loader is missing: %s. Skipping %s", - instance['loader'], instance) - raise RuntimeError("Loader is missing.") - - container = load_container( - Loader, - representation_id, - namespace=instance['namespace'] - ) - - # Get the root from the loaded container - loaded_root = get_container_transforms({"objectName": container}, - root=True) - - # Apply matrix to root node (if any matrix edits) - matrix = instance.get("matrix", None) - if matrix: - cmds.xform(loaded_root, objectSpace=True, matrix=matrix) - - # Parent into the setdress hierarchy - # Namespace is missing from parent node(s), add namespace - # manually - parent = root + to_namespace(instance["parent"], namespace) - cmds.parent(loaded_root, parent, relative=True) - - return container - - -# Store root nodes based on representation and namespace -def _instances_by_namespace(data): - """Rebuild instance data so we can look it up by namespace. - - Note that the `representation` is added into the instance's - data with a `representation` key. - - Args: - data (dict): scene build data - - Returns: - dict - - """ - result = {} - # Add new assets - for representation_id, instances in data.items(): - - # Ensure we leave the source data unaltered - instances = copy.deepcopy(instances) - for instance in instances: - instance['representation'] = representation_id - result[instance['namespace']] = instance - - return result - - -def get_contained_containers(container): - """Get the AYON containers in this container - - Args: - container (dict): The container dict. - - Returns: - list: A list of member container dictionaries. 
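-
-    Example (illustrative; the set name and member names are hypothetical):
-        >>> package = {"objectName": "setdress_CON"}
-        >>> for member in get_contained_containers(package):
-        ...     print(member["objectName"])
-        model_CON
-        look_CON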
- - """ - - from .pipeline import parse_container - - # Get AYON containers in this package setdress container - containers = [] - members = cmds.sets(container['objectName'], query=True) - for node in cmds.ls(members, type="objectSet"): - try: - member_container = parse_container(node) - containers.append(member_container) - except schema.ValidationError: - pass - - return containers - - -def update_package_version(container, version): - """ - Update package by version number - - Args: - container (dict): container data of the container node - version (int): the new version number of the package - - Returns: - None - - """ - - # Versioning (from `core.maya.pipeline`) - project_name = get_current_project_name() - repre_id = container["representation"] - current_representation = ayon_api.get_representation_by_id( - project_name, repre_id - ) - - assert current_representation is not None, "This is a bug" - - ( - version_entity, - product_entity, - folder_entity, - project_entity - ) = ayon_api.get_representation_parents(project_name, repre_id) - - if version == -1: - new_version = ayon_api.get_last_version_by_product_id( - project_name, product_entity["id"] - ) - else: - new_version = ayon_api.get_version_by_name( - project_name, version, product_entity["id"] - ) - - if new_version is None: - raise ValueError("Version not found: {}".format(version)) - - # Get the new representation (new file) - new_representation = ayon_api.get_representation_by_name( - project_name, current_representation["name"], new_version["id"] - ) - # TODO there is 'get_representation_context' to get the context which - # could be possible to use here - new_context = { - "project": project_entity, - "folder": folder_entity, - "product": product_entity, - "version": version_entity, - "representation": new_representation, - } - update_package(container, new_context) - - -def update_package(set_container, context): - """Update any matrix changes in the scene based on the new data - - Args: - set_container (dict): container data from `ls()` - context (dict): the representation document from the database - - Returns: - None - - """ - - # Load the original package data - project_name = context["project"]["name"] - repre_entity = context["representation"] - current_representation = ayon_api.get_representation_by_id( - project_name, set_container["representation"] - ) - - current_file = get_representation_path(current_representation) - assert current_file.endswith(".json") - with open(current_file, "r") as fp: - current_data = json.load(fp) - - # Load the new package data - new_file = get_representation_path(repre_entity) - assert new_file.endswith(".json") - with open(new_file, "r") as fp: - new_data = json.load(fp) - - # Update scene content - containers = get_contained_containers(set_container) - update_scene(set_container, containers, current_data, new_data, new_file) - - # TODO: This should be handled by the pipeline itself - cmds.setAttr(set_container['objectName'] + ".representation", - context["representation"]["id"], type="string") - - -def update_scene(set_container, containers, current_data, new_data, new_file): - """Updates the hierarchy, assets and their matrix - - Updates the following within the scene: - * Setdress hierarchy alembic - * Matrix - * Parenting - * Representations - - It removes any assets which are not present in the new build data - - Args: - set_container (dict): the setdress container of the scene - containers (list): the list of containers under the setdress container - current_data (dict): the 
current build data of the setdress
-        new_data (dict): the new build data of the setdress
-        new_file (str): path to the new build data file
-
-    Returns:
-        processed_containers (list): all new and updated containers
-
-    """
-
-    set_namespace = set_container['namespace']
-    project_name = get_current_project_name()
-
-    # Update the setdress hierarchy alembic
-    set_root = get_container_transforms(set_container, root=True)
-    set_hierarchy_root = cmds.listRelatives(set_root, fullPath=True)[0]
-    set_hierarchy_reference = cmds.referenceQuery(set_hierarchy_root,
-                                                  referenceNode=True)
-    new_alembic = new_file.replace(".json", ".abc")
-    assert os.path.exists(new_alembic), "%s does not exist." % new_alembic
-    with unlocked(cmds.listRelatives(set_root, ad=True, fullPath=True)):
-        cmds.file(new_alembic,
-                  loadReference=set_hierarchy_reference,
-                  type="Alembic")
-
-    identity = DEFAULT_MATRIX[:]
-
-    processed_namespaces = set()
-    processed_containers = list()
-
-    new_lookup = _instances_by_namespace(new_data)
-    old_lookup = _instances_by_namespace(current_data)
-    repre_ids = set()
-    containers_for_repre_compare = []
-    for container in containers:
-        container_ns = container['namespace']
-
-        # Consider it processed here, even if it fails we want to store that
-        # the namespace was already available.
-        processed_namespaces.add(container_ns)
-        processed_containers.append(container['objectName'])
-
-        if container_ns not in new_lookup:
-            # Remove this container because it's not in the new data
-            log.warning("Removing content: %s", container_ns)
-            remove_container(container)
-            continue
-
-        root = get_container_transforms(container, root=True)
-        if not root:
-            log.error("Can't find root for %s", container['objectName'])
-            continue
-
-        old_instance = old_lookup.get(container_ns, {})
-        new_instance = new_lookup[container_ns]
-
-        # Update the matrix
-        # check matrix against old_data matrix to find local overrides
-        current_matrix = cmds.xform(root,
-                                    query=True,
-                                    matrix=True,
-                                    objectSpace=True)
-
-        original_matrix = old_instance.get("matrix", identity)
-        has_matrix_override = not matrix_equals(current_matrix,
-                                                original_matrix)
-
-        if has_matrix_override:
-            log.warning("Matrix override preserved on %s", container_ns)
-        else:
-            new_matrix = new_instance.get("matrix", identity)
-            cmds.xform(root, matrix=new_matrix, objectSpace=True)
-
-        # Update the parenting
-        if old_instance.get("parent", None) != new_instance["parent"]:
-
-            parent = to_namespace(new_instance['parent'], set_namespace)
-            if not cmds.objExists(parent):
-                log.error("Can't find parent %s", parent)
-                continue
-
-            # Set the new parent
-            cmds.lockNode(root, lock=False)
-            root = cmds.parent(root, parent, relative=True)
-            cmds.lockNode(root, lock=True)
-
-        # Update the representation
-        representation_current = container['representation']
-        representation_old = old_instance['representation']
-        representation_new = new_instance['representation']
-        has_representation_override = (representation_current !=
-                                       representation_old)
-
-        if representation_new == representation_current:
-            continue
-
-        if has_representation_override:
-            log.warning("Your scene had local representation "
-                        "overrides within the set. New "
-                        "representations not loaded for %s.",
-                        container_ns)
-            continue
-
-        # We check it against the current 'loader' in the scene instead
-        # of the original data of the package that was loaded because
-        # an Artist might have made scene local overrides
-        if new_instance['loader'] != container['loader']:
-            log.warning("Loader is switched - local edits will be "
-                        "lost. Removing: %s",
-                        container_ns)
-
-            # Remove this from the "has been processed" list so it's
-            # considered as a new element and added afterwards.
-            processed_containers.pop()
-            processed_namespaces.remove(container_ns)
-            remove_container(container)
-            continue
-
-        # Check whether the conversion can be done by the Loader.
-        # They *must* use the same folder, product and Loader for
-        # `update_container` to make sense.
-        repre_ids.add(representation_current)
-        repre_ids.add(representation_new)
-
-        containers_for_repre_compare.append(
-            (container, representation_current, representation_new)
-        )
-
-    repre_entities_by_id = {
-        repre_entity["id"]: repre_entity
-        for repre_entity in ayon_api.get_representations(
-            project_name, representation_ids=repre_ids
-        )
-    }
-    repre_parents_by_id = ayon_api.get_representations_parents(
-        project_name, repre_ids
-    )
-    for (
-        container,
-        repre_current_id,
-        repre_new_id
-    ) in containers_for_repre_compare:
-        current_repre = repre_entities_by_id[repre_current_id]
-        current_parents = repre_parents_by_id[repre_current_id]
-        new_repre = repre_entities_by_id[repre_new_id]
-        new_parents = repre_parents_by_id[repre_new_id]
-
-        is_valid = compare_representations(
-            current_repre, current_parents, new_repre, new_parents
-        )
-        if not is_valid:
-            log.error("Skipping: %s. See log for details.",
-                      container["namespace"])
-            continue
-
-        new_version = new_parents.version["version"]
-        update_container(container, version=new_version)
-
-    # Add new assets
-    all_loaders = discover_loader_plugins()
-    for representation_id, instances in new_data.items():
-
-        # Find the compatible loaders
-        loaders = loaders_from_representation(
-            all_loaders, representation_id
-        )
-        for instance in instances:
-
-            # Already processed in update functionality
-            if instance['namespace'] in processed_namespaces:
-                continue
-
-            container = _add(instance=instance,
-                             representation_id=representation_id,
-                             loaders=loaders,
-                             namespace=set_container['namespace'],
-                             root=set_root)
-
-            # Add to the setdress container
-            cmds.sets(container,
-                      addElement=set_container['objectName'])
-
-            processed_containers.append(container)
-
-    return processed_containers
-
-
-def compare_representations(
-    current_repre, current_parents, new_repre, new_parents
-):
-    """Check if the given old representation can be updated
-
-    Due to limitations of the `update_container` function we cannot allow
-    differences in the following data:
-
-    * Representation name (extension)
-    * Folder id
-    * Product id
-
-    If any of those data values differs, the function will log an error
-    and return False.
-
-    Args:
-        current_repre (dict[str, Any]): Current representation entity.
-        current_parents (RepresentationParents): Current
-            representation parents.
-        new_repre (dict[str, Any]): New representation entity.
-        new_parents (RepresentationParents): New representation parents.
-
-    Returns:
-        bool: True if the update is valid, False otherwise.
-
-    """
-    if current_repre["name"] != new_repre["name"]:
-        log.error("Cannot switch extensions")
-        return False
-
-    # TODO add better validation e.g. 
based on parent ids - if current_parents.folder["id"] != new_parents.folder["id"]: - log.error("Changing folders between updates is not supported.") - return False - - if current_parents.product["id"] != new_parents.product["id"]: - log.error("Changing products between updates is not supported.") - return False - - return True diff --git a/server_addon/maya/client/ayon_maya/api/workfile_template_builder.py b/server_addon/maya/client/ayon_maya/api/workfile_template_builder.py deleted file mode 100644 index f4f9a34983..0000000000 --- a/server_addon/maya/client/ayon_maya/api/workfile_template_builder.py +++ /dev/null @@ -1,290 +0,0 @@ -import json - -from maya import cmds - -from ayon_core.pipeline import ( - registered_host, - get_current_folder_path, - AYON_INSTANCE_ID, - AVALON_INSTANCE_ID, -) -from ayon_core.pipeline.workfile.workfile_template_builder import ( - TemplateAlreadyImported, - AbstractTemplateBuilder, - PlaceholderPlugin, - PlaceholderItem, -) -from ayon_core.tools.workfile_template_build import ( - WorkfileBuildPlaceholderDialog, -) - -from .lib import read, imprint, get_main_window - -PLACEHOLDER_SET = "PLACEHOLDERS_SET" - - -class MayaTemplateBuilder(AbstractTemplateBuilder): - """Concrete implementation of AbstractTemplateBuilder for maya""" - - use_legacy_creators = True - - def import_template(self, path): - """Import template into current scene. - Block if a template is already loaded. - - Args: - path (str): A path to current template (usually given by - get_template_preset implementation) - - Returns: - bool: Whether the template was successfully imported or not - """ - - if cmds.objExists(PLACEHOLDER_SET): - raise TemplateAlreadyImported(( - "Build template already loaded\n" - "Clean scene if needed (File > New Scene)" - )) - - cmds.sets(name=PLACEHOLDER_SET, empty=True) - new_nodes = cmds.file( - path, - i=True, - returnNewNodes=True, - preserveReferences=True, - loadReferenceDepth="all", - ) - - # make default cameras non-renderable - default_cameras = [cam for cam in cmds.ls(cameras=True) - if cmds.camera(cam, query=True, startupCamera=True)] - for cam in default_cameras: - if not cmds.attributeQuery("renderable", node=cam, exists=True): - self.log.debug( - "Camera {} has no attribute 'renderable'".format(cam) - ) - continue - cmds.setAttr("{}.renderable".format(cam), 0) - - cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True) - - imported_sets = cmds.ls(new_nodes, set=True) - if not imported_sets: - return True - - # update imported sets information - folder_path = get_current_folder_path() - for node in imported_sets: - if not cmds.attributeQuery("id", node=node, exists=True): - continue - if cmds.getAttr("{}.id".format(node)) not in { - AYON_INSTANCE_ID, AVALON_INSTANCE_ID - }: - continue - if not cmds.attributeQuery("folderPath", node=node, exists=True): - continue - - cmds.setAttr( - "{}.folderPath".format(node), folder_path, type="string") - - return True - - -class MayaPlaceholderPlugin(PlaceholderPlugin): - """Base Placeholder Plugin for Maya with one unified cache. - - Creates a locator as placeholder node, which during populate provide - all of its attributes defined on the locator's transform in - `placeholder.data` and where `placeholder.scene_identifier` is the - full path to the node. 
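-
-    For example (illustrative; the node name is hypothetical), a collected
-    placeholder can be inspected roughly like this:
-        placeholder.scene_identifier   # e.g. "|myPlaceholder_locator"
-        placeholder.data["plugin_identifier"]  # this plugin's identifier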
-
-    Inherited classes must still implement `populate_placeholder`
-
-    """
-
-    use_selection_as_parent = True
-    item_class = PlaceholderItem
-
-    def _create_placeholder_name(self, placeholder_data):
-        return self.identifier.replace(".", "_")
-
-    def _collect_scene_placeholders(self):
-        nodes_by_identifier = self.builder.get_shared_populate_data(
-            "placeholder_nodes"
-        )
-        if nodes_by_identifier is None:
-            # Cache placeholder data to shared data
-            nodes = cmds.ls("*.plugin_identifier", long=True, objectsOnly=True)
-
-            nodes_by_identifier = {}
-            for node in nodes:
-                identifier = cmds.getAttr("{}.plugin_identifier".format(node))
-                nodes_by_identifier.setdefault(identifier, []).append(node)
-
-            # Set the cache
-            self.builder.set_shared_populate_data(
-                "placeholder_nodes", nodes_by_identifier
-            )
-
-        return nodes_by_identifier
-
-    def create_placeholder(self, placeholder_data):
-
-        parent = None
-        if self.use_selection_as_parent:
-            selection = cmds.ls(selection=True)
-            if len(selection) > 1:
-                raise ValueError(
-                    "More than one node is selected. "
-                    "Please select only one to define the parent."
-                )
-            parent = selection[0] if selection else None
-
-        placeholder_data["plugin_identifier"] = self.identifier
-        placeholder_name = self._create_placeholder_name(placeholder_data)
-
-        placeholder = cmds.spaceLocator(name=placeholder_name)[0]
-        if parent:
-            placeholder = cmds.parent(placeholder, parent)[0]
-
-        self.imprint(placeholder, placeholder_data)
-
-    def update_placeholder(self, placeholder_item, placeholder_data):
-        node_name = placeholder_item.scene_identifier
-
-        changed_values = {}
-        for key, value in placeholder_data.items():
-            if value != placeholder_item.data.get(key):
-                changed_values[key] = value
-
-        # Delete attributes to ensure we imprint new data with correct type
-        for key, value in changed_values.items():
-            placeholder_item.data[key] = value
-            if cmds.attributeQuery(key, node=node_name, exists=True):
-                attribute = "{}.{}".format(node_name, key)
-                cmds.deleteAttr(attribute)
-
-        self.imprint(node_name, changed_values)
-
-    def collect_placeholders(self):
-        placeholders = []
-        nodes_by_identifier = self._collect_scene_placeholders()
-        for node in nodes_by_identifier.get(self.identifier, []):
-            # TODO do data validations and maybe upgrades if they are invalid
-            placeholder_data = self.read(node)
-            placeholders.append(
-                self.item_class(scene_identifier=node,
-                                data=placeholder_data,
-                                plugin=self)
-            )
-
-        return placeholders
-
-    def post_placeholder_process(self, placeholder, failed):
-        """Cleanup placeholder after load of its corresponding representations.
-
-        Hide placeholder, add them to placeholder set.
-        Used only by PlaceholderCreateMixin and PlaceholderLoadMixin
-
-        Args:
-            placeholder (PlaceholderItem): Item which was just used to load
-                representation.
-            failed (bool): Loading of representation failed.
-        """
-        # Hide placeholder and add them to placeholder set
-        node = placeholder.scene_identifier
-
-        # If we just populate the placeholders from current scene, the
-        # placeholder set will not be created so account for that.
-        if not cmds.objExists(PLACEHOLDER_SET):
-            cmds.sets(name=PLACEHOLDER_SET, empty=True)
-
-        cmds.sets(node, addElement=PLACEHOLDER_SET)
-        cmds.hide(node)
-        cmds.setAttr("{}.hiddenInOutliner".format(node), True)
-
-    def delete_placeholder(self, placeholder):
-        """Remove placeholder if building was successful
-
-        Used only by PlaceholderCreateMixin and PlaceholderLoadMixin.
- """ - node = placeholder.scene_identifier - - # To avoid that deleting a placeholder node will have Maya delete - # any objectSets the node was a member of we will first remove it - # from any sets it was a member of. This way the `PLACEHOLDERS_SET` - # will survive long enough - sets = cmds.listSets(o=node) or [] - for object_set in sets: - cmds.sets(node, remove=object_set) - - cmds.delete(node) - - def imprint(self, node, data): - """Imprint call for placeholder node""" - - # Complicated data that can't be represented as flat maya attributes - # we write to json strings, e.g. multiselection EnumDef - for key, value in data.items(): - if isinstance(value, (list, tuple, dict)): - data[key] = "JSON::{}".format(json.dumps(value)) - - imprint(node, data) - - def read(self, node): - """Read call for placeholder node""" - - data = read(node) - - # Complicated data that can't be represented as flat maya attributes - # we read from json strings, e.g. multiselection EnumDef - for key, value in data.items(): - if isinstance(value, str) and value.startswith("JSON::"): - value = value[len("JSON::"):] # strip of JSON:: prefix - data[key] = json.loads(value) - - return data - - -def build_workfile_template(*args): - builder = MayaTemplateBuilder(registered_host()) - builder.build_template() - - -def update_workfile_template(*args): - builder = MayaTemplateBuilder(registered_host()) - builder.rebuild_template() - - -def create_placeholder(*args): - host = registered_host() - builder = MayaTemplateBuilder(host) - window = WorkfileBuildPlaceholderDialog(host, builder, - parent=get_main_window()) - window.show() - - -def update_placeholder(*args): - host = registered_host() - builder = MayaTemplateBuilder(host) - placeholder_items_by_id = { - placeholder_item.scene_identifier: placeholder_item - for placeholder_item in builder.get_placeholders() - } - placeholder_items = [] - for node_name in cmds.ls(selection=True, long=True): - if node_name in placeholder_items_by_id: - placeholder_items.append(placeholder_items_by_id[node_name]) - - # TODO show UI at least - if len(placeholder_items) == 0: - raise ValueError("No node selected") - - if len(placeholder_items) > 1: - raise ValueError("Too many selected nodes") - - placeholder_item = placeholder_items[0] - window = WorkfileBuildPlaceholderDialog(host, builder, - parent=get_main_window()) - window.set_update_mode(placeholder_item) - window.exec_() diff --git a/server_addon/maya/client/ayon_maya/api/workio.py b/server_addon/maya/client/ayon_maya/api/workio.py deleted file mode 100644 index ff6c11eb4f..0000000000 --- a/server_addon/maya/client/ayon_maya/api/workio.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Host API required Work Files tool""" -import os -from maya import cmds - - -def file_extensions(): - return [".ma", ".mb"] - - -def has_unsaved_changes(): - return cmds.file(query=True, modified=True) - - -def save_file(filepath): - cmds.file(rename=filepath) - ext = os.path.splitext(filepath)[1] - if ext == ".mb": - file_type = "mayaBinary" - else: - file_type = "mayaAscii" - cmds.file(save=True, type=file_type) - - -def open_file(filepath): - return cmds.file(filepath, open=True, force=True) - - -def current_file(): - - current_filepath = cmds.file(query=True, sceneName=True) - if not current_filepath: - return None - - return current_filepath - - -def work_root(session): - work_dir = session["AYON_WORKDIR"] - scene_dir = None - - # Query scene file rule from workspace.mel if it exists in WORKDIR - # We are parsing the workspace.mel manually as opposed to 
temporarily
-    # setting the Workspace in Maya in a context manager since Maya had a
-    # tendency to crash on frequently changing the workspace when this
-    # function was called many times as one scrolled through Work Files assets.
-    workspace_mel = os.path.join(work_dir, "workspace.mel")
-    if os.path.exists(workspace_mel):
-        scene_rule = 'workspace -fr "scene" '
-        # We need to use builtins as `open` is overridden by the workio API
-        open_file = __builtins__["open"]
-        with open_file(workspace_mel, "r") as f:
-            for line in f:
-                if line.strip().startswith(scene_rule):
-                    # remainder == "rule";
-                    remainder = line[len(scene_rule):]
-                    # scene_dir == rule
-                    scene_dir = remainder.split('"')[1]
-    else:
-        # We can't query a workspace that does not exist
-        # so we return similar to what we do in other hosts.
-        scene_dir = session.get("AVALON_SCENEDIR")
-
-    if scene_dir:
-        return os.path.join(work_dir, scene_dir)
-    else:
-        return work_dir
diff --git a/server_addon/maya/client/ayon_maya/api/yeti.py b/server_addon/maya/client/ayon_maya/api/yeti.py
deleted file mode 100644
index 1526c3a2f3..0000000000
--- a/server_addon/maya/client/ayon_maya/api/yeti.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from typing import List
-
-from maya import cmds
-
-
-def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
-    """Get user defined yeti user variables for a `pgYetiMaya` shape node.
-
-    Arguments:
-        yeti_shape_node (str): The `pgYetiMaya` shape node.
-
-    Returns:
-        list: Attribute names (for a vector attribute it only lists the top
-            parent attribute, not the attribute per axis)
-    """
-
-    attrs = cmds.listAttr(yeti_shape_node,
-                          userDefined=True,
-                          string=("yetiVariableV_*",
-                                  "yetiVariableF_*")) or []
-    valid_attrs = []
-    for attr in attrs:
-        attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
-                                        attributeType=True)
-        if attr.startswith("yetiVariableV_") and attr_type == "double3":
-            # vector
-            valid_attrs.append(attr)
-        elif attr.startswith("yetiVariableF_") and attr_type == "double":
-            valid_attrs.append(attr)
-
-    return valid_attrs
-
-
-def create_yeti_variable(yeti_shape_node: str,
-                         attr_name: str,
-                         value=None,
-                         force_value: bool = False) -> bool:
-    """Create a user defined yeti variable on a `pgYetiMaya` shape node.
-
-    Arguments:
-        yeti_shape_node (str): The `pgYetiMaya` shape node.
-        attr_name (str): The fully qualified yeti variable name, e.g.
-            "yetiVariableF_myfloat" or "yetiVariableV_myvector"
-        value (object): The value to set (must match the type of the attribute)
-            When value is None it will be ignored and not set.
-        force_value (bool): Whether to set the value if the attribute already
-            exists or not.
-
-    Returns:
-        bool: Whether the attribute value was set or not.
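-
-    Example (illustrative; the shape node name is hypothetical):
-        >>> create_yeti_variable("pgYetiMayaShape1",
-        ...                      "yetiVariableF_density",
-        ...                      value=2.5)
-        True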
- - """ - exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True) - if not exists: - if attr_name.startswith("yetiVariableV_"): - _create_vector_yeti_user_variable(yeti_shape_node, attr_name) - if attr_name.startswith("yetiVariableF_"): - _create_float_yeti_user_variable(yeti_shape_node, attr_name) - - if value is not None and (not exists or force_value): - plug = "{}.{}".format(yeti_shape_node, attr_name) - if ( - isinstance(value, (list, tuple)) - and attr_name.startswith("yetiVariableV_") - ): - cmds.setAttr(plug, *value, type="double3") - else: - cmds.setAttr(plug, value) - - return True - return False - - -def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str): - if not attr_name.startswith("yetiVariableV_"): - raise ValueError("Must start with yetiVariableV_") - cmds.addAttr(yeti_shape_node, - longName=attr_name, - attributeType="double3", - cachedInternally=True, - keyable=True) - for axis in "XYZ": - cmds.addAttr(yeti_shape_node, - longName="{}{}".format(attr_name, axis), - attributeType="double", - parent=attr_name, - cachedInternally=True, - keyable=True) - - -def _create_float_yeti_user_variable(yeti_node: str, attr_name: str): - if not attr_name.startswith("yetiVariableF_"): - raise ValueError("Must start with yetiVariableF_") - - cmds.addAttr(yeti_node, - longName=attr_name, - attributeType="double", - cachedInternally=True, - softMinValue=0, - softMaxValue=100, - keyable=True) diff --git a/server_addon/maya/client/ayon_maya/hooks/pre_auto_load_plugins.py b/server_addon/maya/client/ayon_maya/hooks/pre_auto_load_plugins.py deleted file mode 100644 index 45785ac354..0000000000 --- a/server_addon/maya/client/ayon_maya/hooks/pre_auto_load_plugins.py +++ /dev/null @@ -1,30 +0,0 @@ -from ayon_applications import PreLaunchHook, LaunchTypes - - -class MayaPreAutoLoadPlugins(PreLaunchHook): - """Define -noAutoloadPlugins command flag.""" - - # Before AddLastWorkfileToLaunchArgs - order = 9 - app_groups = {"maya"} - launch_types = {LaunchTypes.local} - - def execute(self): - - # Ignore if there's no last workfile to start. - if not self.data.get("start_last_workfile"): - return - - maya_settings = self.data["project_settings"]["maya"] - enabled = maya_settings["explicit_plugins_loading"]["enabled"] - if enabled: - # Force disable the `AddLastWorkfileToLaunchArgs`. - self.data.pop("start_last_workfile") - - # Force post initialization so our dedicated plug-in load can run - # prior to Maya opening a scene file. - key = "AYON_OPEN_WORKFILE_POST_INITIALIZATION" - self.launch_context.env[key] = "1" - - self.log.debug("Explicit plugins loading.") - self.launch_context.launch_args.append("-noAutoloadPlugins") diff --git a/server_addon/maya/client/ayon_maya/hooks/pre_copy_mel.py b/server_addon/maya/client/ayon_maya/hooks/pre_copy_mel.py deleted file mode 100644 index c3268b09ee..0000000000 --- a/server_addon/maya/client/ayon_maya/hooks/pre_copy_mel.py +++ /dev/null @@ -1,23 +0,0 @@ -from ayon_applications import PreLaunchHook, LaunchTypes -from ayon_maya.lib import create_workspace_mel - - -class PreCopyMel(PreLaunchHook): - """Copy workspace.mel to workdir. - - Hook `GlobalHostDataHook` must be executed before this hook. 
- """ - app_groups = {"maya", "mayapy"} - launch_types = {LaunchTypes.local} - - def execute(self): - project_entity = self.data["project_entity"] - workdir = self.launch_context.env.get("AYON_WORKDIR") - if not workdir: - self.log.warning("BUG: Workdir is not filled.") - return - - project_settings = self.data["project_settings"] - create_workspace_mel( - workdir, project_entity["name"], project_settings - ) diff --git a/server_addon/maya/client/ayon_maya/hooks/pre_open_workfile_post_initialization.py b/server_addon/maya/client/ayon_maya/hooks/pre_open_workfile_post_initialization.py deleted file mode 100644 index a54f17c6c6..0000000000 --- a/server_addon/maya/client/ayon_maya/hooks/pre_open_workfile_post_initialization.py +++ /dev/null @@ -1,26 +0,0 @@ -from ayon_applications import PreLaunchHook, LaunchTypes - - -class MayaPreOpenWorkfilePostInitialization(PreLaunchHook): - """Define whether open last workfile should run post initialize.""" - - # Before AddLastWorkfileToLaunchArgs. - order = 9 - app_groups = {"maya"} - launch_types = {LaunchTypes.local} - - def execute(self): - - # Ignore if there's no last workfile to start. - if not self.data.get("start_last_workfile"): - return - - maya_settings = self.data["project_settings"]["maya"] - enabled = maya_settings["open_workfile_post_initialization"] - if enabled: - # Force disable the `AddLastWorkfileToLaunchArgs`. - self.data.pop("start_last_workfile") - - self.log.debug("Opening workfile post initialization.") - key = "AYON_OPEN_WORKFILE_POST_INITIALIZATION" - self.launch_context.env[key] = "1" diff --git a/server_addon/maya/client/ayon_maya/lib.py b/server_addon/maya/client/ayon_maya/lib.py deleted file mode 100644 index 6fa8dfdce9..0000000000 --- a/server_addon/maya/client/ayon_maya/lib.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from ayon_core.settings import get_project_settings -from ayon_core.lib import Logger - - -def create_workspace_mel(workdir, project_name, project_settings=None): - dst_filepath = os.path.join(workdir, "workspace.mel") - if os.path.exists(dst_filepath): - return - - if not os.path.exists(workdir): - os.makedirs(workdir) - - if not project_settings: - project_settings = get_project_settings(project_name) - mel_script = project_settings["maya"].get("mel_workspace") - - # Skip if mel script in settings is empty - if not mel_script: - log = Logger.get_logger("create_workspace_mel") - log.debug("File 'workspace.mel' not created. Settings value is empty.") - return - - with open(dst_filepath, "w") as mel_file: - mel_file.write(mel_script) diff --git a/server_addon/maya/client/ayon_maya/plugins/__init__.py b/server_addon/maya/client/ayon_maya/plugins/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/maya/client/ayon_maya/plugins/create/convert_legacy.py b/server_addon/maya/client/ayon_maya/plugins/create/convert_legacy.py deleted file mode 100644 index 8616413bdd..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/convert_legacy.py +++ /dev/null @@ -1,190 +0,0 @@ -import ayon_api - -from ayon_core.pipeline.create.creator_plugins import ProductConvertorPlugin -from ayon_maya.api import plugin -from ayon_maya.api.lib import read - -from maya import cmds -from maya.app.renderSetup.model import renderSetup - - -class MayaLegacyConvertor(ProductConvertorPlugin, - plugin.MayaCreatorBase): - """Find and convert any legacy products in the scene. - - This Converter will find all legacy products in the scene and will - transform them to the current system. 
Since the old products don't
-    retain any information about their original creators, the only mapping
-    we can do is based on their families.
-
-    Its limitation is that you can have multiple creators creating products
-    of the same type and there is no way to handle it. This code should
-    nevertheless cover all creators that came with AYON.
-
-    """
-    identifier = "io.openpype.creators.maya.legacy"
-
-    # Cases where the identifier or new product type doesn't correspond to the
-    # original family on the legacy instances
-    product_type_mapping = {
-        "rendering": "io.openpype.creators.maya.renderlayer",
-    }
-
-    def find_instances(self):
-
-        self.cache_instance_data(self.collection_shared_data)
-        legacy = self.collection_shared_data.get(
-            "maya_cached_legacy_instances"
-        )
-        if not legacy:
-            return
-
-        self.add_convertor_item("Convert legacy instances")
-
-    def convert(self):
-        self.remove_convertor_item()
-
-        # We can't use the collected shared data cache here
-        # we re-query it here directly to convert all found.
-        cache = {}
-        self.cache_instance_data(cache)
-        legacy = cache.get("maya_cached_legacy_instances")
-        if not legacy:
-            return
-
-        # From all current new style manual creators find the mapping
-        # from product type to identifier
-        product_type_to_id = {}
-        for identifier, creator in self.create_context.creators.items():
-            product_type = getattr(creator, "product_type", None)
-            if not product_type:
-                continue
-
-            if product_type in product_type_to_id:
-                # We have a clash of product type -> identifier. Multiple
-                # new style creators use the same product type
-                self.log.warning(
-                    "Clash on product type->identifier: {}".format(identifier)
-                )
-            product_type_to_id[product_type] = identifier
-
-        product_type_to_id.update(self.product_type_mapping)
-
-        # We also embed the current 'task' into the instance since legacy
-        # instances didn't store that data on the instances. The old style
-        # logic was thus to be live to the current task to begin with.
-        data = dict()
-        data["task"] = self.create_context.get_current_task_name()
-        for product_type, instance_nodes in legacy.items():
-            if product_type not in product_type_to_id:
-                self.log.warning((
-                    "Unable to convert legacy instance with family '{}'"
-                    " because there is no matching new creator"
-                ).format(product_type))
-                continue
-
-            creator_id = product_type_to_id[product_type]
-            creator = self.create_context.creators[creator_id]
-            data["creator_identifier"] = creator_id
-
-            if isinstance(creator, plugin.RenderlayerCreator):
-                self._convert_per_renderlayer(instance_nodes, data, creator)
-            else:
-                self._convert_regular(instance_nodes, data)
-
-    def _convert_regular(self, instance_nodes, data):
-        # We only imprint the creator identifier for it to identify
-        # as the new style creator
-        for instance_node in instance_nodes:
-            self.imprint_instance_node(instance_node,
-                                       data=data.copy())
-
-    def _convert_per_renderlayer(self, instance_nodes, data, creator):
-        # Split the instance into an instance per layer
-        rs = renderSetup.instance()
-        layers = rs.getRenderLayers()
-        if not layers:
-            self.log.error(
-                "Can't convert legacy renderlayer instance because no existing"
-                " renderSetup layers exist in the scene."
- ) - return - - creator_attribute_names = { - attr_def.key for attr_def in creator.get_instance_attr_defs() - } - - for instance_node in instance_nodes: - - # Ensure we have the new style singleton node generated - # TODO: Make function public - singleton_node = creator._get_singleton_node() - if singleton_node: - self.log.error( - "Can't convert legacy renderlayer instance '{}' because" - " new style instance '{}' already exists".format( - instance_node, - singleton_node - ) - ) - continue - - creator.create_singleton_node() - - # We are creating new nodes to replace the original instance - # Copy the attributes of the original instance to the new node - original_data = read(instance_node) - - # The product type gets converted to the new product type (this - # is due to "rendering" being converted to "renderlayer") - original_data["productType"] = creator.product_type - - # recreate product name as without it would be - # `renderingMain` vs correct `renderMain` - project_name = self.create_context.get_current_project_name() - folder_entities = list(ayon_api.get_folders( - project_name, folder_names=[original_data["asset"]] - )) - if not folder_entities: - cmds.delete(instance_node) - continue - folder_entity = folder_entities[0] - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], data["task"] - ) - - product_name = creator.get_product_name( - project_name, - folder_entity, - task_entity, - original_data["variant"], - ) - original_data["productName"] = product_name - - # Convert to creator attributes when relevant - creator_attributes = {} - for key in list(original_data.keys()): - # Iterate in order of the original attributes to preserve order - # in the output creator attributes - if key in creator_attribute_names: - creator_attributes[key] = original_data.pop(key) - original_data["creator_attributes"] = creator_attributes - - # For layer in maya layers - for layer in layers: - layer_instance_node = creator.find_layer_instance_node(layer) - if not layer_instance_node: - # TODO: Make function public - layer_instance_node = creator._create_layer_instance_node( - layer - ) - - # Transfer the main attributes of the original instance - layer_data = original_data.copy() - layer_data.update(data) - - self.imprint_instance_node(layer_instance_node, - data=layer_data) - - # Delete the legacy instance node - cmds.delete(instance_node) diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py b/server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py deleted file mode 100644 index d98b0dd5fa..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py +++ /dev/null @@ -1,134 +0,0 @@ -from maya import cmds - -from ayon_maya.api import lib, plugin - -from ayon_core.lib import ( - BoolDef, - NumberDef, -) - - -def _get_animation_attr_defs(): - """Get Animation generic definitions.""" - defs = lib.collect_animation_defs() - defs.extend( - [ - BoolDef("farm", label="Submit to Farm"), - NumberDef("priority", label="Farm job Priority", default=50), - BoolDef("refresh", label="Refresh viewport during export"), - BoolDef( - "includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip=( - "Whether to include parent hierarchy of nodes in the " - "publish instance." 
-                )
-            ),
-            BoolDef(
-                "includeUserDefinedAttributes",
-                label="Include User Defined Attributes",
-                tooltip=(
-                    "Whether to include all custom maya attributes found "
-                    "on nodes as attributes in the Alembic data."
-                )
-            ),
-        ]
-    )
-
-    return defs
-
-
-def convert_legacy_alembic_creator_attributes(node_data, class_name):
-    """This is a legacy transfer of creator attributes to publish attributes
-    for ExtractAlembic/ExtractAnimation plugin.
-    """
-    publish_attributes = node_data["publish_attributes"]
-
-    if class_name in publish_attributes:
-        return node_data
-
-    attributes = [
-        "attr",
-        "attrPrefix",
-        "visibleOnly",
-        "writeColorSets",
-        "writeFaceSets",
-        "writeNormals",
-        "renderableOnly",
-        "worldSpace"
-    ]
-    plugin_attributes = {}
-    for attr in attributes:
-        if attr not in node_data["creator_attributes"]:
-            continue
-        value = node_data["creator_attributes"].pop(attr)
-
-        plugin_attributes[attr] = value
-
-    publish_attributes[class_name] = plugin_attributes
-
-    return node_data
-
-
-class CreateAnimation(plugin.MayaHiddenCreator):
-    """Animation output for character rigs
-
-    We hide the animation creator from the UI since the creation of it is
-    automated upon loading a rig. There's an inventory action to recreate it
-    for loaded rigs if by chance someone deleted the animation instance.
-    """
-
-    identifier = "io.openpype.creators.maya.animation"
-    name = "animationDefault"
-    label = "Animation"
-    product_type = "animation"
-    icon = "male"
-
-    write_color_sets = False
-    write_face_sets = False
-    include_parent_hierarchy = False
-    include_user_defined_attributes = False
-
-    def read_instance_node(self, node):
-        node_data = super(CreateAnimation, self).read_instance_node(node)
-        node_data = convert_legacy_alembic_creator_attributes(
-            node_data, "ExtractAnimation"
-        )
-        return node_data
-
-    def get_instance_attr_defs(self):
-        return _get_animation_attr_defs()
-
-
-class CreatePointCache(plugin.MayaCreator):
-    """Alembic pointcache for animated data"""
-
-    identifier = "io.openpype.creators.maya.pointcache"
-    label = "Pointcache"
-    product_type = "pointcache"
-    icon = "gears"
-    write_color_sets = False
-    write_face_sets = False
-    include_user_defined_attributes = False
-
-    def read_instance_node(self, node):
-        node_data = super(CreatePointCache, self).read_instance_node(node)
-        node_data = convert_legacy_alembic_creator_attributes(
-            node_data, "ExtractAlembic"
-        )
-        return node_data
-
-    def get_instance_attr_defs(self):
-        return _get_animation_attr_defs()
-
-    def create(self, product_name, instance_data, pre_create_data):
-        instance = super(CreatePointCache, self).create(
-            product_name, instance_data, pre_create_data
-        )
-        instance_node = instance.get("instance_node")
-
-        # For Arnold standin proxy
-        proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
-        cmds.sets(proxy_set, forceElement=instance_node)
diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_arnold_scene_source.py b/server_addon/maya/client/ayon_maya/plugins/create/create_arnold_scene_source.py
deleted file mode 100644
index 8ae2759628..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/create/create_arnold_scene_source.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from maya import cmds
-
-from ayon_maya.api import (
-    lib,
-    plugin
-)
-from ayon_core.lib import (
-    NumberDef,
-    BoolDef
-)
-
-
-class CreateArnoldSceneSource(plugin.MayaCreator):
-    """Arnold Scene Source"""
-
-    identifier = "io.openpype.creators.maya.ass"
-    label = "Arnold Scene Source"
-    
product_type = "ass" - icon = "cube" - settings_name = "CreateAss" - - expandProcedurals = False - motionBlur = True - motionBlurKeys = 2 - motionBlurLength = 0.5 - maskOptions = False - maskCamera = False - maskLight = False - maskShape = False - maskShader = False - maskOverride = False - maskDriver = False - maskFilter = False - maskColor_manager = False - maskOperator = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("expandProcedural", - label="Expand Procedural", - default=self.expandProcedurals), - BoolDef("motionBlur", - label="Motion Blur", - default=self.motionBlur), - NumberDef("motionBlurKeys", - label="Motion Blur Keys", - decimals=0, - default=self.motionBlurKeys), - NumberDef("motionBlurLength", - label="Motion Blur Length", - decimals=3, - default=self.motionBlurLength), - - # Masks - BoolDef("maskOptions", - label="Export Options", - default=self.maskOptions), - BoolDef("maskCamera", - label="Export Cameras", - default=self.maskCamera), - BoolDef("maskLight", - label="Export Lights", - default=self.maskLight), - BoolDef("maskShape", - label="Export Shapes", - default=self.maskShape), - BoolDef("maskShader", - label="Export Shaders", - default=self.maskShader), - BoolDef("maskOverride", - label="Export Override Nodes", - default=self.maskOverride), - BoolDef("maskDriver", - label="Export Drivers", - default=self.maskDriver), - BoolDef("maskFilter", - label="Export Filters", - default=self.maskFilter), - BoolDef("maskOperator", - label="Export Operators", - default=self.maskOperator), - BoolDef("maskColor_manager", - label="Export Color Managers", - default=self.maskColor_manager), - ]) - - return defs - - -class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource): - """Arnold Scene Source Proxy - - This product type facilitates working with proxy geometry in the viewport. 
- """ - - identifier = "io.openpype.creators.maya.assproxy" - label = "Arnold Scene Source Proxy" - product_type = "assProxy" - icon = "cube" - - def create(self, product_name, instance_data, pre_create_data): - instance = super(CreateArnoldSceneSource, self).create( - product_name, instance_data, pre_create_data - ) - - instance_node = instance.get("instance_node") - - proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets([proxy], forceElement=instance_node) diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_assembly.py b/server_addon/maya/client/ayon_maya/plugins/create/create_assembly.py deleted file mode 100644 index dff04f059e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_assembly.py +++ /dev/null @@ -1,10 +0,0 @@ -from ayon_maya.api import plugin - - -class CreateAssembly(plugin.MayaCreator): - """A grouped package of loaded content""" - - identifier = "io.openpype.creators.maya.assembly" - label = "Assembly" - product_type = "assembly" - icon = "cubes" diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_camera.py b/server_addon/maya/client/ayon_maya/plugins/create/create_camera.py deleted file mode 100644 index 393176f5dd..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_camera.py +++ /dev/null @@ -1,36 +0,0 @@ -from ayon_maya.api import ( - lib, - plugin -) -from ayon_core.lib import BoolDef - - -class CreateCamera(plugin.MayaCreator): - """Single baked camera""" - - identifier = "io.openpype.creators.maya.camera" - label = "Camera" - product_type = "camera" - icon = "video-camera" - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("bakeToWorldSpace", - label="Bake to World-Space", - tooltip="Bake to World-Space", - default=True), - ]) - - return defs - - -class CreateCameraRig(plugin.MayaCreator): - """Complex hierarchy with camera.""" - - identifier = "io.openpype.creators.maya.camerarig" - label = "Camera Rig" - product_type = "camerarig" - icon = "video-camera" diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_layout.py b/server_addon/maya/client/ayon_maya/plugins/create/create_layout.py deleted file mode 100644 index 1d9bc2c1c8..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_layout.py +++ /dev/null @@ -1,21 +0,0 @@ -from ayon_maya.api import plugin -from ayon_core.lib import BoolDef - - -class CreateLayout(plugin.MayaCreator): - """A grouped package of loaded content""" - - identifier = "io.openpype.creators.maya.layout" - label = "Layout" - product_type = "layout" - icon = "cubes" - - def get_instance_attr_defs(self): - - return [ - BoolDef("groupLoadedAssets", - label="Group Loaded Assets", - tooltip="Enable this when you want to publish group of " - "loaded asset", - default=False) - ] diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_look.py b/server_addon/maya/client/ayon_maya/plugins/create/create_look.py deleted file mode 100644 index 3e1ec103ba..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_look.py +++ /dev/null @@ -1,47 +0,0 @@ -from ayon_maya.api import ( - plugin, - lib -) -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreateLook(plugin.MayaCreator): - """Shader connections defining shape look""" - - identifier = "io.openpype.creators.maya.look" - label = "Look" - product_type = "look" - icon = "paint-brush" - - make_tx = True - rs_tex = False - - def get_instance_attr_defs(self): - - 
return [ - # TODO: This value should actually get set on create! - TextDef("renderLayer", - # TODO: Bug: Hidden attribute's label is still shown in UI? - hidden=True, - default=lib.get_current_renderlayer(), - label="Renderlayer", - tooltip="Renderlayer to extract the look from"), - BoolDef("maketx", - label="MakeTX", - tooltip="Whether to generate .tx files for your textures", - default=self.make_tx), - BoolDef("rstex", - label="Convert textures to .rstex", - tooltip="Whether to generate Redshift .rstex files for " - "your textures", - default=self.rs_tex) - ] - - def get_pre_create_attr_defs(self): - # Show same attributes on create but include use selection - defs = list(super().get_pre_create_attr_defs()) - defs.extend(self.get_instance_attr_defs()) - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_matchmove.py b/server_addon/maya/client/ayon_maya/plugins/create/create_matchmove.py deleted file mode 100644 index 9cb2a3dd47..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_matchmove.py +++ /dev/null @@ -1,32 +0,0 @@ -from ayon_maya.api import ( - lib, - plugin -) -from ayon_core.lib import BoolDef - - -class CreateMatchmove(plugin.MayaCreator): - """Instance for more complex setup of cameras. - - Might contain multiple cameras, geometries etc. - - It is expected to be extracted into .abc or .ma - """ - - identifier = "io.openpype.creators.maya.matchmove" - label = "Matchmove" - product_type = "matchmove" - icon = "video-camera" - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("bakeToWorldSpace", - label="Bake Cameras to World-Space", - tooltip="Bake Cameras to World-Space", - default=True), - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_maya_usd.py b/server_addon/maya/client/ayon_maya/plugins/create/create_maya_usd.py deleted file mode 100644 index 19b55384f3..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_maya_usd.py +++ /dev/null @@ -1,102 +0,0 @@ -from ayon_maya.api import plugin, lib -from ayon_core.lib import ( - BoolDef, - EnumDef, - TextDef -) - -from maya import cmds - - -class CreateMayaUsd(plugin.MayaCreator): - """Create Maya USD Export""" - - identifier = "io.openpype.creators.maya.mayausd" - label = "Maya USD" - product_type = "usd" - icon = "cubes" - description = "Create Maya USD Export" - - cache = {} - - def get_publish_families(self): - return ["usd", "mayaUsd"] - - def get_instance_attr_defs(self): - - if "jobContextItems" not in self.cache: - # Query once instead of per instance - job_context_items = {} - try: - cmds.loadPlugin("mayaUsdPlugin", quiet=True) - job_context_items = { - cmds.mayaUSDListJobContexts(jobContext=name): name - for name in cmds.mayaUSDListJobContexts(export=True) or [] - } - except RuntimeError: - # Likely `mayaUsdPlugin` plug-in not available - self.log.warning("Unable to retrieve available job " - "contexts for `mayaUsdPlugin` exports") - - if not job_context_items: - # enumdef multiselection may not be empty - job_context_items = [""] - - self.cache["jobContextItems"] = job_context_items - - defs = lib.collect_animation_defs() - defs.extend([ - EnumDef("defaultUSDFormat", - label="File format", - items={ - "usdc": "Binary", - "usda": "ASCII" - }, - default="usdc"), - BoolDef("stripNamespaces", - label="Strip Namespaces", - tooltip=( - "Remove namespaces during export. 
By default, " - "namespaces are exported to the USD file in the " - "following format: nameSpaceExample_pPlatonic1" - ), - default=True), - BoolDef("mergeTransformAndShape", - label="Merge Transform and Shape", - tooltip=( - "Combine Maya transform and shape into a single USD" - "prim that has transform and geometry, for all" - " \"geometric primitives\" (gprims).\n" - "This results in smaller and faster scenes. Gprims " - "will be \"unpacked\" back into transform and shape " - "nodes when imported into Maya from USD." - ), - default=True), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - tooltip=( - "Whether to include all custom maya attributes found " - "on nodes as metadata (userProperties) in USD." - ), - default=False), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - default="", - placeholder="prefix1, prefix2"), - EnumDef("jobContext", - label="Job Context", - items=self.cache["jobContextItems"], - tooltip=( - "Specifies an additional export context to handle.\n" - "These usually contain extra schemas, primitives,\n" - "and materials that are to be exported for a " - "specific\ntask, a target renderer for example." - ), - multiselection=True), - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_mayascene.py b/server_addon/maya/client/ayon_maya/plugins/create/create_mayascene.py deleted file mode 100644 index 9913efc016..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_mayascene.py +++ /dev/null @@ -1,11 +0,0 @@ -from ayon_maya.api import plugin - - -class CreateMayaScene(plugin.MayaCreator): - """Raw Maya Scene file export""" - - identifier = "io.openpype.creators.maya.mayascene" - name = "mayaScene" - label = "Maya Scene" - product_type = "mayaScene" - icon = "file-archive-o" diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_model.py b/server_addon/maya/client/ayon_maya/plugins/create/create_model.py deleted file mode 100644 index 87696c58d2..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_model.py +++ /dev/null @@ -1,43 +0,0 @@ -from ayon_maya.api import plugin -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreateModel(plugin.MayaCreator): - """Polygonal static geometry""" - - identifier = "io.openpype.creators.maya.model" - label = "Model" - product_type = "model" - icon = "cube" - default_variants = ["Main", "Proxy", "_MD", "_HD", "_LD"] - - write_color_sets = False - write_face_sets = False - - def get_instance_attr_defs(self): - - return [ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=self.write_color_sets), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=self.write_face_sets), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=False), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1, prefix2") - ] diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_multishot_layout.py b/server_addon/maya/client/ayon_maya/plugins/create/create_multishot_layout.py deleted file mode 100644 index 5229823110..0000000000 --- 
a/server_addon/maya/client/ayon_maya/plugins/create/create_multishot_layout.py +++ /dev/null @@ -1,223 +0,0 @@ -import collections - -from ayon_api import ( - get_folder_by_name, - get_folder_by_path, - get_folders, - get_tasks, -) -from maya import cmds # noqa: F401 - -from ayon_maya.api import plugin -from ayon_core.lib import BoolDef, EnumDef, TextDef -from ayon_core.pipeline import ( - Creator, - get_current_folder_path, - get_current_project_name, -) -from ayon_core.pipeline.create import CreatorError - - -class CreateMultishotLayout(plugin.MayaCreator): - """Create a multi-shot layout in the Maya scene. - - This creator will create a Camera Sequencer in the Maya scene based on - the shots found under the specified folder. The shots will be added to - the sequencer in the order of their clipIn and clipOut values. For each - shot a Layout will be created. - - """ - identifier = "io.openpype.creators.maya.multishotlayout" - label = "Multi-shot Layout" - product_type = "layout" - icon = "project-diagram" - - def get_pre_create_attr_defs(self): - # Present artist with a list of parents of the current context - # to choose from. This will be used to get the shots under the - # selected folder to create the Camera Sequencer. - - """ - Todo: `get_folder_by_name` should be switched to `get_folder_by_path` - once the fork to pure AYON is done. - - Warning: this will not work for projects where the folder name - is not unique across the project until the switch mentioned - above is done. - """ - - project_name = get_current_project_name() - folder_path = get_current_folder_path() - if "/" in folder_path: - current_folder = get_folder_by_path(project_name, folder_path) - else: - current_folder = get_folder_by_name( - project_name, folder_name=folder_path - ) - - current_path_parts = current_folder["path"].split("/") - - # populate the list with parents of the current folder - # this will create menu items like: - # [ - # { - # "value": "", - # "label": "project (shots directly under the project)" - # }, { - # "value": "shots/shot_01", "label": "shot_01 (current)" - # }, { - # "value": "shots", "label": "shots" - # } - # ] - - # add the project as the first item - items_with_label = [ - { - "label": f"{self.project_name} " - "(shots directly under the project)", - "value": "" - } - ] - - # go through the current folder path and add each part to the list, - # but mark the current folder. - for part_idx, part in enumerate(current_path_parts): - label = part - if label == current_folder["name"]: - label = f"{label} (current)" - - value = "/".join(current_path_parts[:part_idx + 1]) - - items_with_label.append({"label": label, "value": value}) - - return [ - EnumDef("shotParent", - default=current_folder["name"], - label="Shot Parent Folder", - items=items_with_label, - ), - BoolDef("groupLoadedAssets", - label="Group Loaded Assets", - tooltip="Enable this when you want to publish group of " - "loaded asset", - default=False), - TextDef("taskName", - label="Associated Task Name", - tooltip=("Task name to be associated " - "with the created Layout"), - default="layout"), - ] - - def create(self, product_name, instance_data, pre_create_data): - shots = list( - self.get_related_shots(folder_path=pre_create_data["shotParent"]) - ) - if not shots: - # There are no shot folders under the specified folder. - # We are raising an error here but in the future we might - # want to create a new shot folders by publishing the layouts - # and shot defined in the sequencer. 
Sort of editorial publish - # in side of Maya. - raise CreatorError(( - "No shots found under the specified " - f"folder: {pre_create_data['shotParent']}.")) - - # Get layout creator - layout_creator_id = "io.openpype.creators.maya.layout" - layout_creator: Creator = self.create_context.creators.get( - layout_creator_id) - if not layout_creator: - raise CreatorError( - f"Creator {layout_creator_id} not found.") - - folder_ids = {s["id"] for s in shots} - folder_entities = get_folders(self.project_name, folder_ids) - task_entities = get_tasks( - self.project_name, folder_ids=folder_ids - ) - task_entities_by_folder_id = collections.defaultdict(dict) - for task_entity in task_entities: - folder_id = task_entity["folderId"] - task_name = task_entity["name"] - task_entities_by_folder_id[folder_id][task_name] = task_entity - - folder_entities_by_id = {fe["id"]: fe for fe in folder_entities} - for shot in shots: - # we are setting shot name to be displayed in the sequencer to - # `shot name (shot label)` if the label is set, otherwise just - # `shot name`. So far, labels are used only when the name is set - # with characters that are not allowed in the shot name. - if not shot["active"]: - continue - - # get task for shot - folder_id = shot["id"] - folder_entity = folder_entities_by_id[folder_id] - task_entities = task_entities_by_folder_id[folder_id] - - layout_task_name = None - layout_task_entity = None - if pre_create_data["taskName"] in task_entities: - layout_task_name = pre_create_data["taskName"] - layout_task_entity = task_entities[layout_task_name] - - shot_name = f"{shot['name']}%s" % ( - f" ({shot['label']})" if shot["label"] else "") - cmds.shot(sequenceStartTime=shot["attrib"]["clipIn"], - sequenceEndTime=shot["attrib"]["clipOut"], - shotName=shot_name) - - # Create layout instance by the layout creator - - instance_data = { - "folderPath": shot["path"], - "variant": layout_creator.get_default_variant() - } - if layout_task_name: - instance_data["task"] = layout_task_name - - layout_creator.create( - product_name=layout_creator.get_product_name( - self.project_name, - folder_entity, - layout_task_entity, - layout_creator.get_default_variant(), - ), - instance_data=instance_data, - pre_create_data={ - "groupLoadedAssets": pre_create_data["groupLoadedAssets"] - } - ) - - def get_related_shots(self, folder_path: str): - """Get all shots related to the current folder. - - Get all folders of type Shot under specified folder. - - Args: - folder_path (str): Path of the folder. - - Returns: - list: List of dicts with folder data. 
- - """ - # if folder_path is None, project is selected as a root - # and its name is used as a parent id - parent_id = self.project_name - if folder_path: - current_folder = get_folder_by_path( - project_name=self.project_name, - folder_path=folder_path, - ) - parent_id = current_folder["id"] - - # get all child folders of the current one - return get_folders( - project_name=self.project_name, - parent_ids=[parent_id], - fields=[ - "attrib.clipIn", "attrib.clipOut", - "attrib.frameStart", "attrib.frameEnd", - "name", "label", "path", "folderType", "id" - ] - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_look.py b/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_look.py deleted file mode 100644 index f2dcb77187..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_look.py +++ /dev/null @@ -1,27 +0,0 @@ -from ayon_maya.api import plugin -from ayon_core.lib import ( - BoolDef, - EnumDef -) - - -class CreateMultiverseLook(plugin.MayaCreator): - """Create Multiverse Look""" - - identifier = "io.openpype.creators.maya.mvlook" - label = "Multiverse Look" - product_type = "mvLook" - icon = "cubes" - - def get_instance_attr_defs(self): - - return [ - EnumDef("fileFormat", - label="File Format", - tooltip="USD export file format", - items=["usda", "usd"], - default="usda"), - BoolDef("publishMipMap", - label="Publish MipMap", - default=True), - ] diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd.py b/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd.py deleted file mode 100644 index bdcea4cd2c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd.py +++ /dev/null @@ -1,139 +0,0 @@ -from ayon_maya.api import plugin, lib -from ayon_core.lib import ( - BoolDef, - NumberDef, - TextDef, - EnumDef -) - - -class CreateMultiverseUsd(plugin.MayaCreator): - """Create Multiverse USD Asset""" - - identifier = "io.openpype.creators.maya.mvusdasset" - label = "Multiverse USD Asset" - product_type = "usd" - icon = "cubes" - description = "Create Multiverse USD Asset" - - def get_publish_families(self): - return ["usd", "mvUsd"] - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs(fps=True) - defs.extend([ - EnumDef("fileFormat", - label="File format", - items=["usd", "usda", "usdz"], - default="usd"), - BoolDef("stripNamespaces", - label="Strip Namespaces", - default=True), - BoolDef("mergeTransformAndShape", - label="Merge Transform and Shape", - default=False), - BoolDef("writeAncestors", - label="Write Ancestors", - default=True), - BoolDef("flattenParentXforms", - label="Flatten Parent Xforms", - default=False), - BoolDef("writeSparseOverrides", - label="Write Sparse Overrides", - default=False), - BoolDef("useMetaPrimPath", - label="Use Meta Prim Path", - default=False), - TextDef("customRootPath", - label="Custom Root Path", - default=''), - TextDef("customAttributes", - label="Custom Attributes", - tooltip="Comma-separated list of attribute names", - default=''), - TextDef("nodeTypesToIgnore", - label="Node Types to Ignore", - tooltip="Comma-separated list of node types to be ignored", - default=''), - BoolDef("writeMeshes", - label="Write Meshes", - default=True), - BoolDef("writeCurves", - label="Write Curves", - default=True), - BoolDef("writeParticles", - label="Write Particles", - default=True), - BoolDef("writeCameras", - label="Write Cameras", - default=False), - BoolDef("writeLights", - 
label="Write Lights", - default=False), - BoolDef("writeJoints", - label="Write Joints", - default=False), - BoolDef("writeCollections", - label="Write Collections", - default=False), - BoolDef("writePositions", - label="Write Positions", - default=True), - BoolDef("writeNormals", - label="Write Normals", - default=True), - BoolDef("writeUVs", - label="Write UVs", - default=True), - BoolDef("writeColorSets", - label="Write Color Sets", - default=False), - BoolDef("writeTangents", - label="Write Tangents", - default=False), - BoolDef("writeRefPositions", - label="Write Ref Positions", - default=True), - BoolDef("writeBlendShapes", - label="Write BlendShapes", - default=False), - BoolDef("writeDisplayColor", - label="Write Display Color", - default=True), - BoolDef("writeSkinWeights", - label="Write Skin Weights", - default=False), - BoolDef("writeMaterialAssignment", - label="Write Material Assignment", - default=False), - BoolDef("writeHardwareShader", - label="Write Hardware Shader", - default=False), - BoolDef("writeShadingNetworks", - label="Write Shading Networks", - default=False), - BoolDef("writeTransformMatrix", - label="Write Transform Matrix", - default=True), - BoolDef("writeUsdAttributes", - label="Write USD Attributes", - default=True), - BoolDef("writeInstancesAsReferences", - label="Write Instances as References", - default=False), - BoolDef("timeVaryingTopology", - label="Time Varying Topology", - default=False), - TextDef("customMaterialNamespace", - label="Custom Material Namespace", - default=''), - NumberDef("numTimeSamples", - label="Num Time Samples", - default=1), - NumberDef("timeSamplesSpan", - label="Time Samples Span", - default=0.0), - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_comp.py b/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_comp.py deleted file mode 100644 index 2459704d14..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_comp.py +++ /dev/null @@ -1,48 +0,0 @@ -from ayon_maya.api import plugin, lib -from ayon_core.lib import ( - BoolDef, - NumberDef, - EnumDef -) - - -class CreateMultiverseUsdComp(plugin.MayaCreator): - """Create Multiverse USD Composition""" - - identifier = "io.openpype.creators.maya.mvusdcomposition" - label = "Multiverse USD Composition" - product_type = "mvUsdComposition" - icon = "cubes" - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs(fps=True) - defs.extend([ - EnumDef("fileFormat", - label="File format", - items=["usd", "usda"], - default="usd"), - BoolDef("stripNamespaces", - label="Strip Namespaces", - default=False), - BoolDef("mergeTransformAndShape", - label="Merge Transform and Shape", - default=False), - BoolDef("flattenContent", - label="Flatten Content", - default=False), - BoolDef("writeAsCompoundLayers", - label="Write As Compound Layers", - default=False), - BoolDef("writePendingOverrides", - label="Write Pending Overrides", - default=False), - NumberDef("numTimeSamples", - label="Num Time Samples", - default=1), - NumberDef("timeSamplesSpan", - label="Time Samples Span", - default=0.0), - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_over.py b/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_over.py deleted file mode 100644 index b070daf550..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_multiverse_usd_over.py +++ /dev/null @@ -1,59 +0,0 @@ -from 
ayon_maya.api import plugin, lib -from ayon_core.lib import ( - BoolDef, - NumberDef, - EnumDef -) - - -class CreateMultiverseUsdOver(plugin.MayaCreator): - """Create Multiverse USD Override""" - - identifier = "io.openpype.creators.maya.mvusdoverride" - label = "Multiverse USD Override" - product_type = "mvUsdOverride" - icon = "cubes" - - def get_instance_attr_defs(self): - defs = lib.collect_animation_defs(fps=True) - defs.extend([ - EnumDef("fileFormat", - label="File format", - items=["usd", "usda"], - default="usd"), - BoolDef("writeAll", - label="Write All", - default=False), - BoolDef("writeTransforms", - label="Write Transforms", - default=True), - BoolDef("writeVisibility", - label="Write Visibility", - default=True), - BoolDef("writeAttributes", - label="Write Attributes", - default=True), - BoolDef("writeMaterials", - label="Write Materials", - default=True), - BoolDef("writeVariants", - label="Write Variants", - default=True), - BoolDef("writeVariantsDefinition", - label="Write Variants Definition", - default=True), - BoolDef("writeActiveState", - label="Write Active State", - default=True), - BoolDef("writeNamespaces", - label="Write Namespaces", - default=False), - NumberDef("numTimeSamples", - label="Num Time Samples", - default=1), - NumberDef("timeSamplesSpan", - label="Time Samples Span", - default=0.0), - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_proxy_abc.py b/server_addon/maya/client/ayon_maya/plugins/create/create_proxy_abc.py deleted file mode 100644 index 431f113941..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_proxy_abc.py +++ /dev/null @@ -1,50 +0,0 @@ -from ayon_maya.api import ( - lib, - plugin -) -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreateProxyAlembic(plugin.MayaCreator): - """Proxy Alembic for animated data""" - - identifier = "io.openpype.creators.maya.proxyabc" - label = "Proxy Alembic" - product_type = "proxyAbc" - icon = "gears" - write_color_sets = False - write_face_sets = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=self.write_color_sets), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=self.write_face_sets), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - TextDef("nameSuffix", - label="Name Suffix for Bounding Box", - default="_BBox", - placeholder="_BBox"), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1, prefix2") - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_redshift_proxy.py b/server_addon/maya/client/ayon_maya/plugins/create/create_redshift_proxy.py deleted file mode 100644 index c4cc874a2a..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_redshift_proxy.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator of Redshift proxy product types.""" - -from ayon_maya.api import plugin, lib -from ayon_core.lib import BoolDef - - -class CreateRedshiftProxy(plugin.MayaCreator): - """Create instance of Redshift Proxy product.""" - - identifier = "io.openpype.creators.maya.redshiftproxy" - label = "Redshift Proxy" - product_type = "redshiftproxy" - icon = "gears" - - def 
get_instance_attr_defs(self): - - defs = [ - BoolDef("animation", - label="Export animation", - default=False) - ] - - defs.extend(lib.collect_animation_defs()) - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_render.py b/server_addon/maya/client/ayon_maya/plugins/create/create_render.py deleted file mode 100644 index 5defee7d07..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_render.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- -"""Create ``Render`` instance in Maya.""" - -from ayon_maya.api import ( - lib_rendersettings, - plugin -) -from ayon_core.pipeline import CreatorError -from ayon_core.lib import ( - BoolDef, - NumberDef, -) - - -class CreateRenderlayer(plugin.RenderlayerCreator): - """Create and manages renderlayer product per renderLayer in workfile. - - This generates a single node in the scene which tells the Creator to if - it exists collect Maya rendersetup renderlayers as individual instances. - As such, triggering create doesn't actually create the instance node per - layer but only the node which tells the Creator it may now collect - the renderlayers. - - """ - - identifier = "io.openpype.creators.maya.renderlayer" - product_type = "renderlayer" - label = "Render" - icon = "eye" - - layer_instance_prefix = "render" - singleton_node_name = "renderingMain" - - render_settings = {} - - @classmethod - def apply_settings(cls, project_settings): - cls.render_settings = project_settings["maya"]["render_settings"] - - def create(self, product_name, instance_data, pre_create_data): - # Only allow a single render instance to exist - if self._get_singleton_node(): - raise CreatorError( - "A Render instance already exists - only one can be " - "configured.\n\n" - "To render multiple render layers, create extra Render Setup " - "Layers via Maya's Render Setup UI.\n" - "Then refresh the publisher to detect the new layers for " - "rendering.\n\n" - "With a render instance present all Render Setup layers in " - "your workfile are renderable instances.") - - # Apply default project render settings on create - if self.render_settings.get("apply_render_settings"): - lib_rendersettings.RenderSettings().set_default_renderer_settings() - - super(CreateRenderlayer, self).create(product_name, - instance_data, - pre_create_data) - - def get_instance_attr_defs(self): - """Create instance settings.""" - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - BoolDef("extendFrames", - label="Extend Frames", - tooltip="Extends the frames on top of the previous " - "publish.\nIf the previous was 1001-1050 and you " - "would now submit 1020-1070 only the new frames " - "1051-1070 would be rendered and published " - "together with the previously rendered frames.\n" - "If 'overrideExistingFrame' is enabled it *will* " - "render any existing frames.", - default=False), - BoolDef("overrideExistingFrame", - label="Override Existing Frame", - tooltip="Override existing rendered frames " - "(if they exist).", - default=True), - - # TODO: Should these move to submit_maya_deadline plugin? 
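# A minimal sketch, not part of this plugin, of how the tilesX/tilesY
# values below could map to per-tile render regions; the helper name
# and the (left, right, bottom, top) pixel convention are assumptions.
def iter_tile_regions(width, height, tiles_x, tiles_y):
    """Yield one (left, right, bottom, top) pixel region per tile."""
    for ty in range(tiles_y):
        for tx in range(tiles_x):
            left = (width * tx) // tiles_x
            right = (width * (tx + 1)) // tiles_x - 1
            bottom = (height * ty) // tiles_y
            top = (height * (ty + 1)) // tiles_y - 1
            yield left, right, bottom, top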
- # Tile rendering - BoolDef("tileRendering", - label="Enable tiled rendering", - default=False), - NumberDef("tilesX", - label="Tiles X", - default=2, - minimum=1, - decimals=0), - NumberDef("tilesY", - label="Tiles Y", - default=2, - minimum=1, - decimals=0), - - # Additional settings - BoolDef("convertToScanline", - label="Convert to Scanline", - tooltip="Convert the output images to scanline images", - default=False), - BoolDef("useReferencedAovs", - label="Use Referenced AOVs", - tooltip="Consider the AOVs from referenced scenes as well", - default=False), - - BoolDef("renderSetupIncludeLights", - label="Render Setup Include Lights", - default=self.render_settings.get("enable_all_lights", - False)) - ] diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_rendersetup.py b/server_addon/maya/client/ayon_maya/plugins/create/create_rendersetup.py deleted file mode 100644 index 415ab4ff8c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_rendersetup.py +++ /dev/null @@ -1,31 +0,0 @@ -from ayon_maya.api import plugin -from ayon_core.pipeline import CreatorError - - -class CreateRenderSetup(plugin.MayaCreator): - """Create rendersetup template json data""" - - identifier = "io.openpype.creators.maya.rendersetup" - label = "Render Setup Preset" - product_type = "rendersetup" - icon = "tablet" - - def get_pre_create_attr_defs(self): - # Do not show the "use_selection" setting from parent class - return [] - - def create(self, product_name, instance_data, pre_create_data): - - existing_instance = None - for instance in self.create_context.instances: - if instance.product_type == self.product_type: - existing_instance = instance - break - - if existing_instance: - raise CreatorError("A RenderSetup instance already exists - only " - "one can be configured.") - - super(CreateRenderSetup, self).create(product_name, - instance_data, - pre_create_data) diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_review.py b/server_addon/maya/client/ayon_maya/plugins/create/create_review.py deleted file mode 100644 index 26fad91ed9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_review.py +++ /dev/null @@ -1,148 +0,0 @@ -import json - -from maya import cmds -import ayon_api - -from ayon_maya.api import ( - lib, - plugin -) -from ayon_core.lib import ( - BoolDef, - NumberDef, - EnumDef -) -from ayon_core.pipeline import CreatedInstance - -TRANSPARENCIES = [ - "preset", - "simple", - "object sorting", - "weighted average", - "depth peeling", - "alpha cut" -] - - -class CreateReview(plugin.MayaCreator): - """Playblast reviewable""" - - identifier = "io.openpype.creators.maya.review" - label = "Review" - product_type = "review" - icon = "video-camera" - - useMayaTimeline = True - panZoom = False - - # Overriding "create" method to prefill values from settings. 
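# The prefill pattern used in create() below, shown in isolation as a
# standalone sketch (the function itself is illustrative): defaults of
# the creator attribute definitions are overridden from the capture
# preset before the instance is created.
def apply_preset_defaults(attr_defs, preset):
    defs_by_key = {attr_def.key: attr_def for attr_def in attr_defs}
    mapping = {
        "review_width": preset["Resolution"]["width"],
        "review_height": preset["Resolution"]["height"],
        "isolate": preset["Generic"]["isolate_view"],
        "imagePlane": preset["ViewportOptions"]["imagePlane"],
        "panZoom": preset["Generic"]["pan_zoom"],
    }
    for key, value in mapping.items():
        if key in defs_by_key:
            defs_by_key[key].default = value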
- def create(self, product_name, instance_data, pre_create_data): - - members = list() - if pre_create_data.get("use_selection"): - members = cmds.ls(selection=True) - - project_name = self.project_name - folder_path = instance_data["folderPath"] - task_name = instance_data["task"] - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path, fields={"id"} - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name, fields={"taskType"} - ) - preset = lib.get_capture_preset( - task_name, - task_entity["taskType"], - product_name, - self.project_settings, - self.log - ) - self.log.debug( - "Using preset: {}".format( - json.dumps(preset, indent=4, sort_keys=True) - ) - ) - - with lib.undo_chunk(): - instance_node = cmds.sets(members, name=product_name) - instance_data["instance_node"] = instance_node - instance = CreatedInstance( - self.product_type, - product_name, - instance_data, - self) - - creator_attribute_defs_by_key = { - x.key: x for x in instance.creator_attribute_defs - } - mapping = { - "review_width": preset["Resolution"]["width"], - "review_height": preset["Resolution"]["height"], - "isolate": preset["Generic"]["isolate_view"], - "imagePlane": preset["ViewportOptions"]["imagePlane"], - "panZoom": preset["Generic"]["pan_zoom"] - } - for key, value in mapping.items(): - creator_attribute_defs_by_key[key].default = value - - self._add_instance_to_context(instance) - - self.imprint_instance_node(instance_node, - data=instance.data_to_store()) - return instance - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - # Option for using Maya or folder frame range in settings. - if not self.useMayaTimeline: - # Update the defaults to be the folder frame range - frame_range = lib.get_frame_range() - defs_by_key = {attr_def.key: attr_def for attr_def in defs} - for key, value in frame_range.items(): - if key not in defs_by_key: - raise RuntimeError("Attribute definition not found to be " - "updated for key: {}".format(key)) - attr_def = defs_by_key[key] - attr_def.default = value - - defs.extend([ - NumberDef("review_width", - label="Review width", - tooltip="A value of zero will use the folder resolution.", - decimals=0, - minimum=0, - default=0), - NumberDef("review_height", - label="Review height", - tooltip="A value of zero will use the folder resolution.", - decimals=0, - minimum=0, - default=0), - BoolDef("keepImages", - label="Keep Images", - tooltip="Whether to also publish along the image sequence " - "next to the video reviewable.", - default=False), - BoolDef("isolate", - label="Isolate render members of instance", - tooltip="When enabled only the members of the instance " - "will be included in the playblast review.", - default=False), - BoolDef("imagePlane", - label="Show Image Plane", - default=True), - EnumDef("transparency", - label="Transparency", - items=TRANSPARENCIES), - BoolDef("panZoom", - label="Enable camera pan/zoom", - default=True), - EnumDef("displayLights", - label="Display Lights", - items=lib.DISPLAY_LIGHTS_ENUM), - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_rig.py b/server_addon/maya/client/ayon_maya/plugins/create/create_rig.py deleted file mode 100644 index 135e51bcbf..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_rig.py +++ /dev/null @@ -1,32 +0,0 @@ -from maya import cmds - -from ayon_maya.api import plugin - - -class CreateRig(plugin.MayaCreator): - """Artist-friendly rig with controls to direct motion""" - 
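# The member sets created in create() below are plain Maya object sets
# parented under the instance set, so downstream plugins can query
# their contents, e.g. (the set name is illustrative; real names are
# derived from the product name):
#
#     from maya import cmds
#     controls = cmds.sets("rigMain_controls_SET", query=True) or []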
- identifier = "io.openpype.creators.maya.rig" - label = "Rig" - product_type = "rig" - icon = "wheelchair" - - def create(self, product_name, instance_data, pre_create_data): - - instance = super(CreateRig, self).create(product_name, - instance_data, - pre_create_data) - - instance_node = instance.get("instance_node") - - self.log.info("Creating Rig instance set up ...") - # TODO:change name (_controls_SET -> _rigs_SET) - controls = cmds.sets(name=product_name + "_controls_SET", empty=True) - # TODO:change name (_out_SET -> _geo_SET) - pointcache = cmds.sets(name=product_name + "_out_SET", empty=True) - skeleton = cmds.sets( - name=product_name + "_skeletonAnim_SET", empty=True) - skeleton_mesh = cmds.sets( - name=product_name + "_skeletonMesh_SET", empty=True) - cmds.sets([controls, pointcache, - skeleton, skeleton_mesh], forceElement=instance_node) diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_setdress.py b/server_addon/maya/client/ayon_maya/plugins/create/create_setdress.py deleted file mode 100644 index 6e1c4e1c4f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_setdress.py +++ /dev/null @@ -1,24 +0,0 @@ -from ayon_maya.api import plugin -from ayon_core.lib import BoolDef - - -class CreateSetDress(plugin.MayaCreator): - """A grouped package of loaded content""" - - identifier = "io.openpype.creators.maya.setdress" - label = "Set Dress" - product_type = "setdress" - icon = "cubes" - exactSetMembersOnly = True - shader = True - default_variants = ["Main", "Anim"] - - def get_instance_attr_defs(self): - return [ - BoolDef("exactSetMembersOnly", - label="Exact Set Members Only", - default=self.exactSetMembersOnly), - BoolDef("shader", - label="Include shader", - default=self.shader) - ] diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_skeletalmesh.py b/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_skeletalmesh.py deleted file mode 100644 index a182fe7a24..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_skeletalmesh.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator for Unreal Skeletal Meshes.""" -from ayon_maya.api import plugin, lib -from ayon_core.lib import ( - BoolDef, - TextDef -) - -from maya import cmds # noqa - - -class CreateUnrealSkeletalMesh(plugin.MayaCreator): - """Unreal Static Meshes with collisions.""" - - identifier = "io.openpype.creators.maya.unrealskeletalmesh" - label = "Unreal - Skeletal Mesh" - product_type = "skeletalMesh" - icon = "thumbs-up" - - # Defined in settings - joint_hints = set() - - def get_dynamic_data( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ): - """ - The default product name templates for Unreal include {asset} and thus - we should pass that along as dynamic data. 
- """ - dynamic_data = super(CreateUnrealSkeletalMesh, self).get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - dynamic_data["asset"] = folder_entity["name"] - return dynamic_data - - def create(self, product_name, instance_data, pre_create_data): - - with lib.undo_chunk(): - instance = super(CreateUnrealSkeletalMesh, self).create( - product_name, instance_data, pre_create_data) - instance_node = instance.get("instance_node") - - # We reorganize the geometry that was originally added into the - # set into either 'joints_SET' or 'geometry_SET' based on the - # joint_hints from project settings - members = cmds.sets(instance_node, query=True) or [] - cmds.sets(clear=instance_node) - - geometry_set = cmds.sets(name="geometry_SET", empty=True) - joints_set = cmds.sets(name="joints_SET", empty=True) - - cmds.sets([geometry_set, joints_set], forceElement=instance_node) - - for node in members: - if node in self.joint_hints: - cmds.sets(node, forceElement=joints_set) - else: - cmds.sets(node, forceElement=geometry_set) - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=False), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - BoolDef("refresh", - label="Refresh viewport during export", - default=False), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1, prefix2") - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_staticmesh.py b/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_staticmesh.py deleted file mode 100644 index e5436bca64..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_staticmesh.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator for Unreal Static Meshes.""" -from ayon_maya.api import plugin, lib -from maya import cmds # noqa - - -class CreateUnrealStaticMesh(plugin.MayaCreator): - """Unreal Static Meshes with collisions.""" - - identifier = "io.openpype.creators.maya.unrealstaticmesh" - label = "Unreal - Static Mesh" - product_type = "staticMesh" - icon = "cube" - - # Defined in settings - collision_prefixes = [] - - def get_dynamic_data( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ): - """ - The default product name templates for Unreal include {asset} and thus - we should pass that along as dynamic data. 
- """ - dynamic_data = super(CreateUnrealStaticMesh, self).get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - dynamic_data["asset"] = folder_entity["name"] - return dynamic_data - - def create(self, product_name, instance_data, pre_create_data): - - with lib.undo_chunk(): - instance = super(CreateUnrealStaticMesh, self).create( - product_name, instance_data, pre_create_data) - instance_node = instance.get("instance_node") - - # We reorganize the geometry that was originally added into the - # set into either 'collision_SET' or 'geometry_SET' based on the - # collision_prefixes from project settings - members = cmds.sets(instance_node, query=True) - cmds.sets(clear=instance_node) - - geometry_set = cmds.sets(name="geometry_SET", empty=True) - collisions_set = cmds.sets(name="collisions_SET", empty=True) - - cmds.sets([geometry_set, collisions_set], - forceElement=instance_node) - - members = cmds.ls(members, long=True) or [] - children = cmds.listRelatives(members, allDescendents=True, - fullPath=True) or [] - transforms = cmds.ls(members + children, type="transform") - for transform in transforms: - - if not cmds.listRelatives(transform, - type="shape", - noIntermediate=True): - # Exclude all transforms that have no direct shapes - continue - - if self.has_collision_prefix(transform): - cmds.sets(transform, forceElement=collisions_set) - else: - cmds.sets(transform, forceElement=geometry_set) - - def has_collision_prefix(self, node_path): - """Return whether node name of path matches collision prefix. - - If the node name matches the collision prefix we add it to the - `collisions_SET` instead of the `geometry_SET`. - - Args: - node_path (str): Maya node path. - - Returns: - bool: Whether the node should be considered a collision mesh. 
- - """ - node_name = node_path.rsplit("|", 1)[-1] - for prefix in self.collision_prefixes: - if node_name.startswith(prefix): - return True - return False diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_yeticache.py b/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_yeticache.py deleted file mode 100644 index eea866d406..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_unreal_yeticache.py +++ /dev/null @@ -1,39 +0,0 @@ -from ayon_maya.api import ( - lib, - plugin -) -from ayon_core.lib import NumberDef - - -class CreateUnrealYetiCache(plugin.MayaCreator): - """Output for procedural plugin nodes of Yeti """ - - identifier = "io.openpype.creators.maya.unrealyeticache" - label = "Unreal - Yeti Cache" - product_type = "yeticacheUE" - icon = "pagelines" - - def get_instance_attr_defs(self): - - defs = [ - NumberDef("preroll", - label="Preroll", - minimum=0, - default=0, - decimals=0) - ] - - # Add animation data without step and handles - defs.extend(lib.collect_animation_defs()) - remove = {"step", "handleStart", "handleEnd"} - defs = [attr_def for attr_def in defs if attr_def.key not in remove] - - # Add samples after frame range - defs.append( - NumberDef("samples", - label="Samples", - default=3, - decimals=0) - ) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_vrayproxy.py b/server_addon/maya/client/ayon_maya/plugins/create/create_vrayproxy.py deleted file mode 100644 index 742e14ace0..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_vrayproxy.py +++ /dev/null @@ -1,50 +0,0 @@ -from ayon_maya.api import ( - plugin, - lib -) -from ayon_core.lib import BoolDef - - -class CreateVrayProxy(plugin.MayaCreator): - """Alembic pointcache for animated data""" - - identifier = "io.openpype.creators.maya.vrayproxy" - label = "VRay Proxy" - product_type = "vrayproxy" - icon = "gears" - - vrmesh = True - alembic = True - - def get_instance_attr_defs(self): - - defs = [ - BoolDef("animation", - label="Export Animation", - default=False) - ] - - # Add time range attributes but remove some attributes - # which this instance actually doesn't use - defs.extend(lib.collect_animation_defs()) - remove = {"handleStart", "handleEnd", "step"} - defs = [attr_def for attr_def in defs if attr_def.key not in remove] - - defs.extend([ - BoolDef("vertexColors", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=False), - BoolDef("vrmesh", - label="Export VRayMesh", - tooltip="Publish a .vrmesh (VRayMesh) file for " - "this VRayProxy", - default=self.vrmesh), - BoolDef("alembic", - label="Export Alembic", - tooltip="Publish a .abc (Alembic) file for " - "this VRayProxy", - default=self.alembic), - ]) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_vrayscene.py b/server_addon/maya/client/ayon_maya/plugins/create/create_vrayscene.py deleted file mode 100644 index 11c356fdef..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_vrayscene.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -"""Create instance of vrayscene.""" - -from ayon_maya.api import ( - lib_rendersettings, - plugin -) -from ayon_core.pipeline import CreatorError -from ayon_core.lib import BoolDef - - -class CreateVRayScene(plugin.RenderlayerCreator): - """Create Vray Scene.""" - - identifier = "io.openpype.creators.maya.vrayscene" - - product_type = "vrayscene" - label = "VRay Scene" - icon = "cubes" - - 
render_settings = {} - singleton_node_name = "vraysceneMain" - - @classmethod - def apply_settings(cls, project_settings): - cls.render_settings = project_settings["maya"]["render_settings"] - - def create(self, product_name, instance_data, pre_create_data): - # Only allow a single render instance to exist - if self._get_singleton_node(): - raise CreatorError("A Render instance already exists - only " - "one can be configured.") - - super(CreateVRayScene, self).create(product_name, - instance_data, - pre_create_data) - - # Apply default project render settings on create - if self.render_settings.get("apply_render_settings"): - lib_rendersettings.RenderSettings().set_default_renderer_settings() - - def get_instance_attr_defs(self): - """Create instance settings.""" - - return [ - BoolDef("vraySceneMultipleFiles", - label="V-Ray Scene Multiple Files", - default=False), - BoolDef("exportOnFarm", - label="Export on farm", - default=False) - ] diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_workfile.py b/server_addon/maya/client/ayon_maya/plugins/create/create_workfile.py deleted file mode 100644 index e0c94611b0..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_workfile.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating workfiles.""" -import ayon_api - -from ayon_core.pipeline import CreatedInstance, AutoCreator -from ayon_maya.api import plugin -from maya import cmds - - -class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator): - """Workfile auto-creator.""" - identifier = "io.openpype.creators.maya.workfile" - label = "Workfile" - product_type = "workfile" - icon = "fa5.file" - - default_variant = "Main" - - def create(self): - - variant = self.default_variant - current_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), None) - - project_name = self.project_name - folder_path = self.create_context.get_current_folder_path() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - current_folder_path = None - if current_instance is not None: - current_folder_path = current_instance["folderPath"] - - if current_instance is None: - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": variant - } - data.update( - self.get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - current_instance) - ) - self.log.info("Auto-creating workfile instance...") - current_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - self._add_instance_to_context(current_instance) - elif ( - current_folder_path != folder_path - or current_instance["task"] != task_name - ): - # Update instance context if is not the same - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - - current_instance["folderPath"] = folder_entity["path"] - current_instance["task"] = task_name - current_instance["productName"] = 
product_name - - def collect_instances(self): - self.cache_instance_data(self.collection_shared_data) - cached_instances = ( - self.collection_shared_data["maya_cached_instance_data"] - ) - for node in cached_instances.get(self.identifier, []): - node_data = self.read_instance_node(node) - - created_instance = CreatedInstance.from_existing(node_data, self) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, _changes in update_list: - data = created_inst.data_to_store() - node = data.get("instance_node") - if not node: - node = self.create_node() - created_inst["instance_node"] = node - data = created_inst.data_to_store() - - self.imprint_instance_node(node, data) - - def create_node(self): - node = cmds.sets(empty=True, name="workfileMain") - cmds.setAttr(node + ".hiddenInOutliner", True) - return node diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_xgen.py b/server_addon/maya/client/ayon_maya/plugins/create/create_xgen.py deleted file mode 100644 index d13d032a13..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_xgen.py +++ /dev/null @@ -1,10 +0,0 @@ -from ayon_maya.api import plugin - - -class CreateXgen(plugin.MayaCreator): - """Xgen""" - - identifier = "io.openpype.creators.maya.xgen" - label = "Xgen" - product_type = "xgen" - icon = "pagelines" diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_yeti_cache.py b/server_addon/maya/client/ayon_maya/plugins/create/create_yeti_cache.py deleted file mode 100644 index 8a834f18c0..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_yeti_cache.py +++ /dev/null @@ -1,39 +0,0 @@ -from ayon_maya.api import ( - lib, - plugin -) -from ayon_core.lib import NumberDef - - -class CreateYetiCache(plugin.MayaCreator): - """Output for procedural plugin nodes of Yeti """ - - identifier = "io.openpype.creators.maya.yeticache" - label = "Yeti Cache" - product_type = "yeticache" - icon = "pagelines" - - def get_instance_attr_defs(self): - - defs = [ - NumberDef("preroll", - label="Preroll", - minimum=0, - default=0, - decimals=0) - ] - - # Add animation data without step and handles - defs.extend(lib.collect_animation_defs()) - remove = {"step", "handleStart", "handleEnd"} - defs = [attr_def for attr_def in defs if attr_def.key not in remove] - - # Add samples after frame range - defs.append( - NumberDef("samples", - label="Samples", - default=3, - decimals=0) - ) - - return defs diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_yeti_rig.py b/server_addon/maya/client/ayon_maya/plugins/create/create_yeti_rig.py deleted file mode 100644 index c5378dc1b8..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_yeti_rig.py +++ /dev/null @@ -1,27 +0,0 @@ -from maya import cmds - -from ayon_maya.api import ( - lib, - plugin -) - - -class CreateYetiRig(plugin.MayaCreator): - """Output for procedural plugin nodes ( Yeti / XGen / etc)""" - - identifier = "io.openpype.creators.maya.yetirig" - label = "Yeti Rig" - product_type = "yetiRig" - icon = "usb" - - def create(self, product_name, instance_data, pre_create_data): - - with lib.undo_chunk(): - instance = super(CreateYetiRig, self).create(product_name, - instance_data, - pre_create_data) - instance_node = instance.get("instance_node") - - self.log.info("Creating Rig instance set up ...") - input_meshes = cmds.sets(name="input_SET", empty=True) - cmds.sets(input_meshes, forceElement=instance_node) diff --git 
a/server_addon/maya/client/ayon_maya/plugins/inventory/connect_geometry.py b/server_addon/maya/client/ayon_maya/plugins/inventory/connect_geometry.py deleted file mode 100644 index ccb88313e9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/inventory/connect_geometry.py +++ /dev/null @@ -1,158 +0,0 @@ -from maya import cmds - -from ayon_core.pipeline import InventoryAction, get_repres_contexts -from ayon_maya.api.lib import get_id - - -class ConnectGeometry(InventoryAction): - """Connect geometries within containers. - - Source container will connect to the target containers, by searching for - matching geometry IDs (cbid). - Source containers are of product type: "animation" and "pointcache". - The connection with be done with a live world space blendshape. - """ - - label = "Connect Geometry" - icon = "link" - color = "white" - - def process(self, containers): - # Validate selection is more than 1. - message = ( - "Only 1 container selected. 2+ containers needed for this action." - ) - if len(containers) == 1: - self.display_warning(message) - return - - # Categorize containers by family. - containers_by_product_type = {} - repre_ids = { - container["representation"] - for container in containers - } - repre_contexts_by_id = get_repres_contexts(repre_ids) - for container in containers: - repre_id = container["representation"] - repre_context = repre_contexts_by_id[repre_id] - - product_type = repre_context["product"]["productType"] - - containers_by_product_type.setdefault(product_type, []) - containers_by_product_type[product_type].append(container) - - # Validate to only 1 source container. - source_containers = containers_by_product_type.get("animation", []) - source_containers += containers_by_product_type.get("pointcache", []) - source_container_namespaces = [ - x["namespace"] for x in source_containers - ] - message = ( - "{} animation containers selected:\n\n{}\n\nOnly select 1 of type " - "\"animation\" or \"pointcache\".".format( - len(source_containers), source_container_namespaces - ) - ) - if len(source_containers) != 1: - self.display_warning(message) - return - - source_object = source_containers[0]["objectName"] - - # Collect matching geometry transforms based cbId attribute. - target_containers = [] - for product_type, containers in containers_by_product_type.items(): - if product_type in ["animation", "pointcache"]: - continue - - target_containers.extend(containers) - - source_data = self.get_container_data(source_object) - matches = [] - node_types = set() - for target_container in target_containers: - target_data = self.get_container_data( - target_container["objectName"] - ) - node_types.update(target_data["node_types"]) - for id, transform in target_data["ids"].items(): - source_match = source_data["ids"].get(id) - if source_match: - matches.append([source_match, transform]) - - # Message user about what is about to happen. - if not matches: - self.display_warning("No matching geometries found.") - return - - message = "Connecting geometries:\n\n" - for match in matches: - message += "{} > {}\n".format(match[0], match[1]) - - choice = self.display_warning(message, show_cancel=True) - if choice is False: - return - - # Setup live worldspace blendshape connection. - for source, target in matches: - blendshape = cmds.blendShape(source, target)[0] - cmds.setAttr(blendshape + ".origin", 0) - cmds.setAttr(blendshape + "." + target.split(":")[-1], 1) - - # Update Xgen if in any of the containers. 
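# On the blendShape nodes created above, origin=0 selects world-space
# origin so each target follows its source in world space rather than
# locally, and setting the per-target weight attribute (named after
# the target shape) to 1 keeps the link fully live. The xgmPreview()
# refresh below then pushes the deformed geometry to any Xgen palettes
# found among the containers.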
- if "xgmPalette" in node_types: - cmds.xgmPreview() - - def get_container_data(self, container): - """Collects data about the container nodes. - - Args: - container (dict): Container instance. - - Returns: - data (dict): - "node_types": All node types in container nodes. - "ids": If the node is a mesh, we collect its parent transform - id. - """ - data = {"node_types": set(), "ids": {}} - ref_node = cmds.sets(container, query=True, nodesOnly=True)[0] - for node in cmds.referenceQuery(ref_node, nodes=True): - node_type = cmds.nodeType(node) - data["node_types"].add(node_type) - - # Only interested in mesh transforms for connecting geometry with - # blendshape. - if node_type != "mesh": - continue - - transform = cmds.listRelatives(node, parent=True)[0] - data["ids"][get_id(transform)] = transform - - return data - - def display_warning(self, message, show_cancel=False): - """Show feedback to user. - - Returns: - bool - """ - - from qtpy import QtWidgets - - accept = QtWidgets.QMessageBox.Ok - if show_cancel: - buttons = accept | QtWidgets.QMessageBox.Cancel - else: - buttons = accept - - state = QtWidgets.QMessageBox.warning( - None, - "", - message, - buttons=buttons, - defaultButton=accept - ) - - return state == accept diff --git a/server_addon/maya/client/ayon_maya/plugins/inventory/connect_xgen.py b/server_addon/maya/client/ayon_maya/plugins/inventory/connect_xgen.py deleted file mode 100644 index 166c419072..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/inventory/connect_xgen.py +++ /dev/null @@ -1,174 +0,0 @@ -from maya import cmds -import xgenm - -from ayon_core.pipeline import ( - InventoryAction, - get_repres_contexts, - get_representation_path, -) - - -class ConnectXgen(InventoryAction): - """Connect Xgen with an animation or pointcache. - """ - - label = "Connect Xgen" - icon = "link" - color = "white" - - def process(self, containers): - # Validate selection is more than 1. - message = ( - "Only 1 container selected. 2+ containers needed for this action." - ) - if len(containers) == 1: - self.display_warning(message) - return - - # Categorize containers by product type. - containers_by_product_type = {} - repre_ids = { - container["representation"] - for container in containers - } - repre_contexts_by_id = get_repres_contexts(repre_ids) - for container in containers: - repre_id = container["representation"] - repre_context = repre_contexts_by_id[repre_id] - - product_type = repre_context["product"]["productType"] - - containers_by_product_type.setdefault(product_type, []) - containers_by_product_type[product_type].append(container) - - # Validate to only 1 source container. - source_containers = containers_by_product_type.get("animation", []) - source_containers += containers_by_product_type.get("pointcache", []) - source_container_namespaces = [ - x["namespace"] for x in source_containers - ] - message = ( - "{} animation containers selected:\n\n{}\n\nOnly select 1 of type " - "\"animation\" or \"pointcache\".".format( - len(source_containers), source_container_namespaces - ) - ) - if len(source_containers) != 1: - self.display_warning(message) - return - - source_container = source_containers[0] - source_repre_id = source_container["representation"] - source_object = source_container["objectName"] - - # Validate source representation is an alembic. 
- source_path = get_representation_path( - repre_contexts_by_id[source_repre_id]["representation"] - ).replace("\\", "/") - message = "Animation container \"{}\" is not an alembic:\n{}".format( - source_container["namespace"], source_path - ) - if not source_path.endswith(".abc"): - self.display_warning(message) - return - - # Target containers. - target_containers = [] - for product_type, containers in containers_by_product_type.items(): - if product_type in ["animation", "pointcache"]: - continue - - target_containers.extend(containers) - - # Inform user of connections from source representation to target - # descriptions. - descriptions_data = [] - connections_msg = "" - for target_container in target_containers: - reference_node = cmds.sets( - target_container["objectName"], query=True - )[0] - palettes = cmds.ls( - cmds.referenceQuery(reference_node, nodes=True), - type="xgmPalette" - ) - for palette in palettes: - for description in xgenm.descriptions(palette): - descriptions_data.append([palette, description]) - connections_msg += "\n{}/{}".format(palette, description) - - message = "Connecting \"{}\" to:\n".format( - source_container["namespace"] - ) - message += connections_msg - choice = self.display_warning(message, show_cancel=True) - if choice is False: - return - - # Recreate "xgenContainers" attribute to reset. - compound_name = "xgenContainers" - attr = "{}.{}".format(source_object, compound_name) - if cmds.objExists(attr): - cmds.deleteAttr(attr) - - cmds.addAttr( - source_object, - longName=compound_name, - attributeType="compound", - numberOfChildren=1, - multi=True - ) - - # Connect target containers. - for target_container in target_containers: - cmds.addAttr( - source_object, - longName="container", - attributeType="message", - parent=compound_name - ) - index = target_containers.index(target_container) - cmds.connectAttr( - target_container["objectName"] + ".message", - source_object + ".{}[{}].container".format( - compound_name, index - ) - ) - - # Setup cache on Xgen - object = "SplinePrimitive" - for palette, description in descriptions_data: - xgenm.setAttr("useCache", "true", palette, description, object) - xgenm.setAttr("liveMode", "false", palette, description, object) - xgenm.setAttr( - "cacheFileName", source_path, palette, description, object - ) - - # Refresh UI and viewport. - de = xgenm.xgGlobal.DescriptionEditor - de.refresh("Full") - - def display_warning(self, message, show_cancel=False): - """Show feedback to user. 
- - Returns: - bool - """ - - from qtpy import QtWidgets - - accept = QtWidgets.QMessageBox.Ok - if show_cancel: - buttons = accept | QtWidgets.QMessageBox.Cancel - else: - buttons = accept - - state = QtWidgets.QMessageBox.warning( - None, - "", - message, - buttons=buttons, - defaultButton=accept - ) - - return state == accept diff --git a/server_addon/maya/client/ayon_maya/plugins/inventory/connect_yeti_rig.py b/server_addon/maya/client/ayon_maya/plugins/inventory/connect_yeti_rig.py deleted file mode 100644 index 2385444403..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/inventory/connect_yeti_rig.py +++ /dev/null @@ -1,187 +0,0 @@ -import os -import json -from collections import defaultdict - -from maya import cmds - -from ayon_core.pipeline import ( - InventoryAction, - get_repres_contexts, - get_representation_path, -) -from ayon_maya.api.lib import get_container_members, get_id - - -class ConnectYetiRig(InventoryAction): - """Connect Yeti Rig with an animation or pointcache.""" - - label = "Connect Yeti Rig" - icon = "link" - color = "white" - - def process(self, containers): - # Validate selection is more than 1. - message = ( - "Only 1 container selected. 2+ containers needed for this action." - ) - if len(containers) == 1: - self.display_warning(message) - return - - # Categorize containers by product type. - containers_by_product_type = defaultdict(list) - repre_ids = { - container["representation"] - for container in containers - } - repre_contexts_by_id = get_repres_contexts(repre_ids) - for container in containers: - repre_id = container["representation"] - repre_context = repre_contexts_by_id[repre_id] - - product_type = repre_context["product"]["productType"] - - containers_by_product_type.setdefault(product_type, []) - containers_by_product_type[product_type].append(container) - - # Validate to only 1 source container. - source_containers = containers_by_product_type.get("animation", []) - source_containers += containers_by_product_type.get("pointcache", []) - source_container_namespaces = [ - x["namespace"] for x in source_containers - ] - message = ( - "{} animation containers selected:\n\n{}\n\nOnly select 1 of type " - "\"animation\" or \"pointcache\".".format( - len(source_containers), source_container_namespaces - ) - ) - if len(source_containers) != 1: - self.display_warning(message) - return - - source_container = source_containers[0] - source_ids = self.nodes_by_id(source_container) - - # Target containers. - target_ids = {} - inputs = [] - - yeti_rig_containers = containers_by_product_type.get("yetiRig") - if not yeti_rig_containers: - self.display_warning( - "Select at least one yetiRig container" - ) - return - - for container in yeti_rig_containers: - target_ids.update(self.nodes_by_id(container)) - repre_id = container["representation"] - - maya_file = get_representation_path( - repre_contexts_by_id[repre_id]["representation"] - ) - _, ext = os.path.splitext(maya_file) - settings_file = maya_file.replace(ext, ".rigsettings") - if not os.path.exists(settings_file): - continue - - with open(settings_file) as f: - inputs.extend(json.load(f)["inputs"]) - - # Compare loaded connections to scene. 
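-        # Each entry in 'inputs' pairs a 'sourceID' with a 'destinationID'
-        # (node ids as returned by 'get_id') and the attribute names in
-        # 'connections' to wire between the cache and the rig.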
- for input in inputs: - source_node = source_ids.get(input["sourceID"]) - target_node = target_ids.get(input["destinationID"]) - - if not source_node or not target_node: - self.log.debug( - "Could not find nodes for input:\n" + - json.dumps(input, indent=4, sort_keys=True) - ) - continue - source_attr, target_attr = input["connections"] - - if not cmds.attributeQuery( - source_attr, node=source_node, exists=True - ): - self.log.debug( - "Could not find attribute {} on node {} for " - "input:\n{}".format( - source_attr, - source_node, - json.dumps(input, indent=4, sort_keys=True) - ) - ) - continue - - if not cmds.attributeQuery( - target_attr, node=target_node, exists=True - ): - self.log.debug( - "Could not find attribute {} on node {} for " - "input:\n{}".format( - target_attr, - target_node, - json.dumps(input, indent=4, sort_keys=True) - ) - ) - continue - - source_plug = "{}.{}".format( - source_node, source_attr - ) - target_plug = "{}.{}".format( - target_node, target_attr - ) - if cmds.isConnected( - source_plug, target_plug, ignoreUnitConversion=True - ): - self.log.debug( - "Connection already exists: {} -> {}".format( - source_plug, target_plug - ) - ) - continue - - cmds.connectAttr(source_plug, target_plug, force=True) - self.log.debug( - "Connected attributes: {} -> {}".format( - source_plug, target_plug - ) - ) - - def nodes_by_id(self, container): - ids = {} - for member in get_container_members(container): - id = get_id(member) - if not id: - continue - ids[id] = member - - return ids - - def display_warning(self, message, show_cancel=False): - """Show feedback to user. - - Returns: - bool - """ - - from qtpy import QtWidgets - - accept = QtWidgets.QMessageBox.Ok - if show_cancel: - buttons = accept | QtWidgets.QMessageBox.Cancel - else: - buttons = accept - - state = QtWidgets.QMessageBox.warning( - None, - "", - message, - buttons=buttons, - defaultButton=accept - ) - - return state == accept diff --git a/server_addon/maya/client/ayon_maya/plugins/inventory/import_modelrender.py b/server_addon/maya/client/ayon_maya/plugins/inventory/import_modelrender.py deleted file mode 100644 index 5e36ec6bc1..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/inventory/import_modelrender.py +++ /dev/null @@ -1,169 +0,0 @@ -import re -import json - -import ayon_api - -from ayon_core.pipeline.load import get_representation_contexts_by_ids -from ayon_core.pipeline import ( - InventoryAction, - get_current_project_name, -) -from ayon_maya.api.lib import ( - maintained_selection, - apply_shaders -) - - -class ImportModelRender(InventoryAction): - - label = "Import Model Render Sets" - icon = "industry" - color = "#55DDAA" - - scene_type_regex = "meta.render.m[ab]" - look_data_type = "meta.render.json" - - @staticmethod - def is_compatible(container): - return ( - container.get("loader") == "ReferenceLoader" - and container.get("name", "").startswith("model") - ) - - def process(self, containers): - from maya import cmds # noqa: F401 - - # --- Query entities that will be used --- - project_name = get_current_project_name() - # Collect representation ids from all containers - repre_ids = { - container["representation"] - for container in containers - } - # Create mapping of representation id to version id - # - used in containers loop - version_id_by_repre_id = { - repre_entity["id"]: repre_entity["versionId"] - for repre_entity in ayon_api.get_representations( - project_name, - representation_ids=repre_ids, - fields={"id", "versionId"} - ) - } - - # Find all representations of 
the versions - version_ids = set(version_id_by_repre_id.values()) - repre_entities = ayon_api.get_representations( - project_name, - version_ids=version_ids, - fields={"id", "name", "versionId"} - ) - repre_entities_by_version_id = { - version_id: [] - for version_id in version_ids - } - for repre_entity in repre_entities: - version_id = repre_entity["versionId"] - repre_entities_by_version_id[version_id].append(repre_entity) - - look_repres_by_version_id = {} - look_repre_ids = set() - for version_id, repre_entities in ( - repre_entities_by_version_id.items() - ): - json_repre = None - look_repres = [] - scene_type_regex = re.compile(self.scene_type_regex) - for repre_entity in repre_entities: - repre_name = repre_entity["name"] - if repre_name == self.look_data_type: - json_repre = repre_entity - - elif scene_type_regex.fullmatch(repre_name): - look_repres.append(repre_entity) - - look_repre = look_repres[0] if look_repres else None - if look_repre: - look_repre_ids.add(look_repre["id"]) - if json_repre: - look_repre_ids.add(json_repre["id"]) - - look_repres_by_version_id[version_id] = (json_repre, look_repre) - - contexts_by_repre_id = get_representation_contexts_by_ids( - project_name, look_repre_ids - ) - - # --- Real process logic --- - # Loop over containers and assign the looks - for container in containers: - con_name = container["objectName"] - nodes = [] - for n in cmds.sets(con_name, query=True, nodesOnly=True) or []: - if cmds.nodeType(n) == "reference": - nodes += cmds.referenceQuery(n, nodes=True) - else: - nodes.append(n) - - repre_id = container["representation"] - version_id = version_id_by_repre_id.get(repre_id) - if version_id is None: - print("Representation '{}' was not found".format(repre_id)) - continue - - json_repre, look_repre = look_repres_by_version_id[version_id] - - print("Importing render sets for model %r" % con_name) - self._assign_model_render( - nodes, json_repre, look_repre, contexts_by_repre_id - ) - - def _assign_model_render( - self, nodes, json_repre, look_repre, contexts_by_repre_id - ): - """Assign nodes a specific published model render data version by id. - - This assumes the nodes correspond with the asset. - - Args: - nodes (list): nodes to assign render data to - json_repre (dict[str, Any]): Representation entity of the json - file. - look_repre (dict[str, Any]): First representation entity of the - look files. - contexts_by_repre_id (dict[str, Any]): Mapping of representation - id to its context. - - Returns: - None - """ - - from maya import cmds # noqa: F401 - - # QUESTION shouldn't be json representation validated too? 
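-        # Without a look scene representation there are no render sets to
-        # import for this version.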
- if not look_repre: - print("No model render sets for this model version..") - return - - # TODO use 'get_representation_path_with_anatomy' instead - # of 'filepath_from_context' - context = contexts_by_repre_id.get(look_repre["id"]) - maya_file = self.filepath_from_context(context) - - context = contexts_by_repre_id.get(json_repre["id"]) - json_file = self.filepath_from_context(context) - - # Import the look file - with maintained_selection(): - shader_nodes = cmds.file(maya_file, - i=True, # import - returnNewNodes=True) - # imprint context data - - # Load relationships - shader_relation = json_file - with open(shader_relation, "r") as f: - relationships = json.load(f) - - # Assign relationships - apply_shaders(relationships, shader_nodes, nodes) diff --git a/server_addon/maya/client/ayon_maya/plugins/inventory/import_reference.py b/server_addon/maya/client/ayon_maya/plugins/inventory/import_reference.py deleted file mode 100644 index 5e42facad4..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/inventory/import_reference.py +++ /dev/null @@ -1,27 +0,0 @@ -from maya import cmds - -from ayon_core.pipeline import InventoryAction -from ayon_maya.api.lib import get_reference_node - - -class ImportReference(InventoryAction): - """Imports selected reference to inside of the file.""" - - label = "Import Reference" - icon = "download" - color = "#d8d8d8" - - def process(self, containers): - for container in containers: - if container["loader"] != "ReferenceLoader": - print("Not a reference, skipping") - continue - - node = container["objectName"] - members = cmds.sets(node, query=True, nodesOnly=True) - ref_node = get_reference_node(members) - - ref_file = cmds.referenceQuery(ref_node, f=True) - cmds.file(ref_file, importReference=True) - - return True # return anything to trigger model refresh diff --git a/server_addon/maya/client/ayon_maya/plugins/inventory/rig_recreate_animation_instance.py b/server_addon/maya/client/ayon_maya/plugins/inventory/rig_recreate_animation_instance.py deleted file mode 100644 index 796a651f8a..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/inventory/rig_recreate_animation_instance.py +++ /dev/null @@ -1,44 +0,0 @@ -from ayon_core.pipeline import ( - InventoryAction, - get_current_project_name, -) -from ayon_core.pipeline.load import get_representation_contexts_by_ids -from ayon_maya.api.lib import ( - create_rig_animation_instance, - get_container_members, -) - - -class RecreateRigAnimationInstance(InventoryAction): - """Recreate animation publish instance for loaded rigs""" - - label = "Recreate rig animation instance" - icon = "wrench" - color = "#888888" - - @staticmethod - def is_compatible(container): - return ( - container.get("loader") == "ReferenceLoader" - and container.get("name", "").startswith("rig") - ) - - def process(self, containers): - project_name = get_current_project_name() - repre_ids = { - container["representation"] - for container in containers - } - contexts_by_repre_id = get_representation_contexts_by_ids( - project_name, repre_ids - ) - - for container in containers: - # todo: delete an existing entry if it exist or skip creation - - namespace = container["namespace"] - repre_id = container["representation"] - context = contexts_by_repre_id[repre_id] - nodes = get_container_members(container) - - create_rig_animation_instance(nodes, context, namespace) diff --git a/server_addon/maya/client/ayon_maya/plugins/inventory/select_containers.py b/server_addon/maya/client/ayon_maya/plugins/inventory/select_containers.py 
deleted file mode 100644 index e45c8a5706..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/inventory/select_containers.py +++ /dev/null @@ -1,46 +0,0 @@ -from maya import cmds - -from ayon_core.pipeline import InventoryAction, registered_host -from ayon_maya.api.lib import get_container_members - - -class SelectInScene(InventoryAction): - """Select nodes in the scene from selected containers in scene inventory""" - - label = "Select in scene" - icon = "search" - color = "#888888" - order = 99 - - def process(self, containers): - - all_members = [] - for container in containers: - members = get_container_members(container) - all_members.extend(members) - cmds.select(all_members, replace=True, noExpand=True) - - -class HighlightBySceneSelection(InventoryAction): - """Select containers in scene inventory from the current scene selection""" - - label = "Highlight by scene selection" - icon = "search" - color = "#888888" - order = 100 - - def process(self, containers): - - selection = set(cmds.ls(selection=True, long=True, objectsOnly=True)) - host = registered_host() - - to_select = [] - for container in host.get_containers(): - members = get_container_members(container) - if any(member in selection for member in members): - to_select.append(container["objectName"]) - - return { - "objectNames": to_select, - "options": {"clear": True} - } diff --git a/server_addon/maya/client/ayon_maya/plugins/load/_load_animation.py b/server_addon/maya/client/ayon_maya/plugins/load/_load_animation.py deleted file mode 100644 index 6d4ebe250c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/_load_animation.py +++ /dev/null @@ -1,103 +0,0 @@ -import ayon_maya.api.plugin -import maya.cmds as cmds - - -def _process_reference(file_url, name, namespace, options): - """Load files by referencing scene in Maya. 
-
-    Args:
-        file_url (str): filepath of the objects to be loaded
-        name (str): product name
-        namespace (str): namespace
-        options (dict): dict storing the parameters
-
-    Returns:
-        list: list of object nodes
-    """
-    from ayon_maya.api.lib import unique_namespace
-    # Get name from asset being loaded
-    # Assuming name is product name from the animation, we split the number
-    # suffix from the name to ensure the namespace is unique
-    name = name.split("_")[0]
-    ext = file_url.split(".")[-1]
-    namespace = unique_namespace(
-        "{}_".format(name),
-        format="%03d",
-        suffix="_{}".format(ext)
-    )
-
-    attach_to_root = options.get("attach_to_root", True)
-    group_name = options["group_name"]
-
-    # no group shall be created
-    if not attach_to_root:
-        group_name = namespace
-
-    nodes = cmds.file(file_url,
-                      namespace=namespace,
-                      sharedReferenceFile=False,
-                      groupReference=attach_to_root,
-                      groupName=group_name,
-                      reference=True,
-                      returnNewNodes=True)
-    return nodes
-
-
-class AbcLoader(ayon_maya.api.plugin.ReferenceLoader):
-    """Loader to reference an Alembic file"""
-
-    product_types = {
-        "animation",
-        "camera",
-        "pointcache",
-    }
-    representations = {"abc"}
-
-    label = "Reference animation"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    def process_reference(self, context, name, namespace, options):
-
-        cmds.loadPlugin("AbcImport.mll", quiet=True)
-        # hero_001 (abc)
-        # asset_counter{optional}
-        path = self.filepath_from_context(context)
-        file_url = self.prepare_root_value(path,
-                                           context["project"]["name"])
-
-        nodes = _process_reference(file_url, name, namespace, options)
-        # load colorbleed ID attribute
-        self[:] = nodes
-
-        return nodes
-
-
-class FbxLoader(ayon_maya.api.plugin.ReferenceLoader):
-    """Loader to reference an Fbx file"""
-
-    product_types = {
-        "animation",
-        "camera",
-    }
-    representations = {"fbx"}
-
-    label = "Reference animation"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    def process_reference(self, context, name, namespace, options):
-
-        cmds.loadPlugin("fbx4maya.mll", quiet=True)
-
-        path = self.filepath_from_context(context)
-        file_url = self.prepare_root_value(path,
-                                           context["project"]["name"])
-
-        nodes = _process_reference(file_url, name, namespace, options)
-
-        self[:] = nodes
-
-        return nodes
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/actions.py b/server_addon/maya/client/ayon_maya/plugins/load/actions.py
deleted file mode 100644
index d28645ea43..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/actions.py
+++ /dev/null
@@ -1,192 +0,0 @@
-"""A module containing generic loader actions that will display in the Loader.
- -""" -import qargparse -from ayon_core.pipeline import load -from ayon_maya.api.lib import ( - maintained_selection, - get_custom_namespace -) -import ayon_maya.api.plugin - - -class SetFrameRangeLoader(load.LoaderPlugin): - """Set frame range excluding pre- and post-handles""" - - product_types = { - "animation", - "camera", - "proxyAbc", - "pointcache", - } - representations = {"abc"} - - label = "Set frame range" - order = 11 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import maya.cmds as cmds - - version_attributes = context["version"]["attrib"] - start = version_attributes.get("frameStart") - end = version_attributes.get("frameEnd") - - if start is None or end is None: - print("Skipping setting frame range because start or " - "end frame data is missing..") - return - - cmds.playbackOptions(minTime=start, - maxTime=end, - animationStartTime=start, - animationEndTime=end) - - -class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Set frame range including pre- and post-handles""" - - product_types = { - "animation", - "camera", - "proxyAbc", - "pointcache", - } - representations = {"abc"} - - label = "Set frame range (with handles)" - order = 12 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import maya.cmds as cmds - - version_attributes = context["version"]["attrib"] - - start = version_attributes.get("frameStart") - end = version_attributes.get("frameEnd") - - if start is None or end is None: - print("Skipping setting frame range because start or " - "end frame data is missing..") - return - - # Include handles - start -= version_attributes.get("handleStart", 0) - end += version_attributes.get("handleEnd", 0) - - cmds.playbackOptions(minTime=start, - maxTime=end, - animationStartTime=start, - animationEndTime=end) - - -class ImportMayaLoader(ayon_maya.api.plugin.Loader): - """Import action for Maya (unmanaged) - - Warning: - The loaded content will be unmanaged and is *not* visible in the - scene inventory. It's purely intended to merge content into your scene - so you could also use it as a new base. - - """ - representations = {"ma", "mb", "obj"} - product_types = { - "model", - "pointcache", - "proxyAbc", - "animation", - "mayaAscii", - "mayaScene", - "setdress", - "layout", - "camera", - "rig", - "camerarig", - "staticMesh", - "workfile", - } - - label = "Import" - order = 10 - icon = "arrow-circle-down" - color = "#775555" - - options = [ - qargparse.Boolean( - "clean_import", - label="Clean import", - default=False, - help="Should all occurrences of cbId be purged?" 
-        )
-    ]
-
-    @classmethod
-    def apply_settings(cls, project_settings):
-        super(ImportMayaLoader, cls).apply_settings(project_settings)
-        cls.enabled = cls.load_settings["import_loader"].get("enabled", True)
-
-    def load(self, context, name=None, namespace=None, data=None):
-        import maya.cmds as cmds
-
-        choice = self.display_warning()
-        if choice is False:
-            return
-
-        custom_group_name, custom_namespace, options = \
-            self.get_custom_namespace_and_group(context, data,
-                                                "import_loader")
-
-        namespace = get_custom_namespace(custom_namespace)
-
-        if not options.get("attach_to_root", True):
-            custom_group_name = namespace
-
-        path = self.filepath_from_context(context)
-        with maintained_selection():
-            nodes = cmds.file(path,
-                              i=True,
-                              preserveReferences=True,
-                              namespace=namespace,
-                              returnNewNodes=True,
-                              groupReference=options.get("attach_to_root",
-                                                         True),
-                              groupName=custom_group_name)
-
-        if data.get("clean_import", False):
-            remove_attributes = ["cbId"]
-            for node in nodes:
-                for attr in remove_attributes:
-                    if cmds.attributeQuery(attr, node=node, exists=True):
-                        full_attr = "{}.{}".format(node, attr)
-                        print("Removing {}".format(full_attr))
-                        cmds.deleteAttr(full_attr)
-
-        # We do not containerize imported content, it remains unmanaged
-        return
-
-    def display_warning(self):
-        """Show warning to ensure the user can't import models by accident
-
-        Returns:
-            bool
-
-        """
-
-        from qtpy import QtWidgets
-
-        accept = QtWidgets.QMessageBox.Ok
-        buttons = accept | QtWidgets.QMessageBox.Cancel
-
-        message = "Are you sure you want to import this"
-        state = QtWidgets.QMessageBox.warning(None,
-                                              "Are you sure?",
-                                              message,
-                                              buttons=buttons,
-                                              defaultButton=accept)
-
-        return state == accept
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_arnold_standin.py b/server_addon/maya/client/ayon_maya/plugins/load/load_arnold_standin.py
deleted file mode 100644
index d01cea3ad4..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_arnold_standin.py
+++ /dev/null
@@ -1,237 +0,0 @@
-import os
-
-import clique
-import maya.cmds as cmds
-from ayon_core.pipeline import get_representation_path
-from ayon_core.settings import get_project_settings
-from ayon_maya.api.lib import (
-    get_attribute_input,
-    get_fps_for_current_context,
-    maintained_selection,
-    unique_namespace,
-)
-from ayon_maya.api.pipeline import containerise
-from ayon_maya.api.plugin import get_load_color_for_product_type
-from ayon_maya.api import plugin
-
-
-def is_sequence(files):
-    sequence = False
-    collections, remainder = clique.assemble(files, minimum_items=1)
-    if collections:
-        sequence = True
-    return sequence
-
-
-class ArnoldStandinLoader(plugin.Loader):
-    """Load as Arnold standin"""
-
-    product_types = {
-        "ass",
-        "assProxy",
-        "animation",
-        "model",
-        "proxyAbc",
-        "pointcache",
-        "usd"
-    }
-    representations = {"ass", "abc", "usda", "usdc", "usd"}
-
-    label = "Load as Arnold standin"
-    order = -5
-    icon = "code-fork"
-    color = "orange"
-
-    def load(self, context, name, namespace, options):
-        if not cmds.pluginInfo("mtoa", query=True, loaded=True):
-            cmds.loadPlugin("mtoa")
-            # Create defaultArnoldRenderOptions before creating aiStandin
-            # which tries to connect it. Since we load the plugin and directly
-            # create aiStandin without the defaultArnoldRenderOptions,
-            # we need to create the render options for aiStandin creation.
- from mtoa.core import createOptions - createOptions() - - import mtoa.ui.arnoldmenu - - version_attributes = context["version"]["attrib"] - - self.log.info("version_attributes: {}\n".format(version_attributes)) - - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - # Root group - label = "{}:{}".format(namespace, name) - root = cmds.group(name=label, empty=True) - - # Set color. - settings = get_project_settings(context["project"]["name"]) - color = get_load_color_for_product_type("ass", settings) - if color is not None: - red, green, blue = color - cmds.setAttr(root + ".useOutlinerColor", True) - cmds.setAttr( - root + ".outlinerColor", red, green, blue - ) - - with maintained_selection(): - # Create transform with shape - transform_name = label + "_standin" - - standin_shape = mtoa.ui.arnoldmenu.createStandIn() - standin = cmds.listRelatives(standin_shape, parent=True)[0] - standin = cmds.rename(standin, transform_name) - standin_shape = cmds.listRelatives(standin, shapes=True)[0] - - cmds.parent(standin, root) - - # Set the standin filepath - repre_path = self.filepath_from_context(context) - path, operator = self._setup_proxy( - standin_shape, repre_path, namespace - ) - cmds.setAttr(standin_shape + ".dso", path, type="string") - sequence = is_sequence(os.listdir(os.path.dirname(repre_path))) - cmds.setAttr(standin_shape + ".useFrameExtension", sequence) - - fps = ( - version_attributes.get("fps") or get_fps_for_current_context() - ) - cmds.setAttr(standin_shape + ".abcFPS", float(fps)) - - nodes = [root, standin, standin_shape] - if operator is not None: - nodes.append(operator) - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def get_next_free_multi_index(self, attr_name): - """Find the next unconnected multi index at the input attribute.""" - for index in range(10000000): - connection_info = cmds.connectionInfo( - "{}[{}]".format(attr_name, index), - sourceFromDestination=True - ) - if len(connection_info or []) == 0: - return index - - def _get_proxy_path(self, path): - basename_split = os.path.basename(path).split(".") - proxy_basename = ( - basename_split[0] + "_proxy." 
+ ".".join(basename_split[1:]) - ) - proxy_path = "/".join([os.path.dirname(path), proxy_basename]) - return proxy_basename, proxy_path - - def _update_operators(self, string_replace_operator, proxy_basename, path): - cmds.setAttr( - string_replace_operator + ".match", - proxy_basename.split(".")[0], - type="string" - ) - cmds.setAttr( - string_replace_operator + ".replace", - os.path.basename(path).split(".")[0], - type="string" - ) - - def _setup_proxy(self, shape, path, namespace): - proxy_basename, proxy_path = self._get_proxy_path(path) - - options_node = "defaultArnoldRenderOptions" - merge_operator = get_attribute_input(options_node + ".operator") - if merge_operator is None: - merge_operator = cmds.createNode("aiMerge") - cmds.connectAttr( - merge_operator + ".message", options_node + ".operator" - ) - - merge_operator = merge_operator.split(".")[0] - - string_replace_operator = cmds.createNode( - "aiStringReplace", name=namespace + ":string_replace_operator" - ) - node_type = "alembic" if path.endswith(".abc") else "procedural" - cmds.setAttr( - string_replace_operator + ".selection", - "*.(@node=='{}')".format(node_type), - type="string" - ) - self._update_operators(string_replace_operator, proxy_basename, path) - - cmds.connectAttr( - string_replace_operator + ".out", - "{}.inputs[{}]".format( - merge_operator, - self.get_next_free_multi_index(merge_operator + ".inputs") - ) - ) - - # We setup the string operator no matter whether there is a proxy or - # not. This makes it easier to update since the string operator will - # always be created. Return original path to use for standin. - if not os.path.exists(proxy_path): - return path, string_replace_operator - - return proxy_path, string_replace_operator - - def update(self, container, context): - # Update the standin - members = cmds.sets(container['objectName'], query=True) - for member in members: - if cmds.nodeType(member) == "aiStringReplace": - string_replace_operator = member - - shapes = cmds.listRelatives(member, shapes=True) - if not shapes: - continue - if cmds.nodeType(shapes[0]) == "aiStandIn": - standin = shapes[0] - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - proxy_basename, proxy_path = self._get_proxy_path(path) - - # Whether there is proxy or not, we still update the string operator. - # If no proxy exists, the string operator won't replace anything. 
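-        # Re-target the string replace operator from the old basename to
-        # the basename of the updated representation.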
- self._update_operators(string_replace_operator, proxy_basename, path) - - dso_path = path - if os.path.exists(proxy_path): - dso_path = proxy_path - cmds.setAttr(standin + ".dso", dso_path, type="string") - - sequence = is_sequence(os.listdir(os.path.dirname(path))) - cmds.setAttr(standin + ".useFrameExtension", sequence) - - cmds.setAttr( - container["objectName"] + ".representation", - repre_entity["id"], - type="string" - ) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_as_template.py b/server_addon/maya/client/ayon_maya/plugins/load/load_as_template.py deleted file mode 100644 index b6bd3c3ab6..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_as_template.py +++ /dev/null @@ -1,33 +0,0 @@ -from ayon_core.lib import BoolDef -from ayon_core.pipeline import registered_host -from ayon_maya.api import plugin -from ayon_maya.api.workfile_template_builder import MayaTemplateBuilder - - -class LoadAsTemplate(plugin.Loader): - """Load workfile as a template """ - - product_types = {"workfile", "mayaScene"} - label = "Load as template" - representations = ["ma", "mb"] - icon = "wrench" - color = "#775555" - order = 10 - - options = [ - BoolDef("keep_placeholders", - label="Keep Placeholders", - default=False), - BoolDef("create_first_version", - label="Create First Version", - default=False), - ] - - def load(self, context, name, namespace, data): - keep_placeholders = data.get("keep_placeholders", False) - create_first_version = data.get("create_first_version", False) - path = self.filepath_from_context(context) - builder = MayaTemplateBuilder(registered_host()) - builder.build_template(template_path=path, - keep_placeholders=keep_placeholders, - create_first_version=create_first_version) diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_assembly.py b/server_addon/maya/client/ayon_maya/plugins/load/load_assembly.py deleted file mode 100644 index 490631aa67..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_assembly.py +++ /dev/null @@ -1,75 +0,0 @@ -import maya.cmds as cmds -from ayon_core.pipeline import remove_container -from ayon_maya.api import setdress -from ayon_maya.api.lib import unique_namespace -from ayon_maya.api.pipeline import containerise -from ayon_maya.api import plugin - - -class AssemblyLoader(plugin.Loader): - - product_types = {"assembly"} - representations = {"json"} - - label = "Load Set Dress" - order = -9 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, data): - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - containers = setdress.load_package( - filepath=self.filepath_from_context(context), - name=name, - namespace=namespace - ) - - self[:] = containers - - # Only containerize if any nodes were loaded by the Loader - nodes = self[:] - if not nodes: - return - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - - 
return setdress.update_package(container, context)
-
-    def remove(self, container):
-        """Remove all sub containers"""
-
-        # Remove all members
-        member_containers = setdress.get_contained_containers(container)
-        for member_container in member_containers:
-            self.log.info("Removing container %s",
-                          member_container['objectName'])
-            remove_container(member_container)
-
-        # Remove alembic hierarchy reference
-        # TODO: Check whether removing all contained references is safe enough
-        members = cmds.sets(container['objectName'], query=True) or []
-        references = cmds.ls(members, type="reference")
-        for reference in references:
-            self.log.info("Removing %s", reference)
-            fname = cmds.referenceQuery(reference, filename=True)
-            cmds.file(fname, removeReference=True)
-
-        # Delete container and its contents
-        if cmds.objExists(container['objectName']):
-            members = cmds.sets(container['objectName'], query=True) or []
-            cmds.delete([container['objectName']] + members)
-
-        # TODO: Ensure namespace is gone
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_audio.py b/server_addon/maya/client/ayon_maya/plugins/load/load_audio.py
deleted file mode 100644
index d9f67fdd90..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_audio.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from ayon_core.pipeline import get_representation_path
-from ayon_maya.api.lib import get_container_members, unique_namespace
-from ayon_maya.api.pipeline import containerise
-from ayon_maya.api import plugin
-from maya import cmds, mel
-
-
-class AudioLoader(plugin.Loader):
-    """Specific loader of audio."""
-
-    product_types = {"audio"}
-    label = "Load audio"
-    representations = {"wav"}
-    icon = "volume-up"
-    color = "orange"
-
-    def load(self, context, name, namespace, data):
-
-        start_frame = cmds.playbackOptions(query=True, min=True)
-        sound_node = cmds.sound(
-            file=self.filepath_from_context(context), offset=start_frame
-        )
-        cmds.timeControl(
-            mel.eval("$gPlayBackSlider=$gPlayBackSlider"),
-            edit=True,
-            sound=sound_node,
-            displaySound=True
-        )
-
-        folder_name = context["folder"]["name"]
-        namespace = namespace or unique_namespace(
-            folder_name + "_",
-            prefix="_" if folder_name[0].isdigit() else "",
-            suffix="_",
-        )
-
-        return containerise(
-            name=name,
-            namespace=namespace,
-            nodes=[sound_node],
-            context=context,
-            loader=self.__class__.__name__
-        )
-
-    def update(self, container, context):
-        repre_entity = context["representation"]
-
-        members = get_container_members(container)
-        audio_nodes = cmds.ls(members, type="audio")
-
-        assert audio_nodes is not None, "Audio node not found."
-        audio_node = audio_nodes[0]
-
-        current_sound = cmds.timeControl(
-            mel.eval("$gPlayBackSlider=$gPlayBackSlider"),
-            query=True,
-            sound=True
-        )
-        activate_sound = current_sound == audio_node
-
-        path = get_representation_path(repre_entity)
-
-        cmds.sound(
-            audio_node,
-            edit=True,
-            file=path
-        )
-
-        # The source start + end does not automatically update itself to the
-        # length of the new audio file, even though Maya does that when
-        # creating a new audio node. So to update we compute it manually.
-        # This would however override any source start and source end a user
-        # might have done on the original audio node after load.
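-        # duration (seconds) = frameCount / sampleRate; scaled by the
-        # workfile FPS to get the source end in timeline frames.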
- audio_frame_count = cmds.getAttr("{}.frameCount".format(audio_node)) - audio_sample_rate = cmds.getAttr("{}.sampleRate".format(audio_node)) - duration_in_seconds = audio_frame_count / audio_sample_rate - fps = mel.eval('currentTimeUnitToFPS()') # workfile FPS - source_start = 0 - source_end = (duration_in_seconds * fps) - cmds.setAttr("{}.sourceStart".format(audio_node), source_start) - cmds.setAttr("{}.sourceEnd".format(audio_node), source_end) - - if activate_sound: - # maya by default deactivates it from timeline on file change - cmds.timeControl( - mel.eval("$gPlayBackSlider=$gPlayBackSlider"), - edit=True, - sound=audio_node, - displaySound=True - ) - - cmds.setAttr( - container["objectName"] + ".representation", - repre_entity["id"], - type="string" - ) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_gpucache.py b/server_addon/maya/client/ayon_maya/plugins/load/load_gpucache.py deleted file mode 100644 index 795d01fd5a..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_gpucache.py +++ /dev/null @@ -1,101 +0,0 @@ -import maya.cmds as cmds -from ayon_core.pipeline import get_representation_path -from ayon_core.settings import get_project_settings -from ayon_maya.api.lib import unique_namespace -from ayon_maya.api.pipeline import containerise -from ayon_maya.api import plugin -from ayon_maya.api.plugin import get_load_color_for_product_type - - -class GpuCacheLoader(plugin.Loader): - """Load Alembic as gpuCache""" - - product_types = {"model", "animation", "proxyAbc", "pointcache"} - representations = {"abc", "gpu_cache"} - - label = "Load Gpu Cache" - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, data): - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - cmds.loadPlugin("gpuCache", quiet=True) - - # Root group - label = "{}:{}".format(namespace, name) - root = cmds.group(name=label, empty=True) - - project_name = context["project"]["name"] - settings = get_project_settings(project_name) - color = get_load_color_for_product_type("model", settings) - if color is not None: - red, green, blue = color - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr( - root + ".outlinerColor", red, green, blue - ) - - # Create transform with shape - transform_name = label + "_GPU" - transform = cmds.createNode("transform", name=transform_name, - parent=root) - cache = cmds.createNode("gpuCache", - parent=transform, - name="{0}Shape".format(transform_name)) - - # Set the cache filepath - path = self.filepath_from_context(context) - cmds.setAttr(cache + '.cacheFileName', path, type="string") - cmds.setAttr(cache + '.cacheGeomPath', "|", type="string") # root - - # Lock parenting of the transform and cache - cmds.lockNode([transform, cache], lock=True) - - nodes = [root, transform, cache] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - 
repre_entity = context["representation"] - path = get_representation_path(repre_entity) - - # Update the cache - members = cmds.sets(container['objectName'], query=True) - caches = cmds.ls(members, type="gpuCache", long=True) - - assert len(caches) == 1, "This is a bug" - - for cache in caches: - cmds.setAttr(cache + ".cacheFileName", path, type="string") - - cmds.setAttr(container["objectName"] + ".representation", - repre_entity["id"], - type="string") - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_image.py b/server_addon/maya/client/ayon_maya/plugins/load/load_image.py deleted file mode 100644 index 453e24a2d5..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_image.py +++ /dev/null @@ -1,330 +0,0 @@ -import copy - -from ayon_core.lib import EnumDef -from ayon_core.pipeline import get_current_host_name -from ayon_core.pipeline.colorspace import ( - get_current_context_imageio_config_preset, - get_imageio_file_rules, - get_imageio_file_rules_colorspace_from_filepath, -) -from ayon_core.pipeline.load.utils import get_representation_path_from_context -from ayon_core.settings import get_project_settings -from ayon_maya.api.lib import namespaced, unique_namespace -from ayon_maya.api.pipeline import containerise -from ayon_maya.api import plugin -from maya import cmds - - -def create_texture(): - """Create place2dTexture with file node with uv connections - - Mimics Maya "file [Texture]" creation. - """ - - place = cmds.shadingNode("place2dTexture", asUtility=True, name="place2d") - file = cmds.shadingNode("file", asTexture=True, name="file") - - connections = ["coverage", "translateFrame", "rotateFrame", "rotateUV", - "mirrorU", "mirrorV", "stagger", "wrapV", "wrapU", - "repeatUV", "offset", "noiseUV", "vertexUvThree", - "vertexUvTwo", "vertexUvOne", "vertexCameraOne"] - for attr in connections: - src = "{}.{}".format(place, attr) - dest = "{}.{}".format(file, attr) - cmds.connectAttr(src, dest) - - cmds.connectAttr(place + '.outUV', file + '.uvCoord') - cmds.connectAttr(place + '.outUvFilterSize', file + '.uvFilterSize') - - return file, place - - -def create_projection(): - """Create texture with place3dTexture and projection - - Mimics Maya "file [Projection]" creation. - """ - - file, place = create_texture() - projection = cmds.shadingNode("projection", asTexture=True, - name="projection") - place3d = cmds.shadingNode("place3dTexture", asUtility=True, - name="place3d") - - cmds.connectAttr(place3d + '.worldInverseMatrix[0]', - projection + ".placementMatrix") - cmds.connectAttr(file + '.outColor', projection + ".image") - - return file, place, projection, place3d - - -def create_stencil(): - """Create texture with extra place2dTexture offset and stencil - - Mimics Maya "file [Stencil]" creation. 
- """ - - file, place = create_texture() - - place_stencil = cmds.shadingNode("place2dTexture", asUtility=True, - name="place2d_stencil") - stencil = cmds.shadingNode("stencil", asTexture=True, name="stencil") - - for src_attr, dest_attr in [ - ("outUV", "uvCoord"), - ("outUvFilterSize", "uvFilterSize") - ]: - src_plug = "{}.{}".format(place_stencil, src_attr) - cmds.connectAttr(src_plug, "{}.{}".format(place, dest_attr)) - cmds.connectAttr(src_plug, "{}.{}".format(stencil, dest_attr)) - - return file, place, stencil, place_stencil - - -class FileNodeLoader(plugin.Loader): - """File node loader.""" - - product_types = {"image", "plate", "render"} - label = "Load file node" - representations = {"exr", "tif", "png", "jpg"} - icon = "image" - color = "orange" - order = 2 - - options = [ - EnumDef( - "mode", - items={ - "texture": "Texture", - "projection": "Projection", - "stencil": "Stencil" - }, - default="texture", - label="Texture Mode" - ) - ] - - def load(self, context, name, namespace, data): - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - with namespaced(namespace, new=True) as namespace: - # Create the nodes within the namespace - nodes = { - "texture": create_texture, - "projection": create_projection, - "stencil": create_stencil - }[data.get("mode", "texture")]() - - file_node = cmds.ls(nodes, type="file")[0] - - self._apply_representation_context(context, file_node) - - # For ease of access for the user select all the nodes and select - # the file node last so that UI shows its attributes by default - cmds.select(list(nodes) + [file_node], replace=True) - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__ - ) - - def update(self, container, context): - repre_entity = context["representation"] - - members = cmds.sets(container['objectName'], query=True) - file_node = cmds.ls(members, type="file")[0] - - self._apply_representation_context(context, file_node) - - # Update representation - cmds.setAttr( - container["objectName"] + ".representation", - repre_entity["id"], - type="string" - ) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass - - def _apply_representation_context(self, context, file_node): - """Update the file node to match the context. - - This sets the file node's attributes for: - - file path - - udim tiling mode (if it is an udim tile) - - use frame extension (if it is a sequence) - - colorspace - - """ - - repre_context = context["representation"]["context"] - has_frames = repre_context.get("frame") is not None - has_udim = repre_context.get("udim") is not None - - # Set UV tiling mode if UDIM tiles - if has_udim: - cmds.setAttr(file_node + ".uvTilingMode", 3) # UDIM-tiles - else: - cmds.setAttr(file_node + ".uvTilingMode", 0) # off - - # Enable sequence if publish has `startFrame` and `endFrame` and - # `startFrame != endFrame` - if has_frames and self._is_sequence(context): - # When enabling useFrameExtension maya automatically - # connects an expression to .frameExtension to set - # the current frame. 
However, this expression is generated - # with some delay and thus it'll show a warning if frame 0 - # doesn't exist because we're explicitly setting the - # token. - cmds.setAttr(file_node + ".useFrameExtension", True) - else: - cmds.setAttr(file_node + ".useFrameExtension", False) - - # Set the file node path attribute - path = self._format_path(context) - cmds.setAttr(file_node + ".fileTextureName", path, type="string") - - # Set colorspace - colorspace = self._get_colorspace(context) - if colorspace: - cmds.setAttr(file_node + ".colorSpace", colorspace, type="string") - else: - self.log.debug("Unknown colorspace - setting colorspace skipped.") - - def _is_sequence(self, context): - """Check whether frameStart and frameEnd are not the same.""" - version = context["version"] - representation = context["representation"] - - # TODO this is invalid logic, it should be based only on - # representation entity - for entity in [representation, version]: - # Frame range can be set on version or representation. - # When set on representation it overrides version data. - attributes = entity["attrib"] - data = entity["data"] - start = data.get("frameStartHandle", attributes.get("frameStart")) - end = data.get("frameEndHandle", attributes.get("frameEnd")) - - if start is None or end is None: - continue - - if start != end: - return True - else: - return False - - return False - - def _get_colorspace(self, context): - """Return colorspace of the file to load. - - Retrieves the explicit colorspace from the publish. If no colorspace - data is stored with published content then project imageio settings - are used to make an assumption of the colorspace based on the file - rules. If no file rules match then None is returned. - - Returns: - str or None: The colorspace of the file or None if not detected. 
-
-        """
-
-        # We can't apply color spaces if management is not enabled
-        if not cmds.colorManagementPrefs(query=True, cmEnabled=True):
-            return
-
-        representation = context["representation"]
-        colorspace_data = representation.get("data", {}).get("colorspaceData")
-        if colorspace_data:
-            return colorspace_data["colorspace"]
-
-        # Assume colorspace from filepath based on project settings
-        project_name = context["project"]["name"]
-        host_name = get_current_host_name()
-        project_settings = get_project_settings(project_name)
-
-        config_data = get_current_context_imageio_config_preset(
-            project_settings=project_settings
-        )
-
-        # ignore if host imageio is not enabled
-        if not config_data:
-            return
-
-        file_rules = get_imageio_file_rules(
-            project_name, host_name,
-            project_settings=project_settings
-        )
-
-        path = get_representation_path_from_context(context)
-        colorspace = get_imageio_file_rules_colorspace_from_filepath(
-            path,
-            host_name,
-            project_name,
-            config_data=config_data,
-            file_rules=file_rules,
-            project_settings=project_settings
-        )
-
-        return colorspace
-
-    def _format_path(self, context):
-        """Format the path with correct tokens for frames and udim tiles."""
-
-        context = copy.deepcopy(context)
-        representation = context["representation"]
-        template = representation.get("attrib", {}).get("template")
-        if not template:
-            # No template to find token locations for
-            return get_representation_path_from_context(context)
-
-        def _placeholder(key):
-            # Substitute with a long placeholder value so that potential
-            # custom formatting with padding doesn't find its way into
-            # our formatting, so that wouldn't be padded as 0
-            return "___{}___".format(key)
-
-        # We format UDIM and Frame numbers with their specific tokens. To do so
-        # we in-place change the representation context data to format the path
-        # with our own data
-        tokens = {
-            "frame": "<f>",
-            "udim": "<UDIM>"
-        }
-        has_tokens = False
-        repre_context = representation["context"]
-        for key, _token in tokens.items():
-            if key in repre_context:
-                repre_context[key] = _placeholder(key)
-                has_tokens = True
-
-        # Replace with our custom template that has the tokens set
-        representation["attrib"]["template"] = template
-        path = get_representation_path_from_context(context)
-
-        if has_tokens:
-            for key, token in tokens.items():
-                if key in repre_context:
-                    path = path.replace(_placeholder(key), token)
-
-        return path
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_image_plane.py b/server_addon/maya/client/ayon_maya/plugins/load/load_image_plane.py
deleted file mode 100644
index 3da67221e2..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_image_plane.py
+++ /dev/null
@@ -1,271 +0,0 @@
-from ayon_core.pipeline import get_representation_path
-from ayon_maya.api.lib import (
-    get_container_members,
-    namespaced,
-    pairwise,
-    unique_namespace,
-)
-from ayon_maya.api.pipeline import containerise
-from ayon_maya.api import plugin
-from maya import cmds
-from qtpy import QtCore, QtWidgets
-
-
-def disconnect_inputs(plug):
-    overrides = cmds.listConnections(plug,
-                                     source=True,
-                                     destination=False,
-                                     plugs=True,
-                                     connections=True) or []
-    for dest, src in pairwise(overrides):
-        cmds.disconnectAttr(src, dest)
-
-
-class CameraWindow(QtWidgets.QDialog):
-
-    def __init__(self, cameras):
-        super(CameraWindow, self).__init__()
-        self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)
-
-        self.camera = None
-
-        self.widgets = {
-            "label": QtWidgets.QLabel("Select camera for image plane."),
-            
"list": QtWidgets.QListWidget(), - "staticImagePlane": QtWidgets.QCheckBox(), - "showInAllViews": QtWidgets.QCheckBox(), - "warning": QtWidgets.QLabel("No cameras selected!"), - "buttons": QtWidgets.QWidget(), - "okButton": QtWidgets.QPushButton("Ok"), - "cancelButton": QtWidgets.QPushButton("Cancel") - } - - # Build warning. - self.widgets["warning"].setVisible(False) - self.widgets["warning"].setStyleSheet("color: red") - - # Build list. - for camera in cameras: - self.widgets["list"].addItem(camera) - - - # Build buttons. - layout = QtWidgets.QHBoxLayout(self.widgets["buttons"]) - layout.addWidget(self.widgets["okButton"]) - layout.addWidget(self.widgets["cancelButton"]) - - # Build layout. - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(self.widgets["label"]) - layout.addWidget(self.widgets["list"]) - layout.addWidget(self.widgets["buttons"]) - layout.addWidget(self.widgets["warning"]) - - self.widgets["okButton"].pressed.connect(self.on_ok_pressed) - self.widgets["cancelButton"].pressed.connect(self.on_cancel_pressed) - self.widgets["list"].itemPressed.connect(self.on_list_itemPressed) - - def on_list_itemPressed(self, item): - self.camera = item.text() - - def on_ok_pressed(self): - if self.camera is None: - self.widgets["warning"].setVisible(True) - return - - self.close() - - def on_cancel_pressed(self): - self.camera = None - self.close() - - -class ImagePlaneLoader(plugin.Loader): - """Specific loader of plate for image planes on selected camera.""" - - product_types = {"image", "plate", "render"} - label = "Load imagePlane" - representations = {"mov", "exr", "preview", "png", "jpg"} - icon = "image" - color = "orange" - - def load(self, context, name, namespace, data, options=None): - - image_plane_depth = 1000 - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - # Get camera from user selection. 
- # is_static_image_plane = None - # is_in_all_views = None - camera = data.get("camera") if data else None - - if not camera: - cameras = cmds.ls(type="camera") - - # Cameras by names - camera_names = {} - for camera in cameras: - parent = cmds.listRelatives(camera, parent=True, path=True)[0] - camera_names[parent] = camera - - camera_names["Create new camera."] = "create-camera" - window = CameraWindow(camera_names.keys()) - window.exec_() - # Skip if no camera was selected (Dialog was closed) - if window.camera not in camera_names: - return - camera = camera_names[window.camera] - - if camera == "create-camera": - camera = cmds.createNode("camera") - - if camera is None: - return - - try: - cmds.setAttr("{}.displayResolution".format(camera), True) - cmds.setAttr("{}.farClipPlane".format(camera), - image_plane_depth * 10) - except RuntimeError: - pass - - # Create image plane - with namespaced(namespace): - # Create inside the namespace - image_plane_transform, image_plane_shape = cmds.imagePlane( - fileName=self.filepath_from_context(context), - camera=camera - ) - - # Set colorspace - colorspace = self.get_colorspace(context["representation"]) - if colorspace: - cmds.setAttr( - "{}.ignoreColorSpaceFileRules".format(image_plane_shape), - True - ) - cmds.setAttr("{}.colorSpace".format(image_plane_shape), - colorspace, type="string") - - # Set offset frame range - start_frame = cmds.playbackOptions(query=True, min=True) - end_frame = cmds.playbackOptions(query=True, max=True) - - for attr, value in { - "depth": image_plane_depth, - "frameOffset": 0, - "frameIn": start_frame, - "frameOut": end_frame, - "frameCache": end_frame, - "useFrameExtension": True - }.items(): - plug = "{}.{}".format(image_plane_shape, attr) - cmds.setAttr(plug, value) - - movie_representations = {"mov", "preview"} - if context["representation"]["name"] in movie_representations: - cmds.setAttr(image_plane_shape + ".type", 2) - - # Ask user whether to use sequence or still image. - if context["representation"]["name"] == "exr": - # Ensure OpenEXRLoader plugin is loaded. - cmds.loadPlugin("OpenEXRLoader", quiet=True) - - message = ( - "Hold image sequence on first frame?" - "\n{} files available.".format( - len(context["representation"]["files"]) - ) - ) - reply = QtWidgets.QMessageBox.information( - None, - "Frame Hold.", - message, - QtWidgets.QMessageBox.Yes, - QtWidgets.QMessageBox.No - ) - if reply == QtWidgets.QMessageBox.Yes: - frame_extension_plug = "{}.frameExtension".format(image_plane_shape) # noqa - - # Remove current frame expression - disconnect_inputs(frame_extension_plug) - - cmds.setAttr(frame_extension_plug, start_frame) - - new_nodes = [image_plane_transform, image_plane_shape] - - return containerise( - name=name, - namespace=namespace, - nodes=new_nodes, - context=context, - loader=self.__class__.__name__ - ) - - def update(self, container, context): - folder_entity = context["folder"] - repre_entity = context["representation"] - - members = get_container_members(container) - image_planes = cmds.ls(members, type="imagePlane") - assert image_planes, "Image plane not found." 
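-        # Only a single image plane is expected per container, so update
-        # the first one found.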
-        image_plane_shape = image_planes[0]
-
-        path = get_representation_path(repre_entity)
-        cmds.setAttr("{}.imageName".format(image_plane_shape),
-                     path,
-                     type="string")
-        cmds.setAttr("{}.representation".format(container["objectName"]),
-                     repre_entity["id"],
-                     type="string")
-
-        colorspace = self.get_colorspace(repre_entity)
-        if colorspace:
-            cmds.setAttr(
-                "{}.ignoreColorSpaceFileRules".format(image_plane_shape),
-                True
-            )
-            cmds.setAttr("{}.colorSpace".format(image_plane_shape),
-                         colorspace, type="string")
-
-        # Set frame range.
-        start_frame = folder_entity["attrib"]["frameStart"]
-        end_frame = folder_entity["attrib"]["frameEnd"]
-
-        for attr, value in {
-            "frameOffset": 0,
-            "frameIn": start_frame,
-            "frameOut": end_frame,
-            "frameCache": end_frame
-        }.items():
-            plug = "{}.{}".format(image_plane_shape, attr)
-            cmds.setAttr(plug, value)
-
-    def switch(self, container, context):
-        self.update(container, context)
-
-    def remove(self, container):
-        members = cmds.sets(container['objectName'], query=True)
-        cmds.lockNode(members, lock=False)
-        cmds.delete([container['objectName']] + members)
-
-        # Clean up the namespace
-        try:
-            cmds.namespace(removeNamespace=container['namespace'],
-                           deleteNamespaceContent=True)
-        except RuntimeError:
-            pass
-
-    def get_colorspace(self, representation):
-
-        data = representation.get("data", {}).get("colorspaceData", {})
-        if not data:
-            return
-
-        colorspace = data.get("colorspace")
-        return colorspace
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_look.py b/server_addon/maya/client/ayon_maya/plugins/load/load_look.py
deleted file mode 100644
index da7b3691fd..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_look.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Look loader."""
-import json
-from collections import defaultdict
-
-import ayon_maya.api.plugin
-from ayon_api import get_representation_by_name
-from ayon_core.pipeline import get_representation_path
-from ayon_core.tools.utils import ScrollMessageBox
-from ayon_maya.api import lib
-from ayon_maya.api.lib import get_reference_node
-from qtpy import QtWidgets
-
-
-class LookLoader(ayon_maya.api.plugin.ReferenceLoader):
-    """Specific loader for lookdev"""
-
-    product_types = {"look"}
-    representations = {"ma"}
-
-    label = "Reference look"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    def process_reference(self, context, name, namespace, options):
-        from maya import cmds
-
-        with lib.maintained_selection():
-            file_url = self.prepare_root_value(
-                file_url=self.filepath_from_context(context),
-                project_name=context["project"]["name"]
-            )
-            nodes = cmds.file(file_url,
                              namespace=namespace,
                              reference=True,
                              returnNewNodes=True)
-
-        self[:] = nodes
-
-    def switch(self, container, context):
-        self.update(container, context)
-
-    def update(self, container, context):
-        """
-        Called by Scene Inventory when the look should be updated to the
-        current version.
-        If any reference edits cannot be applied, e.g. a shader was renamed
-        and the material is not present, the reference is unloaded and
-        cleaned.
-        All failed edits are highlighted to the user via message box.
- - Args: - container: object that has look to be updated - context: (dict): relationship data to get proper - representation from DB and persisted - data in .json - Returns: - None - """ - from maya import cmds - - # Get reference node from container members - members = lib.get_container_members(container) - reference_node = get_reference_node(members, log=self.log) - - shader_nodes = cmds.ls(members, type='shadingEngine') - orig_nodes = set(self._get_nodes_with_shader(shader_nodes)) - - # Trigger the regular reference update on the ReferenceLoader - super(LookLoader, self).update(container, context) - - # get new applied shaders and nodes from new version - shader_nodes = cmds.ls(members, type='shadingEngine') - nodes = set(self._get_nodes_with_shader(shader_nodes)) - - version_id = context["version"]["id"] - project_name = context["project"]["name"] - json_representation = get_representation_by_name( - project_name, "json", version_id - ) - - # Load relationships - shader_relation = get_representation_path(json_representation) - with open(shader_relation, "r") as f: - json_data = json.load(f) - - # update of reference could result in failed edits - material is not - # present because of renaming etc. If so highlight failed edits to user - failed_edits = cmds.referenceQuery(reference_node, - editStrings=True, - failedEdits=True, - successfulEdits=False) - if failed_edits: - # clean references - removes failed reference edits - cmds.file(cr=reference_node) # cleanReference - - # reapply shading groups from json representation on orig nodes - lib.apply_shaders(json_data, shader_nodes, orig_nodes) - - msg = ["During reference update some edits failed.", - "All successful edits were kept intact.\n", - "Failed and removed edits:"] - msg.extend(failed_edits) - - msg = ScrollMessageBox(QtWidgets.QMessageBox.Warning, - "Some reference edit failed", - msg) - msg.exec_() - - attributes = json_data.get("attributes", []) - - # region compute lookup - nodes_by_id = defaultdict(list) - for node in nodes: - nodes_by_id[lib.get_id(node)].append(node) - lib.apply_attributes(attributes, nodes_by_id) - - def _get_nodes_with_shader(self, shader_nodes): - """ - Returns list of nodes belonging to specific shaders - Args: - shader_nodes: of Shader groups - Returns - node names - """ - from maya import cmds - - for shader in shader_nodes: - future = cmds.listHistory(shader, future=True) - connections = cmds.listConnections(future, - type='mesh') - if connections: - # Ensure unique entries only to optimize query and results - connections = list(set(connections)) - return cmds.listRelatives(connections, - shapes=True, - fullPath=True) or [] - return [] diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_matchmove.py b/server_addon/maya/client/ayon_maya/plugins/load/load_matchmove.py deleted file mode 100644 index 7689a3ca5e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_matchmove.py +++ /dev/null @@ -1,31 +0,0 @@ -from ayon_maya.api import plugin -from maya import mel - - -class MatchmoveLoader(plugin.Loader): - """ - This will run matchmove script to create track in scene. 
-
-    Supported script types are .py and .mel
-    """
-
-    product_types = {"matchmove"}
-    representations = {"py", "mel"}
-    defaults = ["Camera", "Object", "Mocap"]

-    label = "Run matchmove script"
-    icon = "empire"
-    color = "orange"
-
-    def load(self, context, name, namespace, data):
-        path = self.filepath_from_context(context)
-        if path.lower().endswith(".py"):
-            # Use a context manager so the script file handle is closed
-            with open(path) as script_file:
-                exec(script_file.read())
-
-        elif path.lower().endswith(".mel"):
-            mel.eval('source "{}"'.format(path))
-
-        else:
-            self.log.error("Unsupported script type")
-
-        return True
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_maya_usd.py b/server_addon/maya/client/ayon_maya/plugins/load/load_maya_usd.py
deleted file mode 100644
index 79fc1fc94f..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_maya_usd.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# -*- coding: utf-8 -*-
-import maya.cmds as cmds
-from ayon_core.pipeline import get_representation_path
-from ayon_core.pipeline.load import get_representation_path_from_context
-from ayon_maya.api.lib import namespaced, unique_namespace
-from ayon_maya.api.pipeline import containerise
-from ayon_maya.api import plugin
-
-
-class MayaUsdLoader(plugin.Loader):
-    """Read USD data in a Maya USD Proxy"""
-
-    product_types = {"model", "usd", "pointcache", "animation"}
-    representations = {"usd", "usda", "usdc", "usdz", "abc"}
-
-    label = "Load USD to Maya Proxy"
-    order = -1
-    icon = "code-fork"
-    color = "orange"
-
-    def load(self, context, name=None, namespace=None, options=None):
-        folder_name = context["folder"]["name"]
-        namespace = namespace or unique_namespace(
-            folder_name + "_",
-            prefix="_" if folder_name[0].isdigit() else "",
-            suffix="_",
-        )
-
-        # Make sure we can load the plugin
-        cmds.loadPlugin("mayaUsdPlugin", quiet=True)
-
-        path = get_representation_path_from_context(context)
-
-        # Create the shape
-        cmds.namespace(addNamespace=namespace)
-        with namespaced(namespace, new=False):
-            transform = cmds.createNode("transform",
-                                        name=name,
-                                        skipSelect=True)
-            proxy = cmds.createNode('mayaUsdProxyShape',
-                                    name="{}Shape".format(name),
-                                    parent=transform,
-                                    skipSelect=True)
-
-        cmds.connectAttr("time1.outTime", "{}.time".format(proxy))
-        cmds.setAttr("{}.filePath".format(proxy), path, type="string")
-
-        # By default, we force the proxy to not use a shared stage because
-        # when doing so Maya will quite easily allow to save into the
-        # loaded usd file. Since we are loading published files we want to
-        # avoid altering them. Unshared stages also save their edits into
-        # the workfile as an artist might expect it to do.
- cmds.setAttr("{}.shareStage".format(proxy), False) - # cmds.setAttr("{}.shareStage".format(proxy), lock=True) - - nodes = [transform, proxy] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - # type: (dict, dict) -> None - """Update container with specified representation.""" - node = container['objectName'] - assert cmds.objExists(node), "Missing container" - - members = cmds.sets(node, query=True) or [] - shapes = cmds.ls(members, type="mayaUsdProxyShape") - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - for shape in shapes: - cmds.setAttr("{}.filePath".format(shape), path, type="string") - - cmds.setAttr("{}.representation".format(node), - repre_entity["id"], - type="string") - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - # type: (dict) -> None - """Remove loaded container.""" - # Delete container and its contents - if cmds.objExists(container['objectName']): - members = cmds.sets(container['objectName'], query=True) or [] - cmds.delete([container['objectName']] + members) - - # Remove the namespace, if empty - namespace = container['namespace'] - if cmds.namespace(exists=namespace): - members = cmds.namespaceInfo(namespace, listNamespace=True) - if not members: - cmds.namespace(removeNamespace=namespace) - else: - self.log.warning("Namespace not deleted because it " - "still has members: %s", namespace) diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd.py b/server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd.py deleted file mode 100644 index 026740a957..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import maya.cmds as cmds -from ayon_api import get_representation_by_id -from ayon_core.pipeline import get_representation_path -from ayon_maya.api import plugin -from ayon_maya.api.lib import maintained_selection, namespaced, unique_namespace -from ayon_maya.api.pipeline import containerise -from maya import mel - - -class MultiverseUsdLoader(plugin.Loader): - """Read USD data in a Multiverse Compound""" - - product_types = { - "model", - "usd", - "mvUsdComposition", - "mvUsdOverride", - "pointcache", - "animation", - } - representations = {"usd", "usda", "usdc", "usdz", "abc"} - - label = "Load USD to Multiverse" - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, options=None): - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - path = self.filepath_from_context(context) - - # Make sure we can load the plugin - cmds.loadPlugin("MultiverseForMaya", quiet=True) - import multiverse - - # Create the shape - with maintained_selection(): - cmds.namespace(addNamespace=namespace) - with namespaced(namespace, new=False): - shape = multiverse.CreateUsdCompound(path) - transform = cmds.listRelatives( - shape, parent=True, fullPath=True)[0] - - nodes = [transform, shape] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - # type: (dict, dict) -> None - """Update container with 
specified representation."""
-        node = container['objectName']
-        assert cmds.objExists(node), "Missing container"
-
-        members = cmds.sets(node, query=True) or []
-        shapes = cmds.ls(members, type="mvUsdCompoundShape")
-        assert shapes, "Cannot find mvUsdCompoundShape in container"
-
-        project_name = context["project"]["name"]
-        repre_entity = context["representation"]
-        path = get_representation_path(repre_entity)
-        prev_representation_id = cmds.getAttr("{}.representation".format(node))
-        prev_representation = get_representation_by_id(project_name,
-                                                       prev_representation_id)
-        prev_path = os.path.normpath(prev_representation["attrib"]["path"])
-
-        # Make sure we can load the plugin
-        cmds.loadPlugin("MultiverseForMaya", quiet=True)
-        import multiverse
-
-        for shape in shapes:
-
-            asset_paths = multiverse.GetUsdCompoundAssetPaths(shape)
-            asset_paths = [os.path.normpath(p) for p in asset_paths]
-
-            assert asset_paths.count(prev_path) == 1, \
-                "Couldn't find matching path (or too many)"
-            prev_path_idx = asset_paths.index(prev_path)
-
-            asset_paths[prev_path_idx] = path
-
-            multiverse.SetUsdCompoundAssetPaths(shape, asset_paths)
-
-        cmds.setAttr("{}.representation".format(node),
-                     repre_entity["id"],
-                     type="string")
-        mel.eval('refreshEditorTemplates;')
-
-    def switch(self, container, context):
-        self.update(container, context)
-
-    def remove(self, container):
-        # type: (dict) -> None
-        """Remove loaded container."""
-        # Delete container and its contents
-        if cmds.objExists(container['objectName']):
-            members = cmds.sets(container['objectName'], query=True) or []
-            cmds.delete([container['objectName']] + members)
-
-        # Remove the namespace, if empty
-        namespace = container['namespace']
-        if cmds.namespace(exists=namespace):
-            members = cmds.namespaceInfo(namespace, listNamespace=True)
-            if not members:
-                cmds.namespace(removeNamespace=namespace)
-            else:
-                self.log.warning("Namespace not deleted because it "
-                                 "still has members: %s", namespace)
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd_over.py b/server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd_over.py
deleted file mode 100644
index a8fff12577..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_multiverse_usd_over.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-
-import maya.cmds as cmds
-import qargparse
-from ayon_api import get_representation_by_id
-from ayon_core.pipeline import get_representation_path
-from ayon_maya.api import plugin
-from ayon_maya.api.lib import maintained_selection
-from ayon_maya.api.pipeline import containerise
-from maya import mel
-
-
-class MultiverseUsdOverLoader(plugin.Loader):
-    """Reference file"""
-
-    product_types = {"mvUsdOverride"}
-    representations = {"usda", "usd", "usdz"}
-
-    label = "Load Usd Override into Compound"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    options = [
-        qargparse.String(
-            "Which Compound",
-            label="Compound",
-            help="Select which compound to add this as a layer to."
-        )
-    ]
-
-    def load(self, context, name=None, namespace=None, options=None):
-        current_usd = cmds.ls(selection=True,
-                              type="mvUsdCompoundShape",
-                              dag=True,
-                              long=True)
-        if len(current_usd) != 1:
-            self.log.error("Current selection invalid: '{}', "
-                           "must contain exactly 1 mvUsdCompoundShape."
- "".format(current_usd)) - return - - # Make sure we can load the plugin - cmds.loadPlugin("MultiverseForMaya", quiet=True) - import multiverse - - path = self.filepath_from_context(context) - nodes = current_usd - with maintained_selection(): - multiverse.AddUsdCompoundAssetPath(current_usd[0], path) - - namespace = current_usd[0].split("|")[1].split(":")[0] - - container = containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - cmds.addAttr(container, longName="mvUsdCompoundShape", - niceName="mvUsdCompoundShape", dataType="string") - cmds.setAttr(container + ".mvUsdCompoundShape", - current_usd[0], type="string") - - return container - - def update(self, container, context): - # type: (dict, dict) -> None - """Update container with specified representation.""" - - cmds.loadPlugin("MultiverseForMaya", quiet=True) - import multiverse - - node = container['objectName'] - assert cmds.objExists(node), "Missing container" - - members = cmds.sets(node, query=True) or [] - shapes = cmds.ls(members, type="mvUsdCompoundShape") - assert shapes, "Cannot find mvUsdCompoundShape in container" - - mvShape = container['mvUsdCompoundShape'] - assert mvShape, "Missing mv source" - - project_name = context["project"]["name"] - repre_entity = context["representation"] - prev_representation_id = cmds.getAttr("{}.representation".format(node)) - prev_representation = get_representation_by_id(project_name, - prev_representation_id) - prev_path = os.path.normpath(prev_representation["attrib"]["path"]) - - path = get_representation_path(repre_entity) - - for shape in shapes: - asset_paths = multiverse.GetUsdCompoundAssetPaths(shape) - asset_paths = [os.path.normpath(p) for p in asset_paths] - - assert asset_paths.count(prev_path) == 1, \ - "Couldn't find matching path (or too many)" - prev_path_idx = asset_paths.index(prev_path) - asset_paths[prev_path_idx] = path - multiverse.SetUsdCompoundAssetPaths(shape, asset_paths) - - cmds.setAttr("{}.representation".format(node), - repre_entity["id"], - type="string") - mel.eval('refreshEditorTemplates;') - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - # type: (dict) -> None - """Remove loaded container.""" - # Delete container and its contents - if cmds.objExists(container['objectName']): - members = cmds.sets(container['objectName'], query=True) or [] - cmds.delete([container['objectName']] + members) - - # Remove the namespace, if empty - namespace = container['namespace'] - if cmds.namespace(exists=namespace): - members = cmds.namespaceInfo(namespace, listNamespace=True) - if not members: - cmds.namespace(removeNamespace=namespace) - else: - self.log.warning("Namespace not deleted because it " - "still has members: %s", namespace) diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_redshift_proxy.py b/server_addon/maya/client/ayon_maya/plugins/load/load_redshift_proxy.py deleted file mode 100644 index 92bf6dfe26..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_redshift_proxy.py +++ /dev/null @@ -1,150 +0,0 @@ -# -*- coding: utf-8 -*- -"""Loader for Redshift proxy.""" -import os - -import clique -import maya.cmds as cmds -from ayon_core.pipeline import get_representation_path -from ayon_core.settings import get_project_settings -from ayon_maya.api import plugin -from ayon_maya.api.lib import maintained_selection, namespaced, unique_namespace -from ayon_maya.api.pipeline import containerise -from 
ayon_maya.api.plugin import get_load_color_for_product_type - - -class RedshiftProxyLoader(plugin.Loader): - """Load Redshift proxy""" - - product_types = {"redshiftproxy"} - representations = {"rs"} - - label = "Import Redshift Proxy" - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, options=None): - """Plugin entry point.""" - product_type = context["product"]["productType"] - - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - # Ensure Redshift for Maya is loaded. - cmds.loadPlugin("redshift4maya", quiet=True) - - path = self.filepath_from_context(context) - with maintained_selection(): - cmds.namespace(addNamespace=namespace) - with namespaced(namespace, new=False): - nodes, group_node = self.create_rs_proxy(name, path) - - self[:] = nodes - if not nodes: - return - - # colour the group node - project_name = context["project"]["name"] - settings = get_project_settings(project_name) - color = get_load_color_for_product_type(product_type, settings) - if color is not None: - red, green, blue = color - cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1) - cmds.setAttr( - "{0}.outlinerColor".format(group_node), red, green, blue - ) - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - - node = container['objectName'] - assert cmds.objExists(node), "Missing container" - - members = cmds.sets(node, query=True) or [] - rs_meshes = cmds.ls(members, type="RedshiftProxyMesh") - assert rs_meshes, "Cannot find RedshiftProxyMesh in container" - repre_entity = context["representation"] - filename = get_representation_path(repre_entity) - - for rs_mesh in rs_meshes: - cmds.setAttr("{}.fileName".format(rs_mesh), - filename, - type="string") - - # Update metadata - cmds.setAttr("{}.representation".format(node), - repre_entity["id"], - type="string") - - def remove(self, container): - - # Delete container and its contents - if cmds.objExists(container['objectName']): - members = cmds.sets(container['objectName'], query=True) or [] - cmds.delete([container['objectName']] + members) - - # Remove the namespace, if empty - namespace = container['namespace'] - if cmds.namespace(exists=namespace): - members = cmds.namespaceInfo(namespace, listNamespace=True) - if not members: - cmds.namespace(removeNamespace=namespace) - else: - self.log.warning("Namespace not deleted because it " - "still has members: %s", namespace) - - def switch(self, container, context): - self.update(container, context) - - def create_rs_proxy(self, name, path): - """Creates Redshift Proxies showing a proxy object. - - Args: - name (str): Proxy name. - path (str): Path to proxy file. - - Returns: - (str, str): Name of mesh with Redshift proxy and its parent - transform. 
- - """ - rs_mesh = cmds.createNode( - 'RedshiftProxyMesh', name="{}_RS".format(name)) - mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) - - cmds.setAttr("{}.fileName".format(rs_mesh), - path, - type="string") - - cmds.connectAttr("{}.outMesh".format(rs_mesh), - "{}.inMesh".format(mesh_shape)) - - # TODO: use the assigned shading group as shaders if existed - # assign default shader to redshift proxy - if cmds.ls("initialShadingGroup", type="shadingEngine"): - cmds.sets(mesh_shape, forceElement="initialShadingGroup") - - group_node = cmds.group(empty=True, name="{}_GRP".format(name)) - mesh_transform = cmds.listRelatives(mesh_shape, - parent=True, fullPath=True) - cmds.parent(mesh_transform, group_node) - nodes = [rs_mesh, mesh_shape, group_node] - - # determine if we need to enable animation support - files_in_folder = os.listdir(os.path.dirname(path)) - collections, remainder = clique.assemble(files_in_folder) - - if collections: - cmds.setAttr("{}.useFrameExtension".format(rs_mesh), 1) - - return nodes, group_node diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_reference.py b/server_addon/maya/client/ayon_maya/plugins/load/load_reference.py deleted file mode 100644 index 92cee414fd..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_reference.py +++ /dev/null @@ -1,382 +0,0 @@ -import contextlib -import difflib - -import qargparse -from ayon_core.settings import get_project_settings -from ayon_maya.api import plugin -from ayon_maya.api.lib import ( - create_rig_animation_instance, - get_container_members, - maintained_selection, - parent_nodes, -) -from maya import cmds - - -@contextlib.contextmanager -def preserve_time_units(): - """Preserve current frame, frame range and fps""" - frame = cmds.currentTime(query=True) - fps = cmds.currentUnit(query=True, time=True) - start = cmds.playbackOptions(query=True, minTime=True) - end = cmds.playbackOptions(query=True, maxTime=True) - anim_start = cmds.playbackOptions(query=True, animationStartTime=True) - anim_end = cmds.playbackOptions(query=True, animationEndTime=True) - try: - yield - finally: - cmds.currentUnit(time=fps, updateAnimation=False) - cmds.currentTime(frame) - cmds.playbackOptions(minTime=start, - maxTime=end, - animationStartTime=anim_start, - animationEndTime=anim_end) - - -@contextlib.contextmanager -def preserve_modelpanel_cameras(container, log=None): - """Preserve camera members of container in the modelPanels. - - This is used to ensure a camera remains in the modelPanels after updating - to a new version. 
- - """ - - # Get the modelPanels that used the old camera - members = get_container_members(container) - old_cameras = set(cmds.ls(members, type="camera", long=True)) - if not old_cameras: - # No need to manage anything - yield - return - - panel_cameras = {} - for panel in cmds.getPanel(type="modelPanel"): - cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True), - long=True)[0] - - # Often but not always maya returns the transform from the - # modelPanel as opposed to the camera shape, so we convert it - # to explicitly be the camera shape - if cmds.nodeType(cam) != "camera": - cam = cmds.listRelatives(cam, - children=True, - fullPath=True, - type="camera")[0] - if cam in old_cameras: - panel_cameras[panel] = cam - - if not panel_cameras: - # No need to manage anything - yield - return - - try: - yield - finally: - new_members = get_container_members(container) - new_cameras = set(cmds.ls(new_members, type="camera", long=True)) - if not new_cameras: - return - - for panel, cam_name in panel_cameras.items(): - new_camera = None - if cam_name in new_cameras: - new_camera = cam_name - elif len(new_cameras) == 1: - new_camera = next(iter(new_cameras)) - else: - # Multiple cameras in the updated container but not an exact - # match detected by name. Find the closest match - matches = difflib.get_close_matches(word=cam_name, - possibilities=new_cameras, - n=1) - if matches: - new_camera = matches[0] # best match - if log: - log.info("Camera in '{}' restored with " - "closest match camera: {} (before: {})" - .format(panel, new_camera, cam_name)) - - if not new_camera: - # Unable to find the camera to re-apply in the modelpanel - continue - - cmds.modelPanel(panel, edit=True, camera=new_camera) - - -class ReferenceLoader(plugin.ReferenceLoader): - """Reference file""" - - product_types = { - "model", - "pointcache", - "proxyAbc", - "animation", - "mayaAscii", - "mayaScene", - "setdress", - "layout", - "camera", - "rig", - "camerarig", - "staticMesh", - "skeletalMesh", - "mvLook", - "matchmove", - } - - representations = {"ma", "abc", "fbx", "mb"} - - label = "Reference" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, options): - import maya.cmds as cmds - - product_type = context["product"]["productType"] - project_name = context["project"]["name"] - # True by default to keep legacy behaviours - attach_to_root = options.get("attach_to_root", True) - group_name = options["group_name"] - - # no group shall be created - if not attach_to_root: - group_name = namespace - - kwargs = {} - if "file_options" in options: - kwargs["options"] = options["file_options"] - if "file_type" in options: - kwargs["type"] = options["file_type"] - - path = self.filepath_from_context(context) - with maintained_selection(): - cmds.loadPlugin("AbcImport.mll", quiet=True) - - file_url = self.prepare_root_value(path, project_name) - nodes = cmds.file(file_url, - namespace=namespace, - sharedReferenceFile=False, - reference=True, - returnNewNodes=True, - groupReference=attach_to_root, - groupName=group_name, - **kwargs) - - shapes = cmds.ls(nodes, shapes=True, long=True) - - new_nodes = (list(set(nodes) - set(shapes))) - - # if there are cameras, try to lock their transforms - self._lock_camera_transforms(new_nodes) - - current_namespace = cmds.namespaceInfo(currentNamespace=True) - - if current_namespace != ":": - group_name = current_namespace + ":" + group_name - - self[:] = new_nodes - - if attach_to_root: - group_name = "|" + group_name - roots = 
cmds.listRelatives(group_name, - children=True, - fullPath=True) or [] - - if product_type not in { - "layout", "setdress", "mayaAscii", "mayaScene" - }: - # QUESTION Why do we need to exclude these families? - with parent_nodes(roots, parent=None): - cmds.xform(group_name, zeroTransformPivots=True) - - settings = get_project_settings(project_name) - - display_handle = settings['maya']['load'].get( - 'reference_loader', {} - ).get('display_handle', True) - cmds.setAttr( - "{}.displayHandle".format(group_name), display_handle - ) - - color = plugin.get_load_color_for_product_type( - product_type, settings - ) - if color is not None: - red, green, blue = color - cmds.setAttr("{}.useOutlinerColor".format(group_name), 1) - cmds.setAttr( - "{}.outlinerColor".format(group_name), - red, - green, - blue - ) - - cmds.setAttr( - "{}.displayHandle".format(group_name), display_handle - ) - # get bounding box - bbox = cmds.exactWorldBoundingBox(group_name) - # get pivot position on world space - pivot = cmds.xform(group_name, q=True, sp=True, ws=True) - # center of bounding box - cx = (bbox[0] + bbox[3]) / 2 - cy = (bbox[1] + bbox[4]) / 2 - cz = (bbox[2] + bbox[5]) / 2 - # add pivot position to calculate offset - cx = cx + pivot[0] - cy = cy + pivot[1] - cz = cz + pivot[2] - # set selection handle offset to center of bounding box - cmds.setAttr("{}.selectHandleX".format(group_name), cx) - cmds.setAttr("{}.selectHandleY".format(group_name), cy) - cmds.setAttr("{}.selectHandleZ".format(group_name), cz) - - if product_type == "rig": - self._post_process_rig(namespace, context, options) - else: - if "translate" in options: - if not attach_to_root and new_nodes: - root_nodes = cmds.ls(new_nodes, assemblies=True, - long=True) - # we assume only a single root is ever loaded - group_name = root_nodes[0] - cmds.setAttr("{}.translate".format(group_name), - *options["translate"]) - return new_nodes - - def switch(self, container, context): - self.update(container, context) - - def update(self, container, context): - with preserve_modelpanel_cameras(container, log=self.log): - super(ReferenceLoader, self).update(container, context) - - # We also want to lock camera transforms on any new cameras in the - # reference or for a camera which might have changed names. 
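# A hedged, pure-Python illustration of the selection-handle arithmetic in
# process_reference() above: the handle lands at the bounding-box centre,
# shifted by the pivot's world-space position (names are illustrative).
def handle_offset(bbox, pivot):
    """Centre of a cmds.exactWorldBoundingBox result, offset by the pivot."""
    return tuple((bbox[axis] + bbox[axis + 3]) / 2.0 + pivot[axis]
                 for axis in range(3))

# Unit cube centred at the origin, pivot at (0.5, 0.5, 0.5):
assert handle_offset(
    [-0.5, -0.5, -0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) == (0.5, 0.5, 0.5)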
- members = get_container_members(container) - self._lock_camera_transforms(members) - - def _post_process_rig(self, namespace, context, options): - - nodes = self[:] - create_rig_animation_instance( - nodes, context, namespace, options=options, log=self.log - ) - - def _lock_camera_transforms(self, nodes): - cameras = cmds.ls(nodes, type="camera") - if not cameras: - return - - # Check the Maya version, lockTransform has been introduced since - # Maya 2016.5 Ext 2 - version = int(cmds.about(version=True)) - if version >= 2016: - for camera in cameras: - cmds.camera(camera, edit=True, lockTransform=True) - else: - self.log.warning("This version of Maya does not support locking of" - " transforms of cameras.") - - -class MayaUSDReferenceLoader(ReferenceLoader): - """Reference USD file to native Maya nodes using MayaUSDImport reference""" - - label = "Reference Maya USD" - product_types = {"usd"} - representations = {"usd"} - extensions = {"usd", "usda", "usdc"} - - options = ReferenceLoader.options + [ - qargparse.Boolean( - "readAnimData", - label="Load anim data", - default=True, - help="Load animation data from USD file" - ), - qargparse.Boolean( - "useAsAnimationCache", - label="Use as animation cache", - default=True, - help=( - "Imports geometry prims with time-sampled point data using a " - "point-based deformer that references the imported " - "USD file.\n" - "This provides better import and playback performance when " - "importing time-sampled geometry from USD, and should " - "reduce the weight of the resulting Maya scene." - ) - ), - qargparse.Boolean( - "importInstances", - label="Import instances", - default=True, - help=( - "Import USD instanced geometries as Maya instanced shapes. " - "Will flatten the scene otherwise." - ) - ), - qargparse.String( - "primPath", - label="Prim Path", - default="/", - help=( - "Name of the USD scope where traversing will begin.\n" - "The prim at the specified primPath (including the prim) will " - "be imported.\n" - "Specifying the pseudo-root (/) means you want " - "to import everything in the file.\n" - "If the passed prim path is empty, it will first try to " - "import the defaultPrim for the rootLayer if it exists.\n" - "Otherwise, it will behave as if the pseudo-root was passed " - "in." 
-            )
-        )
-    ]
-
-    file_type = "USD Import"
-
-    def process_reference(self, context, name, namespace, options):
-        cmds.loadPlugin("mayaUsdPlugin", quiet=True)
-
-        def bool_option(key, default):
-            # Shorthand for getting optional boolean file option from options
-            value = int(bool(options.get(key, default)))
-            return "{}={}".format(key, value)
-
-        def string_option(key, default):
-            # Shorthand for getting optional string file option from options
-            value = str(options.get(key, default))
-            return "{}={}".format(key, value)
-
-        options["file_options"] = ";".join([
-            string_option("primPath", default="/"),
-            bool_option("importInstances", default=True),
-            bool_option("useAsAnimationCache", default=True),
-            bool_option("readAnimData", default=True),
-            # TODO: Expose more parameters
-            # "preferredMaterial=none",
-            # "importRelativeTextures=Automatic",
-            # "useCustomFrameRange=0",
-            # "startTime=0",
-            # "endTime=0",
-            # "importUSDZTextures=0"
-        ])
-        options["file_type"] = self.file_type
-
-        # Maya USD import reference has the tendency to change the time slider
-        # range and current frame, so we force revert it after
-        with preserve_time_units():
-            return super(MayaUSDReferenceLoader, self).process_reference(
                context, name, namespace, options
-            )
-
-    def update(self, container, context):
-        # Maya USD import reference has the tendency to change the time slider
-        # range and current frame, so we force revert it after
-        with preserve_time_units():
-            super(MayaUSDReferenceLoader, self).update(container, context)
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_rendersetup.py b/server_addon/maya/client/ayon_maya/plugins/load/load_rendersetup.py
deleted file mode 100644
index 3b323698c4..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_rendersetup.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Load and update RenderSetup settings.
-
-Working with RenderSetup settings in Maya is done using JSON files.
-When such a JSON file is loaded, it overwrites all settings on the RenderSetup
-instance.
-""" - -import contextlib -import json -import sys - -import maya.app.renderSetup.model.renderSetup as renderSetup -import six -from ayon_core.lib import BoolDef, EnumDef -from ayon_core.pipeline import get_representation_path -from ayon_maya.api import lib -from ayon_maya.api import plugin -from ayon_maya.api.pipeline import containerise -from maya import cmds - - -@contextlib.contextmanager -def mark_all_imported(enabled): - """Mark all imported nodes accepted by removing the `imported` attribute""" - if not enabled: - yield - return - - node_types = cmds.pluginInfo("renderSetup", query=True, dependNode=True) - - # Get node before load, then we can disable `imported` - # attribute on all new render setup layers after import - before = cmds.ls(type=node_types, long=True) - try: - yield - finally: - after = cmds.ls(type=node_types, long=True) - for node in (node for node in after if node not in before): - if cmds.attributeQuery("imported", - node=node, - exists=True): - plug = "{}.imported".format(node) - if cmds.getAttr(plug): - cmds.deleteAttr(plug) - - -class RenderSetupLoader(plugin.Loader): - """Load json preset for RenderSetup overwriting current one.""" - - product_types = {"rendersetup"} - representations = {"json"} - defaults = ['Main'] - - label = "Load RenderSetup template" - icon = "tablet" - color = "orange" - - options = [ - BoolDef("accept_import", - label="Accept import on load", - tooltip=( - "By default importing or pasting Render Setup collections " - "will display them italic in the Render Setup list.\nWith " - "this enabled the load will directly mark the import " - "'accepted' and remove the italic view." - ), - default=True), - BoolDef("load_managed", - label="Load Managed", - tooltip=( - "Containerize the rendersetup on load so it can be " - "'updated' later." - ), - default=True), - EnumDef("import_mode", - label="Import mode", - items={ - renderSetup.DECODE_AND_OVERWRITE: ( - "Flush existing render setup and " - "add without any namespace" - ), - renderSetup.DECODE_AND_MERGE: ( - "Merge with the existing render setup objects and " - "rename the unexpected objects" - ), - renderSetup.DECODE_AND_RENAME: ( - "Renaming all decoded render setup objects to not " - "conflict with the existing render setup" - ), - }, - default=renderSetup.DECODE_AND_OVERWRITE) - ] - - def load(self, context, name, namespace, data): - """Load RenderSetup settings.""" - - path = self.filepath_from_context(context) - - accept_import = data.get("accept_import", True) - import_mode = data.get("import_mode", renderSetup.DECODE_AND_OVERWRITE) - - self.log.info(">>> loading json [ {} ]".format(path)) - with mark_all_imported(accept_import): - with open(path, "r") as file: - renderSetup.instance().decode( - json.load(file), import_mode, None) - - if data.get("load_managed", True): - self.log.info(">>> containerising [ {} ]".format(name)) - folder_name = context["folder"]["name"] - namespace = namespace or lib.unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - return containerise( - name=name, - namespace=namespace, - nodes=[], - context=context, - loader=self.__class__.__name__) - - def remove(self, container): - """Remove RenderSetup settings instance.""" - container_name = container["objectName"] - - self.log.info("Removing '%s' from Maya.." 
% container["name"]) - - container_content = cmds.sets(container_name, query=True) or [] - nodes = cmds.ls(container_content, long=True) - - nodes.append(container_name) - - try: - cmds.delete(nodes) - except ValueError: - # Already implicitly deleted by Maya upon removing reference - pass - - def update(self, container, context): - """Update RenderSetup setting by overwriting existing settings.""" - lib.show_message( - "Render setup update", - "Render setup setting will be overwritten by new version. All " - "setting specified by user not included in loaded version " - "will be lost.") - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - with open(path, "r") as file: - try: - renderSetup.instance().decode( - json.load(file), renderSetup.DECODE_AND_OVERWRITE, None) - except Exception: - self.log.error("There were errors during loading") - six.reraise(*sys.exc_info()) - - # Update metadata - node = container["objectName"] - cmds.setAttr("{}.representation".format(node), - repre_entity["id"], - type="string") - self.log.info("... updated") - - def switch(self, container, context): - """Switch representations.""" - self.update(container, context) diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_arnold.py b/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_arnold.py deleted file mode 100644 index 4515ec499d..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_arnold.py +++ /dev/null @@ -1,138 +0,0 @@ -"""Load OpenVDB for Arnold in aiVolume. - -TODO: - `aiVolume` doesn't automatically set velocity fps correctly, set manual? - -""" -import os - -from ayon_core.pipeline import get_representation_path -from ayon_core.settings import get_project_settings -from ayon_maya.api import plugin -from ayon_maya.api.plugin import get_load_color_for_product_type - - -class LoadVDBtoArnold(plugin.Loader): - """Load OpenVDB for Arnold in aiVolume""" - - product_types = {"vdbcache"} - representations = {"vdb"} - - label = "Load VDB to Arnold" - icon = "cloud" - color = "orange" - - def load(self, context, name, namespace, data): - - from ayon_maya.api.lib import unique_namespace - from ayon_maya.api.pipeline import containerise - from maya import cmds - - product_type = context["product"]["productType"] - - # Check if the plugin for arnold is available on the pc - try: - cmds.loadPlugin("mtoa", quiet=True) - except Exception as exc: - self.log.error("Encountered exception:\n%s" % exc) - return - - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - # Root group - label = "{}:{}".format(namespace, name) - root = cmds.group(name=label, empty=True) - - project_name = context["project"]["name"] - settings = get_project_settings(project_name) - color = get_load_color_for_product_type(product_type, settings) - if color is not None: - red, green, blue = color - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", red, green, blue) - - # Create VRayVolumeGrid - grid_node = cmds.createNode("aiVolume", - name="{}Shape".format(root), - parent=root) - - path = self.filepath_from_context(context) - self._set_path(grid_node, - path=path, - repre_entity=context["representation"]) - - # Lock the shape node so the user can't delete the transform/shape - # as if it was referenced - cmds.lockNode(grid_node, lock=True) - - nodes = [root, grid_node] - self[:] = nodes - - return 
containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - - from maya import cmds - - repre_entity = context["representation"] - - path = get_representation_path(repre_entity) - - # Find VRayVolumeGrid - members = cmds.sets(container['objectName'], query=True) - grid_nodes = cmds.ls(members, type="aiVolume", long=True) - assert len(grid_nodes) == 1, "This is a bug" - - # Update the VRayVolumeGrid - self._set_path(grid_nodes[0], path=path, repre_entity=repre_entity) - - # Update container representation - cmds.setAttr(container["objectName"] + ".representation", - repre_entity["id"], - type="string") - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - - from maya import cmds - - # Get all members of the AYON container, ensure they are unlocked - # and delete everything - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass - - @staticmethod - def _set_path(grid_node, - path, - repre_entity): - """Apply the settings for the VDB path to the aiVolume node""" - from maya import cmds - - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - is_sequence = "frame" in repre_entity["context"] - cmds.setAttr(grid_node + ".useFrameExtension", is_sequence) - - # Set file path - cmds.setAttr(grid_node + ".filename", path, type="string") diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_redshift.py b/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_redshift.py deleted file mode 100644 index c08004421b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_redshift.py +++ /dev/null @@ -1,143 +0,0 @@ -import os - -from ayon_core.pipeline import get_representation_path -from ayon_core.settings import get_project_settings -from ayon_maya.api import plugin -from ayon_maya.api.plugin import get_load_color_for_product_type - - -class LoadVDBtoRedShift(plugin.Loader): - """Load OpenVDB in a Redshift Volume Shape - - Note that the RedshiftVolumeShape is created without a RedshiftVolume - shader assigned. To get the Redshift volume to render correctly assign - a RedshiftVolume shader (in the Hypershade) and set the density, scatter - and emission channels to the channel names of the volumes in the VDB file. 
- - """ - - product_types = {"vdbcache"} - representations = {"vdb"} - - label = "Load VDB to RedShift" - icon = "cloud" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - from ayon_maya.api.lib import unique_namespace - from ayon_maya.api.pipeline import containerise - from maya import cmds - - product_type = context["product"]["productType"] - - # Check if the plugin for redshift is available on the pc - try: - cmds.loadPlugin("redshift4maya", quiet=True) - except Exception as exc: - self.log.error("Encountered exception:\n%s" % exc) - return - - # Check if viewport drawing engine is Open GL Core (compat) - render_engine = None - compatible = "OpenGL" - if cmds.optionVar(exists="vp2RenderingEngine"): - render_engine = cmds.optionVar(query="vp2RenderingEngine") - - if not render_engine or not render_engine.startswith(compatible): - raise RuntimeError("Current scene's settings are incompatible." - "See Preferences > Display > Viewport 2.0 to " - "set the render engine to '%s'" - % compatible) - - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - # Root group - label = "{}:{}".format(namespace, name) - root = cmds.createNode("transform", name=label) - - project_name = context["project"]["name"] - settings = get_project_settings(project_name) - color = get_load_color_for_product_type(product_type, settings) - if color is not None: - red, green, blue = color - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", red, green, blue) - - # Create VR - volume_node = cmds.createNode("RedshiftVolumeShape", - name="{}RVSShape".format(label), - parent=root) - - self._set_path(volume_node, - path=self.filepath_from_context(context), - representation=context["representation"]) - - nodes = [root, volume_node] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - from maya import cmds - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - - # Find VRayVolumeGrid - members = cmds.sets(container['objectName'], query=True) - grid_nodes = cmds.ls(members, type="RedshiftVolumeShape", long=True) - assert len(grid_nodes) == 1, "This is a bug" - - # Update the VRayVolumeGrid - self._set_path(grid_nodes[0], path=path, representation=repre_entity) - - # Update container representation - cmds.setAttr(container["objectName"] + ".representation", - repre_entity["id"], - type="string") - - def remove(self, container): - from maya import cmds - - # Get all members of the AYON container, ensure they are unlocked - # and delete everything - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass - - def switch(self, container, context): - self.update(container, context) - - @staticmethod - def _set_path(grid_node, - path, - representation): - """Apply the settings for the VDB path to the RedshiftVolumeShape""" - from maya import cmds - - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - is_sequence = "frame" in representation["context"] - cmds.setAttr(grid_node + 
".useFrameExtension", is_sequence) - - # Set file path - cmds.setAttr(grid_node + ".fileName", path, type="string") diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_vray.py b/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_vray.py deleted file mode 100644 index f022f8be5e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_vdb_to_vray.py +++ /dev/null @@ -1,286 +0,0 @@ -import os - -from ayon_core.pipeline import get_representation_path -from ayon_core.settings import get_project_settings -from ayon_maya.api import plugin -from ayon_maya.api.plugin import get_load_color_for_product_type -from maya import cmds - -# List of 3rd Party Channels Mapping names for VRayVolumeGrid -# See: https://docs.chaosgroup.com/display/VRAY4MAYA/Input -# #Input-3rdPartyChannelsMapping -THIRD_PARTY_CHANNELS = { - 2: "Smoke", - 1: "Temperature", - 10: "Fuel", - 4: "Velocity.x", - 5: "Velocity.y", - 6: "Velocity.z", - 7: "Red", - 8: "Green", - 9: "Blue", - 14: "Wavelet Energy", - 19: "Wavelet.u", - 20: "Wavelet.v", - 21: "Wavelet.w", - # These are not in UI or documentation but V-Ray does seem to set these. - 15: "AdvectionOrigin.x", - 16: "AdvectionOrigin.y", - 17: "AdvectionOrigin.z", - -} - - -def _fix_duplicate_vvg_callbacks(): - """Workaround to kill duplicate VRayVolumeGrids attribute callbacks. - - This fixes a huge lag in Maya on switching 3rd Party Channels Mappings - or to different .vdb file paths because it spams an attribute changed - callback: `vvgUserChannelMappingsUpdateUI`. - - ChaosGroup bug ticket: 154-008-9890 - - Found with: - - Maya 2019.2 on Windows 10 - - V-Ray: V-Ray Next for Maya, update 1 version 4.12.01.00001 - - Bug still present in: - - Maya 2022.1 on Windows 10 - - V-Ray 5 for Maya, Update 2.1 (v5.20.01 from Dec 16 2021) - - """ - # todo(roy): Remove when new V-Ray release fixes duplicate calls - - jobs = cmds.scriptJob(listJobs=True) - - matched = set() - for entry in jobs: - # Remove the number - index, callback = entry.split(":", 1) - callback = callback.strip() - - # Detect whether it is a `vvgUserChannelMappingsUpdateUI` - # attribute change callback - if callback.startswith('"-runOnce" 1 "-attributeChange" "'): - if '"vvgUserChannelMappingsUpdateUI(' in callback: - if callback in matched: - # If we've seen this callback before then - # delete the duplicate callback - cmds.scriptJob(kill=int(index)) - else: - matched.add(callback) - - -class LoadVDBtoVRay(plugin.Loader): - """Load OpenVDB in a V-Ray Volume Grid""" - - product_types = {"vdbcache"} - representations = {"vdb"} - - label = "Load VDB to VRay" - icon = "cloud" - color = "orange" - - def load(self, context, name, namespace, data): - - from ayon_maya.api.lib import unique_namespace - from ayon_maya.api.pipeline import containerise - - path = self.filepath_from_context(context) - assert os.path.exists(path), ( - "Path does not exist: %s" % path - ) - - product_type = context["product"]["productType"] - - # Ensure V-ray is loaded with the vrayvolumegrid - if not cmds.pluginInfo("vrayformaya", query=True, loaded=True): - cmds.loadPlugin("vrayformaya") - if not cmds.pluginInfo("vrayvolumegrid", query=True, loaded=True): - cmds.loadPlugin("vrayvolumegrid") - - # Check if viewport drawing engine is Open GL Core (compat) - render_engine = None - compatible = "OpenGLCoreProfileCompat" - if cmds.optionVar(exists="vp2RenderingEngine"): - render_engine = cmds.optionVar(query="vp2RenderingEngine") - - if not render_engine or render_engine != compatible: - 
self.log.warning("Current scene's settings are incompatible. "
-                             "See Preferences > Display > Viewport 2.0 to "
-                             "set the render engine to '%s'" % compatible)
-
-        folder_name = context["folder"]["name"]
-        namespace = namespace or unique_namespace(
-            folder_name + "_",
-            prefix="_" if folder_name[0].isdigit() else "",
-            suffix="_",
-        )
-
-        # Root group
-        label = "{}:{}_VDB".format(namespace, name)
-        root = cmds.group(name=label, empty=True)
-
-        project_name = context["project"]["name"]
-        settings = get_project_settings(project_name)
-        color = get_load_color_for_product_type(product_type, settings)
-        if color is not None:
-            red, green, blue = color
-            cmds.setAttr(root + ".useOutlinerColor", 1)
-            cmds.setAttr(root + ".outlinerColor", red, green, blue)
-
-        # Create VRayVolumeGrid
-        grid_node = cmds.createNode("VRayVolumeGrid",
-                                    name="{}Shape".format(label),
-                                    parent=root)
-
-        # Ensure .currentTime is connected to time1.outTime
-        cmds.connectAttr("time1.outTime", grid_node + ".currentTime")
-
-        # Set path
-        self._set_path(grid_node, path, show_preset_popup=True)
-
-        # Lock the shape node so the user can't delete the transform/shape
-        # as if it was referenced
-        cmds.lockNode(grid_node, lock=True)
-
-        nodes = [root, grid_node]
-        self[:] = nodes
-
-        return containerise(
-            name=name,
-            namespace=namespace,
-            nodes=nodes,
-            context=context,
-            loader=self.__class__.__name__)
-
-    def _set_path(self, grid_node, path, show_preset_popup=True):
-
-        from ayon_maya.api.lib import attribute_values
-        from maya import cmds
-
-        def _get_filename_from_folder(path):
-            # Using the sequence of .vdb files we check the frame range, etc.
-            # to set the filename with #### padding.
-            files = sorted(x for x in os.listdir(path) if x.endswith(".vdb"))
-            if not files:
-                raise RuntimeError("Couldn't find .vdb files in: %s" % path)
-
-            if len(files) == 1:
-                # Ensure check for single file is also done in folder
-                fname = files[0]
-            else:
-                # Sequence
-                import clique
-
-                # todo: check support for negative frames as input
-                collections, remainder = clique.assemble(files)
-                assert len(collections) == 1, (
-                    "Must find a single image sequence, "
-                    "found: %s" % (collections,)
-                )
-                collection = collections[0]
-
-                fname = collection.format('{head}{{padding}}{tail}')
-                padding = collection.padding
-                if padding == 0:
-                    # Clique doesn't provide padding if the frame number never
-                    # starts with a zero and thus has never any visual padding.
-                    # So we fall back to the smallest frame number as padding.
-                    padding = min(len(str(i)) for i in collection.indexes)
-
-                # Supply frame/padding with # signs
-                padding_str = "#" * padding
-                fname = fname.format(padding=padding_str)
-
-            return os.path.join(path, fname)
-
-        # The path is either a single file or sequence in a folder so
-        # we do a quick lookup for our files
-        if os.path.isfile(path):
-            path = os.path.dirname(path)
-        path = _get_filename_from_folder(path)
-
-        # Even when not applying a preset V-Ray will reset the 3rd Party
-        # Channels Mapping of the VRayVolumeGrid when setting the .inPath
-        # value. As such we try and preserve the values ourselves.
-        # Reported as ChaosGroup bug ticket: 154-011-2909
-        # todo(roy): Remove when new V-Ray release preserves values
-        original_user_mapping = cmds.getAttr(grid_node + ".usrchmap") or ""
-
-        # Workaround for V-Ray bug: fix lag on path change, see function
-        _fix_duplicate_vvg_callbacks()
-
-        # Suppress preset pop-up if we want.
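# A standalone sketch of the padding fallback in _get_filename_from_folder()
# above, runnable with the real `clique` package (file names are made up):
import clique

files = ["smoke.1001.vdb", "smoke.1002.vdb", "smoke.1100.vdb"]
collections, _ = clique.assemble(files)
collection = collections[0]
# clique reports padding 0 here because no index has a leading zero, so
# fall back to the width of the smallest index, as the loader does.
padding = collection.padding or min(len(str(i)) for i in collection.indexes)
fname = collection.format("{head}{{padding}}{tail}").format(padding="#" * padding)
# fname == "smoke.####.vdb"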
- popup_attr = "{0}.inDontOfferPresets".format(grid_node) - popup = {popup_attr: not show_preset_popup} - with attribute_values(popup): - cmds.setAttr(grid_node + ".inPath", path, type="string") - - # Reapply the 3rd Party channels user mapping when no preset popup - # was shown to the user - if not show_preset_popup: - channels = cmds.getAttr(grid_node + ".usrchmapallch").split(";") - channels = set(channels) # optimize lookup - restored_mapping = "" - for entry in original_user_mapping.split(";"): - if not entry: - # Ignore empty entries - continue - - # If 3rd Party Channels selection channel still exists then - # add it again. - index, channel = entry.split(",") - attr = THIRD_PARTY_CHANNELS.get(int(index), - # Fallback for when a mapping - # was set that is not in the - # documentation - "???") - if channel in channels: - restored_mapping += entry + ";" - else: - self.log.warning("Can't preserve '%s' mapping due to " - "missing channel '%s' on node: " - "%s" % (attr, channel, grid_node)) - - if restored_mapping: - cmds.setAttr(grid_node + ".usrchmap", - restored_mapping, - type="string") - - def update(self, container, context): - repre_entity = context["representation"] - - path = get_representation_path(repre_entity) - - # Find VRayVolumeGrid - members = cmds.sets(container['objectName'], query=True) - grid_nodes = cmds.ls(members, type="VRayVolumeGrid", long=True) - assert len(grid_nodes) > 0, "This is a bug" - - # Update the VRayVolumeGrid - for grid_node in grid_nodes: - self._set_path(grid_node, path=path, show_preset_popup=False) - - # Update container representation - cmds.setAttr(container["objectName"] + ".representation", - repre_entity["id"], - type="string") - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - - # Get all members of the AYON container, ensure they are unlocked - # and delete everything - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_vrayproxy.py b/server_addon/maya/client/ayon_maya/plugins/load/load_vrayproxy.py deleted file mode 100644 index c71a48247c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_vrayproxy.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- -"""Loader for Vray Proxy files. - -If there are Alembics published along vray proxy (in the same version), -loader will use them instead of native vray vrmesh format. - -""" -import os - -import ayon_api -import maya.cmds as cmds -from ayon_core.pipeline import get_representation_path -from ayon_core.settings import get_project_settings -from ayon_maya.api.lib import maintained_selection, namespaced, unique_namespace -from ayon_maya.api.pipeline import containerise -from ayon_maya.api import plugin -from ayon_maya.api.plugin import get_load_color_for_product_type - - -class VRayProxyLoader(plugin.Loader): - """Load VRay Proxy with Alembic or VrayMesh.""" - - product_types = {"vrayproxy", "model", "pointcache", "animation"} - representations = {"vrmesh", "abc"} - - label = "Import VRay Proxy" - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, options=None): - # type: (dict, str, str, dict) -> None - """Loader entry point. 
- - Args: - context (dict): Loaded representation context. - name (str): Name of container. - namespace (str): Optional namespace name. - options (dict): Optional loader options. - - """ - - product_type = context["product"]["productType"] - - # get all representations for this version - filename = self._get_abc( - context["project"]["name"], context["version"]["id"] - ) - if not filename: - filename = self.filepath_from_context(context) - - folder_name = context["folder"]["name"] - namespace = namespace or unique_namespace( - folder_name + "_", - prefix="_" if folder_name[0].isdigit() else "", - suffix="_", - ) - - # Ensure V-Ray for Maya is loaded. - cmds.loadPlugin("vrayformaya", quiet=True) - - with maintained_selection(): - cmds.namespace(addNamespace=namespace) - with namespaced(namespace, new=False): - nodes, group_node = self.create_vray_proxy( - name, filename=filename) - - self[:] = nodes - if not nodes: - return - - # colour the group node - project_name = context["project"]["name"] - settings = get_project_settings(project_name) - color = get_load_color_for_product_type(product_type, settings) - if color is not None: - red, green, blue = color - cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1) - cmds.setAttr( - "{0}.outlinerColor".format(group_node), red, green, blue - ) - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - # type: (dict, dict) -> None - """Update container with specified representation.""" - node = container['objectName'] - assert cmds.objExists(node), "Missing container" - - members = cmds.sets(node, query=True) or [] - vraymeshes = cmds.ls(members, type="VRayProxy") - assert vraymeshes, "Cannot find VRayMesh in container" - - # get all representations for this version - repre_entity = context["representation"] - filename = self._get_abc( - context["project"]["name"], context["version"]["id"] - ) - if not filename: - filename = get_representation_path(repre_entity) - - for vray_mesh in vraymeshes: - cmds.setAttr("{}.fileName".format(vray_mesh), - filename, - type="string") - - # Update metadata - cmds.setAttr("{}.representation".format(node), - repre_entity["id"], - type="string") - - def remove(self, container): - # type: (dict) -> None - """Remove loaded container.""" - # Delete container and its contents - if cmds.objExists(container['objectName']): - members = cmds.sets(container['objectName'], query=True) or [] - cmds.delete([container['objectName']] + members) - - # Remove the namespace, if empty - namespace = container['namespace'] - if cmds.namespace(exists=namespace): - members = cmds.namespaceInfo(namespace, listNamespace=True) - if not members: - cmds.namespace(removeNamespace=namespace) - else: - self.log.warning("Namespace not deleted because it " - "still has members: %s", namespace) - - def switch(self, container, context): - # type: (dict, dict) -> None - """Switch loaded representation.""" - self.update(container, context) - - def create_vray_proxy(self, name, filename): - # type: (str, str) -> (list, str) - """Re-create the structure created by VRay to support vrmeshes - - Args: - name (str): Name of the asset. - filename (str): File name of vrmesh. 
-
-        Returns:
-            tuple: List of created nodes and the root transform node.
-
-        """
-
-        if name is None:
-            name = os.path.splitext(os.path.basename(filename))[0]
-
-        parent = cmds.createNode("transform", name=name)
-        proxy = cmds.createNode(
-            "VRayProxy", name="{}Shape".format(name), parent=parent)
-        cmds.setAttr(proxy + ".fileName", filename, type="string")
-        cmds.connectAttr("time1.outTime", proxy + ".currentFrame")
-
-        return [parent, proxy], parent
-
-    def _get_abc(self, project_name, version_id):
-        # type: (str, str) -> str
-        """Get abc representation file path if present.
-
-        If there is an Alembic (abc) representation published along with
-        the vray proxy, get its file path.
-
-        Args:
-            project_name (str): Project name.
-            version_id (str): Version hash id.
-
-        Returns:
-            str: Path to the abc file, or an empty string if no abc
-                representation was found.
-
-        """
-        self.log.debug(
-            "Looking for abc in published representations of this version.")
-        abc_rep = ayon_api.get_representation_by_name(
-            project_name, "abc", version_id
-        )
-        if abc_rep:
-            self.log.debug("Found, we'll link alembic to vray proxy.")
-            file_name = get_representation_path(abc_rep)
-            self.log.debug("File: {}".format(file_name))
-            return file_name
-
-        return ""
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_vrayscene.py b/server_addon/maya/client/ayon_maya/plugins/load/load_vrayscene.py
deleted file mode 100644
index 255ca844ba..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_vrayscene.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# -*- coding: utf-8 -*-
-import maya.cmds as cmds  # noqa
-from ayon_core.pipeline import get_representation_path
-from ayon_core.settings import get_project_settings
-from ayon_maya.api.lib import maintained_selection, namespaced, unique_namespace
-from ayon_maya.api.pipeline import containerise
-from ayon_maya.api import plugin
-from ayon_maya.api.plugin import get_load_color_for_product_type
-
-
-class VRaySceneLoader(plugin.Loader):
-    """Load Vray scene"""
-
-    product_types = {"vrayscene_layer"}
-    representations = {"vrscene"}
-
-    label = "Import VRay Scene"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    def load(self, context, name, namespace, data):
-        product_type = context["product"]["productType"]
-
-        folder_name = context["folder"]["name"]
-        namespace = namespace or unique_namespace(
-            folder_name + "_",
-            prefix="_" if folder_name[0].isdigit() else "",
-            suffix="_",
-        )
-
-        # Ensure V-Ray for Maya is loaded.
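-        # (quiet=True only suppresses the warning when it is already loaded.)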
- cmds.loadPlugin("vrayformaya", quiet=True) - - with maintained_selection(): - cmds.namespace(addNamespace=namespace) - with namespaced(namespace, new=False): - nodes, root_node = self.create_vray_scene( - name, - filename=self.filepath_from_context(context) - ) - - self[:] = nodes - if not nodes: - return - - # colour the group node - project_name = context["project"]["name"] - settings = get_project_settings(project_name) - color = get_load_color_for_product_type(product_type, settings) - if color is not None: - red, green, blue = color - cmds.setAttr("{0}.useOutlinerColor".format(root_node), 1) - cmds.setAttr( - "{0}.outlinerColor".format(root_node), red, green, blue - ) - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, context): - - node = container['objectName'] - assert cmds.objExists(node), "Missing container" - - members = cmds.sets(node, query=True) or [] - vraymeshes = cmds.ls(members, type="VRayScene") - assert vraymeshes, "Cannot find VRayScene in container" - - repre_entity = context["representation"] - filename = get_representation_path(repre_entity) - - for vray_mesh in vraymeshes: - cmds.setAttr("{}.FilePath".format(vray_mesh), - filename, - type="string") - - # Update metadata - cmds.setAttr("{}.representation".format(node), - repre_entity["id"], - type="string") - - def remove(self, container): - - # Delete container and its contents - if cmds.objExists(container['objectName']): - members = cmds.sets(container['objectName'], query=True) or [] - cmds.delete([container['objectName']] + members) - - # Remove the namespace, if empty - namespace = container['namespace'] - if cmds.namespace(exists=namespace): - members = cmds.namespaceInfo(namespace, listNamespace=True) - if not members: - cmds.namespace(removeNamespace=namespace) - else: - self.log.warning("Namespace not deleted because it " - "still has members: %s", namespace) - - def switch(self, container, context): - self.update(container, context) - - def create_vray_scene(self, name, filename): - """Re-create the structure created by VRay to support vrscenes - - Args: - name(str): name of the asset - - Returns: - nodes(list) - """ - - # Create nodes - mesh_node_name = "VRayScene_{}".format(name) - - trans = cmds.createNode( - "transform", name=mesh_node_name) - vray_scene = cmds.createNode( - "VRayScene", name="{}_VRSCN".format(mesh_node_name), parent=trans) - mesh = cmds.createNode( - "mesh", name="{}_Shape".format(mesh_node_name), parent=trans) - - cmds.connectAttr( - "{}.outMesh".format(vray_scene), "{}.inMesh".format(mesh)) - - cmds.setAttr("{}.FilePath".format(vray_scene), filename, type="string") - - # Lock the shape nodes so the user cannot delete these - cmds.lockNode(mesh, lock=True) - cmds.lockNode(vray_scene, lock=True) - - # Create important connections - cmds.connectAttr("time1.outTime", - "{0}.inputTime".format(trans)) - - # Connect mesh to initialShadingGroup - cmds.sets([mesh], forceElement="initialShadingGroup") - - nodes = [trans, vray_scene, mesh] - - # Fix: Force refresh so the mesh shows correctly after creation - cmds.refresh() - - return nodes, trans diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_xgen.py b/server_addon/maya/client/ayon_maya/plugins/load/load_xgen.py deleted file mode 100644 index 88d9d550da..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_xgen.py +++ /dev/null @@ -1,185 +0,0 @@ -import os -import shutil - -from ayon_maya.api 
import plugin
-import maya.cmds as cmds
-import xgenm
-from ayon_core.pipeline import get_representation_path
-from ayon_maya.api import current_file
-from ayon_maya.api.lib import (
-    attribute_values,
-    get_container_members,
-    maintained_selection,
-    write_xgen_file,
-)
-from qtpy import QtWidgets
-
-
-class XgenLoader(plugin.ReferenceLoader):
-    """Load Xgen as reference"""
-
-    product_types = {"xgen"}
-    representations = {"ma", "mb"}
-
-    label = "Reference Xgen"
-    icon = "code-fork"
-    color = "orange"
-
-    def get_xgen_xgd_paths(self, palette):
-        _, maya_extension = os.path.splitext(current_file())
-        xgen_file = current_file().replace(
-            maya_extension,
-            "__{}.xgen".format(palette.replace("|", "").replace(":", "__"))
-        )
-        xgd_file = xgen_file.replace(".xgen", ".xgd")
-        return xgen_file, xgd_file
-
-    def process_reference(self, context, name, namespace, options):
-        # Validate workfile has a path.
-        if current_file() is None:
-            QtWidgets.QMessageBox.warning(
-                None,
-                "",
-                "Current workfile has not been saved. Please save the workfile"
-                " before loading an Xgen."
-            )
-            return
-
-        maya_filepath = self.prepare_root_value(
-            file_url=self.filepath_from_context(context),
-            project_name=context["project"]["name"]
-        )
-
-        # Reference xgen. Xgen does not like being referenced under a group.
-        with maintained_selection():
-            nodes = cmds.file(
-                maya_filepath,
-                namespace=namespace,
-                sharedReferenceFile=False,
-                reference=True,
-                returnNewNodes=True
-            )
-
-        xgen_palette = cmds.ls(
-            nodes, type="xgmPalette", long=True
-        )[0].replace("|", "")
-
-        xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
-        self.set_palette_attributes(xgen_palette, xgen_file, xgd_file)
-
-        # Change the cache and disk values of xgDataPath and xgProjectPath
-        # to ensure paths are setup correctly.
-        project_path = os.path.dirname(current_file()).replace("\\", "/")
-        xgenm.setAttr("xgProjectPath", project_path, xgen_palette)
-        data_path = "${{PROJECT}}xgen/collections/{};{}".format(
-            xgen_palette.replace(":", "__ns__"),
-            xgenm.getAttr("xgDataPath", xgen_palette)
-        )
-        xgenm.setAttr("xgDataPath", data_path, xgen_palette)
-
-        data = {"xgProjectPath": project_path, "xgDataPath": data_path}
-        write_xgen_file(data, xgen_file)
-
-        # This creates a float expression attribute. If no changes were made
-        # to the collection, Xgen does not create an xgd file on save, which
-        # gives errors when launching the workfile again because the xgd
-        # file cannot be found.
-        name = "custom_float_ignore"
-        if name not in xgenm.customAttrs(xgen_palette):
-            xgenm.addCustomAttr(
-                "custom_float_ignore", xgen_palette
-            )
-
-        shapes = cmds.ls(nodes, shapes=True, long=True)
-
-        new_nodes = (list(set(nodes) - set(shapes)))
-
-        self[:] = new_nodes
-
-        return new_nodes
-
-    def set_palette_attributes(self, xgen_palette, xgen_file, xgd_file):
-        cmds.setAttr(
-            "{}.xgBaseFile".format(xgen_palette),
-            os.path.basename(xgen_file),
-            type="string"
-        )
-        cmds.setAttr(
-            "{}.xgFileName".format(xgen_palette),
-            os.path.basename(xgd_file),
-            type="string"
-        )
-        cmds.setAttr("{}.xgExportAsDelta".format(xgen_palette), True)
-
-    def update(self, container, context):
-        """Workflow for updating Xgen.
-
-        - Export changes to delta file.
-        - Copy and overwrite the workspace .xgen file.
-        - Set collection attributes to not include delta files.
-        - Update xgen maya file reference.
-        - Apply the delta file changes.
-        - Reset collection attributes to include delta files.
-
-        We have to do this workflow because when referencing the xgen
-        collection, Maya implicitly imports the Xgen data from the xgen file,
-        so we don't have any control over when the delta file changes are
-        applied.
-
-        There is an implicit increment of the xgen and delta files, due to
-        using the workfile basename.
-        """
-        # Storing current description to try and maintain later.
-        current_description = (
-            xgenm.xgGlobal.DescriptionEditor.currentDescription()
-        )
-
-        container_node = container["objectName"]
-        members = get_container_members(container_node)
-        xgen_palette = cmds.ls(
-            members, type="xgmPalette", long=True
-        )[0].replace("|", "")
-        xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
-
-        # Export current changes to apply later.
-        xgenm.createDelta(xgen_palette.replace("|", ""), xgd_file)
-
-        self.set_palette_attributes(xgen_palette, xgen_file, xgd_file)
-
-        repre_entity = context["representation"]
-        maya_file = get_representation_path(repre_entity)
-        _, extension = os.path.splitext(maya_file)
-        new_xgen_file = maya_file.replace(extension, ".xgen")
-        data_path = ""
-        with open(new_xgen_file, "r") as f:
-            for line in f:
-                if line.startswith("\txgDataPath"):
-                    line = line.rstrip()
-                    data_path = line.split("\t")[-1]
-                    break
-
-        project_path = os.path.dirname(current_file()).replace("\\", "/")
-        data_path = "${{PROJECT}}xgen/collections/{};{}".format(
-            xgen_palette.replace(":", "__ns__"),
-            data_path
-        )
-        data = {"xgProjectPath": project_path, "xgDataPath": data_path}
-        shutil.copy(new_xgen_file, xgen_file)
-        write_xgen_file(data, xgen_file)
-
-        attribute_data = {
-            "{}.xgFileName".format(xgen_palette): os.path.basename(xgen_file),
-            "{}.xgBaseFile".format(xgen_palette): "",
-            "{}.xgExportAsDelta".format(xgen_palette): False
-        }
-        with attribute_values(attribute_data):
-            super().update(container, context)
-
-        xgenm.applyDelta(xgen_palette.replace("|", ""), xgd_file)
-
-        # Restore current selected description if it exists.
-        if cmds.objExists(current_description):
-            xgenm.xgGlobal.DescriptionEditor.setCurrentDescription(
-                current_description
-            )
-        # Full UI refresh.
-        xgenm.xgGlobal.DescriptionEditor.refresh("Full")
diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_yeti_cache.py b/server_addon/maya/client/ayon_maya/plugins/load/load_yeti_cache.py
deleted file mode 100644
index 6000de2507..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/load/load_yeti_cache.py
+++ /dev/null
@@ -1,397 +0,0 @@
-import json
-import os
-import re
-from collections import defaultdict
-
-import clique
-from ayon_core.pipeline import get_representation_path
-from ayon_core.settings import get_project_settings
-from ayon_maya.api import lib
-from ayon_maya.api.pipeline import containerise
-from ayon_maya.api import plugin
-from ayon_maya.api.plugin import get_load_color_for_product_type
-from ayon_maya.api.yeti import create_yeti_variable
-from maya import cmds
-
-# Do not reset these values on update but only apply on first load
-# to preserve any potential local overrides
-SKIP_UPDATE_ATTRS = {
-    "displayOutput",
-    "viewportDensity",
-    "viewportWidth",
-    "viewportLength",
-    "renderDensity",
-    "renderWidth",
-    "renderLength",
-    "increaseRenderBounds"
-}
-
-SKIP_ATTR_MESSAGE = (
-    "Skipping updating %s.%s to %s because it "
-    "is considered a local overridable attribute. "
-    "Either set it manually or load the cache "
-    "anew."
-)
-
-
-def set_attribute(attribute, value, node):
-    """Wrapper of lib.set_attribute which ignores None values"""
-    if value is None:
-        return
-    lib.set_attribute(attribute, value, node)
-
-
-class YetiCacheLoader(plugin.Loader):
-    """Load Yeti Cache with one or more Yeti nodes"""
-
-    product_types = {"yeticache", "yetiRig"}
-    representations = {"fur"}
-
-    label = "Load Yeti Cache"
-    order = -9
-    icon = "code-fork"
-    color = "orange"
-
-    def load(self, context, name=None, namespace=None, data=None):
-        """Loads a .fursettings file defining how to load .fur sequences
-
-        A single yeticache or yetiRig can have more than a single pgYetiMaya
-        node and thus load more than a single yeti.fur sequence.
-
-        The .fursettings file defines what the node names should be and also
-        what "cbId" attribute they should receive to match the original source
-        and allow published looks to also work for Yeti rigs and their caches.
-
-        """
-
-        product_type = context["product"]["productType"]
-
-        # Build namespace
-        folder_name = context["folder"]["name"]
-        if namespace is None:
-            namespace = self.create_namespace(folder_name)
-
-        # Ensure Yeti is loaded
-        if not cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
-            cmds.loadPlugin("pgYetiMaya", quiet=True)
-
-        # Create Yeti cache nodes according to settings
-        path = self.filepath_from_context(context)
-        settings = self.read_settings(path)
-        nodes = []
-        for node in settings["nodes"]:
-            nodes.extend(self.create_node(namespace, node))
-
-        group_name = "{}:{}".format(namespace, name)
-        group_node = cmds.group(nodes, name=group_name)
-        project_name = context["project"]["name"]
-
-        settings = get_project_settings(project_name)
-        color = get_load_color_for_product_type(product_type, settings)
-        if color is not None:
-            red, green, blue = color
-            cmds.setAttr(group_node + ".useOutlinerColor", 1)
-            cmds.setAttr(group_node + ".outlinerColor", red, green, blue)
-
-        nodes.append(group_node)
-
-        self[:] = nodes
-
-        return containerise(
-            name=name,
-            namespace=namespace,
-            nodes=nodes,
-            context=context,
-            loader=self.__class__.__name__
-        )
-
-    def remove(self, container):
-
-        from maya import cmds
-
-        namespace = container["namespace"]
-        container_name = container["objectName"]
-
-        self.log.info("Removing '%s' from Maya.." 
% container["name"]) - - container_content = cmds.sets(container_name, query=True) - nodes = cmds.ls(container_content, long=True) - - nodes.append(container_name) - - try: - cmds.delete(nodes) - except ValueError: - # Already implicitly deleted by Maya upon removing reference - pass - - cmds.namespace(removeNamespace=namespace, deleteNamespaceContent=True) - - def update(self, container, context): - repre_entity = context["representation"] - namespace = container["namespace"] - container_node = container["objectName"] - - path = get_representation_path(repre_entity) - settings = self.read_settings(path) - - # Collect scene information of asset - set_members = lib.get_container_members(container) - container_root = lib.get_container_transforms(container, - members=set_members, - root=True) - scene_nodes = cmds.ls(set_members, type="pgYetiMaya", long=True) - - # Build lookup with cbId as keys - scene_lookup = defaultdict(list) - for node in scene_nodes: - cb_id = lib.get_id(node) - scene_lookup[cb_id].append(node) - - # Re-assemble metadata with cbId as keys - meta_data_lookup = {n["cbId"]: n for n in settings["nodes"]} - - # Delete nodes by "cbId" that are not in the updated version - to_delete_lookup = {cb_id for cb_id in scene_lookup.keys() if - cb_id not in meta_data_lookup} - if to_delete_lookup: - - # Get nodes and remove entry from lookup - to_remove = [] - for _id in to_delete_lookup: - # Get all related nodes - shapes = scene_lookup[_id] - # Get the parents of all shapes under the ID - transforms = cmds.listRelatives(shapes, - parent=True, - fullPath=True) or [] - to_remove.extend(shapes + transforms) - - # Remove id from lookup - scene_lookup.pop(_id, None) - - cmds.delete(to_remove) - - for cb_id, node_settings in meta_data_lookup.items(): - - if cb_id not in scene_lookup: - # Create new nodes - self.log.info("Creating new nodes ..") - - new_nodes = self.create_node(namespace, node_settings) - cmds.sets(new_nodes, addElement=container_node) - cmds.parent(new_nodes, container_root) - - else: - # Update the matching nodes - scene_nodes = scene_lookup[cb_id] - lookup_result = meta_data_lookup[cb_id]["name"] - - # Remove namespace if any (e.g.: "character_01_:head_YNShape") - node_name = lookup_result.rsplit(":", 1)[-1] - - for scene_node in scene_nodes: - - # Get transform node, this makes renaming easier - transforms = cmds.listRelatives(scene_node, - parent=True, - fullPath=True) or [] - assert len(transforms) == 1, "This is a bug!" 
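-                    # A non-instanced shape has exactly one parent transform;
-                    # multiple paths would mean the Yeti node is instanced.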
-
-                    # Get scene node's namespace and rename the transform node
-                    lead = scene_node.rsplit(":", 1)[0]
-                    namespace = ":{}".format(lead.rsplit("|")[-1])
-
-                    new_shape_name = "{}:{}".format(namespace, node_name)
-                    new_trans_name = new_shape_name.rsplit("Shape", 1)[0]
-
-                    transform_node = transforms[0]
-                    cmds.rename(transform_node,
-                                new_trans_name,
-                                ignoreShape=False)
-
-                    # Get the newly named shape node
-                    yeti_nodes = cmds.listRelatives(new_trans_name,
-                                                    children=True)
-                    yeti_node = yeti_nodes[0]
-
-                    for attr, value in node_settings["attrs"].items():
-                        if attr in SKIP_UPDATE_ATTRS:
-                            self.log.info(
-                                SKIP_ATTR_MESSAGE, yeti_node, attr, value
-                            )
-                            continue
-                        set_attribute(attr, value, yeti_node)
-
-            # Set up user defined attributes
-            user_variables = node_settings.get("user_variables", {})
-            for attr, value in user_variables.items():
-                was_value_set = create_yeti_variable(
-                    yeti_shape_node=yeti_node,
-                    attr_name=attr,
-                    value=value,
-                    # We do not want to update the
-                    # value if it already exists so
-                    # that any local overrides that
-                    # may have been applied still
-                    # persist
-                    force_value=False
-                )
-                if not was_value_set:
-                    self.log.info(
-                        SKIP_ATTR_MESSAGE, yeti_node, attr, value
-                    )
-
-        cmds.setAttr("{}.representation".format(container_node),
-                     repre_entity["id"],
-                     typ="string")
-
-    def switch(self, container, context):
-        self.update(container, context)
-
-    # helper functions
-    def create_namespace(self, folder_name):
-        """Create a unique namespace
-
-        Args:
-            folder_name (str): Folder name to use as base of the namespace.
-
-        Returns:
-            str: The unique namespace.
-
-        """
-
-        asset_name = "{}_".format(folder_name)
-        prefix = "_" if asset_name[0].isdigit() else ""
-        namespace = lib.unique_namespace(
-            asset_name,
-            prefix=prefix,
-            suffix="_"
-        )
-
-        return namespace
-
-    def get_cache_node_filepath(self, root, node_name):
-        """Get the cache file path for one of the yeti nodes.
-
-        All caches with more than 1 frame need the cache file name set with
-        `%04d`. If the cache has only one frame we return the file name as
-        we assume it is a snapshot.
-
-        This expects the files to be named after the "node name" through
-        exports from within Yeti.
-
-        Args:
-            root(str): Folder containing cache files to search in.
-            node_name(str): Node name to search cache files for
-
-        Returns:
-            str: Cache file path value needed for cacheFileName attribute
-
-        """
-
-        name = node_name.replace(":", "_")
-        pattern = r"^({name})(\.[0-9]+)?(\.fur)$".format(name=re.escape(name))
-
-        files = [fname for fname in os.listdir(root) if re.match(pattern,
-                                                                 fname)]
-        if not files:
-            self.log.error("Could not find cache files for '{}' "
-                           "with pattern {}".format(node_name, pattern))
-            return
-
-        if len(files) == 1:
-            # Single file
-            return os.path.join(root, files[0])
-
-        # Get filename for the sequence with padding
-        collections, remainder = clique.assemble(files)
-        assert not remainder, "This is a bug"
-        assert len(collections) == 1, "This is a bug"
-        collection = collections[0]
-
-        # Formats name as {head}%d{tail} like cache.%04d.fur
-        fname = collection.format("{head}{padding}{tail}")
-        return os.path.join(root, fname)
-
-    def create_node(self, namespace, node_settings):
-        """Create nodes with the correct namespace and settings
-
-        Args:
-            namespace(str): namespace
-            node_settings(dict): Single "nodes" entry from .fursettings file.
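-                Illustrative shape: {"name": ..., "cbId": ...,
-                "transform": {"name": ..., "cbId": ...},
-                "attrs": {...}, "user_variables": {...}}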
- - Returns: - list: Created nodes - - """ - nodes = [] - - # Get original names and ids - orig_transform_name = node_settings["transform"]["name"] - orig_shape_name = node_settings["name"] - - # Add namespace - transform_name = "{}:{}".format(namespace, orig_transform_name) - shape_name = "{}:{}".format(namespace, orig_shape_name) - - # Create pgYetiMaya node - transform_node = cmds.createNode("transform", - name=transform_name) - yeti_node = cmds.createNode("pgYetiMaya", - name=shape_name, - parent=transform_node) - - lib.set_id(transform_node, node_settings["transform"]["cbId"]) - lib.set_id(yeti_node, node_settings["cbId"]) - - nodes.extend([transform_node, yeti_node]) - - # Update attributes with defaults - attributes = node_settings["attrs"] - attributes.update({ - "verbosity": 2, - "fileMode": 1, - - # Fix render stats, like Yeti's own - # ../scripts/pgYetiNode.mel script - "visibleInReflections": True, - "visibleInRefractions": True - }) - - if "viewportDensity" not in attributes: - attributes["viewportDensity"] = 0.1 - - # Apply attributes to pgYetiMaya node - for attr, value in attributes.items(): - set_attribute(attr, value, yeti_node) - - # Set up user defined attributes - user_variables = node_settings.get("user_variables", {}) - for attr, value in user_variables.items(): - create_yeti_variable(yeti_shape_node=yeti_node, - attr_name=attr, - value=value) - - # Connect to the time node - cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node) - - return nodes - - def read_settings(self, path): - """Read .fursettings file and compute some additional attributes""" - - with open(path, "r") as fp: - fur_settings = json.load(fp) - - if "nodes" not in fur_settings: - raise RuntimeError("Encountered invalid data, " - "expected 'nodes' in fursettings.") - - # Compute the cache file name values we want to set for the nodes - root = os.path.dirname(path) - for node in fur_settings["nodes"]: - cache_filename = self.get_cache_node_filepath( - root=root, node_name=node["name"]) - - attrs = node.get("attrs", {}) # allow 'attrs' to not exist - attrs["cacheFileName"] = cache_filename - node["attrs"] = attrs - - return fur_settings diff --git a/server_addon/maya/client/ayon_maya/plugins/load/load_yeti_rig.py b/server_addon/maya/client/ayon_maya/plugins/load/load_yeti_rig.py deleted file mode 100644 index a45e5c63ef..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/load/load_yeti_rig.py +++ /dev/null @@ -1,94 +0,0 @@ -from typing import List - -import maya.cmds as cmds -from ayon_core.pipeline import registered_host -from ayon_core.pipeline.create import CreateContext -from ayon_maya.api import lib, plugin - - -class YetiRigLoader(plugin.ReferenceLoader): - """This loader will load Yeti rig.""" - - product_types = {"yetiRig"} - representations = {"ma"} - - label = "Load Yeti Rig" - order = -9 - icon = "code-fork" - color = "orange" - - # From settings - create_cache_instance_on_load = True - - def process_reference( - self, context, name=None, namespace=None, options=None - ): - path = self.filepath_from_context(context) - - attach_to_root = options.get("attach_to_root", True) - group_name = options["group_name"] - - # no group shall be created - if not attach_to_root: - group_name = namespace - - with lib.maintained_selection(): - file_url = self.prepare_root_value( - path, context["project"]["name"] - ) - nodes = cmds.file( - file_url, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=attach_to_root, - groupName=group_name - ) - - color = 
plugin.get_load_color_for_product_type("yetiRig") - if color is not None: - red, green, blue = color - cmds.setAttr(group_name + ".useOutlinerColor", 1) - cmds.setAttr( - group_name + ".outlinerColor", red, green, blue - ) - self[:] = nodes - - if self.create_cache_instance_on_load: - # Automatically create in instance to allow publishing the loaded - # yeti rig into a yeti cache - self._create_yeti_cache_instance(nodes, variant=namespace) - - return nodes - - def _create_yeti_cache_instance(self, nodes: List[str], variant: str): - """Create a yeticache product type instance to publish the output. - - This is similar to how loading animation rig will automatically create - an animation instance for publishing any loaded character rigs, but - then for yeti rigs. - - Args: - nodes (List[str]): Nodes generated on load. - variant (str): Variant for the yeti cache instance to create. - - """ - - # Find the roots amongst the loaded nodes - yeti_nodes = cmds.ls(nodes, type="pgYetiMaya", long=True) - assert yeti_nodes, "No pgYetiMaya nodes in rig, this is a bug." - - self.log.info("Creating variant: {}".format(variant)) - - creator_identifier = "io.openpype.creators.maya.yeticache" - - host = registered_host() - create_context = CreateContext(host) - - with lib.maintained_selection(): - cmds.select(yeti_nodes, noExpand=True) - create_context.create( - creator_identifier=creator_identifier, - variant=variant, - pre_create_data={"use_selection": True} - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/__init__.py b/server_addon/maya/client/ayon_maya/plugins/publish/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_animation.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_animation.py deleted file mode 100644 index 528d981c4b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_animation.py +++ /dev/null @@ -1,59 +0,0 @@ -import maya.cmds as cmds -import pyblish.api -from ayon_maya.api import plugin - - -class CollectAnimationOutputGeometry(plugin.MayaInstancePlugin): - """Collect out hierarchy data for instance. - - Collect all hierarchy nodes which reside in the out_SET of the animation - instance or point cache instance. This is to unify the logic of retrieving - that specific data. This eliminates the need to write two separate pieces - of logic to fetch all hierarchy nodes. - - Results in a list of nodes from the content of the instances - - """ - - order = pyblish.api.CollectorOrder + 0.4 - families = ["animation"] - label = "Collect Animation Output Geometry" - - ignore_type = ["constraints"] - - def process(self, instance): - """Collect the hierarchy nodes""" - - product_type = instance.data["productType"] - out_set = next((i for i in instance.data["setMembers"] if - i.endswith("out_SET")), None) - - if out_set is None: - self.log.warning(( - "Expecting out_SET for instance of product type '{}'" - ).format(product_type)) - return - - members = cmds.ls(cmds.sets(out_set, query=True), long=True) - - # Get all the relatives of the members - descendants = cmds.listRelatives(members, - allDescendents=True, - fullPath=True) or [] - descendants = cmds.ls(descendants, noIntermediate=True, long=True) - - # Add members and descendants together for a complete overview - - hierarchy = members + descendants - - # Ignore certain node types (e.g. 
constraints) - ignore = cmds.ls(hierarchy, type=self.ignore_type, long=True) - if ignore: - ignore = set(ignore) - hierarchy = [node for node in hierarchy if node not in ignore] - - # Store data in the instance for the validator - instance.data["out_hierarchy"] = hierarchy - - if instance.data.get("farm"): - instance.data["families"].append("publish.farm") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_arnold_scene_source.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_arnold_scene_source.py deleted file mode 100644 index c9dd0b8063..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_arnold_scene_source.py +++ /dev/null @@ -1,58 +0,0 @@ -import pyblish.api -from ayon_maya.api.lib import get_all_children -from ayon_maya.api import plugin -from maya import cmds - - -class CollectArnoldSceneSource(plugin.MayaInstancePlugin): - """Collect Arnold Scene Source data.""" - - # Offset to be after renderable camera collection. - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Arnold Scene Source" - families = ["ass", "assProxy"] - - def process(self, instance): - instance.data["members"] = [] - for set_member in instance.data["setMembers"]: - if cmds.nodeType(set_member) != "objectSet": - instance.data["members"].extend(self.get_hierarchy(set_member)) - continue - - members = cmds.sets(set_member, query=True) - members = cmds.ls(members, long=True) - if members is None: - self.log.warning( - "Skipped empty instance: \"%s\" " % set_member - ) - continue - if set_member.endswith("proxy_SET"): - instance.data["proxy"] = self.get_hierarchy(members) - - # Use camera in object set if present else default to render globals - # camera. - cameras = cmds.ls(type="camera", long=True) - renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] - if renderable: - camera = renderable[0] - for node in instance.data["members"]: - camera_shapes = cmds.listRelatives( - node, shapes=True, type="camera" - ) - if camera_shapes: - camera = node - instance.data["camera"] = camera - else: - self.log.debug("No renderable cameras found.") - - self.log.debug("data: {}".format(instance.data)) - - def get_hierarchy(self, nodes): - """Return nodes with all their children""" - nodes = cmds.ls(nodes, long=True) - if not nodes: - return [] - children = get_all_children(nodes) - # Make sure nodes merged with children only - # contains unique entries - return list(set(nodes + list(children))) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_assembly.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_assembly.py deleted file mode 100644 index e57d70662c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_assembly.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Collect all relevant assembly items. - -Todo: - Publish of assembly need unique namespace for all assets, we should - create validator for this. - -""" -from collections import defaultdict -import pyblish.api - -from maya import cmds, mel -from ayon_maya import api -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class CollectAssembly(plugin.MayaInstancePlugin): - """Collect all relevant assembly items - - Collected data: - - * File name - * Compatible loader - * Matrix per instance - * Namespace - - Note: GPU caches are currently not supported in the pipeline. There is no - logic yet which supports the swapping of GPU cache to renderable objects. 
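-
-    The result is stored on the instance as "scenedata" (grouped per
-    representation id) and "nodesHierarchy" (unique parent transforms).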
-
-    """
-
-    order = pyblish.api.CollectorOrder + 0.49
-    label = "Assembly"
-    families = ["assembly"]
-
-    def process(self, instance):
-
-        # Find containers
-        containers = api.ls()
-
-        # Get all content from the instance
-        instance_lookup = set(cmds.ls(instance, type="transform", long=True))
-        data = defaultdict(list)
-
-        hierarchy_nodes = []
-        for container in containers:
-
-            root = lib.get_container_transforms(container, root=True)
-            if not root or root not in instance_lookup:
-                continue
-
-            # Retrieve the hierarchy
-            parent = cmds.listRelatives(root, parent=True, fullPath=True)[0]
-            hierarchy_nodes.append(parent)
-
-            # Temporary warning for GPU cache which are not supported yet
-            loader = container["loader"]
-            if loader == "GpuCacheLoader":
-                self.log.warning("GPU Cache Loader is currently not supported "
-                                 "in the pipeline, we will export it though")
-
-            # Gather info for new data entry
-            representation_id = container["representation"]
-            instance_data = {"loader": loader,
-                             "parent": parent,
-                             "namespace": container["namespace"]}
-
-            # Check if matrix differs from default and store changes
-            matrix_data = self.get_matrix_data(root)
-            if matrix_data:
-                instance_data["matrix"] = matrix_data
-
-            data[representation_id].append(instance_data)
-
-        instance.data["scenedata"] = dict(data)
-        instance.data["nodesHierarchy"] = list(set(hierarchy_nodes))
-
-    def get_file_rule(self, rule):
-        return mel.eval('workspace -query -fileRuleEntry "{}"'.format(rule))
-
-    def get_matrix_data(self, node):
-        """Get the matrix of the node when it is not the default
-
-        A matrix which differs from the default is returned, otherwise
-        None is returned.
-
-        Args:
-            node (str): Name of the transform node.
-
-        Returns:
-            list | None: Matrix values if not default.
-        """
-
-        matrix = cmds.xform(node, query=True, matrix=True)
-        if matrix == lib.DEFAULT_MATRIX:
-            return
-
-        return matrix
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_current_file.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_current_file.py
deleted file mode 100644
index 8bd1908c73..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_current_file.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import pyblish.api
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class CollectCurrentFile(plugin.MayaContextPlugin):
-    """Inject the current working file."""
-
-    order = pyblish.api.CollectorOrder - 0.4
-    label = "Maya Current File"
-
-    def process(self, context):
-        """Inject the current working file"""
-        context.data['currentFile'] = cmds.file(query=True, sceneName=True)
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_animation.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_animation.py
deleted file mode 100644
index 83f42667a5..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_animation.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-import pyblish.api
-from ayon_core.pipeline import OptionalPyblishPluginMixin
-from ayon_maya.api import plugin
-from maya import cmds  # noqa
-
-
-class CollectFbxAnimation(plugin.MayaInstancePlugin,
-                          OptionalPyblishPluginMixin):
-    """Collect Animated Rig Data for FBX Extractor."""
-
-    order = pyblish.api.CollectorOrder + 0.2
-    label = "Collect Fbx Animation"
-    families = ["animation"]
-    optional = True
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-        skeleton_sets = [
-            i for i in instance
-            if i.endswith("skeletonAnim_SET")
-        ]
-        if not skeleton_sets:
-            return
-
-        instance.data["families"].append("animation.fbx")
-        instance.data["animated_skeleton"] = []
-        for skeleton_set in skeleton_sets:
-            skeleton_content = cmds.sets(skeleton_set, query=True)
-            self.log.debug(
-                "Collected animated skeleton data: {}".format(
-                    skeleton_content
-                ))
-            if skeleton_content:
-                instance.data["animated_skeleton"] = skeleton_content
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_camera.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_camera.py
deleted file mode 100644
index f6791b6e72..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_camera.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# -*- coding: utf-8 -*-
-import pyblish.api
-from ayon_maya.api import plugin
-from maya import cmds  # noqa
-
-
-class CollectFbxCamera(plugin.MayaInstancePlugin):
-    """Collect Camera for FBX export."""
-
-    order = pyblish.api.CollectorOrder + 0.2
-    label = "Collect Camera for FBX export"
-    families = ["camera"]
-
-    def process(self, instance):
-        if not instance.data.get("families"):
-            instance.data["families"] = []
-
-        if "fbx" not in instance.data["families"]:
-            instance.data["families"].append("fbx")
-
-        instance.data["cameras"] = True
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_model.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_model.py
deleted file mode 100644
index f3902a2868..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_fbx_model.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import pyblish.api
-from ayon_core.pipeline import OptionalPyblishPluginMixin
-from ayon_maya.api import plugin
-
-
-class CollectFbxModel(plugin.MayaInstancePlugin,
-                      OptionalPyblishPluginMixin):
-    """Collect Model for FBX export."""
-
-    order = pyblish.api.CollectorOrder + 0.2
-    label = "Collect Fbx Model"
-    families = ["model"]
-    optional = True
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-
-        if not instance.data.get("families"):
-            instance.data["families"] = []
-
-        if "fbx" not in instance.data["families"]:
-            instance.data["families"].append("fbx")
-
-        for key in {
-            "bakeComplexAnimation", "bakeResampleAnimation",
-            "skins", "constraints", "lights"}:
-            instance.data[key] = False
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_file_dependencies.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_file_dependencies.py
deleted file mode 100644
index db797f0d09..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_file_dependencies.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from maya import cmds
-from ayon_maya.api import plugin
-import pyblish.api
-
-
-class CollectFileDependencies(plugin.MayaContextPlugin):
-    """Gather all files referenced in this scene."""
-
-    label = "Collect File Dependencies"
-    order = pyblish.api.CollectorOrder - 0.49
-    families = ["renderlayer"]
-
-    @classmethod
-    def apply_settings(cls, project_settings):
-        # Disable plug-in if not used for deadline submission anyway
-        if "deadline" not in project_settings:
-            cls.enabled = False
-            return
-        settings = (
-            project_settings
-            ["deadline"]
-            ["publish"]
-            ["MayaSubmitDeadline"]
-        )
-        cls.enabled = settings.get("asset_dependencies", True)
-
-    def process(self, context):
-        dependencies = set()
-        for node in cmds.ls(type="file"):
-            path = cmds.getAttr("{}.{}".format(node, "fileTextureName"))
-            if path not in dependencies:
-                dependencies.add(path)
-
-        for node in cmds.ls(type="AlembicNode"):
-            path = 
cmds.getAttr("{}.{}".format(node, "abc_File")) - if path not in dependencies: - dependencies.add(path) - - context.data["fileDependencies"] = list(dependencies) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_gltf.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_gltf.py deleted file mode 100644 index 7ee23d289c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_gltf.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_maya.api import plugin - - -class CollectGLTF(plugin.MayaInstancePlugin): - """Collect Assets for GLTF/GLB export.""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Asset for GLTF/GLB export" - families = ["model", "animation", "pointcache"] - - def process(self, instance): - if not instance.data.get("families"): - instance.data["families"] = [] - - if "gltf" not in instance.data["families"]: - instance.data["families"].append("gltf") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_history.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_history.py deleted file mode 100644 index 9041d4d1d5..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_history.py +++ /dev/null @@ -1,46 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds - - -class CollectMayaHistory(plugin.MayaInstancePlugin): - """Collect history for instances from the Maya scene - - Note: - This removes render layers collected in the history - - This is separate from Collect Instances so we can target it towards only - specific product types. - - """ - - order = pyblish.api.CollectorOrder + 0.1 - hosts = ["maya"] - label = "Maya History" - families = ["rig"] - - def process(self, instance): - - kwargs = {} - if int(cmds.about(version=True)) >= 2020: - # New flag since Maya 2020 which makes cmds.listHistory faster - kwargs = {"fastIteration": True} - else: - self.log.debug("Ignoring `fastIteration` flag before Maya 2020..") - - # Collect the history with long names - history = set(cmds.listHistory(instance, leaf=False, **kwargs) or []) - history = cmds.ls(list(history), long=True) - - # Exclude invalid nodes (like renderlayers) - exclude = cmds.ls(type="renderLayer", long=True) - if exclude: - exclude = set(exclude) # optimize lookup - history = [x for x in history if x not in exclude] - - # Combine members with history - members = instance[:] + history - members = list(set(members)) # ensure unique - - # Update the instance - instance[:] = members diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_inputs.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_inputs.py deleted file mode 100644 index 67d4a3f378..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_inputs.py +++ /dev/null @@ -1,213 +0,0 @@ -import copy - -from maya import cmds -import maya.api.OpenMaya as om -import pyblish.api - -from ayon_core.pipeline import registered_host -from ayon_maya.api.lib import get_container_members -from ayon_maya.api.lib_rendersetup import get_shader_in_layer -from ayon_maya.api import plugin - - -def iter_history(nodes, - filter=om.MFn.kInvalid, - direction=om.MItDependencyGraph.kUpstream): - """Iterate unique upstream history for list of nodes. - - This acts as a replacement to maya.cmds.listHistory. - It's faster by about 2x-3x. 
It returns less than - maya.cmds.listHistory as it excludes the input nodes - from the output (unless an input node was history - for another input node). It also excludes duplicates. - - Args: - nodes (list): Maya node names to start search from. - filter (om.MFn.Type): Filter to only specific types. - e.g. to dag nodes using om.MFn.kDagNode - direction (om.MItDependencyGraph.Direction): Direction to traverse in. - Defaults to upstream. - - Yields: - str: Node names in upstream history. - - """ - if not nodes: - return - - sel = om.MSelectionList() - for node in nodes: - sel.add(node) - - it = om.MItDependencyGraph(sel.getDependNode(0)) # init iterator - handle = om.MObjectHandle - - traversed = set() - fn_dep = om.MFnDependencyNode() - fn_dag = om.MFnDagNode() - for i in range(sel.length()): - - start_node = sel.getDependNode(i) - start_node_hash = handle(start_node).hashCode() - if start_node_hash in traversed: - continue - - it.resetTo(start_node, - filter=filter, - direction=direction) - while not it.isDone(): - - node = it.currentNode() - node_hash = handle(node).hashCode() - - if node_hash in traversed: - it.prune() - it.next() # noqa: B305 - continue - - traversed.add(node_hash) - - if node.hasFn(om.MFn.kDagNode): - fn_dag.setObject(node) - yield fn_dag.fullPathName() - else: - fn_dep.setObject(node) - yield fn_dep.name() - - it.next() # noqa: B305 - - -def collect_input_containers(containers, nodes): - """Collect containers that contain any of the node in `nodes`. - - This will return any loaded AYON container that contains at least one of - the nodes. As such, the AYON container is an input for it. Or in short, - there are member nodes of that container. - - Returns: - list: Input loaded containers - - """ - # Assume the containers have collected their cached '_members' data - # in the collector. - return [container for container in containers - if any(node in container["_members"] for node in nodes)] - - -class CollectUpstreamInputs(plugin.MayaInstancePlugin): - """Collect input source inputs for this publish. - - This will include `inputs` data of which loaded publishes were used in the - generation of this publish. This leaves an upstream trace to what was used - as input. - - """ - - label = "Collect Inputs" - order = pyblish.api.CollectorOrder + 0.34 - - def process(self, instance): - - # For large scenes the querying of "host.ls()" can be relatively slow - # e.g. up to a second. Many instances calling it easily slows this - # down. As such, we cache it so we trigger it only once. 
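-        # The cache is stored on the publish context, so all instances in
-        # one publish session share a single query.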
- # todo: Instead of hidden cache make "CollectContainers" plug-in - cache_key = "__cache_containers" - scene_containers = instance.context.data.get(cache_key, None) - if scene_containers is None: - # Query the scenes' containers if there's no cache yet - host = registered_host() - scene_containers = list(host.ls()) - for container in scene_containers: - # Embed the members into the container dictionary - container_members = set(get_container_members(container)) - container["_members"] = container_members - instance.context.data["__cache_containers"] = scene_containers - - # Collect the relevant input containers for this instance - if "renderlayer" in set(instance.data.get("families", [])): - # Special behavior for renderlayers - self.log.debug("Collecting renderlayer inputs....") - containers = self._collect_renderlayer_inputs(scene_containers, - instance) - - else: - # Basic behavior - nodes = instance[:] - - # Include any input connections of history with long names - # For optimization purposes only trace upstream from shape nodes - # looking for used dag nodes. This way having just a constraint - # on a transform is also ignored which tended to give irrelevant - # inputs for the majority of our use cases. We tend to care more - # about geometry inputs. - shapes = cmds.ls(nodes, - type=("mesh", "nurbsSurface", "nurbsCurve"), - noIntermediate=True) - if shapes: - history = list(iter_history(shapes, filter=om.MFn.kShape)) - history = cmds.ls(history, long=True) - - # Include the transforms in the collected history as shapes - # are excluded from containers - transforms = cmds.listRelatives(cmds.ls(history, shapes=True), - parent=True, - fullPath=True, - type="transform") - if transforms: - history.extend(transforms) - - if history: - nodes = list(set(nodes + history)) - - # Collect containers for the given set of nodes - containers = collect_input_containers(scene_containers, - nodes) - - inputs = [c["representation"] for c in containers] - instance.data["inputRepresentations"] = inputs - self.log.debug("Collected inputs: %s" % inputs) - - def _collect_renderlayer_inputs(self, scene_containers, instance): - """Collects inputs from nodes in renderlayer, incl. 
shaders + camera""" - - # Get the renderlayer - renderlayer = instance.data.get("setMembers") - - if renderlayer == "defaultRenderLayer": - # Assume all loaded containers in the scene are inputs - # for the masterlayer - return copy.deepcopy(scene_containers) - else: - # Get the members of the layer - members = cmds.editRenderLayerMembers(renderlayer, - query=True, - fullNames=True) or [] - - # In some cases invalid objects are returned from - # `editRenderLayerMembers` so we filter them out - members = cmds.ls(members, long=True) - - # Include all children - children = cmds.listRelatives(members, - allDescendents=True, - fullPath=True) or [] - members.extend(children) - - # Include assigned shaders in renderlayer - shapes = cmds.ls(members, shapes=True, long=True) - shaders = set() - for shape in shapes: - shape_shaders = get_shader_in_layer(shape, layer=renderlayer) - if not shape_shaders: - continue - shaders.update(shape_shaders) - members.extend(shaders) - - # Explicitly include the camera being rendered in renderlayer - cameras = instance.data.get("cameras") - members.extend(cameras) - - containers = collect_input_containers(scene_containers, members) - - return containers diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_instances.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_instances.py deleted file mode 100644 index 0ca43d4be9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_instances.py +++ /dev/null @@ -1,115 +0,0 @@ -from maya import cmds - -import pyblish.api -from ayon_maya.api.lib import get_all_children -from ayon_maya.api import plugin - - -class CollectNewInstances(plugin.MayaInstancePlugin): - """Gather members for instances and pre-defined attribute - - This collector takes into account assets that are associated with - an objectSet and marked with a unique identifier; - - Identifier: - id (str): "ayon.create.instance" - - Limitations: - - Does not take into account nodes connected to those - within an objectSet. Extractors are assumed to export - with history preserved, but this limits what they will - be able to achieve and the amount of data available - to validators. An additional collector could also - append this input data into the instance, as we do - for `pype.rig` with collect_history. 
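-
-    Besides the hierarchy members, the exact objectSet content is stored on
-    the instance as "setMembers" and frame range values are cast to int.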
- - """ - - label = "Collect New Instance Data" - order = pyblish.api.CollectorOrder - hosts = ["maya"] - - valid_empty_product_types = {"workfile", "renderlayer"} - - def process(self, instance): - - objset = instance.data.get("instance_node") - if not objset: - self.log.debug("Instance has no `instance_node` data") - - # TODO: We might not want to do this in the future - # Merge creator attributes into instance.data just backwards compatible - # code still runs as expected - creator_attributes = instance.data.get("creator_attributes", {}) - if creator_attributes: - instance.data.update(creator_attributes) - - members = cmds.sets(objset, query=True) or [] - if members: - # Collect members - members = cmds.ls(members, long=True) or [] - - # Collect full hierarchy - dag_members = cmds.ls(members, type="dagNode", long=True) - children = get_all_children(dag_members, - ignore_intermediate_objects=True) - - members_hierarchy = set(members) - members_hierarchy.update(children) - if creator_attributes.get("includeParentHierarchy", True): - members_hierarchy.update(self.get_all_parents(dag_members)) - - instance[:] = members_hierarchy - - elif ( - instance.data["productType"] not in self.valid_empty_product_types - ): - self.log.warning("Empty instance: \"%s\" " % objset) - # Store the exact members of the object set - instance.data["setMembers"] = members - - # TODO: This might make more sense as a separate collector - # Convert frame values to integers - for attr_name in ( - "handleStart", "handleEnd", "frameStart", "frameEnd", - ): - value = instance.data.get(attr_name) - if value is not None: - instance.data[attr_name] = int(value) - - # Append start frame and end frame to label if present - if "frameStart" in instance.data and "frameEnd" in instance.data: - # Take handles from context if not set locally on the instance - for key in ["handleStart", "handleEnd"]: - if key not in instance.data: - value = instance.context.data[key] - if value is not None: - value = int(value) - instance.data[key] = value - - instance.data["frameStartHandle"] = int( - instance.data["frameStart"] - instance.data["handleStart"] - ) - instance.data["frameEndHandle"] = int( - instance.data["frameEnd"] + instance.data["handleEnd"] - ) - - def get_all_parents(self, nodes): - """Get all parents by using string operations (optimization) - - Args: - nodes (iterable): the nodes which are found in the objectSet - - Returns: - set - """ - - parents = set() - for node in nodes: - split_parts = node.split("|") - items = [ - "|".join(split_parts[:i]) for i in range(2, len(split_parts)) - ] - parents.update(items) - - return parents diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_look.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_look.py deleted file mode 100644 index 691933babd..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_look.py +++ /dev/null @@ -1,674 +0,0 @@ -# -*- coding: utf-8 -*- -"""Maya look collector.""" -import glob -import os -import re - -import pyblish.api -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds # noqa - -SHAPE_ATTRS = {"castsShadows", - "receiveShadows", - "motionBlur", - "primaryVisibility", - "smoothShading", - "visibleInReflections", - "visibleInRefractions", - "doubleSided", - "opposite"} - - -def get_pxr_multitexture_file_attrs(node): - attrs = [] - for i in range(9): - if cmds.attributeQuery("filename{}".format(i), node=node, ex=True): - file = cmds.getAttr("{}.filename{}".format(node, 
i))
-            if file:
-                attrs.append("filename{}".format(i))
-    return attrs
-
-
-FILE_NODES = {
-    # maya
-    "file": "fileTextureName",
-    # arnold (mtoa)
-    "aiImage": "filename",
-    # redshift
-    "RedshiftNormalMap": "tex0",
-    # renderman
-    "PxrBump": "filename",
-    "PxrNormalMap": "filename",
-    "PxrMultiTexture": get_pxr_multitexture_file_attrs,
-    "PxrPtexture": "filename",
-    "PxrTexture": "filename"
-}
-
-RENDER_SET_TYPES = [
-    "VRayDisplacement",
-    "VRayLightMesh",
-    "VRayObjectProperties",
-    "RedshiftObjectId",
-    "RedshiftMeshParameters",
-]
-
-# Keep only node types that actually exist
-all_node_types = set(cmds.allNodeTypes())
-for node_type in list(FILE_NODES.keys()):
-    if node_type not in all_node_types:
-        FILE_NODES.pop(node_type)
-
-RENDER_SET_TYPES = [node_type for node_type in RENDER_SET_TYPES
-                    if node_type in all_node_types]
-del all_node_types
-
-# Cache pixar dependency node types so we can perform a type lookup against it
-PXR_NODES = set()
-if cmds.pluginInfo("RenderMan_for_Maya", query=True, loaded=True):
-    PXR_NODES = set(
-        cmds.pluginInfo("RenderMan_for_Maya",
-                        query=True,
-                        dependNode=True)
-    )
-
-
-def get_attributes(dictionary, attr, node=None):
-    # type: (dict, str, str) -> list
-    if callable(dictionary[attr]):
-        val = dictionary[attr](node)
-    else:
-        val = dictionary.get(attr, [])
-
-    return val if isinstance(val, list) else [val]
-
-
-def get_look_attrs(node):
-    """Returns attributes of a node that are important for the look.
-
-    These are the "changed" attributes (those that have edits applied
-    in the current scene).
-
-    Returns:
-        list: Attribute names to extract
-
-    """
-    # When referenced get only attributes that are "changed since file open"
-    # which includes any reference edits, otherwise take *all* user defined
-    # attributes
-    is_referenced = cmds.referenceQuery(node, isNodeReferenced=True)
-    result = cmds.listAttr(node, userDefined=True,
-                           changedSinceFileOpen=is_referenced) or []
-
-    # `cbId` is added when a scene is saved, ignore by default
-    if "cbId" in result:
-        result.remove("cbId")
-
-    # For shapes allow render stat changes
-    if cmds.objectType(node, isAType="shape"):
-        attrs = cmds.listAttr(node, changedSinceFileOpen=True) or []
-        for attr in attrs:
-            if attr in SHAPE_ATTRS or attr.startswith('ai'):
-                result.append(attr)
-    return result
-
-
-def node_uses_image_sequence(node, node_path):
-    # type: (str, str) -> bool
-    """Return whether file node uses an image sequence or single image.
-
-    Determine if a node uses an image sequence or just a single image,
-    not always obvious from its file path alone.
-
-    Args:
-        node (str): Name of the Maya node
-        node_path (str): The file path of the node
-
-    Returns:
-        bool: True if node uses an image sequence
-
-    """
-
-    # useFrameExtension indicates an explicit image sequence
-    try:
-        use_frame_extension = cmds.getAttr('%s.useFrameExtension' % node)
-    except ValueError:
-        use_frame_extension = False
-    if use_frame_extension:
-        return True
-
-    # The following tokens imply a sequence
-    patterns = ["<udim>", "<tile>", "<uvtile>",
-                "u<u>_v<v>", "<f>"]
-    node_path_lowered = node_path.lower()
-    return any(pattern in node_path_lowered for pattern in patterns)
-
-
-def seq_to_glob(path):
-    """Takes an image sequence path and returns it in glob format,
-    with the frame number replaced by a '*'.
-
-    Image sequences may be numerical sequences, e.g. /path/to/file.1001.exr
-    will return as /path/to/file.*.exr.
-
-    Image sequences may also use tokens to denote sequences, e.g.
-    /path/to/texture.<UDIM>.tif will return as /path/to/texture.*.tif.
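-
-    For example:
-        seq_to_glob("/path/to/file.1001.exr")
-        # -> "/path/to/file.*.exr"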
-
-    Args:
-        path (str): the image sequence path
-
-    Returns:
-        str: Return glob string that matches the filename pattern.
-
-    """
-
-    if path is None:
-        return path
-
-    # If the path contains any of these patterns, convert them to "*"
-    patterns = {
-        "<udim>": "<udim>",
-        "<tile>": "<tile>",
-        "<uvtile>": "<uvtile>",
-        "#": "#",
-        "u<u>_v<v>": "<u>|<v>",
-        "<frame0": "<frame0\\d+>",
-        "<f>": "<f>"
-    }
-
-    lower = path.lower()
-    has_pattern = False
-    for pattern, regex_pattern in patterns.items():
-        if pattern in lower:
-            path = re.sub(regex_pattern, "*", path, flags=re.IGNORECASE)
-            has_pattern = True
-
-    if has_pattern:
-        return path
-
-    base = os.path.basename(path)
-    matches = list(re.finditer(r'\d+', base))
-    if matches:
-        match = matches[-1]
-        new_base = '{0}*{1}'.format(base[:match.start()],
-                                    base[match.end():])
-        head = os.path.dirname(path)
-        return os.path.join(head, new_base)
-    else:
-        return path
-
-
-def get_file_node_paths(node):
-    # type: (str) -> list
-    """Get the file path used by a Maya file node.
-
-    Args:
-        node (str): Name of the Maya file node
-
-    Returns:
-        list: the file paths in use
-
-    """
-    # if the path appears to be sequence, use computedFileTextureNamePattern,
-    # this preserves the <> tag
-    if cmds.attributeQuery('computedFileTextureNamePattern',
-                           node=node,
-                           exists=True):
-        plug = '{0}.computedFileTextureNamePattern'.format(node)
-        texture_pattern = cmds.getAttr(plug)
-
-        patterns = ["<udim>",
-                    "<tile>",
-                    "u<u>_v<v>",
-                    "<uvtile>",
-                    "<f>"]
-        lower = texture_pattern.lower()
-        if any(pattern in lower for pattern in patterns):
-            return [texture_pattern]
-
-    try:
-        file_attributes = get_attributes(
-            FILE_NODES, cmds.nodeType(node), node)
-    except KeyError:
-        file_attributes = ["fileTextureName"]
-
-    files = []
-    for file_attr in file_attributes:
-        if cmds.attributeQuery(file_attr, node=node, exists=True):
-            files.append(cmds.getAttr("{}.{}".format(node, file_attr)))
-
-    return files
-
-
-def get_file_node_files(node):
-    """Return the file paths related to the file node
-
-    Note:
-        Will only return existing files. Returns an empty list
-        if no valid existing files are linked.
-
-    Returns:
-        list: List of full file paths.
-
-    """
-    paths = get_file_node_paths(node)
-
-    # For sequences get all files and filter to only existing files
-    result = []
-    for path in paths:
-        if node_uses_image_sequence(node, path):
-            glob_pattern = seq_to_glob(path)
-            result.extend(glob.glob(glob_pattern))
-        elif os.path.exists(path):
-            result.append(path)
-
-    return result
-
-
-class CollectLook(plugin.MayaInstancePlugin):
-    """Collect look data for instance.
-
-    For the shapes/transforms of the referenced object to collect the look
-    for, retrieve the user-defined attributes (like V-Ray attributes) and
-    their values as they were created in the current scene.
-
-    For the members of the instance collect the sets (shadingEngines and
-    other sets, e.g. VRayDisplacement) they are in along with the exact
-    membership relations.
-
-    Collects:
-        lookAttributes (list): Nodes in instance with their altered attributes
-        lookSetRelations (list): Sets and their memberships
-        lookSets (list): List of set names included in the look
-
-    """
-
-    order = pyblish.api.CollectorOrder + 0.2
-    families = ["look"]
-    label = "Collect Look"
-
-    def process(self, instance):
-        """Collect the Look in the instance with the correct layer settings"""
-        renderlayer = instance.data.get("renderlayer", "defaultRenderLayer")
-        with lib.renderlayer(renderlayer):
-            self.collect(instance)
-
-    def collect(self, instance):
-        """Collect looks.
-
-        Args:
-            instance (pyblish.api.Instance): Instance to collect.
- - """ - self.log.debug("Looking for look associations " - "for %s" % instance.data['name']) - - # Discover related object sets - self.log.debug("Gathering sets ...") - sets = self.collect_sets(instance) - - # Lookup set (optimization) - instance_lookup = set(cmds.ls(instance, long=True)) - - self.log.debug("Gathering set relations ...") - # Ensure iteration happen in a list to allow removing keys from the - # dict within the loop - for obj_set in list(sets): - self.log.debug("From {}".format(obj_set)) - # Get all nodes of the current objectSet (shadingEngine) - for member in cmds.ls(cmds.sets(obj_set, query=True), long=True): - member_data = self.collect_member_data(member, - instance_lookup) - if member_data: - # Add information of the node to the members list - sets[obj_set]["members"].append(member_data) - - # Remove sets that didn't have any members assigned in the end - # Thus the data will be limited to only what we need. - if not sets[obj_set]["members"]: - self.log.debug( - "Removing redundant set information: {}".format(obj_set) - ) - sets.pop(obj_set, None) - - self.log.debug("Gathering attribute changes to instance members..") - attributes = self.collect_attributes_changed(instance) - - # Store data on the instance - instance.data["lookData"] = { - "attributes": attributes, - "relationships": sets - } - - # Collect file nodes used by shading engines (if we have any) - files = [] - look_sets = list(sets.keys()) - if look_sets: - self.log.debug("Found look sets: {}".format(look_sets)) - files = self.collect_file_nodes(look_sets) - - self.log.debug("Collected file nodes:\n{}".format(files)) - - # Collect texture resources if any file nodes are found - resources = [] - for node in files: - resources.extend(self.collect_resources(node)) - instance.data["resources"] = resources - self.log.debug("Collected resources: {}".format(resources)) - - # Log warning when no relevant sets were retrieved for the look. - if ( - not instance.data["lookData"]["relationships"] - and "model" not in self.families - ): - self.log.warning("No sets found for the nodes in the " - "instance: %s" % instance[:]) - - # Ensure unique shader sets - # Add shader sets to the instance for unify ID validation - instance.extend(shader for shader in look_sets if shader - not in instance_lookup) - - self.log.debug("Collected look for %s" % instance) - - def collect_file_nodes(self, look_sets): - """Get the entire node chain of the look sets and return file nodes - - Arguments: - look_sets (List[str]): List of sets and shading engines relevant - to the look. - - Returns: - List[str]: List of file node names. 
- - """ - - shader_attrs = [ - "surfaceShader", - "volumeShader", - "displacementShader", - "aiSurfaceShader", - "aiVolumeShader", - "rman__surface", - "rman__displacement" - ] - - # Get all material attrs for all look sets to retrieve their inputs - existing_attrs = [] - for look_set in look_sets: - for attr in shader_attrs: - if cmds.attributeQuery(attr, node=look_set, exists=True): - existing_attrs.append("{}.{}".format(look_set, attr)) - - materials = cmds.listConnections(existing_attrs, - source=True, - destination=False) or [] - - self.log.debug("Found materials:\n{}".format(materials)) - - # Get the entire node chain of the look sets - # history = cmds.listHistory(look_sets, allConnections=True) - # if materials list is empty, listHistory() will crash with - # RuntimeError - history = set() - if materials: - history.update(cmds.listHistory(materials, allConnections=True)) - - # Since we retrieved history only of the connected materials connected - # to the look sets above we now add direct history for some of the - # look sets directly handling render attribute sets - - # Maya (at least 2024) crashes with Warning when render set type - # isn't available. cmds.ls() will return empty list - if RENDER_SET_TYPES: - render_sets = cmds.ls(look_sets, type=RENDER_SET_TYPES) - if render_sets: - history.update( - cmds.listHistory(render_sets, - future=False, - pruneDagObjects=True) - or [] - ) - - # Get file nodes in the material history - files = cmds.ls(list(history), - # It's important only node types are passed that - # exist (e.g. for loaded plugins) because otherwise - # the result will turn back empty - type=list(FILE_NODES.keys()), - long=True) - - # Sort for log readability - files.sort() - - return files - - def collect_sets(self, instance): - """Collect all objectSets which are of importance for publishing - - It checks if all nodes in the instance are related to any objectSet - which need to be - - Args: - instance (pyblish.api.Instance): publish instance containing all - nodes to be published. - - Returns: - dict - """ - - sets = {} - for node in instance: - related_sets = lib.get_related_sets(node) - if not related_sets: - continue - - for objset in related_sets: - if objset in sets: - continue - - sets[objset] = {"uuid": lib.get_id(objset), "members": list()} - - return sets - - def collect_member_data(self, member, instance_members): - """Get all information of the node - Args: - member (str): the name of the node to check - instance_members (set): the collected instance members - - Returns: - dict - - """ - - node, components = (member.rsplit(".", 1) + [None])[:2] - - # Only include valid members of the instance - if node not in instance_members: - return - - node_id = lib.get_id(node) - if not node_id: - self.log.error("Member '{}' has no attribute 'cbId'".format(node)) - return - - member_data = {"name": node, "uuid": node_id} - if components: - member_data["components"] = components - - return member_data - - def collect_attributes_changed(self, instance): - """Collect all userDefined attributes which have changed - - Each node gets checked for user defined attributes which have been - altered during development. 
-
-    def collect_attributes_changed(self, instance):
-        """Collect all userDefined attributes which have changed
-
-        Each node gets checked for user defined attributes which have been
-        altered during development. Each change gets logged in a dictionary:
-
-        [{name: node,
-          uuid: uuid,
-          attributes: {attribute: value}}]
-
-        Args:
-            instance (list): all nodes which will be published
-
-        Returns:
-            list
-        """
-
-        attributes = []
-        for node in instance:
-
-            # Collect changes to "custom" attributes
-            node_attrs = get_look_attrs(node)
-
-            # Only include if there are any properties we care about
-            if not node_attrs:
-                continue
-
-            self.log.debug(
-                "Node \"{0}\" attributes: {1}".format(node, node_attrs)
-            )
-
-            node_attributes = {}
-            for attr in node_attrs:
-                if not cmds.attributeQuery(attr, node=node, exists=True):
-                    continue
-                attribute = "{}.{}".format(node, attr)
-                # We don't support mixed-type attributes yet.
-                if cmds.attributeQuery(attr, node=node, multi=True):
-                    self.log.warning("Attribute '{}' is mixed-type and is "
-                                     "not supported yet.".format(attribute))
-                    continue
-                if cmds.getAttr(attribute, type=True) == "message":
-                    continue
-                node_attributes[attr] = cmds.getAttr(attribute, asString=True)
-            # Only include if there are any properties we care about
-            if not node_attributes:
-                continue
-            attributes.append({"name": node,
-                               "uuid": lib.get_id(node),
-                               "attributes": node_attributes})
-
-        return attributes
-
-    def collect_resources(self, node):
-        """Collect the link to the file(s) used (resource)
-
-        Args:
-            node (str): name of the node
-
-        Returns:
-            dict
-        """
-        if cmds.nodeType(node) not in FILE_NODES:
-            self.log.error(
-                "Unsupported file node: {}".format(cmds.nodeType(node)))
-            raise AssertionError("Unsupported file node")
-
-        self.log.debug(
-            "Collecting resource: {} ({})".format(node, cmds.nodeType(node))
-        )
-
-        attributes = get_attributes(FILE_NODES, cmds.nodeType(node), node)
-        for attribute in attributes:
-            source = cmds.getAttr("{}.{}".format(
-                node,
-                attribute
-            ))
-
-            self.log.debug(" - file source: {}".format(source))
-            color_space_attr = "{}.colorSpace".format(node)
-            try:
-                color_space = cmds.getAttr(color_space_attr)
-            except ValueError:
-                # node doesn't have colorspace attribute
-                color_space = "Raw"
-
-            # Compare with the computed file path, e.g. the one with
-            # the pattern in it, to generate some logging information
-            # about this difference
-            # Only for file nodes with `fileTextureName` attribute
-            if attribute == "fileTextureName":
-                computed_source = cmds.getAttr(
-                    "{}.computedFileTextureNamePattern".format(node)
-                )
-                if source != computed_source:
-                    self.log.debug("Detected computed file pattern difference "
-                                   "from original pattern: {0} "
-                                   "({1} -> {2})".format(node,
-                                                         source,
-                                                         computed_source))
-
-            # renderman allows nodes to have filename attribute empty while
-            # you can have another incoming connection from a different node.
- if not source and cmds.nodeType(node) in PXR_NODES: - self.log.debug("Renderman: source is empty, skipping...") - continue - # We replace backslashes with forward slashes because V-Ray - # can't handle the UDIM files with the backslashes in the - # paths as the computed patterns - source = source.replace("\\", "/") - - files = get_file_node_files(node) - if len(files) == 0: - self.log.debug("No valid files found from node `%s`" % node) - - self.log.debug("collection of resource done:") - self.log.debug(" - node: {}".format(node)) - self.log.debug(" - attribute: {}".format(attribute)) - self.log.debug(" - source: {}".format(source)) - self.log.debug(" - file: {}".format(files)) - self.log.debug(" - color space: {}".format(color_space)) - - # Define the resource - yield { - "node": node, - # here we are passing not only attribute, but with node again - # this should be simplified and changed extractor. - "attribute": "{}.{}".format(node, attribute), - "source": source, # required for resources - "files": files, - "color_space": color_space - } - - -class CollectModelRenderSets(CollectLook): - """Collect render attribute sets for model instance. - - Collects additional render attribute sets so they can be - published with model. - - """ - - order = pyblish.api.CollectorOrder + 0.21 - families = ["model"] - label = "Collect Model Render Sets" - - def collect_sets(self, instance): - """Collect all related objectSets except shadingEngines - - Args: - instance (pyblish.api.Instance): publish instance containing all - nodes to be published. - - Returns: - dict - """ - - sets = {} - for node in instance: - related_sets = lib.get_related_sets(node) - if not related_sets: - continue - - for objset in related_sets: - if objset in sets: - continue - - if cmds.objectType(objset, isAType="shadingEngine"): - continue - - sets[objset] = {"uuid": lib.get_id(objset), "members": list()} - - return sets diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_scene_time.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_scene_time.py deleted file mode 100644 index c10d0fffbe..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_scene_time.py +++ /dev/null @@ -1,28 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds - - -class CollectMayaSceneTime(plugin.MayaInstancePlugin): - """Collect Maya Scene playback range - - This allows to reproduce the playback range for the content to be loaded. - It does *not* limit the extracted data to only data inside that time range. 
- - """ - - order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Maya Scene Time' - families = ["mayaScene"] - - def process(self, instance): - instance.data.update({ - "frameStart": int( - cmds.playbackOptions(query=True, minTime=True)), - "frameEnd": int( - cmds.playbackOptions(query=True, maxTime=True)), - "frameStartHandle": int( - cmds.playbackOptions(query=True, animationStartTime=True)), - "frameEndHandle": int( - cmds.playbackOptions(query=True, animationEndTime=True)) - }) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_units.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_units.py deleted file mode 100644 index 47888506ff..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_units.py +++ /dev/null @@ -1,31 +0,0 @@ -import maya.cmds as cmds -import maya.mel as mel -from ayon_maya.api import plugin - - -import pyblish.api - - -class CollectMayaUnits(plugin.MayaContextPlugin): - """Collect Maya's scene units.""" - - label = "Maya Units" - order = pyblish.api.CollectorOrder - - def process(self, context): - - # Get the current linear units - units = cmds.currentUnit(query=True, linear=True) - - # Get the current angular units ('deg' or 'rad') - units_angle = cmds.currentUnit(query=True, angle=True) - - # Get the current time units - # Using the mel command is simpler than using - # `cmds.currentUnit(q=1, time=1)`. Otherwise we - # have to parse the returned string value to FPS - fps = mel.eval('currentTimeUnitToFPS()') - - context.data['linearUnits'] = units - context.data['angularUnits'] = units_angle - context.data['fps'] = fps diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_workspace.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_workspace.py deleted file mode 100644 index a7b51e1fb3..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_maya_workspace.py +++ /dev/null @@ -1,25 +0,0 @@ -import os - -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds - - -class CollectMayaWorkspace(plugin.MayaContextPlugin): - """Inject the current workspace into context""" - - order = pyblish.api.CollectorOrder - 0.5 - label = "Maya Workspace" - - - def process(self, context): - workspace = cmds.workspace(rootDirectory=True, query=True) - if not workspace: - # Project has not been set. Files will - # instead end up next to the working file. - workspace = cmds.workspace(dir=True, query=True) - - # Maya returns forward-slashes by default - normalised = os.path.normpath(workspace) - - context.set_data('workspaceDir', value=normalised) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_model.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_model.py deleted file mode 100644 index 13e5a609e7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_model.py +++ /dev/null @@ -1,29 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds - - -class CollectModelData(plugin.MayaInstancePlugin): - """Collect model data - - Ensures always only a single frame is extracted (current frame). - - Todo: - Validate if is this plugin still useful. - - Note: - This is a workaround so that the `model` product type can use the - same pointcache extractor implementation as animation and pointcaches. - This always enforces the "current" frame to be published. 
- - """ - - order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Model Data' - families = ["model"] - - def process(self, instance): - # Extract only current frame (override) - frame = cmds.currentTime(query=True) - instance.data["frameStart"] = frame - instance.data["frameEnd"] = frame diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_multiverse_look.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_multiverse_look.py deleted file mode 100644 index ddf36b7eda..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_multiverse_look.py +++ /dev/null @@ -1,422 +0,0 @@ -import glob -import os -import re - -import pyblish.api -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - -SHAPE_ATTRS = ["castsShadows", - "receiveShadows", - "motionBlur", - "primaryVisibility", - "smoothShading", - "visibleInReflections", - "visibleInRefractions", - "doubleSided", - "opposite"] - -SHAPE_ATTRS = set(SHAPE_ATTRS) -COLOUR_SPACES = ['sRGB', 'linear', 'auto'] -MIPMAP_EXTENSIONS = ['tdl'] - - -class _NodeTypeAttrib(object): - """docstring for _NodeType""" - - def __init__(self, name, fname, computed_fname=None, colour_space=None): - self.name = name - self.fname = fname - self.computed_fname = computed_fname or fname - self.colour_space = colour_space or "colorSpace" - - def get_fname(self, node): - return "{}.{}".format(node, self.fname) - - def get_computed_fname(self, node): - return "{}.{}".format(node, self.computed_fname) - - def get_colour_space(self, node): - return "{}.{}".format(node, self.colour_space) - - def __str__(self): - return ( - "_NodeTypeAttrib(name={}, fname={}, " - "computed_fname={}, colour_space={})".format( - self.name, self.fname, self.computed_fname, self.colour_space) - ) - - -NODETYPES = { - "file": [_NodeTypeAttrib("file", "fileTextureName", - "computedFileTextureNamePattern")], - "aiImage": [_NodeTypeAttrib("aiImage", "filename")], - "RedshiftNormalMap": [_NodeTypeAttrib("RedshiftNormalMap", "tex0")], - "dlTexture": [_NodeTypeAttrib("dlTexture", "textureFile", - None, "textureFile_meta_colorspace")], - "dlTriplanar": [_NodeTypeAttrib("dlTriplanar", "colorTexture", - None, "colorTexture_meta_colorspace"), - _NodeTypeAttrib("dlTriplanar", "floatTexture", - None, "floatTexture_meta_colorspace"), - _NodeTypeAttrib("dlTriplanar", "heightTexture", - None, "heightTexture_meta_colorspace")] -} - - -def get_file_paths_for_node(node): - """Gets all the file paths in this node. - - Returns all filepaths that this node references. Some node types only - reference one, but others, like dlTriplanar, can reference 3. - - Args: - node (str): Name of the Maya node - - Returns - list(str): A list with all evaluated maya attributes for filepaths. - """ - - node_type = cmds.nodeType(node) - if node_type not in NODETYPES: - return [] - - paths = [] - for node_type_attr in NODETYPES[node_type]: - fname = cmds.getAttr("{}.{}".format(node, node_type_attr.fname)) - paths.append(fname) - return paths - - -def node_uses_image_sequence(node): - """Return whether file node uses an image sequence or single image. - - Determine if a node uses an image sequence or just a single image, - not always obvious from its file path alone. 
-
-    Args:
-        node (str): Name of the Maya node
-
-    Returns:
-        bool: True if node uses an image sequence
-
-    """
-
-    # useFrameExtension indicates an explicit image sequence
-    paths = get_file_node_paths(node)
-    paths = [path.lower() for path in paths]
-
-    # The following tokens imply a sequence
-    patterns = ["<udim>", "<tile>", "<uvtile>", "u<u>_v<v>", "<frame0"]
-
-    return any(pattern in path
-               for path in paths
-               for pattern in patterns)
-
-
-def seq_to_glob(path):
-    """Takes an image sequence path and returns it in glob format,
-    with the frame number replaced by a '*'.
-
-    Image sequences may be numerical sequences, e.g. /path/to/file.1001.exr
-    will return as /path/to/file.*.exr.
-
-    Image sequences may also use tokens to denote sequences, e.g.
-    /path/to/texture.<UDIM>.tif will return as /path/to/texture.*.tif.
-
-    Args:
-        path (str): the image sequence path
-
-    Returns:
-        str: Return glob string that matches the filename pattern.
-
-    """
-
-    if path is None:
-        return path
-
-    # If any of the patterns, convert the pattern
-    patterns = {
-        "<udim>": "<udim>",
-        "<tile>": "<tile>",
-        "<uvtile>": "<uvtile>",
-        "#": "#",
-        "u<u>_v<v>": "<u>|<v>",
-        "<frame0": "<frame0\\d+>",  # noqa - copied from collect_look.py
-        "<f>": "<f>"
-    }
-
-    lower = path.lower()
-    has_pattern = False
-    for pattern, regex_pattern in patterns.items():
-        if pattern in lower:
-            path = re.sub(regex_pattern, "*", path, flags=re.IGNORECASE)
-            has_pattern = True
-
-    if has_pattern:
-        return path
-
-    base = os.path.basename(path)
-    matches = list(re.finditer(r'\d+', base))
-    if matches:
-        match = matches[-1]
-        new_base = '{0}*{1}'.format(base[:match.start()],
-                                    base[match.end():])
-        head = os.path.dirname(path)
-        return os.path.join(head, new_base)
-    else:
-        return path
-
-
-def get_file_node_paths(node):
-    """Get the file path used by a Maya file node.
-
-    Args:
-        node (str): Name of the Maya file node
-
-    Returns:
-        list: the file paths in use
-
-    """
-    # if the path appears to be sequence, use computedFileTextureNamePattern,
-    # this preserves the <> tag
-    if cmds.attributeQuery('computedFileTextureNamePattern',
-                           node=node,
-                           exists=True):
-        plug = '{0}.computedFileTextureNamePattern'.format(node)
-        texture_pattern = cmds.getAttr(plug)
-
-        patterns = ["<udim>",
-                    "<tile>",
-                    "u<u>_v<v>",
-                    "<uvtile>",
-                    "<f>"]
-        lower = texture_pattern.lower()
-        if any(pattern in lower for pattern in patterns):
-            return [texture_pattern]
-
-    return get_file_paths_for_node(node)
-
-
-def get_file_node_files(node):
-    """Return the file paths related to the file node
-
-    Note:
-        Will only return existing files. Returns an empty list
-        if no valid existing files are linked.
-
-    Returns:
-        list: List of full file paths.
-
-    """
-
-    paths = get_file_node_paths(node)
-    paths = [cmds.workspace(expandName=path) for path in paths]
-    if node_uses_image_sequence(node):
-        globs = []
-        for path in paths:
-            globs += glob.glob(seq_to_glob(path))
-        return globs
-    else:
-        return list(filter(lambda x: os.path.exists(x), paths))
-
-
-def get_mipmap(fname):
-    for colour_space in COLOUR_SPACES:
-        for mipmap_ext in MIPMAP_EXTENSIONS:
-            mipmap_fname = '.'.join([fname, colour_space, mipmap_ext])
-            if os.path.exists(mipmap_fname):
-                return mipmap_fname
-    return None
-
-
-def is_mipmap(fname):
-    ext = os.path.splitext(fname)[1][1:]
-    if ext in MIPMAP_EXTENSIONS:
-        return True
-    return False
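To make the mipmap sidecar convention above concrete, a small illustrative sketch with hypothetical texture paths, using the COLOUR_SPACES and MIPMAP_EXTENSIONS constants defined earlier in this file:

    # "/textures/wood.tif" has a mipmap sidecar if a file such as
    # "/textures/wood.tif.sRGB.tdl" exists on disk:
    get_mipmap("/textures/wood.tif")          # -> sidecar path, or None
    is_mipmap("/textures/wood.tif.sRGB.tdl")  # -> True ('tdl' extension)
    is_mipmap("/textures/wood.tif")           # -> False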
- - """ - - order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Multiverse Look' - families = ["mvLook"] - - def process(self, instance): - # Load plugin first - cmds.loadPlugin("MultiverseForMaya", quiet=True) - import multiverse - - self.log.debug("Processing mvLook for '{}'".format(instance)) - - nodes = set() - for node in instance: - # We want only mvUsdCompoundShape nodes. - nodes_of_interest = cmds.ls(node, - dag=True, - shapes=False, - type="mvUsdCompoundShape", - noIntermediate=True, - long=True) - nodes.update(nodes_of_interest) - - sets = {} - instance.data["resources"] = [] - publishMipMap = instance.data["publishMipMap"] - - for node in nodes: - self.log.debug("Getting resources for '{}'".format(node)) - - # We know what nodes need to be collected, now we need to - # extract the materials overrides. - overrides = multiverse.ListMaterialOverridePrims(node) - for override in overrides: - matOver = multiverse.GetMaterialOverride(node, override) - - if isinstance(matOver, multiverse.MaterialSourceShadingGroup): - # We now need to grab the shadingGroup so add it to the - # sets we pass down the pipe. - shadingGroup = matOver.shadingGroupName - self.log.debug("ShadingGroup = '{}'".format(shadingGroup)) - sets[shadingGroup] = {"uuid": lib.get_id( - shadingGroup), "members": list()} - - # The SG may reference files, add those too! - history = cmds.listHistory( - shadingGroup, allConnections=True) - - # We need to iterate over node_types since `cmds.ls` may - # error out if we don't have the appropriate plugin loaded. - files = [] - for node_type in NODETYPES.keys(): - files += cmds.ls(history, - type=node_type, - long=True) - - for f in files: - resources = self.collect_resource(f, publishMipMap) - instance.data["resources"] += resources - - elif isinstance(matOver, multiverse.MaterialSourceUsdPath): - # TODO: Handle this later. - pass - - # Store data on the instance for validators, extractos, etc. - instance.data["lookData"] = { - "attributes": [], - "relationships": sets - } - - def collect_resource(self, node, publishMipMap): - """Collect the link to the file(s) used (resource) - Args: - node (str): name of the node - - Returns: - dict - """ - - node_type = cmds.nodeType(node) - self.log.debug("processing: {}/{}".format(node, node_type)) - - if node_type not in NODETYPES: - self.log.error("Unsupported file node: {}".format(node_type)) - raise AssertionError("Unsupported file node") - - resources = [] - for node_type_attr in NODETYPES[node_type]: - fname_attrib = node_type_attr.get_fname(node) - computed_fname_attrib = node_type_attr.get_computed_fname(node) - colour_space_attrib = node_type_attr.get_colour_space(node) - - source = cmds.getAttr(fname_attrib) - color_space = "Raw" - try: - color_space = cmds.getAttr(colour_space_attrib) - except ValueError: - # node doesn't have colorspace attribute, use "Raw" from before - pass - # Compare with the computed file path, e.g. 
the one with the - # pattern in it, to generate some logging information about this - # difference - # computed_attribute = "{}.computedFileTextureNamePattern".format(node) # noqa - computed_source = cmds.getAttr(computed_fname_attrib) - if source != computed_source: - self.log.debug("Detected computed file pattern difference " - "from original pattern: {0} " - "({1} -> {2})".format(node, - source, - computed_source)) - - # We replace backslashes with forward slashes because V-Ray - # can't handle the UDIM files with the backslashes in the - # paths as the computed patterns - source = source.replace("\\", "/") - - files = get_file_node_files(node) - files = self.handle_files(files, publishMipMap) - if len(files) == 0: - self.log.error("No valid files found from node `%s`" % node) - - self.log.debug("collection of resource done:") - self.log.debug(" - node: {}".format(node)) - self.log.debug(" - attribute: {}".format(fname_attrib)) - self.log.debug(" - source: {}".format(source)) - self.log.debug(" - file: {}".format(files)) - self.log.debug(" - color space: {}".format(color_space)) - - # Define the resource - resource = {"node": node, - "attribute": fname_attrib, - "source": source, # required for resources - "files": files, - "color_space": color_space} # required for resources - resources.append(resource) - return resources - - def handle_files(self, files, publishMipMap): - """This will go through all the files and make sure that they are - either already mipmapped or have a corresponding mipmap sidecar and - add that to the list.""" - if not publishMipMap: - return files - - extra_files = [] - self.log.debug("Expecting MipMaps, going to look for them.") - for fname in files: - self.log.debug("Checking '{}' for mipmaps".format(fname)) - if is_mipmap(fname): - self.log.debug(" - file is already MipMap, skipping.") - continue - - mipmap = get_mipmap(fname) - if mipmap: - self.log.debug(" mipmap found for '{}'".format(fname)) - extra_files.append(mipmap) - else: - self.log.warning(" no mipmap found for '{}'".format(fname)) - return files + extra_files diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_pointcache.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_pointcache.py deleted file mode 100644 index 8d0b45137f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_pointcache.py +++ /dev/null @@ -1,46 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds - - -class CollectPointcache(plugin.MayaInstancePlugin): - """Collect pointcache data for instance.""" - - order = pyblish.api.CollectorOrder + 0.4 - families = ["pointcache"] - label = "Collect Pointcache" - - def process(self, instance): - if instance.data.get("farm"): - instance.data["families"].append("publish.farm") - - proxy_set = None - for node in cmds.ls(instance.data["setMembers"], - exactType="objectSet"): - # Find proxy_SET objectSet in the instance for proxy meshes - if node.endswith("proxy_SET"): - members = cmds.sets(node, query=True) - if members is None: - self.log.debug("Skipped empty proxy_SET: \"%s\" " % node) - continue - self.log.debug("Found proxy set: {}".format(node)) - - proxy_set = node - instance.data["proxy"] = [] - instance.data["proxyRoots"] = [] - for member in members: - instance.data["proxy"].extend(cmds.ls(member, long=True)) - instance.data["proxyRoots"].extend( - cmds.ls(member, long=True) - ) - instance.data["proxy"].extend( - cmds.listRelatives(member, shapes=True, fullPath=True) - ) - self.log.debug( - "Found 
proxy members: {}".format(instance.data["proxy"]) - ) - break - - if proxy_set: - instance.remove(proxy_set) - instance.data["setMembers"].remove(proxy_set) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_remove_marked.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_remove_marked.py deleted file mode 100644 index 14d914cac5..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_remove_marked.py +++ /dev/null @@ -1,25 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin - - -class CollectRemoveMarked(plugin.MayaContextPlugin): - """Remove marked data - - Remove instances that have 'remove' in their instance.data - - """ - - order = pyblish.api.CollectorOrder + 0.499 - label = 'Remove Marked Instances' - - def process(self, context): - - self.log.debug(context) - # make ftrack publishable - instances_to_remove = [] - for instance in context: - if instance.data.get('remove'): - instances_to_remove.append(instance) - - for instance in instances_to_remove: - context.remove(instance) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_render.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_render.py deleted file mode 100644 index 160a019540..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_render.py +++ /dev/null @@ -1,331 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect render data. - -This collector will go through renderlayer instances and prepare all data -needed to detect the expected rendered files for a layer, with resolution, -frame ranges and collects the data needed for publishing on the farm. - -Requires: - instance -> families - - context -> currentFile - context -> user - -Provides: - instance -> label - instance -> subset - instance -> attachTo - instance -> setMembers - instance -> publish - instance -> frameStart - instance -> frameEnd - instance -> byFrameStep - instance -> renderer - instance -> family - instance -> asset - instance -> time - instance -> author - instance -> source - instance -> expectedFiles - instance -> resolutionWidth - instance -> resolutionHeight - instance -> pixelAspect -""" - -import json -import os -import platform - -import pyblish.api -from ayon_core.lib import get_formatted_current_time -from ayon_core.pipeline import KnownPublishError -from ayon_maya.api import lib -from ayon_maya.api.lib_renderproducts import ( - UnsupportedRendererException, - get as get_layer_render_products, -) -from ayon_maya.api import plugin -from maya import cmds - - -class CollectMayaRender(plugin.MayaInstancePlugin): - """Gather all publishable render layers from renderSetup.""" - - order = pyblish.api.CollectorOrder + 0.01 - hosts = ["maya"] - families = ["renderlayer"] - label = "Collect Render Layers" - sync_workfile_version = False - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - - def process(self, instance): - - # TODO: Re-add force enable of workfile instance? 
- # TODO: Re-add legacy layer support with LAYER_ prefix but in Creator - context = instance.context - - layer = instance.data["transientData"]["layer"] - objset = instance.data.get("instance_node") - filepath = context.data["currentFile"].replace("\\", "/") - - # check if layer is renderable - if not layer.isRenderable(): - msg = "Render layer [ {} ] is not " "renderable".format( - layer.name() - ) - self.log.warning(msg) - - # detect if there are sets (products) to attach render to - sets = cmds.sets(objset, query=True) or [] - attach_to = [] - for s in sets: - if not cmds.attributeQuery("productType", node=s, exists=True): - continue - - attach_to.append( - { - "version": None, # we need integrator for that - "productName": s, - "productType": cmds.getAttr("{}.productType".format(s)), - } - ) - self.log.debug(" -> attach render to: {}".format(s)) - - layer_name = layer.name() - - # collect all frames we are expecting to be rendered - # return all expected files for all cameras and aovs in given - # frame range - try: - layer_render_products = get_layer_render_products(layer.name()) - except UnsupportedRendererException as exc: - raise KnownPublishError(exc) - render_products = layer_render_products.layer_data.products - if not render_products: - self.log.error( - "No render products generated for '%s'. You might not have " - "any render camera in the renderlayer or render end frame is " - "lower than start frame.", - instance.name - ) - expected_files = [] - multipart = False - for product in render_products: - if product.multipart: - multipart = True - product_name = product.productName - if product.camera and layer_render_products.has_camera_token(): - product_name = "{}{}".format( - product.camera, - "_{}".format(product_name) if product_name else "") - expected_files.append( - { - product_name: layer_render_products.get_files( - product) - }) - - has_cameras = any(product.camera for product in render_products) - if render_products and not has_cameras: - self.log.error( - "No render cameras found for: %s", - instance - ) - if not expected_files: - self.log.warning( - "No file names were generated, this is a bug.") - - for render_product in render_products: - self.log.debug(render_product) - self.log.debug("multipart: {}".format(multipart)) - self.log.debug("expected files: {}".format( - json.dumps(expected_files, indent=4, sort_keys=True) - )) - - # if we want to attach render to product, check if we have AOV's - # in expectedFiles. If so, raise error as we cannot attach AOV - # (considered to be product on its own) to another product - if attach_to: - assert isinstance(expected_files, list), ( - "attaching multiple AOVs or renderable cameras to " - "product is not supported" - ) - - # append full path - image_directory = os.path.join( - cmds.workspace(query=True, rootDirectory=True), - cmds.workspace(fileRuleEntry="images") - ) - # replace relative paths with absolute. Render products are - # returned as list of dictionaries. 
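For context, a hedged sketch of the `expectedFiles` structure assembled above, one dictionary per render product; the AOV names and file names are hypothetical:

    expected_files = [
        {"beauty": ["maya/sh010/beauty/sh010_beauty.1001.exr",
                    "maya/sh010/beauty/sh010_beauty.1002.exr"]},
        {"crypto": ["maya/sh010/crypto/sh010_crypto.1001.exr",
                    "maya/sh010/crypto/sh010_crypto.1002.exr"]},
    ]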
- publish_meta_path = "NOT-SET" - aov_dict = {} - for aov in expected_files: - full_paths = [] - aov_first_key = list(aov.keys())[0] - for file in aov[aov_first_key]: - full_path = os.path.join(image_directory, file) - full_path = full_path.replace("\\", "/") - full_paths.append(full_path) - publish_meta_path = os.path.dirname(full_path) - aov_dict[aov_first_key] = full_paths - full_exp_files = [aov_dict] - - frame_start_render = int(self.get_render_attribute( - "startFrame", layer=layer_name)) - frame_end_render = int(self.get_render_attribute( - "endFrame", layer=layer_name)) - - if (int(context.data["frameStartHandle"]) == frame_start_render - and int(context.data["frameEndHandle"]) == frame_end_render): # noqa: W503, E501 - - handle_start = context.data["handleStart"] - handle_end = context.data["handleEnd"] - frame_start = context.data["frameStart"] - frame_end = context.data["frameEnd"] - frame_start_handle = context.data["frameStartHandle"] - frame_end_handle = context.data["frameEndHandle"] - else: - handle_start = 0 - handle_end = 0 - frame_start = frame_start_render - frame_end = frame_end_render - frame_start_handle = frame_start_render - frame_end_handle = frame_end_render - - # find common path to store metadata - # so if image prefix is branching to many directories - # metadata file will be located in top-most common - # directory. - # TODO: use `os.path.commonpath()` after switch to Python 3 - publish_meta_path = os.path.normpath(publish_meta_path) - common_publish_meta_path = os.path.splitdrive( - publish_meta_path)[0] - if common_publish_meta_path: - common_publish_meta_path += os.path.sep - for part in publish_meta_path.replace( - common_publish_meta_path, "").split(os.path.sep): - common_publish_meta_path = os.path.join( - common_publish_meta_path, part) - if part == layer_name: - break - - # TODO: replace this terrible linux hotfix with real solution :) - if platform.system().lower() in ["linux", "darwin"]: - common_publish_meta_path = "/" + common_publish_meta_path - - self.log.debug( - "Publish meta path: {}".format(common_publish_meta_path) - ) - - # Get layer specific settings, might be overrides - colorspace_data = lib.get_color_management_preferences() - data = { - "farm": True, - "attachTo": attach_to, - - "multipartExr": multipart, - "review": instance.data.get("review") or False, - - # Frame range - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartHandle": frame_start_handle, - "frameEndHandle": frame_end_handle, - "byFrameStep": int( - self.get_render_attribute("byFrameStep", - layer=layer_name)), - - # Renderlayer - "renderer": self.get_render_attribute( - "currentRenderer", layer=layer_name).lower(), - "setMembers": layer._getLegacyNodeName(), # legacy renderlayer - "renderlayer": layer_name, - - # todo: is `time` and `author` still needed? 
- "time": get_formatted_current_time(), - "author": context.data["user"], - - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath, - "expectedFiles": full_exp_files, - "publishRenderMetadataFolder": common_publish_meta_path, - "renderProducts": layer_render_products, - "resolutionWidth": lib.get_attr_in_layer( - "defaultResolution.width", layer=layer_name - ), - "resolutionHeight": lib.get_attr_in_layer( - "defaultResolution.height", layer=layer_name - ), - "pixelAspect": lib.get_attr_in_layer( - "defaultResolution.pixelAspect", layer=layer_name - ), - - # todo: Following are likely not needed due to collecting from the - # instance itself if they are attribute definitions - "tileRendering": instance.data.get("tileRendering") or False, # noqa: E501 - "tilesX": instance.data.get("tilesX") or 2, - "tilesY": instance.data.get("tilesY") or 2, - "convertToScanline": instance.data.get( - "convertToScanline") or False, - "useReferencedAovs": instance.data.get( - "useReferencedAovs") or instance.data.get( - "vrayUseReferencedAovs") or False, - "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 - "renderSetupIncludeLights": instance.data.get( - "renderSetupIncludeLights" - ), - "colorspaceConfig": colorspace_data["config"], - "colorspaceDisplay": colorspace_data["display"], - "colorspaceView": colorspace_data["view"], - } - - manager = context.data["ayonAddonsManager"] - if manager.get_enabled_addon("royalrender") is not None: - data["rrPathName"] = instance.data.get("rrPathName") - self.log.debug(data["rrPathName"]) - - if self.sync_workfile_version: - data["version"] = context.data["version"] - for _instance in context: - if _instance.data["productType"] == "workfile": - _instance.data["version"] = context.data["version"] - - # Define nice label - label = "{0} ({1})".format(layer_name, instance.data["folderPath"]) - label += " [{0}-{1}]".format( - int(data["frameStartHandle"]), int(data["frameEndHandle"]) - ) - data["label"] = label - - # Override frames should be False if extendFrames is False. This is - # to ensure it doesn't go off doing crazy unpredictable things - extend_frames = instance.data.get("extendFrames", False) - if not extend_frames: - instance.data["overrideExistingFrame"] = False - - # Update the instance - instance.data.update(data) - - @staticmethod - def get_render_attribute(attr, layer): - """Get attribute from render options. - - Args: - attr (str): name of attribute to be looked up - layer (str): name of render layer - - Returns: - Attribute value - - """ - return lib.get_attr_in_layer( - "defaultRenderGlobals.{}".format(attr), layer=layer - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_render_layer_aovs.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_render_layer_aovs.py deleted file mode 100644 index dd4a8fefe5..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_render_layer_aovs.py +++ /dev/null @@ -1,95 +0,0 @@ -import pyblish.api -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class CollectRenderLayerAOVS(plugin.MayaInstancePlugin): - """Collect all render layer's AOVs / Render Elements that will render. - - This collector is important to be able to Extend Frames. - - Technical information: - Each renderer uses different logic to work with render passes. 
- VRay - RenderElement - Simple node connection to the actual renderLayer node - - Arnold - AOV: - Uses its own render settings node and connects an aiOAV to it - - Redshift - AOV: - Uses its own render settings node and RedshiftAOV node. It is not - connected but all AOVs are enabled for all render layers by default. - - """ - - order = pyblish.api.CollectorOrder + 0.02 - label = "Render Elements / AOVs" - families = ["renderlayer"] - - def process(self, instance): - - # Check if Extend Frames is toggled - if not instance.data("extendFrames", False): - return - - # Get renderer - renderer = instance.data["renderer"] - self.log.debug("Renderer found: {}".format(renderer)) - - rp_node_types = {"vray": ["VRayRenderElement", "VRayRenderElementSet"], - "arnold": ["aiAOV"], - "redshift": ["RedshiftAOV"]} - - if renderer not in rp_node_types.keys(): - self.log.error("Unsupported renderer found: '{}'".format(renderer)) - return - - result = [] - - # Collect all AOVs / Render Elements - layer = instance.data["renderlayer"] - node_type = rp_node_types[renderer] - render_elements = cmds.ls(type=node_type) - - # Check if AOVs / Render Elements are enabled - for element in render_elements: - enabled = lib.get_attr_in_layer("{}.enabled".format(element), - layer=layer) - if not enabled: - continue - - pass_name = self.get_pass_name(renderer, element) - render_pass = "%s.%s" % (instance.data["productName"], pass_name) - - result.append(render_pass) - - self.log.debug("Found {} render elements / AOVs for " - "'{}'".format(len(result), instance.data["productName"])) - - instance.data["renderPasses"] = result - - def get_pass_name(self, renderer, node): - - if renderer == "vray": - - # Get render element pass type - vray_node_attr = next(attr for attr in cmds.listAttr(node) - if attr.startswith("vray_name")) - pass_type = vray_node_attr.rsplit("_", 1)[-1] - - # Support V-Ray extratex explicit name (if set by user) - if pass_type == "extratex": - explicit_attr = "{}.vray_explicit_name_extratex".format(node) - explicit_name = cmds.getAttr(explicit_attr) - if explicit_name: - return explicit_name - - # Node type is in the attribute name but we need to check if value - # of the attribute as it can be changed - return cmds.getAttr("{}.{}".format(node, vray_node_attr)) - - elif renderer in ["arnold", "redshift"]: - return cmds.getAttr("{}.name".format(node)) - else: - raise RuntimeError("Unsupported renderer: '{}'".format(renderer)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_renderable_camera.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_renderable_camera.py deleted file mode 100644 index fbd181de3e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_renderable_camera.py +++ /dev/null @@ -1,31 +0,0 @@ -import pyblish.api -from ayon_maya.api.lib_rendersetup import get_attr_in_layer -from ayon_maya.api import plugin -from maya import cmds - - -class CollectRenderableCamera(plugin.MayaInstancePlugin): - """Collect the renderable camera(s) for the render layer""" - - # Offset to be after renderlayer collection. 
- order = pyblish.api.CollectorOrder + 0.02 - label = "Collect Renderable Camera(s)" - hosts = ["maya"] - families = ["vrayscene_layer", - "renderlayer"] - - def process(self, instance): - if "vrayscene_layer" in instance.data.get("families", []): - layer = instance.data.get("layer") - else: - layer = instance.data["renderlayer"] - - cameras = cmds.ls(type="camera", long=True) - renderable = [cam for cam in cameras if - get_attr_in_layer("{}.renderable".format(cam), layer)] - - self.log.debug( - "Found renderable cameras %s: %s", len(renderable), renderable - ) - - instance.data["cameras"] = renderable diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_review.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_review.py deleted file mode 100644 index 8a50c2f0a9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_review.py +++ /dev/null @@ -1,185 +0,0 @@ -import ayon_api -import pyblish.api -from ayon_core.pipeline import KnownPublishError -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds, mel - - -class CollectReview(plugin.MayaInstancePlugin): - """Collect Review data - - """ - - order = pyblish.api.CollectorOrder + 0.3 - label = 'Collect Review Data' - families = ["review"] - - def process(self, instance): - - # Get panel. - instance.data["panel"] = cmds.playblast( - activeEditor=True - ).rsplit("|", 1)[-1] - - # get cameras - members = instance.data['setMembers'] - self.log.debug('members: {}'.format(members)) - cameras = cmds.ls(members, long=True, dag=True, cameras=True) - camera = cameras[0] if cameras else None - - context = instance.context - objectset = { - i.data.get("instance_node") for i in context - } - - # Collect display lights. - display_lights = instance.data.get("displayLights", "default") - if display_lights == "project_settings": - settings = instance.context.data["project_settings"] - settings = settings["maya"]["publish"]["ExtractPlayblast"] - settings = settings["capture_preset"]["ViewportOptions"] - display_lights = settings["displayLights"] - - # Collect camera focal length. - burninDataMembers = instance.data.get("burninDataMembers", {}) - if camera is not None: - attr = camera + ".focalLength" - if lib.get_attribute_input(attr): - start = instance.data["frameStart"] - end = instance.data["frameEnd"] + 1 - time_range = range(int(start), int(end)) - focal_length = [cmds.getAttr(attr, time=t) for t in time_range] - else: - focal_length = cmds.getAttr(attr) - - burninDataMembers["focalLength"] = focal_length - - # Account for nested instances like model. - reviewable_products = list(set(members) & objectset) - if reviewable_products: - if len(reviewable_products) > 1: - raise KnownPublishError( - "Multiple attached products for review are not supported. 
" - "Attached: {}".format(", ".join(reviewable_products)) - ) - - reviewable_product = reviewable_products[0] - self.log.debug( - "Product attached to review: {}".format(reviewable_product) - ) - - # Find the relevant publishing instance in the current context - reviewable_inst = next(inst for inst in context - if inst.name == reviewable_product) - data = reviewable_inst.data - - self.log.debug( - 'Adding review family to {}'.format(reviewable_product) - ) - if data.get('families'): - data['families'].append('review') - else: - data['families'] = ['review'] - - data["cameras"] = cameras - data['review_camera'] = camera - data['frameStartFtrack'] = instance.data["frameStartHandle"] - data['frameEndFtrack'] = instance.data["frameEndHandle"] - data['frameStartHandle'] = instance.data["frameStartHandle"] - data['frameEndHandle'] = instance.data["frameEndHandle"] - data['handleStart'] = instance.data["handleStart"] - data['handleEnd'] = instance.data["handleEnd"] - data["frameStart"] = instance.data["frameStart"] - data["frameEnd"] = instance.data["frameEnd"] - data['step'] = instance.data['step'] - # this (with other time related data) should be set on - # representations. Once plugins like Extract Review start - # using representations, this should be removed from here - # as Extract Playblast is already adding fps to representation. - data['fps'] = context.data['fps'] - data['review_width'] = instance.data['review_width'] - data['review_height'] = instance.data['review_height'] - data["isolate"] = instance.data["isolate"] - data["panZoom"] = instance.data.get("panZoom", False) - data["panel"] = instance.data["panel"] - data["displayLights"] = display_lights - data["burninDataMembers"] = burninDataMembers - - for key, value in instance.data["publish_attributes"].items(): - data["publish_attributes"][key] = value - - # The review instance must be active - cmds.setAttr(str(instance) + '.active', 1) - - instance.data['remove'] = True - - else: - project_name = instance.context.data["projectName"] - folder_entity = instance.context.data["folderEntity"] - task = instance.context.data["task"] - legacy_product_name = task + 'Review' - product_entity = ayon_api.get_product_by_name( - project_name, - legacy_product_name, - folder_entity["id"], - fields={"id"} - ) - if product_entity: - self.log.debug("Existing products found, keep legacy name.") - instance.data["productName"] = legacy_product_name - - instance.data["cameras"] = cameras - instance.data['review_camera'] = camera - instance.data['frameStartFtrack'] = \ - instance.data["frameStartHandle"] - instance.data['frameEndFtrack'] = \ - instance.data["frameEndHandle"] - instance.data["displayLights"] = display_lights - instance.data["burninDataMembers"] = burninDataMembers - # this (with other time related data) should be set on - # representations. Once plugins like Extract Review start - # using representations, this should be removed from here - # as Extract Playblast is already adding fps to representation. 
- instance.data["fps"] = instance.context.data["fps"] - - # make ftrack publishable - instance.data.setdefault("families", []).append('ftrack') - - cmds.setAttr(str(instance) + '.active', 1) - - # Collect audio - playback_slider = mel.eval('$tmpVar=$gPlayBackSlider') - audio_name = cmds.timeControl(playback_slider, - query=True, - sound=True) - display_sounds = cmds.timeControl( - playback_slider, query=True, displaySound=True - ) - - def get_audio_node_data(node): - return { - "offset": cmds.getAttr("{}.offset".format(node)), - "filename": cmds.getAttr("{}.filename".format(node)) - } - - audio_data = [] - - if audio_name: - audio_data.append(get_audio_node_data(audio_name)) - - elif display_sounds: - start_frame = int(cmds.playbackOptions(query=True, min=True)) - end_frame = int(cmds.playbackOptions(query=True, max=True)) - - for node in cmds.ls(type="audio"): - # Check if frame range and audio range intersections, - # for whether to include this audio node or not. - duration = cmds.getAttr("{}.duration".format(node)) - start_audio = cmds.getAttr("{}.offset".format(node)) - end_audio = start_audio + duration - - if start_audio <= end_frame and end_audio > start_frame: - audio_data.append(get_audio_node_data(node)) - - instance.data["audio"] = audio_data diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_rig_sets.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_rig_sets.py deleted file mode 100644 index 98f4d38ab2..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_rig_sets.py +++ /dev/null @@ -1,40 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds - - -class CollectRigSets(plugin.MayaInstancePlugin): - """Ensure rig contains pipeline-critical content - - Every rig must contain at least two object sets: - "controls_SET" - Set of all animatable controls - "out_SET" - Set of all cacheable meshes - - """ - - order = pyblish.api.CollectorOrder + 0.05 - label = "Collect Rig Sets" - families = ["rig"] - - accepted_output = ["mesh", "transform"] - accepted_controllers = ["transform"] - - def process(self, instance): - - # Find required sets by suffix - searching = {"controls_SET", "out_SET", - "skeletonAnim_SET", "skeletonMesh_SET"} - found = {} - for node in cmds.ls(instance, exactType="objectSet"): - for suffix in searching: - if node.endswith(suffix): - found[suffix] = node - searching.remove(suffix) - break - if not searching: - break - - self.log.debug("Found sets: {}".format(found)) - rig_sets = instance.data.setdefault("rig_sets", {}) - for name, objset in found.items(): - rig_sets[name] = objset diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_skeleton_mesh.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_skeleton_mesh.py deleted file mode 100644 index aaec4cb6d9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_skeleton_mesh.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds # noqa - - -class CollectSkeletonMesh(plugin.MayaInstancePlugin): - """Collect Static Rig Data for FBX Extractor.""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Skeleton Mesh" - families = ["rig"] - - def process(self, instance): - skeleton_mesh_set = instance.data["rig_sets"].get( - "skeletonMesh_SET") - if not skeleton_mesh_set: - self.log.debug( - "No skeletonMesh_SET found. " - "Skipping collecting of skeleton mesh..." 
- ) - return - - # Store current frame to ensure single frame export - frame = cmds.currentTime(query=True) - instance.data["frameStart"] = frame - instance.data["frameEnd"] = frame - - instance.data["skeleton_mesh"] = [] - - skeleton_mesh_content = cmds.sets( - skeleton_mesh_set, query=True) or [] - if not skeleton_mesh_content: - self.log.debug( - "No object nodes in skeletonMesh_SET. " - "Skipping collecting of skeleton mesh..." - ) - return - instance.data["families"] += ["rig.fbx"] - instance.data["skeleton_mesh"] = skeleton_mesh_content - self.log.debug( - "Collected skeletonMesh_SET members: {}".format( - skeleton_mesh_content - )) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_skeletalmesh.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_skeletalmesh.py deleted file mode 100644 index 32515a5957..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_skeletalmesh.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -from maya import cmds # noqa -import pyblish.api -from ayon_maya.api import plugin - - -class CollectUnrealSkeletalMesh(plugin.MayaInstancePlugin): - """Collect Unreal Skeletal Mesh.""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Unreal Skeletal Meshes" - families = ["skeletalMesh"] - - def process(self, instance): - frame = cmds.currentTime(query=True) - instance.data["frameStart"] = frame - instance.data["frameEnd"] = frame - - geo_sets = [ - i for i in instance[:] - if i.lower().startswith("geometry_set") - ] - - joint_sets = [ - i for i in instance[:] - if i.lower().startswith("joints_set") - ] - - instance.data["geometry"] = [] - instance.data["joints"] = [] - - for geo_set in geo_sets: - geo_content = cmds.ls(cmds.sets(geo_set, query=True), long=True) - if geo_content: - instance.data["geometry"] += geo_content - - for join_set in joint_sets: - join_content = cmds.ls(cmds.sets(join_set, query=True), long=True) - if join_content: - instance.data["joints"] += join_content diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_staticmesh.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_staticmesh.py deleted file mode 100644 index 35295d6e3b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_unreal_staticmesh.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -from maya import cmds # noqa -import pyblish.api -from ayon_maya.api import plugin -from pprint import pformat - - -class CollectUnrealStaticMesh(plugin.MayaInstancePlugin): - """Collect Unreal Static Mesh.""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Unreal Static Meshes" - families = ["staticMesh"] - - def process(self, instance): - geometry_set = [ - i for i in instance - if i.startswith("geometry_SET") - ] - instance.data["geometryMembers"] = cmds.sets( - geometry_set, query=True) - - self.log.debug("geometry: {}".format( - pformat(instance.data.get("geometryMembers")))) - - collision_set = [ - i for i in instance - if i.startswith("collisions_SET") - ] - instance.data["collisionMembers"] = cmds.sets( - collision_set, query=True) - - self.log.debug("collisions: {}".format( - pformat(instance.data.get("collisionMembers")))) - - frame = cmds.currentTime(query=True) - instance.data["frameStart"] = frame - instance.data["frameEnd"] = frame diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_user_defined_attributes.py 
b/server_addon/maya/client/ayon_maya/plugins/publish/collect_user_defined_attributes.py deleted file mode 100644 index e468636def..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_user_defined_attributes.py +++ /dev/null @@ -1,40 +0,0 @@ -from maya import cmds -from ayon_maya.api import plugin -import pyblish.api - - -class CollectUserDefinedAttributes(plugin.MayaInstancePlugin): - """Collect user defined attributes for nodes in instance.""" - - order = pyblish.api.CollectorOrder + 0.45 - families = ["pointcache", "animation", "usd"] - label = "Collect User Defined Attributes" - - def process(self, instance): - - # Collect user defined attributes. - if not instance.data["creator_attributes"].get( - "includeUserDefinedAttributes" - ): - return - - if "out_hierarchy" in instance.data: - # animation family - nodes = instance.data["out_hierarchy"] - else: - nodes = instance[:] - if not nodes: - return - - shapes = cmds.listRelatives(nodes, shapes=True, fullPath=True) or [] - nodes = set(nodes).union(shapes) - - attrs = cmds.listAttr(list(nodes), userDefined=True) or [] - user_defined_attributes = list(sorted(set(attrs))) - instance.data["userDefinedAttributes"] = user_defined_attributes - - self.log.debug( - "Collected user defined attributes: {}".format( - ", ".join(user_defined_attributes) - ) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayproxy.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayproxy.py deleted file mode 100644 index a5491e5f9b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayproxy.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect Vray Proxy.""" -import pyblish.api -from ayon_maya.api import plugin - - -class CollectVrayProxy(plugin.MayaInstancePlugin): - """Collect Vray Proxy instance. - - Add `pointcache` family for it. - """ - order = pyblish.api.CollectorOrder + 0.01 - label = "Collect Vray Proxy" - families = ["vrayproxy"] - - def process(self, instance): - """Collector entry point.""" - if not instance.data.get("families"): - instance.data["families"] = [] - - if instance.data.get("vrmesh"): - instance.data["families"].append("vrayproxy.vrmesh") - - if instance.data.get("alembic"): - instance.data["families"].append("vrayproxy.alembic") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayscene.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayscene.py deleted file mode 100644 index f14735574e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_vrayscene.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect Vray Scene and prepare it for extraction and publishing.""" -import pyblish.api - -from ayon_core.lib import get_formatted_current_time -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class CollectVrayScene(plugin.MayaInstancePlugin): - """Collect Vray Scene. - - If export on farm is checked, job is created to export it. 
-    """
-
-    order = pyblish.api.CollectorOrder + 0.01
-    label = "Collect Vray Scene"
-    families = ["vrayscene"]
-
-    def process(self, instance):
-        """Collector entry point."""
-
-        context = instance.context
-
-        layer = instance.data["transientData"]["layer"]
-        layer_name = layer.name()
-
-        renderer = self.get_render_attribute("currentRenderer",
-                                             layer=layer_name)
-        if renderer != "vray":
-            self.log.warning("Layer '{}' renderer is not set to V-Ray".format(
-                layer_name
-            ))
-
-        # collect all frames we are expecting to be rendered
-        frame_start_render = int(self.get_render_attribute(
-            "startFrame", layer=layer_name))
-        frame_end_render = int(self.get_render_attribute(
-            "endFrame", layer=layer_name))
-
-        if (int(context.data['frameStartHandle']) == frame_start_render
-                and int(context.data['frameEndHandle']) == frame_end_render):  # noqa: W503, E501
-
-            handle_start = context.data['handleStart']
-            handle_end = context.data['handleEnd']
-            frame_start = context.data['frameStart']
-            frame_end = context.data['frameEnd']
-            frame_start_handle = context.data['frameStartHandle']
-            frame_end_handle = context.data['frameEndHandle']
-        else:
-            handle_start = 0
-            handle_end = 0
-            frame_start = frame_start_render
-            frame_end = frame_end_render
-            frame_start_handle = frame_start_render
-            frame_end_handle = frame_end_render
-
-        # Get layer specific settings, might be overrides
-        product_type = "vrayscene_layer"
-        data = {
-            "productName": layer_name,
-            "layer": layer_name,
-            # TODO: This likely needs fixing now
-            #   Before refactor: cmds.sets(layer, q=True) or ["*"]
-            "setMembers": ["*"],
-            "review": False,
-            "publish": True,
-            "handleStart": handle_start,
-            "handleEnd": handle_end,
-            "frameStart": frame_start,
-            "frameEnd": frame_end,
-            "frameStartHandle": frame_start_handle,
-            "frameEndHandle": frame_end_handle,
-            "byFrameStep": int(
-                self.get_render_attribute("byFrameStep",
-                                          layer=layer_name)),
-            "renderer": renderer,
-            # instance product type
-            "productType": product_type,
-            "family": product_type,
-            "families": [product_type],
-            "time": get_formatted_current_time(),
-            "author": context.data["user"],
-            # Add source to allow tracing back to the scene from
-            # which was submitted originally
-            "source": context.data["currentFile"].replace("\\", "/"),
-            "resolutionWidth": lib.get_attr_in_layer(
-                "defaultResolution.width", layer=layer_name
-            ),
-            "resolutionHeight": lib.get_attr_in_layer(
-                "defaultResolution.height", layer=layer_name
-            ),
-            "pixelAspect": lib.get_attr_in_layer(
-                "defaultResolution.pixelAspect", layer=layer_name
-            ),
-            "priority": instance.data.get("priority"),
-            "useMultipleSceneFiles": instance.data.get(
-                "vraySceneMultipleFiles")
-        }
-
-        instance.data.update(data)
-
-        # Define nice label
-        label = "{0} ({1})".format(layer_name, instance.data["folderPath"])
-        label += " [{0}-{1}]".format(
-            int(data["frameStartHandle"]), int(data["frameEndHandle"])
-        )
-        instance.data["label"] = label
-
-    def get_render_attribute(self, attr, layer):
-        """Get attribute from render options.
-
-        Args:
-            attr (str): name of attribute to be looked up.
- - Returns: - Attribute value - - """ - return lib.get_attr_in_layer( - "defaultRenderGlobals.{}".format(attr), layer=layer - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_workfile.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_workfile.py deleted file mode 100644 index fa0689849a..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import pyblish.api -from ayon_maya.api import plugin - - -class CollectWorkfileData(plugin.MayaInstancePlugin): - """Inject data into Workfile instance""" - - order = pyblish.api.CollectorOrder - 0.01 - label = "Maya Workfile" - families = ["workfile"] - - def process(self, instance): - """Inject the current working file""" - - context = instance.context - current_file = instance.context.data['currentFile'] - folder, file = os.path.split(current_file) - filename, ext = os.path.splitext(file) - - data = { # noqa - "setMembers": [current_file], - "frameStart": context.data['frameStart'], - "frameEnd": context.data['frameEnd'], - "handleStart": context.data['handleStart'], - "handleEnd": context.data['handleEnd'] - } - - data['representations'] = [{ - 'name': ext.lstrip("."), - 'ext': ext.lstrip("."), - 'files': file, - "stagingDir": folder, - }] - - instance.data.update(data) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_workscene_fps.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_workscene_fps.py deleted file mode 100644 index a87483e67f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_workscene_fps.py +++ /dev/null @@ -1,15 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin -from maya import mel - - -class CollectWorksceneFPS(plugin.MayaContextPlugin): - """Get the FPS of the work scene""" - - label = "Workscene FPS" - order = pyblish.api.CollectorOrder - - def process(self, context): - fps = mel.eval('currentTimeUnitToFPS()') - self.log.info("Workscene FPS: %s" % fps) - context.data.update({"fps": fps}) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_xgen.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_xgen.py deleted file mode 100644 index 9ef17f3399..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_xgen.py +++ /dev/null @@ -1,71 +0,0 @@ -import os - -import pyblish.api -from ayon_maya.api.lib import get_attribute_input -from ayon_maya.api import plugin -from maya import cmds - - -class CollectXgen(plugin.MayaInstancePlugin): - """Collect Xgen""" - - order = pyblish.api.CollectorOrder + 0.499999 - label = "Collect Xgen" - families = ["xgen"] - - def process(self, instance): - data = { - "xgmPalettes": cmds.ls(instance, type="xgmPalette", long=True), - "xgmDescriptions": cmds.ls( - instance, type="xgmDescription", long=True - ), - "xgmSubdPatches": cmds.ls(instance, type="xgmSubdPatch", long=True) - } - data["xgenNodes"] = ( - data["xgmPalettes"] + - data["xgmDescriptions"] + - data["xgmSubdPatches"] - ) - - if data["xgmPalettes"]: - data["xgmPalette"] = data["xgmPalettes"][0] - - data["xgenConnections"] = set() - for node in data["xgmSubdPatches"]: - connected_transform = get_attribute_input( - node + ".transform" - ).split(".")[0] - data["xgenConnections"].add(connected_transform) - - # Collect all files under palette root as resources. 
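-        # An illustration of the path handling below, with hypothetical
-        # values: xgDataPath may hold several os.pathsep separated entries,
-        # e.g. "${PROJECT}xgen/collections/char_hair;/mnt/cache/xgen", of
-        # which only the first is used, with ${PROJECT} expanded to the
-        # palette's xgProjectPath.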
- import xgenm - - data_path = xgenm.getAttr( - "xgDataPath", data["xgmPalette"].replace("|", "") - ).split(os.pathsep)[0] - data_path = data_path.replace( - "${PROJECT}", - xgenm.getAttr("xgProjectPath", data["xgmPalette"].replace("|", "")) - ) - transfers = [] - - # Since we are duplicating this palette when extracting we predict that - # the name will be the basename without namespaces. - predicted_palette_name = data["xgmPalette"].split(":")[-1] - predicted_palette_name = predicted_palette_name.replace("|", "") - - for root, _, files in os.walk(data_path): - for file in files: - source = os.path.join(root, file).replace("\\", "/") - destination = os.path.join( - instance.data["resourcesDir"], - "collections", - predicted_palette_name, - source.replace(data_path, "")[1:] - ) - transfers.append((source, destination.replace("\\", "/"))) - - data["transfers"] = transfers - - self.log.debug(data) - instance.data.update(data) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_cache.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_cache.py deleted file mode 100644 index 44de461126..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_cache.py +++ /dev/null @@ -1,92 +0,0 @@ -import pyblish.api -from ayon_maya.api import lib -from ayon_maya.api import plugin -from ayon_maya.api.yeti import get_yeti_user_variables -from maya import cmds - -SETTINGS = { - # Preview - "displayOutput", - "colorR", "colorG", "colorB", - "viewportDensity", - "viewportWidth", - "viewportLength", - # Render attributes - "renderDensity", - "renderWidth", - "renderLength", - "increaseRenderBounds", - "imageSearchPath", - # Pipeline specific - "cbId" -} - - -class CollectYetiCache(plugin.MayaInstancePlugin): - """Collect all information of the Yeti caches - - The information contains the following attributes per Yeti node - - - "renderDensity" - - "renderWidth" - - "renderLength" - - "increaseRenderBounds" - - "imageSearchPath" - - Other information is the name of the transform and its `cbId` - """ - - order = pyblish.api.CollectorOrder + 0.45 - label = "Collect Yeti Cache" - families = ["yetiRig", "yeticache", "yeticacheUE"] - - def process(self, instance): - - # Collect fur settings - settings = {"nodes": []} - - # Get yeti nodes and their transforms - yeti_shapes = cmds.ls(instance, type="pgYetiMaya") - for shape in yeti_shapes: - - # Get specific node attributes - attr_data = {} - for attr in SETTINGS: - # Ignore non-existing attributes with a warning, e.g. 
cbId - # if they have not been generated yet - if not cmds.attributeQuery(attr, node=shape, exists=True): - self.log.warning( - "Attribute '{}' not found on Yeti node: {}".format( - attr, shape - ) - ) - continue - - current = cmds.getAttr("%s.%s" % (shape, attr)) - # change None to empty string as Maya doesn't support - # NoneType in attributes - if current is None: - current = "" - attr_data[attr] = current - - # Get user variable attributes - user_variable_attrs = { - attr: lib.get_attribute("{}.{}".format(shape, attr)) - for attr in get_yeti_user_variables(shape) - } - - # Get transform data - parent = cmds.listRelatives(shape, parent=True)[0] - transform_data = {"name": parent, "cbId": lib.get_id(parent)} - - shape_data = { - "transform": transform_data, - "name": shape, - "cbId": lib.get_id(shape), - "attrs": attr_data, - "user_variables": user_variable_attrs - } - - settings["nodes"].append(shape_data) - - instance.data["fursettings"] = settings diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_rig.py b/server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_rig.py deleted file mode 100644 index dbdc10789f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_rig.py +++ /dev/null @@ -1,306 +0,0 @@ -import os -import re - -import pyblish.api -from ayon_core.pipeline.publish import KnownPublishError -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - -SETTINGS = {"renderDensity", - "renderWidth", - "renderLength", - "increaseRenderBounds", - "imageSearchPath", - "cbId"} - - -class CollectYetiRig(plugin.MayaInstancePlugin): - """Collect all information of the Yeti Rig""" - - order = pyblish.api.CollectorOrder + 0.4 - label = "Collect Yeti Rig" - families = ["yetiRig"] - - def process(self, instance): - - assert "input_SET" in instance.data["setMembers"], ( - "Yeti Rig must have an input_SET") - - input_connections = self.collect_input_connections(instance) - - # Collect any textures if used - yeti_resources = [] - yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True) - for node in yeti_nodes: - # Get Yeti resources (textures) - resources = self.get_yeti_resources(node) - yeti_resources.extend(resources) - - instance.data["rigsettings"] = {"inputs": input_connections} - - instance.data["resources"] = yeti_resources - - # Force frame range for yeti cache export for the rig - start = cmds.playbackOptions(query=True, animationStartTime=True) - for key in ["frameStart", "frameEnd", - "frameStartHandle", "frameEndHandle"]: - instance.data[key] = start - instance.data["preroll"] = 0 - - def collect_input_connections(self, instance): - """Collect the inputs for all nodes in the input_SET""" - - # Get the input meshes information - input_content = cmds.ls(cmds.sets("input_SET", query=True), long=True) - - # Include children - input_content += cmds.listRelatives(input_content, - allDescendents=True, - fullPath=True) or [] - - # Ignore intermediate objects - input_content = cmds.ls(input_content, long=True, noIntermediate=True) - if not input_content: - return [] - - # Store all connections - connections = cmds.listConnections(input_content, - source=True, - destination=False, - connections=True, - # Only allow inputs from dagNodes - # (avoid display layers, etc.) 
- type="dagNode", - plugs=True) or [] - connections = cmds.ls(connections, long=True) # Ensure long names - - inputs = [] - for dest, src in lib.pairwise(connections): - source_node, source_attr = src.split(".", 1) - dest_node, dest_attr = dest.split(".", 1) - - # Ensure the source of the connection is not included in the - # current instance's hierarchy. If so, we ignore that connection - # as we will want to preserve it even over a publish. - if source_node in instance: - self.log.debug("Ignoring input connection between nodes " - "inside the instance: %s -> %s" % (src, dest)) - continue - - inputs.append({"connections": [source_attr, dest_attr], - "sourceID": lib.get_id(source_node), - "destinationID": lib.get_id(dest_node)}) - - return inputs - - def get_yeti_resources(self, node): - """Get all resource file paths - - If a texture is a sequence it gathers all sibling files to ensure - the texture sequence is complete. - - References can be used in the Yeti graph, this means that it is - possible to load previously caches files. The information will need - to be stored and, if the file not publish, copied to the resource - folder. - - Args: - node (str): node name of the pgYetiMaya node - - Returns: - list - """ - resources = [] - - image_search_paths = cmds.getAttr("{}.imageSearchPath".format(node)) - if image_search_paths: - - # TODO: Somehow this uses OS environment path separator, `:` vs `;` - # Later on check whether this is pipeline OS cross-compatible. - image_search_paths = [p for p in - image_search_paths.split(os.path.pathsep) if p] - - # find all ${TOKEN} tokens and replace them with $TOKEN env. variable - image_search_paths = self._replace_tokens(image_search_paths) - - # List all related textures - texture_nodes = cmds.pgYetiGraph( - node, listNodes=True, type="texture") - texture_filenames = [ - cmds.pgYetiGraph( - node, node=texture_node, - param="file_name", getParamValue=True) - for texture_node in texture_nodes - ] - self.log.debug("Found %i texture(s)" % len(texture_filenames)) - - # Get all reference nodes - reference_nodes = cmds.pgYetiGraph(node, - listNodes=True, - type="reference") - self.log.debug("Found %i reference node(s)" % len(reference_nodes)) - - # Collect all texture files - # find all ${TOKEN} tokens and replace them with $TOKEN env. variable - texture_filenames = self._replace_tokens(texture_filenames) - for texture in texture_filenames: - - files = [] - if os.path.isabs(texture): - self.log.debug("Texture is absolute path, ignoring " - "image search paths for: %s" % texture) - files = self.search_textures(texture) - else: - for root in image_search_paths: - filepath = os.path.join(root, texture) - files = self.search_textures(filepath) - if files: - # Break out on first match in search paths.. - break - - if not files: - raise KnownPublishError( - "No texture found for: %s " - "(searched: %s)" % (texture, image_search_paths)) - - item = { - "files": files, - "source": texture, - "node": node - } - - resources.append(item) - - # For now validate that every texture has at least a single file - # resolved. Since a 'resource' does not have the requirement of having - # a `files` explicitly mapped it's not explicitly validated. 
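-        # For illustration, each resource gathered above is expected to
-        # carry at least one resolved file, roughly like this
-        # (hypothetical values):
-        #     {"source": "fur_mask.%04d.png",
-        #      "node": "|char|pgYetiMayaShape1",
-        #      "files": ["/textures/fur_mask.0001.png",
-        #                "/textures/fur_mask.0002.png"]}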
-        # TODO: Validate this in a validator instead
-        invalid_resources = []
-        for resource in resources:
-            if not resource['files']:
-                invalid_resources.append(resource)
-        if invalid_resources:
-            raise RuntimeError("Invalid resources")
-
-        # Collect all referenced files
-        for reference_node in reference_nodes:
-            ref_file = cmds.pgYetiGraph(node,
-                                        node=reference_node,
-                                        param="reference_file",
-                                        getParamValue=True)
-
-            # Create resource dict
-            item = {
-                "source": ref_file,
-                "node": node,
-                "graphnode": reference_node,
-                "param": "reference_file",
-                "files": []
-            }
-
-            ref_file_name = os.path.basename(ref_file)
-            if "%04d" in ref_file_name:
-                item["files"] = self.get_sequence(ref_file)
-            else:
-                if os.path.exists(ref_file) and os.path.isfile(ref_file):
-                    item["files"] = [ref_file]
-
-            if not item["files"]:
-                self.log.warning("Reference node '%s' has no valid file "
-                                 "path set: %s" % (reference_node, ref_file))
-                # TODO: This should pass or fail in a Validator instead
-                raise RuntimeError("Reference node must be a full file path!")
-
-            resources.append(item)
-
-        return resources
-
-    def search_textures(self, filepath):
-        """Search all texture files on disk.
-
-        This also parses full sequences for files with dynamic patterns
-        like <udim> and %04d in the filename.
-
-        Args:
-            filepath (str): The full path to the file, including any
-                dynamic patterns like <udim> or %04d
-
-        Returns:
-            list: The files found on disk
-
-        """
-        filename = os.path.basename(filepath)
-
-        # Collect full sequence if it matches a sequence pattern
-        if len(filename.split(".")) > 2:
-
-            # For UDIM based textures (tiles)
-            if "<udim>" in filename:
-                sequences = self.get_sequence(filepath,
-                                              pattern="<udim>")
-                if sequences:
-                    return sequences
-
-            # Frame/time based textures (e.g. animated masks)
-            elif "%04d" in filename:
-                sequences = self.get_sequence(filepath,
-                                              pattern="%04d")
-                if sequences:
-                    return sequences
-
-        # Assuming it is a fixed name (single file)
-        if os.path.exists(filepath):
-            return [filepath]
-
-        return []
-
-    def get_sequence(self, filepath, pattern="%04d"):
-        """Get sequence from filename.
-
-        This will only return files if they exist on disk, as it tries
-        to collect the sequence using the filename pattern and searching
-        for them on disk.
-
-        Supports negative frame ranges like -001, 0000, 0001 and -0001,
-        0000, 0001.
-
-        Arguments:
-            filepath (str): The full path to filename containing the given
-                pattern.
-            pattern (str): The pattern to swap with the variable frame number.
-
-        Returns:
-            list: file sequence.
-
-        """
-        import clique
-
-        escaped = re.escape(filepath)
-        re_pattern = escaped.replace(pattern, "-?[0-9]+")
-
-        source_dir = os.path.dirname(filepath)
-        files = [f for f in os.listdir(source_dir)
-                 if re.match(re_pattern, f)]
-
-        pattern = [clique.PATTERNS["frames"]]
-        collection, remainder = clique.assemble(files, patterns=pattern)
-
-        return collection
-
-    def _replace_tokens(self, strings):
-        env_re = re.compile(r"\$\{(\w+)\}")
-
-        replaced = []
-        for s in strings:
-            matches = re.finditer(env_re, s)
-            for m in matches:
-                try:
-                    s = s.replace(m.group(), os.environ[m.group(1)])
-                except KeyError:
-                    msg = "Cannot find requested {} in environment".format(
-                        m.group(1))
-                    self.log.error(msg)
-                    raise RuntimeError(msg)
-            replaced.append(s)
-        return replaced
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/determine_future_version.py b/server_addon/maya/client/ayon_maya/plugins/publish/determine_future_version.py
deleted file mode 100644
index 0c05b499c0..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/determine_future_version.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pyblish.api
-from ayon_maya.api import plugin
-
-
-class DetermineFutureVersion(plugin.MayaInstancePlugin):
-    """
-    Determine the version of the product the render should be attached to.
-    """
-    label = "Determine Product Version"
-    order = pyblish.api.IntegratorOrder
-    families = ["renderlayer"]
-
-    def process(self, instance):
-        context = instance.context
-        attach_to_products = [
-            i["productName"]
-            for i in instance.data["attachTo"]
-        ]
-        if not attach_to_products:
-            return
-
-        for i in context:
-            if i.data["productName"] not in attach_to_products:
-                continue
-            # this will get the corresponding product in the attachTo
-            # list so we can set the version there
-            sub = next(
-                item
-                for item in instance.data["attachTo"]
-                if item["productName"] == i.data["productName"]
-            )
-
-            sub["version"] = i.data.get("version", 1)
-            self.log.info("render will be attached to {} v{}".format(
-                sub["productName"], sub["version"]
-            ))
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_active_view_thumbnail.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_active_view_thumbnail.py
deleted file mode 100644
index 290f7e24eb..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_active_view_thumbnail.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import tempfile
-
-import maya.api.OpenMaya as om
-import maya.api.OpenMayaUI as omui
-import pyblish.api
-from ayon_maya.api.lib import IS_HEADLESS
-from ayon_maya.api import plugin
-
-
-class ExtractActiveViewThumbnail(plugin.MayaInstancePlugin):
-    """Set instance thumbnail to a screengrab of current active viewport.
-
-    If an instance does not have a thumbnail set yet, it gets a
-    screengrab of the currently active view at the time of publishing
-    as a fallback.
-
-    """
-    order = pyblish.api.ExtractorOrder + 0.49
-    label = "Active View Thumbnail"
-    families = ["workfile"]
-
-    def process(self, instance):
-        if IS_HEADLESS:
-            self.log.debug(
-                "Skip extraction of active view thumbnail, due to being in "
-                "headless mode."
- ) - return - - thumbnail = instance.data.get("thumbnailPath") - if not thumbnail: - view_thumbnail = self.get_view_thumbnail(instance) - if not view_thumbnail: - return - - self.log.debug("Setting instance thumbnail path to: {}".format( - view_thumbnail - )) - instance.data["thumbnailPath"] = view_thumbnail - - def get_view_thumbnail(self, instance): - cache_key = "__maya_view_thumbnail" - context = instance.context - - if cache_key not in context.data: - # Generate only a single thumbnail, even for multiple instances - with tempfile.NamedTemporaryFile(suffix="_thumbnail.jpg", - delete=False) as f: - path = f.name - - view = omui.M3dView.active3dView() - image = om.MImage() - view.readColorBuffer(image, True) - image.writeToFile(path, "jpg") - self.log.debug("Generated thumbnail: {}".format(path)) - - context.data["cleanupFullPaths"].append(path) - context.data[cache_key] = path - return context.data[cache_key] diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_arnold_scene_source.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_arnold_scene_source.py deleted file mode 100644 index b39c875400..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_arnold_scene_source.py +++ /dev/null @@ -1,244 +0,0 @@ -import json -import os -from collections import defaultdict - -import arnold -from ayon_maya.api import lib, plugin -from maya import cmds - - -class ExtractArnoldSceneSource(plugin.MayaExtractorPlugin): - """Extract the content of the instance to an Arnold Scene Source file.""" - - label = "Extract Arnold Scene Source" - families = ["ass"] - asciiAss = False - - def _pre_process(self, instance, staging_dir): - file_path = os.path.join(staging_dir, "{}.ass".format(instance.name)) - - # Mask - mask = arnold.AI_NODE_ALL - - node_types = { - "options": arnold.AI_NODE_OPTIONS, - "camera": arnold.AI_NODE_CAMERA, - "light": arnold.AI_NODE_LIGHT, - "shape": arnold.AI_NODE_SHAPE, - "shader": arnold.AI_NODE_SHADER, - "override": arnold.AI_NODE_OVERRIDE, - "driver": arnold.AI_NODE_DRIVER, - "filter": arnold.AI_NODE_FILTER, - "color_manager": arnold.AI_NODE_COLOR_MANAGER, - "operator": arnold.AI_NODE_OPERATOR - } - - for key in node_types.keys(): - if instance.data.get("mask" + key.title()): - mask = mask ^ node_types[key] - - # Motion blur - attribute_data = { - "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get( - "motionBlur", True - ), - "defaultArnoldRenderOptions.motion_steps": instance.data.get( - "motionBlurKeys", 2 - ), - "defaultArnoldRenderOptions.motion_frames": instance.data.get( - "motionBlurLength", 0.5 - ) - } - - # Write out .ass file - kwargs = { - "filename": file_path, - "startFrame": instance.data.get("frameStartHandle", 1), - "endFrame": instance.data.get("frameEndHandle", 1), - "frameStep": instance.data.get("step", 1), - "selected": True, - "asciiAss": self.asciiAss, - "shadowLinks": True, - "lightLinks": True, - "boundingBox": True, - "expandProcedurals": instance.data.get("expandProcedurals", False), - "camera": instance.data["camera"], - "mask": mask - } - - if "representations" not in instance.data: - instance.data["representations"] = [] - - return attribute_data, kwargs - - def process(self, instance): - staging_dir = self.staging_dir(instance) - attribute_data, kwargs = self._pre_process(instance, staging_dir) - - filenames = self._extract( - instance.data["members"], attribute_data, kwargs - ) - - self._post_process( - instance, filenames, staging_dir, kwargs["startFrame"] - ) - - def 
_post_process(self, instance, filenames, staging_dir, frame_start): - nodes_by_id = self._nodes_by_id(instance[:]) - representation = { - "name": "ass", - "ext": "ass", - "files": filenames if len(filenames) > 1 else filenames[0], - "stagingDir": staging_dir, - "frameStart": frame_start - } - - instance.data["representations"].append(representation) - - json_path = os.path.join( - staging_dir, "{}.json".format(instance.name) - ) - with open(json_path, "w") as f: - json.dump(nodes_by_id, f) - - representation = { - "name": "json", - "ext": "json", - "files": os.path.basename(json_path), - "stagingDir": staging_dir - } - - instance.data["representations"].append(representation) - - self.log.debug( - "Extracted instance {} to: {}".format(instance.name, staging_dir) - ) - - def _nodes_by_id(self, nodes): - nodes_by_id = defaultdict(list) - - for node in nodes: - id = lib.get_id(node) - - if id is None: - continue - - # Converting Maya hierarchy separator "|" to Arnold separator "/". - nodes_by_id[id].append(node.replace("|", "/")) - - return nodes_by_id - - def _extract(self, nodes, attribute_data, kwargs): - filenames = [] - with lib.attribute_values(attribute_data): - with lib.maintained_selection(): - self.log.debug( - "Writing: {}".format(nodes) - ) - cmds.select(nodes, noExpand=True) - - self.log.debug( - "Extracting ass sequence with: {}".format(kwargs) - ) - - exported_files = cmds.arnoldExportAss(**kwargs) - - for file in exported_files: - filenames.append(os.path.split(file)[1]) - - self.log.debug("Exported: {}".format(filenames)) - - return filenames - - -class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource): - """Extract the content of the instance to an Arnold Scene Source file.""" - - label = "Extract Arnold Scene Source Proxy" - hosts = ["maya"] - families = ["assProxy"] - asciiAss = True - - def process(self, instance): - staging_dir = self.staging_dir(instance) - attribute_data, kwargs = self._pre_process(instance, staging_dir) - - filenames, _ = self._duplicate_extract( - instance.data["members"], attribute_data, kwargs - ) - - self._post_process( - instance, filenames, staging_dir, kwargs["startFrame"] - ) - - kwargs["filename"] = os.path.join( - staging_dir, "{}_proxy.ass".format(instance.name) - ) - - filenames, _ = self._duplicate_extract( - instance.data["proxy"], attribute_data, kwargs - ) - - representation = { - "name": "proxy", - "ext": "ass", - "files": filenames if len(filenames) > 1 else filenames[0], - "stagingDir": staging_dir, - "frameStart": kwargs["startFrame"], - "outputName": "proxy" - } - - instance.data["representations"].append(representation) - - def _duplicate_extract(self, nodes, attribute_data, kwargs): - self.log.debug( - "Writing {} with:\n{}".format(kwargs["filename"], kwargs) - ) - filenames = [] - # Duplicating nodes so they are direct children of the world. This - # makes the hierarchy of any exported ass file the same. - with lib.delete_after() as delete_bin: - duplicate_nodes = [] - for node in nodes: - # Only interested in transforms: - if cmds.nodeType(node) != "transform": - continue - - # Only interested in transforms with shapes. 
- shapes = cmds.listRelatives( - node, shapes=True, noIntermediate=True - ) - if not shapes: - continue - - basename = cmds.duplicate(node)[0] - parents = cmds.ls(node, long=True)[0].split("|")[:-1] - duplicate_transform = "|".join(parents + [basename]) - - if cmds.listRelatives(duplicate_transform, parent=True): - duplicate_transform = cmds.parent( - duplicate_transform, world=True - )[0] - - basename = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - duplicate_transform = cmds.rename( - duplicate_transform, basename - ) - - # Discard children nodes that are not shapes - shapes = cmds.listRelatives( - duplicate_transform, shapes=True, fullPath=True - ) - children = cmds.listRelatives( - duplicate_transform, children=True, fullPath=True - ) - cmds.delete(set(children) - set(shapes)) - - duplicate_nodes.append(duplicate_transform) - duplicate_nodes.extend(shapes) - delete_bin.append(duplicate_transform) - - nodes_by_id = self._nodes_by_id(duplicate_nodes) - filenames = self._extract(duplicate_nodes, attribute_data, kwargs) - - return filenames, nodes_by_id diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_assembly.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_assembly.py deleted file mode 100644 index 8460fb716f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_assembly.py +++ /dev/null @@ -1,67 +0,0 @@ -import json -import os - -from ayon_maya.api.alembic import extract_alembic -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractAssembly(plugin.MayaExtractorPlugin): - """Produce an alembic of just point positions and normals. - - Positions and normals are preserved, but nothing more, - for plain and predictable point caches. - - """ - - label = "Extract Assembly" - families = ["assembly"] - - def process(self, instance): - - staging_dir = self.staging_dir(instance) - hierarchy_filename = "{}.abc".format(instance.name) - hierarchy_path = os.path.join(staging_dir, hierarchy_filename) - json_filename = "{}.json".format(instance.name) - json_path = os.path.join(staging_dir, json_filename) - - self.log.debug("Dumping scene data for debugging ..") - with open(json_path, "w") as filepath: - json.dump(instance.data["scenedata"], filepath, ensure_ascii=False) - - self.log.debug("Extracting pointcache ..") - cmds.select(instance.data["nodesHierarchy"]) - - # Run basic alembic exporter - extract_alembic(file=hierarchy_path, - startFrame=1.0, - endFrame=1.0, - **{"step": 1.0, - "attr": ["cbId"], - "writeVisibility": True, - "writeCreases": True, - "uvWrite": True, - "selection": True}) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation_abc = { - 'name': 'abc', - 'ext': 'abc', - 'files': hierarchy_filename, - "stagingDir": staging_dir - } - instance.data["representations"].append(representation_abc) - - representation_json = { - 'name': 'json', - 'ext': 'json', - 'files': json_filename, - "stagingDir": staging_dir - } - instance.data["representations"].append(representation_json) - # Remove data - instance.data.pop("scenedata", None) - - cmds.select(clear=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_alembic.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_alembic.py deleted file mode 100644 index b5ce6a6a44..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_alembic.py +++ /dev/null @@ -1,124 +0,0 @@ -import os -import json - -from maya import cmds -from ayon_maya.api 
import plugin -from ayon_core.pipeline import publish -from ayon_maya.api import lib - - -class ExtractCameraAlembic(plugin.MayaExtractorPlugin, - publish.OptionalPyblishPluginMixin): - """Extract a Camera as Alembic. - - The camera gets baked to world space by default. Only when the instance's - `bakeToWorldSpace` is set to False it will include its full hierarchy. - - 'camera' product type expects only single camera, if multiple cameras - are needed, 'matchmove' is better choice. - - """ - - label = "Extract Camera (Alembic)" - hosts = ["maya"] - families = ["camera", "matchmove"] - bake_attributes = "[]" - - def process(self, instance): - - # Collect the start and end including handles - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - step = instance.data.get("step", 1.0) - bake_to_worldspace = instance.data("bakeToWorldSpace", True) - - # get cameras - members = instance.data['setMembers'] - cameras = cmds.ls(members, leaf=True, long=True, - dag=True, type="camera") - - # validate required settings - assert isinstance(step, float), "Step must be a float value" - - # Define extract output file path - dir_path = self.staging_dir(instance) - if not os.path.exists(dir_path): - os.makedirs(dir_path) - filename = "{0}.abc".format(instance.name) - path = os.path.join(dir_path, filename) - - # Perform alembic extraction - member_shapes = cmds.ls( - members, leaf=True, shapes=True, long=True, dag=True) - with lib.maintained_selection(): - cmds.select( - member_shapes, - replace=True, noExpand=True) - - # Enforce forward slashes for AbcExport because we're - # embedding it into a job string - path = path.replace("\\", "/") - - job_str = ' -selection -dataFormat "ogawa" ' - job_str += ' -attrPrefix cb' - job_str += ' -frameRange {0} {1} '.format(start, end) - job_str += ' -step {0} '.format(step) - - if bake_to_worldspace: - job_str += ' -worldSpace' - - # if baked, drop the camera hierarchy to maintain - # clean output and backwards compatibility - camera_roots = cmds.listRelatives( - cameras, parent=True, fullPath=True) - for camera_root in camera_roots: - job_str += ' -root {0}'.format(camera_root) - - for member in members: - descendants = cmds.listRelatives(member, - allDescendents=True, - fullPath=True) or [] - shapes = cmds.ls(descendants, shapes=True, - noIntermediate=True, long=True) - cameras = cmds.ls(shapes, type="camera", long=True) - if cameras: - if not set(shapes) - set(cameras): - continue - self.log.warning(( - "Camera hierarchy contains additional geometry. 
" - "Extraction will fail.") - ) - transform = cmds.listRelatives( - member, parent=True, fullPath=True) - transform = transform[0] if transform else member - job_str += ' -root {0}'.format(transform) - - job_str += ' -file "{0}"'.format(path) - - bake_attributes = json.loads(self.bake_attributes) - # bake specified attributes in preset - assert isinstance(bake_attributes, list), ( - "Attributes to bake must be specified as a list" - ) - for attr in bake_attributes: - self.log.debug("Adding {} attribute".format(attr)) - job_str += " -attr {0}".format(attr) - - with lib.evaluation("off"): - with lib.suspended_refresh(): - cmds.AbcExport(j=job_str, verbose=False) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": dir_path, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '{0}' to: {1}".format( - instance.name, path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_mayaScene.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_mayaScene.py deleted file mode 100644 index c5aa331cb2..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_camera_mayaScene.py +++ /dev/null @@ -1,306 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract camera as Maya Scene.""" -import contextlib -import itertools -import os - -from ayon_core.lib import BoolDef -from ayon_core.pipeline import publish -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -def massage_ma_file(path): - """Clean up .ma file for backwards compatibility. - - Massage the .ma of baked camera to stay - backwards compatible with older versions - of Fusion (6.4) - - """ - # Get open file's lines - f = open(path, "r+") - lines = f.readlines() - f.seek(0) # reset to start of file - - # Rewrite the file - for line in lines: - # Skip all 'rename -uid' lines - stripped = line.strip() - if stripped.startswith("rename -uid "): - continue - - f.write(line) - - f.truncate() # remove remainder - f.close() - - -def grouper(iterable, n, fillvalue=None): - """Collect data into fixed-length chunks or blocks. - - Examples: - grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx - - """ - args = [iter(iterable)] * n - from six.moves import zip_longest - return zip_longest(fillvalue=fillvalue, *args) - - -def unlock(plug): - """Unlocks attribute and disconnects inputs for a plug. - - This will also recursively unlock the attribute - upwards to any parent attributes for compound - attributes, to ensure it's fully unlocked and free - to change the value. - - """ - node, attr = plug.rsplit(".", 1) - - # Unlock attribute - cmds.setAttr(plug, lock=False) - - # Also unlock any parent attribute (if compound) - parents = cmds.attributeQuery(attr, node=node, listParent=True) - if parents: - for parent in parents: - unlock("{0}.{1}".format(node, parent)) - - # Break incoming connections - connections = cmds.listConnections(plug, - source=True, - destination=False, - plugs=True, - connections=True) - if connections: - for destination, source in grouper(connections, 2): - cmds.disconnectAttr(source, destination) - - -class ExtractCameraMayaScene(plugin.MayaExtractorPlugin, - publish.OptionalPyblishPluginMixin): - """Extract a Camera as Maya Scene. - - This will create a duplicate of the camera that will be baked *with* - substeps and handles for the required frames. 
This temporary duplicate - will be published. - - The cameras gets baked to world space by default. Only when the instance's - `bakeToWorldSpace` is set to False it will include its full hierarchy. - - 'camera' product type expects only single camera, if multiple cameras are - needed, 'matchmove' is better choice. - - Note: - The extracted Maya ascii file gets "massaged" removing the uuid values - so they are valid for older versions of Fusion (e.g. 6.4) - - """ - - label = "Extract Camera (Maya Scene)" - hosts = ["maya"] - families = ["camera", "matchmove"] - scene_type = "ma" - - keep_image_planes = True - - def process(self, instance): - """Plugin entry point.""" - # get settings - maya_settings = instance.context.data["project_settings"]["maya"] - ext_mapping = { - item["name"]: item["value"] - for item in maya_settings["ext_mapping"] - } - if ext_mapping: - self.log.debug("Looking in settings for scene type ...") - # use extension mapping for first family found - for family in self.families: - try: - self.scene_type = ext_mapping[family] - self.log.debug( - "Using {} as scene type".format(self.scene_type)) - break - except KeyError: - # no preset found - pass - - # Collect the start and end including handles - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - step = instance.data.get("step", 1.0) - bake_to_worldspace = instance.data("bakeToWorldSpace", True) - - if not bake_to_worldspace: - self.log.warning("Camera (Maya Scene) export only supports world" - "space baked camera extractions. The disabled " - "bake to world space is ignored...") - - # get cameras - members = set(cmds.ls(instance.data['setMembers'], leaf=True, - shapes=True, long=True, dag=True)) - cameras = set(cmds.ls(members, leaf=True, shapes=True, long=True, - dag=True, type="camera")) - - # validate required settings - assert isinstance(step, float), "Step must be a float value" - transforms = cmds.listRelatives(list(cameras), - parent=True, fullPath=True) - - # Define extract output file path - dir_path = self.staging_dir(instance) - filename = "{0}.{1}".format(instance.name, self.scene_type) - path = os.path.join(dir_path, filename) - - # Perform extraction - with lib.maintained_selection(): - with lib.evaluation("off"): - with lib.suspended_refresh(): - if bake_to_worldspace: - baked = lib.bake_to_world_space( - transforms, - frame_range=[start, end], - step=step - ) - baked_camera_shapes = set(cmds.ls(baked, - type="camera", - dag=True, - shapes=True, - long=True)) - - members.update(baked_camera_shapes) - members.difference_update(cameras) - else: - baked_camera_shapes = cmds.ls(list(cameras), - type="camera", - dag=True, - shapes=True, - long=True) - - attrs = {"backgroundColorR": 0.0, - "backgroundColorG": 0.0, - "backgroundColorB": 0.0, - "overscan": 1.0} - - # Fix PLN-178: Don't allow background color to be non-black - for cam, (attr, value) in itertools.product(cmds.ls( - baked_camera_shapes, type="camera", dag=True, - long=True), attrs.items()): - plug = "{0}.{1}".format(cam, attr) - unlock(plug) - cmds.setAttr(plug, value) - - attr_values = self.get_attr_values_from_data( - instance.data) - keep_image_planes = attr_values.get("keep_image_planes") - - with transfer_image_planes(sorted(cameras), - sorted(baked_camera_shapes), - keep_image_planes): - - self.log.info("Performing extraction..") - cmds.select(cmds.ls(list(members), dag=True, - shapes=True, long=True), - noExpand=True) - cmds.file(path, - force=True, - typ="mayaAscii" if self.scene_type == "ma" else 
"mayaBinary", # noqa: E501 - exportSelected=True, - preserveReferences=False, - constructionHistory=False, - channels=True, # allow animation - constraints=False, - shader=False, - expressions=False) - - # Delete the baked hierarchy - if bake_to_worldspace: - cmds.delete(baked) - if self.scene_type == "ma": - massage_ma_file(path) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': filename, - "stagingDir": dir_path, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '{0}' to: {1}".format( - instance.name, path)) - - @classmethod - def get_attribute_defs(cls): - defs = super(ExtractCameraMayaScene, cls).get_attribute_defs() - - defs.extend([ - BoolDef("keep_image_planes", - label="Keep Image Planes", - tooltip="Preserving connected image planes on camera", - default=cls.keep_image_planes), - - ]) - - return defs - - -@contextlib.contextmanager -def transfer_image_planes(source_cameras, target_cameras, - keep_input_connections): - """Reattaches image planes to baked or original cameras. - - Baked cameras are duplicates of original ones. - This attaches it to duplicated camera properly and after - export it reattaches it back to original to keep image plane in workfile. - """ - originals = {} - try: - for source_camera, target_camera in zip(source_cameras, - target_cameras): - image_plane_plug = "{}.imagePlane".format(source_camera) - image_planes = cmds.listConnections(image_plane_plug, - source=True, - destination=False, - type="imagePlane") or [] - - # Split of the parent path they are attached - we want - # the image plane node name if attached to a camera. - # TODO: Does this still mean the image plane name is unique? - image_planes = [x.split("->", 1)[-1] for x in image_planes] - - if not image_planes: - continue - - originals[source_camera] = [] - for image_plane in image_planes: - if keep_input_connections: - if source_camera == target_camera: - continue - _attach_image_plane(target_camera, image_plane) - else: # explicitly detach image planes - cmds.imagePlane(image_plane, edit=True, detach=True) - originals[source_camera].append(image_plane) - yield - finally: - for camera, image_planes in originals.items(): - for image_plane in image_planes: - _attach_image_plane(camera, image_plane) - - -def _attach_image_plane(camera, image_plane): - cmds.imagePlane(image_plane, edit=True, detach=True) - - # Attaching to a camera resets it to identity size, so we counter that - size_x = cmds.getAttr(f"{image_plane}.sizeX") - size_y = cmds.getAttr(f"{image_plane}.sizeY") - cmds.imagePlane(image_plane, edit=True, camera=camera) - cmds.setAttr(f"{image_plane}.sizeX", size_x) - cmds.setAttr(f"{image_plane}.sizeY", size_y) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx.py deleted file mode 100644 index d9b0a789c5..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import maya.mel as mel # noqa -import pyblish.api -from ayon_maya.api import fbx -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin -from maya import cmds # noqa - - -class ExtractFBX(plugin.MayaExtractorPlugin): - """Extract FBX from Maya. 
-
-    This extracts reproducible FBX exports ignoring any of the
-    settings set on the local machine in the FBX export options window.
-
-    """
-    order = pyblish.api.ExtractorOrder
-    label = "Extract FBX"
-    families = ["fbx"]
-
-    def process(self, instance):
-        fbx_exporter = fbx.FBXExtractor(log=self.log)
-
-        # Define output path
-        staging_dir = self.staging_dir(instance)
-        filename = "{0}.fbx".format(instance.name)
-        path = os.path.join(staging_dir, filename)
-
-        # The export requires forward slashes because we need
-        # to format it into a string in a mel expression
-        path = path.replace('\\', '/')
-
-        self.log.debug("Extracting FBX to: {0}".format(path))
-
-        members = instance.data["setMembers"]
-        self.log.debug("Members: {0}".format(members))
-        self.log.debug("Instance: {0}".format(instance[:]))
-
-        fbx_exporter.set_options_from_instance(instance)
-
-        # Export
-        with maintained_selection():
-            fbx_exporter.export(members, path)
-            cmds.select(members, r=1, noExpand=True)
-            mel.eval('FBXExport -f "{}" -s'.format(path))
-
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            'name': 'fbx',
-            'ext': 'fbx',
-            'files': filename,
-            "stagingDir": staging_dir,
-        }
-        instance.data["representations"].append(representation)
-
-        self.log.debug("Extract FBX successful to: {0}".format(path))
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx_animation.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx_animation.py
deleted file mode 100644
index c22241d2ca..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_fbx_animation.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-
-import pyblish.api
-from ayon_maya.api import fbx
-from ayon_maya.api.lib import get_namespace, namespaced, strip_namespace
-from ayon_maya.api import plugin
-from maya import cmds  # noqa
-
-
-class ExtractFBXAnimation(plugin.MayaExtractorPlugin):
-    """Extract Rig in FBX format from Maya.
-
-    This extracts the rig in fbx with the constraints
-    and referenced asset content included.
-    This also optionally extracts the animated rig in fbx with
-    geometries included.
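-
-    Note:
-        A sketch of the namespace handling below: the export runs from
-        inside the rig's namespace, so a node named, hypothetically,
-        "char01:skeletonAnim_SET" is written to the FBX as
-        "skeletonAnim_SET", matching the node names in the rig workfile.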
- - """ - order = pyblish.api.ExtractorOrder - label = "Extract Animation (FBX)" - families = ["animation.fbx"] - - def process(self, instance): - # Define output path - staging_dir = self.staging_dir(instance) - filename = "{0}.fbx".format(instance.name) - path = os.path.join(staging_dir, filename) - path = path.replace("\\", "/") - - fbx_exporter = fbx.FBXExtractor(log=self.log) - out_members = instance.data.get("animated_skeleton", []) - # Export - # TODO: need to set up the options for users to set up - # the flags they intended to export - instance.data["skeletonDefinitions"] = True - instance.data["referencedAssetsContent"] = True - fbx_exporter.set_options_from_instance(instance) - # Export from the rig's namespace so that the exported - # FBX does not include the namespace but preserves the node - # names as existing in the rig workfile - if not out_members: - skeleton_set = [ - i for i in instance - if i.endswith("skeletonAnim_SET") - ] - self.log.debug( - "Top group of animated skeleton not found in " - "{}.\nSkipping fbx animation extraction.".format(skeleton_set)) - return - - namespace = get_namespace(out_members[0]) - relative_out_members = [ - strip_namespace(node, namespace) for node in out_members - ] - with namespaced( - ":" + namespace, - new=False, - relative_names=True - ) as namespace: - fbx_exporter.export(relative_out_members, path) - - representations = instance.data.setdefault("representations", []) - representations.append({ - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": staging_dir - }) - - self.log.debug( - "Extracted FBX animation to: {0}".format(path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_gltf.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_gltf.py deleted file mode 100644 index 46da8f9463..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_gltf.py +++ /dev/null @@ -1,63 +0,0 @@ -import os - -import pyblish.api -from ayon_maya.api import lib -from ayon_maya.api.gltf import extract_gltf -from ayon_maya.api import plugin -from maya import cmds, mel - - -class ExtractGLB(plugin.MayaExtractorPlugin): - - order = pyblish.api.ExtractorOrder - label = "Extract GLB" - families = ["gltf"] - - def process(self, instance): - staging_dir = self.staging_dir(instance) - filename = "{0}.glb".format(instance.name) - path = os.path.join(staging_dir, filename) - - cmds.loadPlugin("maya2glTF", quiet=True) - - nodes = instance[:] - - start_frame = instance.data('frameStart') or \ - int(cmds.playbackOptions(query=True, - animationStartTime=True))# noqa - end_frame = instance.data('frameEnd') or \ - int(cmds.playbackOptions(query=True, - animationEndTime=True)) # noqa - fps = mel.eval('currentTimeUnitToFPS()') - - options = { - "sno": True, # selectedNodeOnly - "nbu": True, # .bin instead of .bin0 - "ast": start_frame, - "aet": end_frame, - "afr": fps, - "dsa": 1, - "acn": instance.name, # codespell:ignore acn - "glb": True, - "vno": True # visibleNodeOnly - } - - self.log.debug("Extracting GLB to: {}".format(path)) - with lib.maintained_selection(): - cmds.select(nodes, hi=True, noExpand=True) - extract_gltf(staging_dir, - instance.name, - **options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'glb', - 'ext': 'glb', - 'files': filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extract GLB successful to: {0}".format(path)) diff --git 
a/server_addon/maya/client/ayon_maya/plugins/publish/extract_gpu_cache.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_gpu_cache.py deleted file mode 100644 index 53944571a8..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_gpu_cache.py +++ /dev/null @@ -1,68 +0,0 @@ -import json - -from ayon_core.pipeline import publish -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractGPUCache(plugin.MayaExtractorPlugin, - publish.OptionalPyblishPluginMixin): - """Extract the content of the instance to a GPU cache file.""" - - label = "GPU Cache" - families = ["model", "animation", "pointcache"] - step = 1.0 - stepSave = 1 - optimize = True - optimizationThreshold = 40000 - optimizeAnimationsForMotionBlur = True - writeMaterials = True - useBaseTessellation = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - cmds.loadPlugin("gpuCache", quiet=True) - - staging_dir = self.staging_dir(instance) - filename = "{}_gpu_cache".format(instance.name) - - # Write out GPU cache file. - kwargs = { - "directory": staging_dir, - "fileName": filename, - "saveMultipleFiles": False, - "simulationRate": self.step, - "sampleMultiplier": self.stepSave, - "optimize": self.optimize, - "optimizationThreshold": self.optimizationThreshold, - "optimizeAnimationsForMotionBlur": ( - self.optimizeAnimationsForMotionBlur - ), - "writeMaterials": self.writeMaterials, - "useBaseTessellation": self.useBaseTessellation - } - self.log.debug( - "Extract {} with:\n{}".format( - instance[:], json.dumps(kwargs, indent=4, sort_keys=True) - ) - ) - cmds.gpuCache(instance[:], **kwargs) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "gpu_cache", - "ext": "abc", - "files": filename + ".abc", - "stagingDir": staging_dir, - "outputName": "gpu_cache" - } - - instance.data["representations"].append(representation) - - self.log.debug( - "Extracted instance {} to: {}".format(instance.name, staging_dir) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_import_reference.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_import_reference.py deleted file mode 100644 index 6a0c7719a8..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_import_reference.py +++ /dev/null @@ -1,165 +0,0 @@ -import os -import sys -import tempfile - -import pyblish.api -from ayon_core.lib import run_subprocess -from ayon_core.pipeline.publish import OptionalPyblishPluginMixin -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractImportReference(plugin.MayaExtractorPlugin, - OptionalPyblishPluginMixin): - """ - - Extract the scene with imported reference. 
-    The temp scene with imported reference is
-    published for rendering if this extractor is activated.
-
-    """
-
-    label = "Extract Import Reference"
-    order = pyblish.api.ExtractorOrder - 0.48
-    families = ["renderlayer", "workfile"]
-    optional = True
-    tmp_format = "_tmp"
-
-    @classmethod
-    def apply_settings(cls, project_settings):
-        if "deadline" not in project_settings:
-            cls.enabled = False
-            return
-        cls.active = (
-            project_settings
-            ["deadline"]
-            ["publish"]
-            ["MayaSubmitDeadline"]
-            ["import_reference"]
-        )
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-
-        maya_settings = instance.context.data["project_settings"]["maya"]
-        ext_mapping = {
-            item["name"]: item["value"]
-            for item in maya_settings["ext_mapping"]
-        }
-        if ext_mapping:
-            self.log.debug("Looking in settings for scene type ...")
-            # use extension mapping for first family found
-            for family in self.families:
-                try:
-                    self.scene_type = ext_mapping[family]
-                    self.log.debug(
-                        "Using {} as scene type".format(self.scene_type))
-                    break
-
-                except KeyError:
-                    # set scene type to ma
-                    self.scene_type = "ma"
-
-        _scene_type = ("mayaAscii"
                       if self.scene_type == "ma"
                       else "mayaBinary")
-
-        dir_path = self.staging_dir(instance)
-        # name the file with imported reference
-        if instance.name == "Main":
-            return
-        tmp_name = instance.name + self.tmp_format
-        current_name = cmds.file(query=True, sceneName=True)
-        ref_scene_name = "{0}.{1}".format(tmp_name, self.scene_type)
-
-        reference_path = os.path.join(dir_path, ref_scene_name)
-        tmp_path = os.path.dirname(current_name) + "/" + ref_scene_name
-
-        self.log.debug("Performing extraction..")
-
-        # This generates a script for mayapy to take care of reference
-        # importing outside the current session. It is passed the current
-        # scene name and the destination scene name.
-        script = ("""
-# -*- coding: utf-8 -*-
-'''Script to import references to given scene.'''
-import maya.standalone
-maya.standalone.initialize()
-from maya import cmds
-# scene names filled by caller
-current_name = "{current_name}"
-ref_scene_name = "{ref_scene_name}"
-print(">>> Opening {{}} ...".format(current_name))
-cmds.file(current_name, open=True, force=True)
-print(">>> Processing references")
-all_reference = cmds.file(q=True, reference=True) or []
-for ref in all_reference:
-    if cmds.referenceQuery(ref, il=True):
-        cmds.file(ref, importReference=True)
-
-        nested_ref = cmds.file(q=True, reference=True)
-        if nested_ref:
-            for new_ref in nested_ref:
-                if new_ref not in all_reference:
-                    all_reference.append(new_ref)
-
-print(">>> Finish importing references")
-print(">>> Saving scene as {{}}".format(ref_scene_name))
-
-cmds.file(rename=ref_scene_name)
-cmds.file(save=True, force=True)
-print("*** Done")
-        """).format(current_name=current_name, ref_scene_name=tmp_path)
-        mayapy_exe = os.path.join(os.getenv("MAYA_LOCATION"), "bin", "mayapy")
-        # sys.platform reports "win32" on Windows
-        if sys.platform == "win32":
-            mayapy_exe += ".exe"
-        mayapy_exe = os.path.normpath(mayapy_exe)
-        # can't use NamedTemporaryFile as that can't be opened in another
-        # process until handles are closed by context manager.
- with tempfile.TemporaryDirectory() as tmp_dir_name: - tmp_script_path = os.path.join(tmp_dir_name, "import_ref.py") - self.log.debug("Using script file: {}".format(tmp_script_path)) - with open(tmp_script_path, "wt") as tmp: - tmp.write(script) - - try: - run_subprocess([mayapy_exe, tmp_script_path]) - except Exception: - self.log.error("Import reference failed", exc_info=True) - raise - - with lib.maintained_selection(): - cmds.select(all=True, noExpand=True) - cmds.file(reference_path, - force=True, - typ=_scene_type, - exportSelected=True, - channels=True, - constraints=True, - shader=True, - expressions=True, - constructionHistory=True) - - instance.context.data["currentFile"] = tmp_path - - if "files" not in instance.data: - instance.data["files"] = [] - instance.data["files"].append(ref_scene_name) - - if instance.data.get("representations") is None: - instance.data["representations"] = [] - - ref_representation = { - "name": self.scene_type, - "ext": self.scene_type, - "files": ref_scene_name, - "stagingDir": os.path.dirname(current_name), - "outputName": "imported" - } - self.log.debug(ref_representation) - - instance.data["representations"].append(ref_representation) - - self.log.debug("Extracted instance '%s' to : '%s'" % (ref_scene_name, - reference_path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_layout.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_layout.py deleted file mode 100644 index ca53f563d4..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_layout.py +++ /dev/null @@ -1,168 +0,0 @@ -import json -import math -import os - -from ayon_api import get_representation_by_id -from ayon_maya.api import plugin -from maya import cmds -from maya.api import OpenMaya as om - - -class ExtractLayout(plugin.MayaExtractorPlugin): - """Extract a layout.""" - - label = "Extract Layout" - families = ["layout"] - project_container = "AVALON_CONTAINERS" - optional = True - - def process(self, instance): - # Define extract output file path - stagingdir = self.staging_dir(instance) - - # Perform extraction - self.log.debug("Performing extraction..") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - json_data = [] - # TODO representation queries can be refactored to be faster - project_name = instance.context.data["projectName"] - - for asset in cmds.sets(str(instance), query=True): - # Find the container - project_container = self.project_container - container_list = cmds.ls(project_container) - if len(container_list) == 0: - self.log.warning("Project container is not found!") - self.log.warning("The asset(s) may not be properly loaded after published") # noqa - continue - - grp_loaded_ass = instance.data.get("groupLoadedAssets", False) - if grp_loaded_ass: - asset_list = cmds.listRelatives(asset, children=True) - # WARNING This does override 'asset' variable from parent loop - # is it correct? 
-                for asset in asset_list:
-                    grp_name = asset.split(':')[0]
-            else:
-                grp_name = asset.split(':')[0]
-            containers = cmds.ls("{}*_CON".format(grp_name))
-            if len(containers) == 0:
-                self.log.warning("{} isn't from the loader".format(asset))
-                self.log.warning("It may not be properly loaded after publish")  # noqa
-                continue
-            container = containers[0]
-
-            representation_id = cmds.getAttr(
-                "{}.representation".format(container))
-
-            representation = get_representation_by_id(
-                project_name,
-                representation_id,
-                fields={"versionId", "context"}
-            )
-
-            self.log.debug(representation)
-
-            version_id = representation["versionId"]
-            # TODO use product entity to get product type rather than
-            #   data in representation 'context'
-            repre_context = representation["context"]
-            product_type = repre_context.get("product", {}).get("type")
-            if not product_type:
-                product_type = repre_context.get("family")
-
-            json_element = {
-                "product_type": product_type,
-                "instance_name": cmds.getAttr(
-                    "{}.namespace".format(container)),
-                "representation": str(representation_id),
-                "version": str(version_id)
-            }
-
-            loc = cmds.xform(asset, query=True, translation=True)
-            rot = cmds.xform(asset, query=True, rotation=True, euler=True)
-            scl = cmds.xform(asset, query=True, relative=True, scale=True)
-
-            json_element["transform"] = {
-                "translation": {
-                    "x": loc[0],
-                    "y": loc[1],
-                    "z": loc[2]
-                },
-                "rotation": {
-                    "x": math.radians(rot[0]),
-                    "y": math.radians(rot[1]),
-                    "z": math.radians(rot[2])
-                },
-                "scale": {
-                    "x": scl[0],
-                    "y": scl[1],
-                    "z": scl[2]
-                }
-            }
-
-            row_length = 4
-            t_matrix_list = cmds.xform(asset, query=True, matrix=True)
-
-            transform_mm = om.MMatrix(t_matrix_list)
-            transform = om.MTransformationMatrix(transform_mm)
-
-            t = transform.translation(om.MSpace.kWorld)
-            t = om.MVector(t.x, t.z, -t.y)
-            transform.setTranslation(t, om.MSpace.kWorld)
-            transform.rotateBy(
-                om.MEulerRotation(math.radians(-90), 0, 0), om.MSpace.kWorld)
-            transform.scaleBy([1.0, 1.0, -1.0], om.MSpace.kObject)
-
-            t_matrix_list = list(transform.asMatrix())
-
-            t_matrix = []
-            for i in range(0, len(t_matrix_list), row_length):
-                t_matrix.append(t_matrix_list[i:i + row_length])
-
-            json_element["transform_matrix"] = [
-                list(row)
-                for row in t_matrix
-            ]
-
-            basis_list = [
-                1, 0, 0, 0,
-                0, 1, 0, 0,
-                0, 0, -1, 0,
-                0, 0, 0, 1
-            ]
-
-            basis_mm = om.MMatrix(basis_list)
-            basis = om.MTransformationMatrix(basis_mm)
-
-            b_matrix_list = list(basis.asMatrix())
-            b_matrix = []
-
-            for i in range(0, len(b_matrix_list), row_length):
-                b_matrix.append(b_matrix_list[i:i + row_length])
-
-            json_element["basis"] = []
-            for row in b_matrix:
-                json_element["basis"].append(list(row))
-
-            json_data.append(json_element)
-
-        json_filename = "{}.json".format(instance.name)
-        json_path = os.path.join(stagingdir, json_filename)
-
-        with open(json_path, "w+") as file:
-            json.dump(json_data, fp=file, indent=2)
-
-        json_representation = {
-            'name': 'json',
-            'ext': 'json',
-            'files': json_filename,
-            "stagingDir": stagingdir,
-        }
-        instance.data["representations"].append(json_representation)
-
-        self.log.debug("Extracted instance '%s' to: %s",
-                       instance.name, json_representation)
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_look.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_look.py
deleted file mode 100644
index 8e57b22d64..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_look.py
+++ /dev/null
@@ -1,889 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Maya look extractor."""
-import contextlib
-import json
-import logging
-import os
-import platform
-import sys
-import tempfile
-from abc import ABCMeta, abstractmethod
-from collections import OrderedDict
-
-import attr
-import pyblish.api
-import six
-from ayon_core.lib import (
-    ToolNotFoundError,
-    find_executable,
-    get_oiio_tool_args,
-    run_subprocess,
-    source_hash,
-)
-from ayon_core.pipeline import KnownPublishError
-from ayon_maya.api import lib
-from ayon_maya.api import plugin
-from maya import cmds  # noqa
-
-# Modes for transfer
-COPY = 1
-HARDLINK = 2
-
-
-@attr.s
-class TextureResult(object):
-    """The resulting texture of a processed file for a resource"""
-    # Path to the file
-    path = attr.ib()
-    # Colorspace of the resulting texture. This might not be the input
-    # colorspace of the texture if a TextureProcessor has processed the file.
-    colorspace = attr.ib()
-    # Hash generated for the texture using ayon_core.lib.source_hash
-    file_hash = attr.ib()
-    # The transfer mode, e.g. COPY or HARDLINK
-    transfer_mode = attr.ib()
-
-
-def find_paths_by_hash(texture_hash):
-    """Find all paths that originate from the given texture hash.
-
-    Args:
-        texture_hash (str): Hash of the texture.
-
-    Raises:
-        KnownPublishError: Always, because hash lookups are not
-            implemented for AYON.
-
-    """
-    raise KnownPublishError(
-        "This is a bug. \"find_paths_by_hash\" is not compatible with AYON."
-    )
-
-
-@contextlib.contextmanager
-def no_workspace_dir():
-    """Force maya to a fake temporary workspace directory.
-
-    Note: This is not maya.cmds.workspace 'rootDirectory' but the 'directory'
-
-    This helps to avoid Maya automatically remapping image paths to files
-    relative to the currently set directory.
-
-    """
-
-    # Store current workspace
-    original = cmds.workspace(query=True, directory=True)
-
-    # Set a fake workspace
-    fake_workspace_dir = tempfile.mkdtemp()
-    cmds.workspace(directory=fake_workspace_dir)
-
-    try:
-        yield
-    finally:
-        try:
-            cmds.workspace(directory=original)
-        except RuntimeError:
-            # If the original workspace directory didn't exist either
-            # ignore the fact that it fails to reset it to the old path
-            pass
-
-        # Remove the temporary directory
-        os.rmdir(fake_workspace_dir)
-
-
-@six.add_metaclass(ABCMeta)
-class TextureProcessor:
-
-    extension = None
-
-    def __init__(self, log=None):
-        if log is None:
-            log = logging.getLogger(self.__class__.__name__)
-        self.log = log
-
-    def apply_settings(self, project_settings):
-        """Apply AYON system/project settings to the TextureProcessor
-
-        Args:
-            project_settings (dict): AYON project settings
-
-        Returns:
-            None
-
-        """
-        pass
-
-    @abstractmethod
-    def process(self,
-                source,
-                colorspace,
-                color_management,
-                staging_dir):
-        """Process the `source` texture.
-
-        Must be implemented by the inheriting class.
-
-        This must always return a TextureResult even when it does not generate
-        a texture. If it doesn't generate a texture then it should return a
-        TextureResult using the input path and colorspace.
-
-        Args:
-            source (str): Path to source file.
-            colorspace (str): Colorspace of the source file.
-            color_management (dict): Maya Color management data from
-                `lib.get_color_management_preferences`
-            staging_dir (str): Output directory to write to.
-
-        Returns:
-            TextureResult: The resulting texture information.
- - """ - pass - - def __repr__(self): - # Log instance as class name - return self.__class__.__name__ - - -class MakeRSTexBin(TextureProcessor): - """Make `.rstexbin` using `redshiftTextureProcessor`""" - - extension = ".rstexbin" - - def process(self, - source, - colorspace, - color_management, - staging_dir): - - texture_processor_path = self.get_redshift_tool( - "redshiftTextureProcessor" - ) - if not texture_processor_path: - raise KnownPublishError("Must have Redshift available.") - - subprocess_args = [ - texture_processor_path, - source - ] - - # if color management is enabled we pass color space information - if color_management["enabled"]: - config_path = color_management["config"] - if not os.path.exists(config_path): - raise RuntimeError("OCIO config not found at: " - "{}".format(config_path)) - - if not os.getenv("OCIO"): - self.log.debug( - "OCIO environment variable not set." - "Setting it with OCIO config from Maya." - ) - os.environ["OCIO"] = config_path - - self.log.debug("converting colorspace {0} to redshift render " - "colorspace".format(colorspace)) - subprocess_args.extend(["-cs", colorspace]) - - hash_args = ["rstex"] - texture_hash = source_hash(source, *hash_args) - - # Redshift stores the output texture next to the input but with - # the extension replaced to `.rstexbin` - basename, ext = os.path.splitext(source) - destination = "{}{}".format(basename, self.extension) - - self.log.debug(" ".join(subprocess_args)) - try: - run_subprocess(subprocess_args, logger=self.log) - except Exception: - self.log.error("Texture .rstexbin conversion failed", - exc_info=True) - six.reraise(*sys.exc_info()) - - return TextureResult( - path=destination, - file_hash=texture_hash, - colorspace=colorspace, - transfer_mode=COPY - ) - - @staticmethod - def get_redshift_tool(tool_name): - """Path to redshift texture processor. - - On Windows it adds .exe extension if missing from tool argument. - - Args: - tool_name (string): Tool name. - - Returns: - str: Full path to redshift texture processor executable. - """ - if "REDSHIFT_COREDATAPATH" not in os.environ: - raise RuntimeError("Must have Redshift available.") - - redshift_tool_path = os.path.join( - os.environ["REDSHIFT_COREDATAPATH"], - "bin", - tool_name - ) - - return find_executable(redshift_tool_path) - - -class MakeTX(TextureProcessor): - """Make `.tx` using `maketx` with some default settings. - - Some hardcoded arguments passed to `maketx` are based on the defaults used - in Arnold's txManager tool. - - """ - - extension = ".tx" - - def __init__(self, log=None): - super(MakeTX, self).__init__(log=log) - self.extra_args = [] - - def apply_settings(self, project_settings): - # Allow extra maketx arguments from project settings - args_settings = ( - project_settings["maya"]["publish"] - .get("ExtractLook", {}).get("maketx_arguments", []) - ) - extra_args = [] - for arg_data in args_settings: - argument = arg_data["argument"] - parameters = arg_data["parameters"] - if not argument: - self.log.debug("Ignoring empty parameter from " - "`maketx_arguments` setting..") - continue - - extra_args.append(argument) - extra_args.extend(parameters) - - self.extra_args = extra_args - - def process(self, - source, - colorspace, - color_management, - staging_dir): - """Process the texture. - - This function requires the `maketx` executable to be available in an - OpenImageIO toolset detectable by AYON. - - Args: - source (str): Path to source file. - colorspace (str): Colorspace of the source file. 
-            color_management (dict): Maya Color management data from
-                `lib.get_color_management_preferences`
-            staging_dir (str): Output directory to write to.
-
-        Returns:
-            TextureResult: The resulting texture information.
-
-        """
-
-        try:
-            maketx_args = get_oiio_tool_args("maketx")
-        except ToolNotFoundError:
-            raise KnownPublishError(
-                "OpenImageIO is not available on the machine")
-
-        # Define .tx filepath in staging if source file is not .tx
-        fname, ext = os.path.splitext(os.path.basename(source))
-        if ext == ".tx":
-            # Do nothing if the source file is already a .tx file.
-            return TextureResult(
-                path=source,
-                file_hash=source_hash(source),
-                colorspace=colorspace,
-                transfer_mode=COPY
-            )
-
-        # Hardcoded default arguments for maketx conversion based on Arnold's
-        # txManager in Maya
-        args = [
-            # unpremultiply before conversion (recommended when alpha present)
-            "--unpremult",
-            # use oiio-optimized settings for tile-size, planarconfig, metadata
-            "--oiio",
-            "--filter", "lanczos3",
-        ]
-        if color_management["enabled"]:
-            config_path = color_management["config"]
-            if not os.path.exists(config_path):
-                raise RuntimeError("OCIO config not found at: "
-                                   "{}".format(config_path))
-
-            render_colorspace = color_management["rendering_space"]
-
-            self.log.debug("tx: converting colorspace {0} "
-                           "-> {1}".format(colorspace,
-                                           render_colorspace))
-            args.extend(["--colorconvert", colorspace, render_colorspace])
-            args.extend(["--colorconfig", config_path])
-
-        else:
-            # Maya color management is disabled, so we cannot rely on an
-            # OCIO config.
-            self.log.debug("tx: Maya color management is disabled. No color "
-                           "conversion will be applied to .tx conversion for: "
-                           "{}".format(source))
-            # Assume linear
-            render_colorspace = "linear"
-
-        # Note: The texture hash is only reliable if we include any potential
-        #   conversion arguments provided to e.g. `maketx`
-        hash_args = ["maketx"] + args + self.extra_args
-        texture_hash = source_hash(source, *hash_args)
-
-        # Ensure folder exists
-        resources_dir = os.path.join(staging_dir, "resources")
-        if not os.path.exists(resources_dir):
-            os.makedirs(resources_dir)
-
-        self.log.debug("Generating .tx file for %s .." % source)
-
-        subprocess_args = maketx_args + [
-            "-v",  # verbose
-            "-u",  # update mode
-            # --checknan doesn't influence the output file but aborts the
-            # conversion if it finds any NaNs, so it can be safely excluded
-            # from the file hash
-            "--checknan",
-            source
-        ]
-
-        subprocess_args.extend(args)
-        if self.extra_args:
-            subprocess_args.extend(self.extra_args)
-
-        # Add source hash attribute after other arguments for log readability
-        # Note: argument is excluded from the hash since it is the hash itself
-        subprocess_args.extend([
-            "--sattrib",
-            "sourceHash",
-            texture_hash
-        ])
-
-        destination = os.path.join(resources_dir, fname + ".tx")
-        subprocess_args.extend(["-o", destination])
-
-        # We want to be explicit about which OCIO config gets used, so when
-        # we supply no --colorconfig flag we make sure no fallback to an
-        # OCIO env var occurs.
-        env = os.environ.copy()
-        env.pop("OCIO", None)
-
-        self.log.debug(" ".join(subprocess_args))
-        try:
-            run_subprocess(subprocess_args, env=env)
-        except Exception:
-            self.log.error("Texture maketx conversion failed",
-                           exc_info=True)
-            raise
-
-        return TextureResult(
-            path=destination,
-            file_hash=texture_hash,
-            colorspace=render_colorspace,
-            transfer_mode=COPY
-        )
-
-    @staticmethod
-    def _has_arnold():
-        """Return whether the arnold package is available and importable."""
-        try:
-            import arnold  # noqa: F401
-            return True
-        except (ImportError, ModuleNotFoundError):
-            return False
-
-
-class ExtractLook(plugin.MayaExtractorPlugin):
-    """Extract Look (Maya Scene + JSON)
-
-    Only extracts the sets (shadingEngines and the like) alongside a .json
-    file that stores its relationships for the sets and "attribute" data for
-    the instance members.
-
-    """
-
-    label = "Extract Look (Maya Scene + JSON)"
-    hosts = ["maya"]
-    families = ["look", "mvLook"]
-    order = pyblish.api.ExtractorOrder + 0.2
-    scene_type = "ma"
-    look_data_type = "json"
-
-    def get_maya_scene_type(self, instance):
-        """Get Maya scene type from settings.
-
-        Args:
-            instance (pyblish.api.Instance): Instance with collected
-                project settings.
-
-        """
-        maya_settings = instance.context.data["project_settings"]["maya"]
-        ext_mapping = {
-            item["name"]: item["value"]
-            for item in maya_settings["ext_mapping"]
-        }
-        if ext_mapping:
-            self.log.debug("Looking in settings for scene type ...")
-            # use extension mapping for first family found
-            for family in self.families:
-                try:
-                    self.scene_type = ext_mapping[family]
-                    self.log.debug(
-                        "Using {} as scene type".format(self.scene_type))
-                    break
-                except KeyError:
-                    # no preset found
-                    pass
-
-        return "mayaAscii" if self.scene_type == "ma" else "mayaBinary"
-
-    def process(self, instance):
-        """Plugin entry point.
-
-        Args:
-            instance: Instance to process.
- - """ - _scene_type = self.get_maya_scene_type(instance) - - # Define extract output file path - dir_path = self.staging_dir(instance) - maya_fname = "{0}.{1}".format(instance.name, self.scene_type) - json_fname = "{0}.{1}".format(instance.name, self.look_data_type) - maya_path = os.path.join(dir_path, maya_fname) - json_path = os.path.join(dir_path, json_fname) - - # Remove all members of the sets so they are not included in the - # exported file by accident - self.log.debug("Processing sets..") - lookdata = instance.data["lookData"] - relationships = lookdata["relationships"] - sets = list(relationships.keys()) - if not sets: - self.log.debug("No sets found for the look") - return - - # Specify texture processing executables to activate - # TODO: Load these more dynamically once we support more processors - processors = [] - context = instance.context - for key, Processor in { - # Instance data key to texture processor mapping - "maketx": MakeTX, - "rstex": MakeRSTexBin - }.items(): - if instance.data.get(key, False): - processor = Processor(log=self.log) - processor.apply_settings(context.data["project_settings"]) - processors.append(processor) - - if processors: - self.log.debug("Collected texture processors: " - "{}".format(processors)) - - self.log.debug("Processing resources..") - results = self.process_resources(instance, - staging_dir=dir_path, - processors=processors) - transfers = results["fileTransfers"] - hardlinks = results["fileHardlinks"] - hashes = results["fileHashes"] - remap = results["attrRemap"] - - # Extract in correct render layer - self.log.debug("Extracting look maya scene file: {}".format(maya_path)) - layer = instance.data.get("renderlayer", "defaultRenderLayer") - with lib.renderlayer(layer): - # TODO: Ensure membership edits don't become renderlayer overrides - with lib.empty_sets(sets, force=True): - # To avoid Maya trying to automatically remap the file - # textures relative to the `workspace -directory` we force - # it to a fake temporary workspace. This fixes textures - # getting incorrectly remapped. 
-                with no_workspace_dir():
-                    with lib.attribute_values(remap):
-                        with lib.maintained_selection():
-                            cmds.select(sets, noExpand=True)
-                            cmds.file(
-                                maya_path,
-                                force=True,
-                                typ=_scene_type,
-                                exportSelected=True,
-                                preserveReferences=False,
-                                channels=True,
-                                constraints=True,
-                                expressions=True,
-                                constructionHistory=True,
-                            )
-
-        # Write the JSON data
-        data = {
-            "attributes": lookdata["attributes"],
-            "relationships": relationships
-        }
-
-        self.log.debug("Extracting json file: {}".format(json_path))
-        with open(json_path, "w") as f:
-            json.dump(data, f)
-
-        if "files" not in instance.data:
-            instance.data["files"] = []
-        if "hardlinks" not in instance.data:
-            instance.data["hardlinks"] = []
-        if "transfers" not in instance.data:
-            instance.data["transfers"] = []
-
-        instance.data["files"].append(maya_fname)
-        instance.data["files"].append(json_fname)
-
-        if instance.data.get("representations") is None:
-            instance.data["representations"] = []
-
-        instance.data["representations"].append(
-            {
-                "name": self.scene_type,
-                "ext": self.scene_type,
-                "files": os.path.basename(maya_fname),
-                "stagingDir": os.path.dirname(maya_fname),
-            }
-        )
-        instance.data["representations"].append(
-            {
-                "name": self.look_data_type,
-                "ext": self.look_data_type,
-                "files": os.path.basename(json_fname),
-                "stagingDir": os.path.dirname(json_fname),
-            }
-        )
-
-        # Set up the resources transfers/links for the integrator
-        instance.data["transfers"].extend(transfers)
-        instance.data["hardlinks"].extend(hardlinks)
-
-        # Source hash for the textures
-        instance.data["sourceHashes"] = hashes
-
-        self.log.debug("Extracted instance '%s' to: %s" % (instance.name,
-                                                           maya_path))
-
-    def _set_resource_result_colorspace(self, resource, colorspace):
-        """Update resource resulting colorspace after texture processing"""
-        if "result_color_space" in resource:
-            if resource["result_color_space"] == colorspace:
-                return
-
-            self.log.warning(
-                "Resource already has a resulting colorspace but is now "
-                "being overridden to a new one: {} -> {}".format(
-                    resource["result_color_space"], colorspace
-                )
-            )
-        resource["result_color_space"] = colorspace
-
-    def process_resources(self, instance, staging_dir, processors):
-        """Process all resources in the instance.
-
-        It is assumed that all resources are nodes using file textures.
-
-        Extract the textures to transfer, possibly convert with maketx and
-        remap the node paths to the destination path. Note that a source
-        might be included more than once amongst the resources as they could
-        be the input file to multiple nodes.
-
-        """
-
-        resources = instance.data["resources"]
-        color_management = lib.get_color_management_preferences()
-
-        # TODO: Temporarily disable all hardlinking, as the feature is not
-        #   being used or working properly.
-        self.log.info(
-            "Forcing copy instead of hardlink."
-        )
-        force_copy = True
-
-        if not force_copy and platform.system().lower() == "windows":
-            # Temporary fix to NOT create hardlinks on windows machines
-            self.log.warning(
-                "Forcing copy instead of hardlink due to issues on Windows..."
-            )
-            force_copy = True
-
-        destinations_cache = {}
-
-        def get_resource_destination_cached(path):
-            """Get resource destination with cached result per filepath"""
-            if path not in destinations_cache:
-                destination = self.get_resource_destination(
-                    path, instance.data["resourcesDir"], processors)
-                destinations_cache[path] = destination
-            return destinations_cache[path]
-
-        # Process each resource's individual files
-        processed_files = {}
-        transfers = []
-        hardlinks = []
-        hashes = {}
-        remap = OrderedDict()
-        for resource in resources:
-            colorspace = resource["color_space"]
-
-            for filepath in resource["files"]:
-                filepath = os.path.normpath(filepath)
-
-                if filepath in processed_files:
-                    # The file was already processed, likely due to usage by
-                    # another resource in the scene. We confirm here it
-                    # didn't use a color space different than the current
-                    # resource.
-                    processed_file = processed_files[filepath]
-                    self.log.debug(
-                        "File was already processed. Likely used by another "
-                        "resource too: {}".format(filepath)
-                    )
-
-                    if colorspace != processed_file["color_space"]:
-                        self.log.warning(
                            "File '{}' was already processed using colorspace "
-                            "'{}' instead of the current resource's "
-                            "colorspace '{}'. The already processed texture "
-                            "result's colorspace '{}' will be used."
-                            "".format(filepath,
-                                      processed_file["color_space"],
-                                      colorspace,
-                                      processed_file["result_color_space"]))
-
-                    self._set_resource_result_colorspace(
-                        resource,
-                        colorspace=processed_file["result_color_space"]
-                    )
-                    continue
-
-                texture_result = self._process_texture(
-                    filepath,
-                    processors=processors,
-                    staging_dir=staging_dir,
-                    force_copy=force_copy,
-                    color_management=color_management,
-                    colorspace=colorspace
-                )
-
-                # Set the resulting color space on the resource
-                self._set_resource_result_colorspace(
-                    resource, colorspace=texture_result.colorspace
-                )
-
-                processed_files[filepath] = {
-                    "color_space": colorspace,
-                    "result_color_space": texture_result.colorspace,
-                }
-
-                source = texture_result.path
-                destination = get_resource_destination_cached(source)
-                if force_copy or texture_result.transfer_mode == COPY:
-                    transfers.append((source, destination))
-                    self.log.debug('file will be copied {} -> {}'.format(
-                        source, destination))
-                elif texture_result.transfer_mode == HARDLINK:
-                    hardlinks.append((source, destination))
-                    self.log.debug('file will be hardlinked {} -> {}'.format(
-                        source, destination))
-
-                # Store the mapping from hash to destination to include in
-                # the database
-                hashes[texture_result.file_hash] = destination
-
-            # Set up remapping attributes for the node during the publish
-            # The order of these can be important if one attribute directly
-            # affects another, e.g. we set colorspace after filepath because
-            # maya sometimes tries to guess the colorspace when changing
-            # filepaths (which is avoidable, but we don't want to have those
-            # attributes changed in the resulting publish)
-            # Remap filepath to publish destination
-            # TODO It would be much better if we could use the destination
-            #   path from the actual processed texture results, but since the
-            #   attribute will need to preserve tokens like <udim>, etc. for
-            #   now we will define the output path from the attribute value
-            #   including the tokens to persist them.
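-            # For example (hypothetical values): a file node attribute
-            #   "file1.fileTextureName" = "sourceimages/diffuse.<udim>.exr"
-            # is remapped to its publish destination, e.g.
-            #   "<resourcesDir>/diffuse.<udim>.tx" after a maketx conversion.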
- filepath_attr = resource["attribute"] - remap[filepath_attr] = get_resource_destination_cached( - resource["source"] - ) - - # Preserve color space values (force value after filepath change) - # This will also trigger in the same order at end of context to - # ensure after context it's still the original value. - node = resource["node"] - if cmds.attributeQuery("colorSpace", node=node, exists=True): - color_space_attr = "{}.colorSpace".format(node) - remap[color_space_attr] = resource["result_color_space"] - - self.log.debug("Finished remapping destinations ...") - - return { - "fileTransfers": transfers, - "fileHardlinks": hardlinks, - "fileHashes": hashes, - "attrRemap": remap, - } - - def get_resource_destination(self, filepath, resources_dir, processors): - """Get resource destination path. - - This is utility function to change path if resource file name is - changed by some external tool like `maketx`. - - Args: - filepath (str): Resource source path - resources_dir (str): Destination dir for resources in publish. - processors (list): Texture processors converting resource. - - Returns: - str: Path to resource file - - """ - # Compute destination location - basename, ext = os.path.splitext(os.path.basename(filepath)) - - # Get extension from the last processor - for processor in reversed(processors): - processor_ext = processor.extension - if processor_ext and ext != processor_ext: - self.log.debug("Processor {} overrides extension to '{}' " - "for path: {}".format(processor, - processor_ext, - filepath)) - ext = processor_ext - break - - return os.path.join( - resources_dir, basename + ext - ) - - def _get_existing_hashed_texture(self, texture_hash): - """Return the first found filepath from a texture hash""" - - # If source has been published before with the same settings, - # then don't reprocess but hardlink from the original - existing = find_paths_by_hash(texture_hash) - if existing: - source = next((p for p in existing if os.path.exists(p)), None) - if source: - return source - else: - self.log.warning( - "Paths not found on disk, " - "skipping hardlink: {}".format(existing) - ) - - def _process_texture(self, - filepath, - processors, - staging_dir, - force_copy, - color_management, - colorspace): - """Process a single texture file on disk for publishing. - - This will: - 1. Check whether it's already published, if so it will do hardlink - (if the texture hash is found and force copy is not enabled) - 2. It will process the texture using the supplied texture - processors like MakeTX and MakeRSTexBin if enabled. - 3. Compute the destination path for the source file. - - Args: - filepath (str): The source file path to process. - processors (list): List of TextureProcessor processing the texture - staging_dir (str): The staging directory to write to. - force_copy (bool): Whether to force a copy even if a file hash - might have existed already in the project, otherwise - hardlinking the existing file is allowed. - color_management (dict): Maya's Color Management settings from - `lib.get_color_management_preferences` - colorspace (str): The source colorspace of the resources this - texture belongs to. - - Returns: - TextureResult: The texture result information. - """ - - if len(processors) > 1: - raise KnownPublishError( - "More than one texture processor not supported. 
" - "Current processors enabled: {}".format(processors) - ) - - for processor in processors: - self.log.debug("Processing texture {} with processor {}".format( - filepath, processor - )) - - processed_result = processor.process(filepath, - colorspace, - color_management, - staging_dir) - if not processed_result: - raise RuntimeError("Texture Processor {} returned " - "no result.".format(processor)) - self.log.debug("Generated processed " - "texture: {}".format(processed_result.path)) - - # TODO: Currently all processors force copy instead of allowing - # hardlinks using source hashes. This should be refactored - return processed_result - - # No texture processing for this file - texture_hash = source_hash(filepath) - if not force_copy: - existing = self._get_existing_hashed_texture(filepath) - if existing: - self.log.debug("Found hash in database, preparing hardlink..") - return TextureResult( - path=filepath, - file_hash=texture_hash, - colorspace=colorspace, - transfer_mode=HARDLINK - ) - - return TextureResult( - path=filepath, - file_hash=texture_hash, - colorspace=colorspace, - transfer_mode=COPY - ) - - -class ExtractModelRenderSets(ExtractLook): - """Extract model render attribute sets as model metadata - - Only extracts the render attrib sets (NO shadingEngines) alongside - a .json file that stores it relationships for the sets and "attribute" - data for the instance members. - - """ - - label = "Model Render Sets" - hosts = ["maya"] - families = ["model"] - scene_type_prefix = "meta.render." - look_data_type = "meta.render.json" - - def get_maya_scene_type(self, instance): - typ = super(ExtractModelRenderSets, self).get_maya_scene_type(instance) - # add prefix - self.scene_type = self.scene_type_prefix + self.scene_type - - return typ diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_scene_raw.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_scene_raw.py deleted file mode 100644 index 047b7f6e6c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_scene_raw.py +++ /dev/null @@ -1,151 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract data as Maya scene (raw).""" -import os -import contextlib -from ayon_core.lib import BoolDef -from ayon_core.pipeline import AVALON_CONTAINER_ID, AYON_CONTAINER_ID -from ayon_core.pipeline.publish import AYONPyblishPluginMixin -from ayon_maya.api.lib import maintained_selection, shader -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractMayaSceneRaw(plugin.MayaExtractorPlugin, AYONPyblishPluginMixin): - """Extract as Maya Scene (raw). - - This will preserve all references, construction history, etc. - """ - - label = "Maya Scene (Raw)" - families = ["mayaAscii", - "mayaScene", - "setdress", - "layout", - "camerarig"] - scene_type = "ma" - - @classmethod - def get_attribute_defs(cls): - return [ - BoolDef( - "preserve_references", - label="Preserve References", - tooltip=( - "When enabled references will still be references " - "in the published file.\nWhen disabled the references " - "are imported into the published file generating a " - "file without references." 
- ), - default=True - ) - ] - - def process(self, instance): - """Plugin entry point.""" - maya_settings = instance.context.data["project_settings"]["maya"] - ext_mapping = { - item["name"]: item["value"] - for item in maya_settings["ext_mapping"] - } - if ext_mapping: - self.log.debug("Looking in settings for scene type ...") - # use extension mapping for first family found - for family in self.families: - try: - self.scene_type = ext_mapping[family] - self.log.debug( - "Using {} as scene type".format(self.scene_type)) - break - except KeyError: - # no preset found - pass - # Define extract output file path - dir_path = self.staging_dir(instance) - filename = "{0}.{1}".format(instance.name, self.scene_type) - path = os.path.join(dir_path, filename) - - # Whether to include all nodes in the instance (including those from - # history) or only use the exact set members - members_only = instance.data.get("exactSetMembersOnly", False) - if members_only: - members = instance.data.get("setMembers", list()) - if not members: - raise RuntimeError("Can't export 'exact set members only' " - "when set is empty.") - else: - members = instance[:] - - selection = members - if set(self.add_for_families).intersection( - set(instance.data.get("families", []))) or \ - instance.data.get("productType") in self.add_for_families: - selection += self._get_loaded_containers(members) - - # Perform extraction - self.log.debug("Performing extraction ...") - attribute_values = self.get_attr_values_from_data( - instance.data - ) - with maintained_selection(): - cmds.select(selection, noExpand=True) - with contextlib.ExitStack() as stack: - if not instance.data.get("shader", True): - # Fix bug where export without shader may import the geometry 'green' - # due to the lack of any shader on import. 
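-                    # Temporarily assign the default shading group so the
-                    # exported geometry is written with a valid shader
-                    # assignment.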
-                    stack.enter_context(
-                        shader(selection, shadingEngine="initialShadingGroup"))
-
-                cmds.file(path,
-                          force=True,
-                          typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary",  # noqa: E501
-                          exportSelected=True,
-                          preserveReferences=attribute_values["preserve_references"],  # noqa: E501
-                          constructionHistory=True,
-                          shader=instance.data.get("shader", True),
-                          constraints=True,
-                          expressions=True)
-
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            'name': self.scene_type,
-            'ext': self.scene_type,
-            'files': filename,
-            "stagingDir": dir_path
-        }
-        instance.data["representations"].append(representation)
-
-        self.log.debug("Extracted instance '%s' to: %s" % (instance.name,
-                                                           path))
-
-    @staticmethod
-    def _get_loaded_containers(members):
-        # type: (list) -> list
-        refs_to_include = {
-            cmds.referenceQuery(node, referenceNode=True)
-            for node in members
-            if cmds.referenceQuery(node, isNodeReferenced=True)
-        }
-
-        members_with_refs = refs_to_include.union(members)
-
-        obj_sets = cmds.ls("*.id", long=True, type="objectSet", recursive=True,
-                           objectsOnly=True)
-
-        loaded_containers = []
-        for obj_set in obj_sets:
-
-            if not cmds.attributeQuery("id", node=obj_set, exists=True):
-                continue
-
-            id_attr = "{}.id".format(obj_set)
-            if cmds.getAttr(id_attr) not in {
-                AYON_CONTAINER_ID, AVALON_CONTAINER_ID
-            }:
-                continue
-
-            set_content = set(cmds.sets(obj_set, query=True))
-            if set_content.intersection(members_with_refs):
-                loaded_containers.append(obj_set)
-
-        return loaded_containers
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_usd.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_usd.py
deleted file mode 100644
index d2bf98afbc..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_maya_usd.py
+++ /dev/null
@@ -1,291 +0,0 @@
-import contextlib
-import json
-import os
-
-import pyblish.api
-import six
-from ayon_maya.api.lib import maintained_selection
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-@contextlib.contextmanager
-def usd_export_attributes(nodes, attrs=None, attr_prefixes=None, mapping=None):
-    """Define attributes for the given nodes that should be exported.
-
-    MayaUSDExport will export custom attributes if the Maya node has a
-    string attribute `USD_UserExportedAttributesJson` that provides an
-    export mapping for the maya attributes. This context manager will try
-    to autogenerate such an attribute during the export to include attributes
-    for the export.
-
-    Arguments:
-        nodes (List[str]): Nodes to process.
-        attrs (Optional[List[str]]): Full name of attributes to include.
-        attr_prefixes (Optional[List[str]]): Prefixes of attributes to include.
-        mapping (Optional[Dict[Dict]]): A mapping per attribute name for the
-            conversion to a USD attribute, including renaming, defining type,
-            converting attribute precision, etc. This matches the usual
-            `USD_UserExportedAttributesJson` json mapping of `mayaUSDExport`.
-            When no mapping is provided for an attribute it will use `{}` as
-            value.
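-
-    The value written to `USD_UserExportedAttributesJson` is a JSON
-    dictionary mapping each Maya attribute name to its USD export options,
-    where an empty dict `{}` requests the default conversion.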
-
-    Examples:
-        >>> with usd_export_attributes(
-        ...     ["pCube1"],
-        ...     attrs=["myDoubleAttributeAsFloat"],
-        ...     mapping={
-        ...         "myDoubleAttributeAsFloat": {
-        ...             "usdAttrName": "my:namespace:attrib",
-        ...             "translateMayaDoubleToUsdSinglePrecision": True,
-        ...         }
-        ...     }):
-        ...     cmds.mayaUSDExport(file="output.usd", exportRoots=["pCube1"])
-
-    """
-    # todo: this might be better done with a custom export chaser
-    #   see `chaser` argument for `mayaUSDExport`
-
-    import maya.api.OpenMaya as om
-
-    if not attrs and not attr_prefixes:
-        # context manager does nothing
-        yield
-        return
-
-    if attrs is None:
-        attrs = []
-    if attr_prefixes is None:
-        attr_prefixes = []
-    if mapping is None:
-        mapping = {}
-
-    usd_json_attr = "USD_UserExportedAttributesJson"
-    strings = attrs + ["{}*".format(prefix) for prefix in attr_prefixes]
-    context_state = {}
-    for node in set(nodes):
-        node_attrs = cmds.listAttr(node, st=strings)
-        if not node_attrs:
-            # Nothing to do for this node
-            continue
-
-        node_attr_data = {}
-        for node_attr in set(node_attrs):
-            node_attr_data[node_attr] = mapping.get(node_attr, {})
-
-        if cmds.attributeQuery(usd_json_attr, node=node, exists=True):
-            existing_node_attr_value = cmds.getAttr(
-                "{}.{}".format(node, usd_json_attr)
-            )
-            if existing_node_attr_value and existing_node_attr_value != "{}":
-                # Any existing attribute mappings in an existing
-                # `USD_UserExportedAttributesJson` attribute always take
-                # precedence over what this function tries to imprint
-                existing_node_attr_data = json.loads(existing_node_attr_value)
-                node_attr_data.update(existing_node_attr_data)
-
-        context_state[node] = json.dumps(node_attr_data)
-
-    sel = om.MSelectionList()
-    dg_mod = om.MDGModifier()
-    fn_string = om.MFnStringData()
-    fn_typed = om.MFnTypedAttribute()
-    try:
-        for node, value in context_state.items():
-            data = fn_string.create(value)
-            sel.clear()
-            if cmds.attributeQuery(usd_json_attr, node=node, exists=True):
-                # Set the attribute value
-                sel.add("{}.{}".format(node, usd_json_attr))
-                plug = sel.getPlug(0)
-                dg_mod.newPlugValue(plug, data)
-            else:
-                # Create attribute with the value as default value
-                sel.add(node)
-                node_obj = sel.getDependNode(0)
-                attr_obj = fn_typed.create(usd_json_attr,
-                                           usd_json_attr,
-                                           om.MFnData.kString,
-                                           data)
-                dg_mod.addAttribute(node_obj, attr_obj)
-        dg_mod.doIt()
-        yield
-    finally:
-        dg_mod.undoIt()
-
-
-class ExtractMayaUsd(plugin.MayaExtractorPlugin):
-    """Extractor for Maya USD Asset data.
-
-    Upon publish a .usd (or .usdz) asset file will typically be written.
-    """
-
-    label = "Extract Maya USD Asset"
-    families = ["mayaUsd"]
-
-    @property
-    def options(self):
-        """Overridable options for Maya USD Export
-
-        Given in the following format
-            - {NAME: EXPECTED TYPE}
-
-        If the overridden option's type does not match,
-        the option is not included and a warning is logged.
- - """ - - # TODO: Support more `mayaUSDExport` parameters - return { - "defaultUSDFormat": str, - "stripNamespaces": bool, - "mergeTransformAndShape": bool, - "exportDisplayColor": bool, - "exportColorSets": bool, - "exportInstances": bool, - "exportUVs": bool, - "exportVisibility": bool, - "exportComponentTags": bool, - "exportRefsAsInstanceable": bool, - "eulerFilter": bool, - "renderableOnly": bool, - "jobContext": (list, None) # optional list - # "worldspace": bool, - } - - @property - def default_options(self): - """The default options for Maya USD Export.""" - - # TODO: Support more `mayaUSDExport` parameters - return { - "defaultUSDFormat": "usdc", - "stripNamespaces": False, - "mergeTransformAndShape": False, - "exportDisplayColor": False, - "exportColorSets": True, - "exportInstances": True, - "exportUVs": True, - "exportVisibility": True, - "exportComponentTags": True, - "exportRefsAsInstanceable": False, - "eulerFilter": True, - "renderableOnly": False, - "jobContext": None - # "worldspace": False - } - - def parse_overrides(self, instance, options): - """Inspect data of instance to determine overridden options""" - - for key in instance.data: - if key not in self.options: - continue - - # Ensure the data is of correct type - value = instance.data[key] - if isinstance(value, six.text_type): - value = str(value) - if not isinstance(value, self.options[key]): - self.log.warning( - "Overridden attribute {key} was of " - "the wrong type: {invalid_type} " - "- should have been {valid_type}".format( - key=key, - invalid_type=type(value).__name__, - valid_type=self.options[key].__name__)) - continue - - options[key] = value - - return options - - def filter_members(self, members): - # Can be overridden by inherited classes - return members - - def process(self, instance): - - # Load plugin first - cmds.loadPlugin("mayaUsdPlugin", quiet=True) - - # Define output file path - staging_dir = self.staging_dir(instance) - file_name = "{0}.usd".format(instance.name) - file_path = os.path.join(staging_dir, file_name) - file_path = file_path.replace('\\', '/') - - # Parse export options - options = self.default_options - options = self.parse_overrides(instance, options) - self.log.debug("Export options: {0}".format(options)) - - # Perform extraction - self.log.debug("Performing extraction ...") - - members = instance.data("setMembers") - self.log.debug('Collected objects: {}'.format(members)) - members = self.filter_members(members) - if not members: - self.log.error('No members!') - return - - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - def parse_attr_str(attr_str): - result = list() - for attr in attr_str.split(","): - attr = attr.strip() - if not attr: - continue - result.append(attr) - return result - - attrs = parse_attr_str(instance.data.get("attr", "")) - attrs += instance.data.get("userDefinedAttributes", []) - attrs += ["cbId"] - attr_prefixes = parse_attr_str(instance.data.get("attrPrefix", "")) - - self.log.debug('Exporting USD: {} / {}'.format(file_path, members)) - with maintained_selection(): - with usd_export_attributes(instance[:], - attrs=attrs, - attr_prefixes=attr_prefixes): - cmds.mayaUSDExport(file=file_path, - frameRange=(start, end), - frameStride=instance.data.get("step", 1.0), - exportRoots=members, - **options) - - representation = { - 'name': "usd", - 'ext': "usd", - 'files': file_name, - 'stagingDir': staging_dir - } - instance.data.setdefault("representations", []).append(representation) - - self.log.debug( - "Extracted 
instance {} to {}".format(instance.name, file_path) - ) - - -class ExtractMayaUsdAnim(ExtractMayaUsd): - """Extractor for Maya USD Animation Sparse Cache data. - - This will extract the sparse cache data from the scene and generate a - USD file with all the animation data. - - Upon publish a .usd sparse cache will be written. - """ - label = "Extract Maya USD Animation Sparse Cache" - families = ["animation", "mayaUsd"] - match = pyblish.api.Subset - - def filter_members(self, members): - out_set = next((i for i in members if i.endswith("out_SET")), None) - - if out_set is None: - self.log.warning("Expecting out_SET") - return None - - members = cmds.ls(cmds.sets(out_set, query=True), long=True) - return members diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_model.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_model.py deleted file mode 100644 index 7f257a2013..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_model.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract model as Maya Scene.""" -import os - -from ayon_core.pipeline import publish -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractModel(plugin.MayaExtractorPlugin, - publish.OptionalPyblishPluginMixin): - """Extract as Model (Maya Scene). - - Only extracts contents based on the original "setMembers" data to ensure - publishing the least amount of required shapes. From that it only takes - the shapes that are not intermediateObjects - - During export it sets a temporary context to perform a clean extraction. - The context ensures: - - Smooth preview is turned off for the geometry - - Default shader is assigned (no materials are exported) - - Remove display layers - - """ - - label = "Model (Maya Scene)" - families = ["model"] - scene_type = "ma" - optional = True - - def process(self, instance): - """Plugin entry point.""" - if not self.is_active(instance.data): - return - - maya_settings = instance.context.data["project_settings"]["maya"] - ext_mapping = { - item["name"]: item["value"] - for item in maya_settings["ext_mapping"] - } - if ext_mapping: - self.log.debug("Looking in settings for scene type ...") - # use extension mapping for first family found - for family in self.families: - try: - self.scene_type = ext_mapping[family] - self.log.debug( - "Using {} as scene type".format(self.scene_type)) - break - except KeyError: - # no preset found - pass - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}.{1}".format(instance.name, self.scene_type) - path = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction ...") - - # Get only the shape contents we need in such a way that we avoid - # taking along intermediateObjects - members = instance.data("setMembers") - members = cmds.ls(members, - dag=True, - shapes=True, - type=("mesh", "nurbsCurve"), - noIntermediate=True, - long=True) - - with lib.no_display_layers(instance): - with lib.displaySmoothness(members, - divisionsU=0, - divisionsV=0, - pointsWire=4, - pointsShaded=1, - polygonObject=1): - with lib.shader(members, - shadingEngine="initialShadingGroup"): - with lib.maintained_selection(): - cmds.select(members, noExpand=True) - cmds.file(path, - force=True, - typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 - exportSelected=True, - preserveReferences=False, - channels=False, - constraints=False, - expressions=False, - 
constructionHistory=False) - - # Store reference for integration - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s" % (instance.name, - path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_look.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_look.py deleted file mode 100644 index b6f8043a93..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_look.py +++ /dev/null @@ -1,156 +0,0 @@ -import os - -from maya import cmds - -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin - - -class ExtractMultiverseLook(plugin.MayaExtractorPlugin): - """Extractor for Multiverse USD look data. - - This will extract: - - - the shading networks that are assigned in MEOW as Maya material overrides - to a Multiverse Compound - - settings for a Multiverse Write Override operation. - - Relevant settings are visible in the Maya set node created by a Multiverse - USD Look instance creator. - - The input data contained in the set is: - - - a single Multiverse Compound node with any number of Maya material - overrides (typically set in MEOW) - - Upon publish two files will be written: - - - a .usda override file containing material assignment information - - a .ma file containing shading networks - - Note: when layering the material assignment override on a loaded Compound, - remember to set a matching attribute override with the namespace of - the loaded compound in order for the material assignment to resolve. - """ - - label = "Extract Multiverse USD Look" - families = ["mvLook"] - scene_type = "usda" - file_formats = ["usda", "usd"] - - @property - def options(self): - """Overridable options for Multiverse USD Export - - Given in the following format - - {NAME: EXPECTED TYPE} - - If the overridden option's type does not match, - the option is not included and a warning is logged. 
- - """ - - return { - "writeAll": bool, - "writeTransforms": bool, - "writeVisibility": bool, - "writeAttributes": bool, - "writeMaterials": bool, - "writeVariants": bool, - "writeVariantsDefinition": bool, - "writeActiveState": bool, - "writeNamespaces": bool, - "numTimeSamples": int, - "timeSamplesSpan": float - } - - @property - def default_options(self): - """The default options for Multiverse USD extraction.""" - - return { - "writeAll": False, - "writeTransforms": False, - "writeVisibility": False, - "writeAttributes": True, - "writeMaterials": True, - "writeVariants": False, - "writeVariantsDefinition": False, - "writeActiveState": False, - "writeNamespaces": True, - "numTimeSamples": 1, - "timeSamplesSpan": 0.0 - } - - def get_file_format(self, instance): - fileFormat = instance.data["fileFormat"] - if fileFormat in range(len(self.file_formats)): - self.scene_type = self.file_formats[fileFormat] - - def process(self, instance): - # Load plugin first - cmds.loadPlugin("MultiverseForMaya", quiet=True) - - # Define output file path - staging_dir = self.staging_dir(instance) - self.get_file_format(instance) - file_name = "{0}.{1}".format(instance.name, self.scene_type) - file_path = os.path.join(staging_dir, file_name) - file_path = file_path.replace('\\', '/') - - # Parse export options - options = self.default_options - self.log.debug("Export options: {0}".format(options)) - - # Perform extraction - self.log.debug("Performing extraction ...") - - with maintained_selection(): - members = instance.data("setMembers") - members = cmds.ls(members, - dag=True, - shapes=False, - type="mvUsdCompoundShape", - noIntermediate=True, - long=True) - self.log.debug('Collected object {}'.format(members)) - if len(members) > 1: - self.log.error('More than one member: {}'.format(members)) - - import multiverse - - over_write_opts = multiverse.OverridesWriteOptions() - options_discard_keys = { - "numTimeSamples", - "timeSamplesSpan", - "frameStart", - "frameEnd", - "handleStart", - "handleEnd", - "step", - "fps" - } - for key, value in options.items(): - if key in options_discard_keys: - continue - setattr(over_write_opts, key, value) - - for member in members: - # @TODO: Make sure there is only one here. - - self.log.debug("Writing Override for '{}'".format(member)) - multiverse.WriteOverrides(file_path, member, over_write_opts) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': file_name, - 'stagingDir': staging_dir - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance {} to {}".format( - instance.name, file_path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd.py deleted file mode 100644 index 477af9dc26..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd.py +++ /dev/null @@ -1,273 +0,0 @@ -import os - -import pyblish.api -import six -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin -from maya import cmds, mel - - -class ExtractMultiverseUsd(plugin.MayaExtractorPlugin): - """Extractor for Multiverse USD Asset data. - - This will extract settings for a Multiverse Write Asset operation: - they are visible in the Maya set node created by a Multiverse USD - Asset instance creator. 
- - The input data contained in the set is: - - - a single hierarchy of Maya nodes. Multiverse supports a variety of Maya - nodes such as transforms, mesh, curves, particles, instances, particle - instancers, pfx, MASH, lights, cameras, joints, connected materials, - shading networks etc. including many of their attributes. - - Upon publish a .usd (or .usdz) asset file will be typically written. - """ - - label = "Extract Multiverse USD Asset" - families = ["mvUsd"] - scene_type = "usd" - file_formats = ["usd", "usda", "usdz"] - - @property - def options(self): - """Overridable options for Multiverse USD Export - - Given in the following format - - {NAME: EXPECTED TYPE} - - If the overridden option's type does not match, - the option is not included and a warning is logged. - - """ - - return { - "stripNamespaces": bool, - "mergeTransformAndShape": bool, - "writeAncestors": bool, - "flattenParentXforms": bool, - "writeSparseOverrides": bool, - "useMetaPrimPath": bool, - "customRootPath": str, - "customAttributes": str, - "nodeTypesToIgnore": str, - "writeMeshes": bool, - "writeCurves": bool, - "writeParticles": bool, - "writeCameras": bool, - "writeLights": bool, - "writeJoints": bool, - "writeCollections": bool, - "writePositions": bool, - "writeNormals": bool, - "writeUVs": bool, - "writeColorSets": bool, - "writeTangents": bool, - "writeRefPositions": bool, - "writeBlendShapes": bool, - "writeDisplayColor": bool, - "writeSkinWeights": bool, - "writeMaterialAssignment": bool, - "writeHardwareShader": bool, - "writeShadingNetworks": bool, - "writeTransformMatrix": bool, - "writeUsdAttributes": bool, - "writeInstancesAsReferences": bool, - "timeVaryingTopology": bool, - "customMaterialNamespace": str, - "numTimeSamples": int, - "timeSamplesSpan": float - } - - @property - def default_options(self): - """The default options for Multiverse USD extraction.""" - - return { - "stripNamespaces": False, - "mergeTransformAndShape": False, - "writeAncestors": False, - "flattenParentXforms": False, - "writeSparseOverrides": False, - "useMetaPrimPath": False, - "customRootPath": str(), - "customAttributes": str(), - "nodeTypesToIgnore": str(), - "writeMeshes": True, - "writeCurves": True, - "writeParticles": True, - "writeCameras": False, - "writeLights": False, - "writeJoints": False, - "writeCollections": False, - "writePositions": True, - "writeNormals": True, - "writeUVs": True, - "writeColorSets": False, - "writeTangents": False, - "writeRefPositions": False, - "writeBlendShapes": False, - "writeDisplayColor": False, - "writeSkinWeights": False, - "writeMaterialAssignment": False, - "writeHardwareShader": False, - "writeShadingNetworks": False, - "writeTransformMatrix": True, - "writeUsdAttributes": False, - "writeInstancesAsReferences": False, - "timeVaryingTopology": False, - "customMaterialNamespace": str(), - "numTimeSamples": 1, - "timeSamplesSpan": 0.0 - } - - def parse_overrides(self, instance, options): - """Inspect data of instance to determine overridden options""" - - for key in instance.data: - if key not in self.options: - continue - - # Ensure the data is of correct type - value = instance.data[key] - if isinstance(value, six.text_type): - value = str(value) - if not isinstance(value, self.options[key]): - self.log.warning( - "Overridden attribute {key} was of " - "the wrong type: {invalid_type} " - "- should have been {valid_type}".format( - key=key, - invalid_type=type(value).__name__, - valid_type=self.options[key].__name__)) - continue - - options[key] = value - - return 
options - - def get_default_options(self): - return self.default_options - - def filter_members(self, members): - return members - - def process(self, instance): - - # Load plugin first - cmds.loadPlugin("MultiverseForMaya", quiet=True) - - # Define output file path - staging_dir = self.staging_dir(instance) - file_format = instance.data.get("fileFormat", 0) - if file_format in range(len(self.file_formats)): - self.scene_type = self.file_formats[file_format] - file_name = "{0}.{1}".format(instance.name, self.scene_type) - file_path = os.path.join(staging_dir, file_name) - file_path = file_path.replace('\\', '/') - - # Parse export options - options = self.get_default_options() - options = self.parse_overrides(instance, options) - self.log.debug("Export options: {0}".format(options)) - - # Perform extraction - self.log.debug("Performing extraction ...") - - with maintained_selection(): - members = instance.data("setMembers") - self.log.debug('Collected objects: {}'.format(members)) - members = self.filter_members(members) - if not members: - self.log.error('No members!') - return - self.log.debug(' - filtered: {}'.format(members)) - - import multiverse - - time_opts = None - frame_start = instance.data['frameStart'] - frame_end = instance.data['frameEnd'] - if frame_end != frame_start: - time_opts = multiverse.TimeOptions() - - time_opts.writeTimeRange = True - - handle_start = instance.data['handleStart'] - handle_end = instance.data['handleEnd'] - - time_opts.frameRange = ( - frame_start - handle_start, frame_end + handle_end) - time_opts.frameIncrement = instance.data['step'] - time_opts.numTimeSamples = instance.data.get( - 'numTimeSamples', options['numTimeSamples']) - time_opts.timeSamplesSpan = instance.data.get( - 'timeSamplesSpan', options['timeSamplesSpan']) - time_opts.framePerSecond = instance.data.get( - 'fps', mel.eval('currentTimeUnitToFPS()')) - - asset_write_opts = multiverse.AssetWriteOptions(time_opts) - options_discard_keys = { - 'numTimeSamples', - 'timeSamplesSpan', - 'frameStart', - 'frameEnd', - 'handleStart', - 'handleEnd', - 'step', - 'fps' - } - self.log.debug("Write Options:") - for key, value in options.items(): - if key in options_discard_keys: - continue - - self.log.debug(" - {}={}".format(key, value)) - setattr(asset_write_opts, key, value) - - self.log.debug('WriteAsset: {} / {}'.format(file_path, members)) - multiverse.WriteAsset(file_path, members, asset_write_opts) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': file_name, - 'stagingDir': staging_dir - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance {} to {}".format( - instance.name, file_path)) - - -class ExtractMultiverseUsdAnim(ExtractMultiverseUsd): - """Extractor for Multiverse USD Animation Sparse Cache data. - - This will extract the sparse cache data from the scene and generate a - USD file with all the animation data. - - Upon publish a .usd sparse cache will be written. 
- """ - label = "Extract Multiverse USD Animation Sparse Cache" - families = ["animation", "usd"] - match = pyblish.api.Subset - - def get_default_options(self): - anim_options = self.default_options - anim_options["writeSparseOverrides"] = True - anim_options["writeUsdAttributes"] = True - anim_options["stripNamespaces"] = True - return anim_options - - def filter_members(self, members): - out_set = next((i for i in members if i.endswith("out_SET")), None) - - if out_set is None: - self.log.warning("Expecting out_SET") - return None - - members = cmds.ls(cmds.sets(out_set, query=True), long=True) - return members diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_comp.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_comp.py deleted file mode 100644 index 3d18bb80e1..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_comp.py +++ /dev/null @@ -1,177 +0,0 @@ -import os - -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractMultiverseUsdComposition(plugin.MayaExtractorPlugin): - """Extractor of Multiverse USD Composition data. - - This will extract settings for a Multiverse Write Composition operation: - they are visible in the Maya set node created by a Multiverse USD - Composition instance creator. - - The input data contained in the set is either: - - - a single hierarchy consisting of several Multiverse Compound nodes, with - any number of layers, and Maya transform nodes - - a single Compound node with more than one layer (in this case the "Write - as Compound Layers" option should be set). - - Upon publish a .usda composition file will be written. - """ - - label = "Extract Multiverse USD Composition" - families = ["mvUsdComposition"] - scene_type = "usd" - # Order of `fileFormat` must match create_multiverse_usd_comp.py - file_formats = ["usda", "usd"] - - @property - def options(self): - """Overridable options for Multiverse USD Export - - Given in the following format - - {NAME: EXPECTED TYPE} - - If the overridden option's type does not match, - the option is not included and a warning is logged. 
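-
-        For example (hypothetical values an artist could place in the
-        instance data; the keys mirror the mapping above):
-
-            instance.data["stripNamespaces"] = False  # bool matches, used
-            instance.data["numTimeSamples"] = "3"     # str is not int, skipped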
-
-        """
-
-        return {
-            "stripNamespaces": bool,
-            "mergeTransformAndShape": bool,
-            "flattenContent": bool,
-            "writeAsCompoundLayers": bool,
-            "writePendingOverrides": bool,
-            "numTimeSamples": int,
-            "timeSamplesSpan": float
-        }
-
-    @property
-    def default_options(self):
-        """The default options for Multiverse USD extraction."""
-
-        return {
-            "stripNamespaces": True,
-            "mergeTransformAndShape": False,
-            "flattenContent": False,
-            "writeAsCompoundLayers": False,
-            "writePendingOverrides": False,
-            "numTimeSamples": 1,
-            "timeSamplesSpan": 0.0
-        }
-
-    def parse_overrides(self, instance, options):
-        """Inspect data of instance to determine overridden options"""
-
-        for key in instance.data:
-            if key not in self.options:
-                continue
-
-            # Ensure the data is of correct type
-            value = instance.data[key]
-            if not isinstance(value, self.options[key]):
-                self.log.warning(
-                    "Overridden attribute {key} was of "
-                    "the wrong type: {invalid_type} "
-                    "- should have been {valid_type}".format(
-                        key=key,
-                        invalid_type=type(value).__name__,
-                        valid_type=self.options[key].__name__))
-                continue
-
-            options[key] = value
-
-        return options
-
-    def process(self, instance):
-        # Load plugin first
-        cmds.loadPlugin("MultiverseForMaya", quiet=True)
-
-        # Define output file path
-        staging_dir = self.staging_dir(instance)
-        file_format = instance.data.get("fileFormat", 0)
-        if file_format in range(len(self.file_formats)):
-            self.scene_type = self.file_formats[file_format]
-        file_name = "{0}.{1}".format(instance.name, self.scene_type)
-        file_path = os.path.join(staging_dir, file_name)
-        file_path = file_path.replace('\\', '/')
-
-        # Parse export options
-        options = self.default_options
-        options = self.parse_overrides(instance, options)
-        self.log.debug("Export options: {0}".format(options))
-
-        # Perform extraction
-        self.log.debug("Performing extraction ...")
-
-        with maintained_selection():
-            members = instance.data("setMembers")
-            self.log.debug('Collected objects: {}'.format(members))
-
-            import multiverse
-
-            time_opts = None
-            frame_start = instance.data['frameStart']
-            frame_end = instance.data['frameEnd']
-            handle_start = instance.data['handleStart']
-            handle_end = instance.data['handleEnd']
-            step = instance.data['step']
-            fps = instance.data['fps']
-            if frame_end != frame_start:
-                time_opts = multiverse.TimeOptions()
-
-                time_opts.writeTimeRange = True
-                time_opts.frameRange = (
-                    frame_start - handle_start, frame_end + handle_end)
-                time_opts.frameIncrement = step
-                time_opts.numTimeSamples = instance.data["numTimeSamples"]
-                time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"]
-                time_opts.framePerSecond = fps
-
-            comp_write_opts = multiverse.CompositionWriteOptions()
-
-            """
-            OP tells MV to write to a staging directory, and then moves the
-            file to its final publish directory. By default, MV writes relative
-            paths, but these paths will break when the referencing file moves.
-            This option forces writes to absolute paths, which is ok within OP
-            because all published assets have static paths, and MV can only
-            reference published assets. When a proper UsdAssetResolver is used,
-            this won't be needed.
- """ - comp_write_opts.forceAbsolutePaths = True - - options_discard_keys = { - 'numTimeSamples', - 'timeSamplesSpan', - 'frameStart', - 'frameEnd', - 'handleStart', - 'handleEnd', - 'step', - 'fps' - } - for key, value in options.items(): - if key in options_discard_keys: - continue - setattr(comp_write_opts, key, value) - - multiverse.WriteComposition(file_path, members, comp_write_opts) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': file_name, - 'stagingDir': staging_dir - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance {} to {}".format(instance.name, - file_path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_over.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_over.py deleted file mode 100644 index a67f5c0a99..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_multiverse_usd_over.py +++ /dev/null @@ -1,155 +0,0 @@ -import os - -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractMultiverseUsdOverride(plugin.MayaExtractorPlugin): - """Extractor for Multiverse USD Override data. - - This will extract settings for a Multiverse Write Override operation: - they are visible in the Maya set node created by a Multiverse USD - Override instance creator. - - The input data contained in the set is: - - - a single Multiverse Compound node with any number of overrides (typically - set in MEOW) - - Upon publish a .usda override file will be written. - """ - - label = "Extract Multiverse USD Override" - families = ["mvUsdOverride"] - scene_type = "usd" - # Order of `fileFormat` must match create_multiverse_usd_over.py - file_formats = ["usda", "usd"] - - @property - def options(self): - """Overridable options for Multiverse USD Export - - Given in the following format - - {NAME: EXPECTED TYPE} - - If the overridden option's type does not match, - the option is not included and a warning is logged. 
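-
-        (Note: unlike the asset and composition extractors, process() below
-        applies default_options directly and never calls parse_overrides,
-        so these type hints are informational only.)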
- - """ - - return { - "writeAll": bool, - "writeTransforms": bool, - "writeVisibility": bool, - "writeAttributes": bool, - "writeMaterials": bool, - "writeVariants": bool, - "writeVariantsDefinition": bool, - "writeActiveState": bool, - "writeNamespaces": bool, - "numTimeSamples": int, - "timeSamplesSpan": float - } - - @property - def default_options(self): - """The default options for Multiverse USD extraction.""" - - return { - "writeAll": False, - "writeTransforms": True, - "writeVisibility": True, - "writeAttributes": True, - "writeMaterials": True, - "writeVariants": True, - "writeVariantsDefinition": True, - "writeActiveState": True, - "writeNamespaces": False, - "numTimeSamples": 1, - "timeSamplesSpan": 0.0 - } - - def process(self, instance): - # Load plugin first - cmds.loadPlugin("MultiverseForMaya", quiet=True) - - # Define output file path - staging_dir = self.staging_dir(instance) - file_format = instance.data.get("fileFormat", 0) - if file_format in range(len(self.file_formats)): - self.scene_type = self.file_formats[file_format] - file_name = "{0}.{1}".format(instance.name, self.scene_type) - file_path = os.path.join(staging_dir, file_name) - file_path = file_path.replace("\\", "/") - - # Parse export options - options = self.default_options - self.log.debug("Export options: {0}".format(options)) - - # Perform extraction - self.log.debug("Performing extraction ...") - - with maintained_selection(): - members = instance.data("setMembers") - members = cmds.ls(members, - dag=True, - shapes=False, - type="mvUsdCompoundShape", - noIntermediate=True, - long=True) - self.log.debug("Collected object {}".format(members)) - - # TODO: Deal with asset, composition, override with options. - import multiverse - - time_opts = None - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - handle_start = instance.data["handleStart"] - handle_end = instance.data["handleEnd"] - step = instance.data["step"] - fps = instance.data["fps"] - if frame_end != frame_start: - time_opts = multiverse.TimeOptions() - - time_opts.writeTimeRange = True - time_opts.frameRange = ( - frame_start - handle_start, frame_end + handle_end) - time_opts.frameIncrement = step - time_opts.numTimeSamples = instance.data["numTimeSamples"] - time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"] - time_opts.framePerSecond = fps - - over_write_opts = multiverse.OverridesWriteOptions(time_opts) - options_discard_keys = { - "numTimeSamples", - "timeSamplesSpan", - "frameStart", - "frameEnd", - "handleStart", - "handleEnd", - "step", - "fps" - } - for key, value in options.items(): - if key in options_discard_keys: - continue - setattr(over_write_opts, key, value) - - for member in members: - multiverse.WriteOverrides(file_path, member, over_write_opts) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': file_name, - 'stagingDir': staging_dir - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance {} to {}".format( - instance.name, file_path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_obj.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_obj.py deleted file mode 100644 index baf86b581e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_obj.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import pyblish.api -from ayon_maya.api import 
lib
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ExtractObj(plugin.MayaExtractorPlugin):
-    """Extract OBJ from Maya.
-
-    This extracts reproducible OBJ exports ignoring any of the settings
-    set on the local machine in the OBJ export options window.
-
-    """
-    order = pyblish.api.ExtractorOrder
-    label = "Extract OBJ"
-    families = ["model"]
-
-    def process(self, instance):
-
-        # Define output path
-
-        staging_dir = self.staging_dir(instance)
-        filename = "{0}.obj".format(instance.name)
-        path = os.path.join(staging_dir, filename)
-
-        # The export requires forward slashes because we need to
-        # format it into a string in a mel expression
-
-        self.log.debug("Extracting OBJ to: {0}".format(path))
-
-        members = instance.data("setMembers")
-        members = cmds.ls(members,
-                          dag=True,
-                          shapes=True,
-                          type=("mesh", "nurbsCurve"),
-                          noIntermediate=True,
-                          long=True)
-        self.log.debug("Members: {0}".format(members))
-        self.log.debug("Instance: {0}".format(instance[:]))
-
-        if not cmds.pluginInfo('objExport', query=True, loaded=True):
-            cmds.loadPlugin('objExport')
-
-        # Export
-        with lib.no_display_layers(instance):
-            with lib.displaySmoothness(members,
-                                       divisionsU=0,
-                                       divisionsV=0,
-                                       pointsWire=4,
-                                       pointsShaded=1,
-                                       polygonObject=1):
-                with lib.shader(members,
-                                shadingEngine="initialShadingGroup"):
-                    with lib.maintained_selection():
-                        cmds.select(members, noExpand=True)
-                        cmds.file(path,
-                                  exportSelected=True,
-                                  type='OBJexport',
-                                  preserveReferences=True,
-                                  force=True)
-
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            'name': 'obj',
-            'ext': 'obj',
-            'files': filename,
-            "stagingDir": staging_dir,
-        }
-        instance.data["representations"].append(representation)
-
-        self.log.debug("Extract OBJ successful to: {0}".format(path))
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_playblast.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_playblast.py
deleted file mode 100644
index 539246eef0..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_playblast.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import os
-
-import clique
-from ayon_maya.api import lib
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ExtractPlayblast(plugin.MayaExtractorPlugin):
-    """Extract viewport playblast.
-
-    Takes the review camera and creates a review Quicktime video based on
-    viewport capture.
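-
-    If the instance carries no explicit Ftrack frame data, the frame range
-    falls back to the Maya timeline (playbackOptions).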
- - """ - - label = "Extract Playblast" - families = ["review"] - optional = True - capture_preset = {} - profiles = None - - def process(self, instance): - self.log.debug("Extracting playblast..") - - # get scene fps - fps = instance.data.get("fps") or instance.context.data.get("fps") - - # if start and end frames cannot be determined, get them - # from Maya timeline - start = instance.data.get("frameStartFtrack") - end = instance.data.get("frameEndFtrack") - if start is None: - start = cmds.playbackOptions(query=True, animationStartTime=True) - if end is None: - end = cmds.playbackOptions(query=True, animationEndTime=True) - - self.log.debug("start: {}, end: {}".format(start, end)) - task_data = instance.data["anatomyData"].get("task", {}) - capture_preset = lib.get_capture_preset( - task_data.get("name"), - task_data.get("type"), - instance.data["productName"], - instance.context.data["project_settings"], - self.log - ) - stagingdir = self.staging_dir(instance) - filename = instance.name - path = os.path.join(stagingdir, filename) - self.log.debug("Outputting images to %s" % path) - # get cameras - camera = instance.data["review_camera"] - preset = lib.generate_capture_preset( - instance, camera, path, - start=start, end=end, - capture_preset=capture_preset) - lib.render_capture_preset(preset) - - # Find playblast sequence - collected_files = os.listdir(stagingdir) - patterns = [clique.PATTERNS["frames"]] - collections, remainder = clique.assemble(collected_files, - minimum_items=1, - patterns=patterns) - - self.log.debug("Searching playblast collection for: %s", path) - frame_collection = None - for collection in collections: - filebase = collection.format("{head}").rstrip(".") - self.log.debug("Checking collection head: %s", filebase) - if filebase in path: - frame_collection = collection - self.log.debug( - "Found playblast collection: %s", frame_collection - ) - - tags = ["review"] - if not instance.data.get("keepImages"): - tags.append("delete") - - # Add camera node name to representation data - camera_node_name = cmds.listRelatives(camera, parent=True)[0] - - collected_files = list(frame_collection) - # single frame file shouldn't be in list, only as a string - if len(collected_files) == 1: - collected_files = collected_files[0] - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": capture_preset["Codec"]["compression"], - "ext": capture_preset["Codec"]["compression"], - "files": collected_files, - "stagingDir": stagingdir, - "frameStart": int(start), - "frameEnd": int(end), - "fps": fps, - "tags": tags, - "camera_name": camera_node_name - } - instance.data["representations"].append(representation) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_pointcache.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_pointcache.py deleted file mode 100644 index d3e9d89aaf..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_pointcache.py +++ /dev/null @@ -1,524 +0,0 @@ -import os -from collections import OrderedDict - -from ayon_core.lib import ( - BoolDef, - EnumDef, - NumberDef, - TextDef, - UILabelDef, - UISeparatorDef, -) -from ayon_core.pipeline import KnownPublishError -from ayon_core.pipeline.publish import AYONPyblishPluginMixin -from ayon_maya.api.alembic import extract_alembic -from ayon_maya.api.lib import ( - get_all_children, - iter_visible_nodes_in_range, - maintained_selection, - suspended_refresh, -) -from ayon_maya.api import plugin -from maya 
import cmds - - -class ExtractAlembic(plugin.MayaExtractorPlugin, AYONPyblishPluginMixin): - """Produce an alembic of just point positions and normals. - - Positions and normals, uvs, creases are preserved, but nothing more, - for plain and predictable point caches. - - Plugin can run locally or remotely (on a farm - if instance is marked with - "farm" it will be skipped in local processing, but processed on farm) - """ - - label = "Extract Pointcache (Alembic)" - hosts = ["maya"] - families = ["pointcache", "model", "vrayproxy.alembic"] - targets = ["local", "remote"] - - # From settings - attr = [] - attrPrefix = [] - bake_attributes = [] - bake_attribute_prefixes = [] - dataFormat = "ogawa" - eulerFilter = False - melPerFrameCallback = "" - melPostJobCallback = "" - overrides = [] - preRoll = False - preRollStartFrame = 0 - pythonPerFrameCallback = "" - pythonPostJobCallback = "" - renderableOnly = False - stripNamespaces = True - uvsOnly = False - uvWrite = False - userAttr = "" - userAttrPrefix = "" - verbose = False - visibleOnly = False - wholeFrameGeo = False - worldSpace = True - writeColorSets = False - writeCreases = False - writeFaceSets = False - writeNormals = True - writeUVSets = False - writeVisibility = False - - def process(self, instance): - if instance.data.get("farm"): - self.log.debug("Should be processed on farm, skipping.") - return - - nodes, roots = self.get_members_and_roots(instance) - - # Collect the start and end including handles - start = float(instance.data.get("frameStartHandle", 1)) - end = float(instance.data.get("frameEndHandle", 1)) - - attribute_values = self.get_attr_values_from_data( - instance.data - ) - - attrs = [ - attr.strip() - for attr in attribute_values.get("attr", "").split(";") - if attr.strip() - ] - attrs += instance.data.get("userDefinedAttributes", []) - attrs += self.bake_attributes - attrs += ["cbId"] - - attr_prefixes = [ - attr.strip() - for attr in attribute_values.get("attrPrefix", "").split(";") - if attr.strip() - ] - attr_prefixes += self.bake_attribute_prefixes - - user_attrs = [ - attr.strip() - for attr in attribute_values.get("userAttr", "").split(";") - if attr.strip() - ] - - user_attr_prefixes = [ - attr.strip() - for attr in attribute_values.get("userAttrPrefix", "").split(";") - if attr.strip() - ] - - self.log.debug("Extracting pointcache..") - dirname = self.staging_dir(instance) - - parent_dir = self.staging_dir(instance) - filename = "{name}.abc".format(**instance.data) - path = os.path.join(parent_dir, filename) - - root = None - if not instance.data.get("includeParentHierarchy", True): - # Set the root nodes if we don't want to include parents - # The roots are to be considered the ones that are the actual - # direct members of the set - root = roots - - kwargs = { - "file": path, - "attr": attrs, - "attrPrefix": attr_prefixes, - "userAttr": user_attrs, - "userAttrPrefix": user_attr_prefixes, - "dataFormat": attribute_values.get("dataFormat", self.dataFormat), - "endFrame": end, - "eulerFilter": attribute_values.get( - "eulerFilter", self.eulerFilter - ), - "preRoll": attribute_values.get("preRoll", self.preRoll), - "preRollStartFrame": attribute_values.get( - "preRollStartFrame", self.preRollStartFrame - ), - "renderableOnly": attribute_values.get( - "renderableOnly", self.renderableOnly - ), - "root": root, - "selection": True, - "startFrame": start, - "step": instance.data.get( - "creator_attributes", {} - ).get("step", 1.0), - "stripNamespaces": attribute_values.get( - "stripNamespaces", 
self.stripNamespaces
-            ),
-            "uvWrite": attribute_values.get("uvWrite", self.uvWrite),
-            "verbose": attribute_values.get("verbose", self.verbose),
-            "wholeFrameGeo": attribute_values.get(
-                "wholeFrameGeo", self.wholeFrameGeo
-            ),
-            "worldSpace": attribute_values.get("worldSpace", self.worldSpace),
-            "writeColorSets": attribute_values.get(
-                "writeColorSets", self.writeColorSets
-            ),
-            "writeCreases": attribute_values.get(
-                "writeCreases", self.writeCreases
-            ),
-            "writeFaceSets": attribute_values.get(
-                "writeFaceSets", self.writeFaceSets
-            ),
-            "writeUVSets": attribute_values.get(
-                "writeUVSets", self.writeUVSets
-            ),
-            "writeVisibility": attribute_values.get(
-                "writeVisibility", self.writeVisibility
-            ),
-            "uvsOnly": attribute_values.get(
-                "uvsOnly", self.uvsOnly
-            ),
-            "melPerFrameCallback": attribute_values.get(
-                "melPerFrameCallback", self.melPerFrameCallback
-            ),
-            "melPostJobCallback": attribute_values.get(
-                "melPostJobCallback", self.melPostJobCallback
-            ),
-            "pythonPerFrameCallback": attribute_values.get(
-                "pythonPerFrameCallback", self.pythonPerFrameCallback
-            ),
-            "pythonPostJobCallback": attribute_values.get(
-                "pythonPostJobCallback", self.pythonPostJobCallback
-            ),
-            # Note that this converts `writeNormals` to `noNormals` for the
-            # `AbcExport` equivalent in `extract_alembic`
-            "noNormals": not attribute_values.get(
-                "writeNormals", self.writeNormals
-            ),
-        }
-
-        if instance.data.get("visibleOnly", False):
-            # If we only want to include nodes that are visible in the frame
-            # range then we need to do our own check. Alembic's `visibleOnly`
-            # flag does not filter out those that are only hidden on some
-            # frames as it counts "animated" or "connected" visibilities as
-            # if it's always visible.
-            nodes = list(
-                iter_visible_nodes_in_range(nodes, start=start, end=end)
-            )
-
-        suspend = not instance.data.get("refresh", False)
-        with suspended_refresh(suspend=suspend):
-            with maintained_selection():
-                cmds.select(nodes, noExpand=True)
-                self.log.debug(
-                    "Running `extract_alembic` with the keyword arguments: "
-                    "{}".format(kwargs)
-                )
-                extract_alembic(**kwargs)
-
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            "name": "abc",
-            "ext": "abc",
-            "files": filename,
-            "stagingDir": dirname
-        }
-        instance.data["representations"].append(representation)
-
-        if not instance.data.get("stagingDir_persistent", False):
-            instance.context.data["cleanupFullPaths"].append(path)
-
-        self.log.debug("Extracted {} to {}".format(instance, dirname))
-
-        # Extract proxy.
-        if not instance.data.get("proxy"):
-            self.log.debug("No proxy nodes found. 
Skipping proxy extraction.") - return - - path = path.replace(".abc", "_proxy.abc") - kwargs["file"] = path - if not instance.data.get("includeParentHierarchy", True): - # Set the root nodes if we don't want to include parents - # The roots are to be considered the ones that are the actual - # direct members of the set - kwargs["root"] = instance.data["proxyRoots"] - - with suspended_refresh(suspend=suspend): - with maintained_selection(): - cmds.select(instance.data["proxy"]) - extract_alembic(**kwargs) - representation = { - "name": "proxy", - "ext": "abc", - "files": os.path.basename(path), - "stagingDir": dirname, - "outputName": "proxy" - } - instance.data["representations"].append(representation) - - def get_members_and_roots(self, instance): - return instance[:], instance.data.get("setMembers") - - @classmethod - def get_attribute_defs(cls): - if not cls.overrides: - return [] - - override_defs = OrderedDict({ - "eulerFilter": BoolDef( - "eulerFilter", - label="Euler Filter", - default=cls.eulerFilter, - tooltip="Apply Euler filter while sampling rotations." - ), - "renderableOnly": BoolDef( - "renderableOnly", - label="Renderable Only", - default=cls.renderableOnly, - tooltip="Only export renderable visible shapes." - ), - "stripNamespaces": BoolDef( - "stripNamespaces", - label="Strip Namespaces", - default=cls.stripNamespaces, - tooltip=( - "Namespaces will be stripped off of the node before being " - "written to Alembic." - ) - ), - "uvsOnly": BoolDef( - "uvsOnly", - label="UVs Only", - default=cls.uvsOnly, - tooltip=( - "If this flag is present, only uv data for PolyMesh and " - "SubD shapes will be written to the Alembic file." - ) - ), - "uvWrite": BoolDef( - "uvWrite", - label="UV Write", - default=cls.uvWrite, - tooltip=( - "Uv data for PolyMesh and SubD shapes will be written to " - "the Alembic file." - ) - ), - "verbose": BoolDef( - "verbose", - label="Verbose", - default=cls.verbose, - tooltip="Prints the current frame that is being evaluated." - ), - "visibleOnly": BoolDef( - "visibleOnly", - label="Visible Only", - default=cls.visibleOnly, - tooltip="Only export dag objects visible during frame range." - ), - "wholeFrameGeo": BoolDef( - "wholeFrameGeo", - label="Whole Frame Geo", - default=cls.wholeFrameGeo, - tooltip=( - "Data for geometry will only be written out on whole " - "frames." - ) - ), - "worldSpace": BoolDef( - "worldSpace", - label="World Space", - default=cls.worldSpace, - tooltip="Any root nodes will be stored in world space." - ), - "writeColorSets": BoolDef( - "writeColorSets", - label="Write Color Sets", - default=cls.writeColorSets, - tooltip="Write vertex colors with the geometry." - ), - "writeCreases": BoolDef( - "writeCreases", - label="Write Creases", - default=cls.writeCreases, - tooltip="Write the geometry's edge and vertex crease " - "information." - ), - "writeFaceSets": BoolDef( - "writeFaceSets", - label="Write Face Sets", - default=cls.writeFaceSets, - tooltip="Write face sets with the geometry." - ), - "writeNormals": BoolDef( - "writeNormals", - label="Write Normals", - default=cls.writeNormals, - tooltip="Write normals with the deforming geometry." - ), - "writeUVSets": BoolDef( - "writeUVSets", - label="Write UV Sets", - default=cls.writeUVSets, - tooltip=( - "Write all uv sets on MFnMeshes as vector 2 indexed " - "geometry parameters with face varying scope." 
- ) - ), - "writeVisibility": BoolDef( - "writeVisibility", - label="Write Visibility", - default=cls.writeVisibility, - tooltip=( - "Visibility state will be stored in the Alembic file. " - "Otherwise everything written out is treated as visible." - ) - ), - "preRoll": BoolDef( - "preRoll", - label="Pre Roll", - default=cls.preRoll, - tooltip="This frame range will not be sampled." - ), - "preRollStartFrame": NumberDef( - "preRollStartFrame", - label="Pre Roll Start Frame", - tooltip=( - "The frame to start scene evaluation at. This is used" - " to set the starting frame for time dependent " - "translations and can be used to evaluate run-up that" - " isn't actually translated." - ), - default=cls.preRollStartFrame - ), - "dataFormat": EnumDef( - "dataFormat", - label="Data Format", - items=["ogawa", "HDF"], - default=cls.dataFormat, - tooltip="The data format to use to write the file." - ), - "attr": TextDef( - "attr", - label="Custom Attributes", - placeholder="attr1; attr2; ...", - default=cls.attr, - tooltip=( - "Attributes matching by name will be included in the " - "Alembic export. Attributes should be separated by " - "semi-colon `;`" - ) - ), - "attrPrefix": TextDef( - "attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1; prefix2; ...", - default=cls.attrPrefix, - tooltip=( - "Attributes starting with these prefixes will be included " - "in the Alembic export. Attributes should be separated by " - "semi-colon `;`" - ) - ), - "userAttr": TextDef( - "userAttr", - label="User Attr", - placeholder="attr1; attr2; ...", - default=cls.userAttr, - tooltip=( - "Attributes matching by name will be included in the " - "Alembic export. Attributes should be separated by " - "semi-colon `;`" - ) - ), - "userAttrPrefix": TextDef( - "userAttrPrefix", - label="User Attr Prefix", - placeholder="prefix1; prefix2; ...", - default=cls.userAttrPrefix, - tooltip=( - "Attributes starting with these prefixes will be included " - "in the Alembic export. Attributes should be separated by " - "semi-colon `;`" - ) - ), - "melPerFrameCallback": TextDef( - "melPerFrameCallback", - label="Mel Per Frame Callback", - default=cls.melPerFrameCallback, - tooltip=( - "When each frame (and the static frame) is evaluated the " - "string specified is evaluated as a Mel command." - ) - ), - "melPostJobCallback": TextDef( - "melPostJobCallback", - label="Mel Post Job Callback", - default=cls.melPostJobCallback, - tooltip=( - "When the translation has finished the string specified " - "is evaluated as a Mel command." - ) - ), - "pythonPerFrameCallback": TextDef( - "pythonPerFrameCallback", - label="Python Per Frame Callback", - default=cls.pythonPerFrameCallback, - tooltip=( - "When each frame (and the static frame) is evaluated the " - "string specified is evaluated as a python command." - ) - ), - "pythonPostJobCallback": TextDef( - "pythonPostJobCallback", - label="Python Post Frame Callback", - default=cls.pythonPostJobCallback, - tooltip=( - "When the translation has finished the string specified " - "is evaluated as a python command." 
- ) - ) - }) - - defs = super(ExtractAlembic, cls).get_attribute_defs() - - defs.extend([ - UISeparatorDef("sep_alembic_options"), - UILabelDef("Alembic Options"), - ]) - - # The Arguments that can be modified by the Publisher - overrides = set(cls.overrides) - for key, value in override_defs.items(): - if key not in overrides: - continue - - defs.append(value) - - defs.append( - UISeparatorDef("sep_alembic_options_end") - ) - - return defs - - -class ExtractAnimation(ExtractAlembic): - label = "Extract Animation (Alembic)" - families = ["animation"] - - def get_members_and_roots(self, instance): - # Collect the out set nodes - out_sets = [node for node in instance if node.endswith("out_SET")] - if len(out_sets) != 1: - raise KnownPublishError( - "Couldn't find exactly one out_SET: {0}".format(out_sets) - ) - out_set = out_sets[0] - roots = cmds.sets(out_set, query=True) or [] - - # Include all descendants - nodes = roots.copy() - nodes.extend(get_all_children(roots, ignore_intermediate_objects=True)) - - return nodes, roots diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_proxy_abc.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_proxy_abc.py deleted file mode 100644 index fc1c7981ed..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_proxy_abc.py +++ /dev/null @@ -1,108 +0,0 @@ -import os - -from ayon_maya.api.alembic import extract_alembic -from ayon_maya.api.lib import ( - iter_visible_nodes_in_range, - maintained_selection, - suspended_refresh, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractProxyAlembic(plugin.MayaExtractorPlugin): - """Produce an alembic for bounding box geometry - """ - - label = "Extract Proxy (Alembic)" - families = ["proxyAbc"] - - def process(self, instance): - name_suffix = instance.data.get("nameSuffix") - # Collect the start and end including handles - start = float(instance.data.get("frameStartHandle", 1)) - end = float(instance.data.get("frameEndHandle", 1)) - - attrs = instance.data.get("attr", "").split(";") - attrs = [value for value in attrs if value.strip()] - attrs += ["cbId"] - - attr_prefixes = instance.data.get("attrPrefix", "").split(";") - attr_prefixes = [value for value in attr_prefixes if value.strip()] - - self.log.debug("Extracting Proxy Alembic..") - dirname = self.staging_dir(instance) - - filename = "{name}.abc".format(**instance.data) - path = os.path.join(dirname, filename) - - proxy_root = self.create_proxy_geometry(instance, - name_suffix, - start, - end) - - options = { - "step": instance.data.get("step", 1.0), - "attr": attrs, - "attrPrefix": attr_prefixes, - "writeVisibility": True, - "writeCreases": True, - "writeColorSets": instance.data.get("writeColorSets", False), - "writeFaceSets": instance.data.get("writeFaceSets", False), - "uvWrite": True, - "selection": True, - "worldSpace": instance.data.get("worldSpace", True), - "root": proxy_root - } - - if int(cmds.about(version=True)) >= 2017: - # Since Maya 2017 alembic supports multiple uv sets - write them. 
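-            # (cmds.about(version=True) returns the release year on current
-            # Maya versions, so this check passes effectively everywhere.)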
- options["writeUVSets"] = True - - with suspended_refresh(): - with maintained_selection(): - cmds.select(proxy_root, hi=True, noExpand=True) - extract_alembic(file=path, - startFrame=start, - endFrame=end, - **options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": dirname - } - instance.data["representations"].append(representation) - - if not instance.data.get("stagingDir_persistent", False): - instance.context.data["cleanupFullPaths"].append(path) - - self.log.debug("Extracted {} to {}".format(instance, dirname)) - # remove the bounding box - bbox_master = cmds.ls("bbox_grp") - cmds.delete(bbox_master) - - def create_proxy_geometry(self, instance, name_suffix, start, end): - nodes = instance[:] - nodes = list(iter_visible_nodes_in_range(nodes, - start=start, - end=end)) - - inst_selection = cmds.ls(nodes, long=True) - cmds.geomToBBox(inst_selection, - nameSuffix=name_suffix, - keepOriginal=True, - single=False, - bakeAnimation=True, - startTime=start, - endTime=end) - # create master group for bounding - # boxes as the main root - master_group = cmds.group(name="bbox_grp") - bbox_sel = cmds.ls(master_group, long=True) - self.log.debug("proxy_root: {}".format(bbox_sel)) - return bbox_sel diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_redshift_proxy.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_redshift_proxy.py deleted file mode 100644 index 909d3dd172..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_redshift_proxy.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -"""Redshift Proxy extractor.""" -import os - -from ayon_maya.api.lib import maintained_selection, renderlayer -from ayon_maya.api import plugin -from ayon_maya.api.render_setup_tools import ( - allow_export_from_render_setup_layer, -) -from maya import cmds - - -class ExtractRedshiftProxy(plugin.MayaExtractorPlugin): - """Extract the content of the instance to a redshift proxy file.""" - - label = "Redshift Proxy (.rs)" - families = ["redshiftproxy"] - - def process(self, instance): - """Extractor entry point.""" - - # Make sure Redshift is loaded - cmds.loadPlugin("redshift4maya", quiet=True) - - staging_dir = self.staging_dir(instance) - file_name = "{}.rs".format(instance.name) - file_path = os.path.join(staging_dir, file_name) - - anim_on = instance.data["animation"] - rs_options = "exportConnectivity=0;enableCompression=1;keepUnused=0;" - repr_files = file_name - - if not anim_on: - # Remove animation information because it is not required for - # non-animated products - keys = ["frameStart", - "frameEnd", - "handleStart", - "handleEnd", - "frameStartHandle", - "frameEndHandle"] - for key in keys: - instance.data.pop(key, None) - - else: - start_frame = instance.data["frameStartHandle"] - end_frame = instance.data["frameEndHandle"] - rs_options = "{}startFrame={};endFrame={};frameStep={};".format( - rs_options, start_frame, - end_frame, instance.data["step"] - ) - - root, ext = os.path.splitext(file_path) - # Padding is taken from number of digits of the end_frame. - # Not sure where Redshift is taking it. 
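-            # (Illustration: start 1001, end 1003, step 1 would be expected
-            # on disk as "name.1001.rs", "name.1002.rs", "name.1003.rs".)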
- repr_files = [ - "{}.{}{}".format(os.path.basename(root), str(frame).rjust(4, "0"), ext) # noqa: E501 - for frame in range( - int(start_frame), - int(end_frame) + 1, - int(instance.data["step"]) - )] - # vertex_colors = instance.data.get("vertexColors", False) - - # Write out rs file - self.log.debug("Writing: '%s'" % file_path) - - # Allow overriding what renderlayer to export from. By default force - # it to the default render layer. (Note that the renderlayer isn't - # currently exposed as an attribute to artists) - layer = instance.data.get("renderLayer", "defaultRenderLayer") - - with maintained_selection(): - with renderlayer(layer): - with allow_export_from_render_setup_layer(): - cmds.select(instance.data["setMembers"], noExpand=True) - cmds.file(file_path, - preserveReferences=False, - force=True, - type="Redshift Proxy", - exportSelected=True, - options=rs_options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - self.log.debug("Files: {}".format(repr_files)) - - representation = { - 'name': 'rs', - 'ext': 'rs', - 'files': repr_files, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s" - % (instance.name, staging_dir)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_rendersetup.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_rendersetup.py deleted file mode 100644 index 8dcdd603b7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_rendersetup.py +++ /dev/null @@ -1,40 +0,0 @@ -import json -import os - -import maya.app.renderSetup.model.renderSetup as renderSetup -from ayon_maya.api import plugin - - -class ExtractRenderSetup(plugin.MayaExtractorPlugin): - """ - Produce renderSetup template file - - This will save whole renderSetup to json file for later use. 
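-
-    The saved template can later be re-applied through Maya's documented
-    decode call; a minimal sketch (the "json_path" variable is a
-    placeholder):
-
-        import json
-        import maya.app.renderSetup.model.renderSetup as renderSetup
-
-        with open(json_path, "r") as file:
-            renderSetup.instance().decode(
-                json.load(file), renderSetup.DECODE_AND_OVERWRITE, None)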
- """ - - label = "Extract RenderSetup" - families = ["rendersetup"] - - def process(self, instance): - parent_dir = self.staging_dir(instance) - json_filename = "{}.json".format(instance.name) - json_path = os.path.join(parent_dir, json_filename) - - with open(json_path, "w+") as file: - json.dump( - renderSetup.instance().encode(None), - fp=file, indent=2, sort_keys=True) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'json', - 'ext': 'json', - 'files': json_filename, - "stagingDir": parent_dir, - } - instance.data["representations"].append(representation) - - self.log.debug( - "Extracted instance '%s' to: %s" % (instance.name, json_path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_rig.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_rig.py deleted file mode 100644 index 3f96d7123d..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_rig.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract rig as Maya Scene.""" -import os - -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractRig(plugin.MayaExtractorPlugin): - """Extract rig as Maya Scene.""" - - label = "Extract Rig (Maya Scene)" - families = ["rig"] - scene_type = "ma" - - def process(self, instance): - """Plugin entry point.""" - maya_settings = instance.context.data["project_settings"]["maya"] - ext_mapping = { - item["name"]: item["value"] - for item in maya_settings["ext_mapping"] - } - if ext_mapping: - self.log.debug("Looking in settings for scene type ...") - # use extension mapping for first family found - for family in self.families: - try: - self.scene_type = ext_mapping[family] - self.log.debug( - "Using '.{}' as scene type".format(self.scene_type)) - break - except AttributeError: - # no preset found - pass - # Define extract output file path - dir_path = self.staging_dir(instance) - filename = "{0}.{1}".format(instance.name, self.scene_type) - path = os.path.join(dir_path, filename) - - # Perform extraction - self.log.debug("Performing extraction ...") - with maintained_selection(): - cmds.select(instance, noExpand=True) - cmds.file(path, - force=True, - typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 - exportSelected=True, - preserveReferences=False, - channels=True, - constraints=True, - expressions=True, - constructionHistory=True) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': filename, - "stagingDir": dir_path - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", instance.name, path) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_skeleton_mesh.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_skeleton_mesh.py deleted file mode 100644 index e496d53d42..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_skeleton_mesh.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -from maya import cmds # noqa -import pyblish.api - -from ayon_core.pipeline.publish import OptionalPyblishPluginMixin -from ayon_maya.api import fbx -from ayon_maya.api import plugin - - -class ExtractSkeletonMesh(plugin.MayaExtractorPlugin, - OptionalPyblishPluginMixin): - """Extract Rig in FBX format from Maya. 
- - This extracts the rig in fbx with the constraints - and referenced asset content included. - This also optionally extract animated rig in fbx with - geometries included. - - """ - order = pyblish.api.ExtractorOrder - label = "Extract Skeleton Mesh" - families = ["rig.fbx"] - - def process(self, instance): - if not self.is_active(instance.data): - return - # Define output path - staging_dir = self.staging_dir(instance) - filename = "{0}.fbx".format(instance.name) - path = os.path.join(staging_dir, filename) - - fbx_exporter = fbx.FBXExtractor(log=self.log) - out_set = instance.data.get("skeleton_mesh", []) - - instance.data["constraints"] = True - instance.data["skeletonDefinitions"] = True - - fbx_exporter.set_options_from_instance(instance) - - # Export - fbx_exporter.export(out_set, path) - - representations = instance.data.setdefault("representations", []) - representations.append({ - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": staging_dir - }) - - self.log.debug("Extract FBX to: {0}".format(path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_thumbnail.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_thumbnail.py deleted file mode 100644 index c2ffedd67c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,119 +0,0 @@ -import glob -import os -import tempfile - -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class ExtractThumbnail(plugin.MayaExtractorPlugin): - """Extract viewport thumbnail. - - Takes review camera and creates a thumbnail based on viewport - capture. - - """ - - label = "Thumbnail" - families = ["review"] - - def process(self, instance): - self.log.debug("Extracting thumbnail..") - - camera = instance.data["review_camera"] - - task_data = instance.data["anatomyData"].get("task", {}) - capture_preset = lib.get_capture_preset( - task_data.get("name"), - task_data.get("type"), - instance.data["productName"], - instance.context.data["project_settings"], - self.log - ) - - # Create temp directory for thumbnail - # - this is to avoid "override" of source file - dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_thumbnail") - self.log.debug( - "Create temp directory {} for thumbnail".format(dst_staging) - ) - # Store new staging to cleanup paths - filename = instance.name - path = os.path.join(dst_staging, filename) - - self.log.debug("Outputting images to %s" % path) - - preset = lib.generate_capture_preset( - instance, camera, path, - start=1, end=1, - capture_preset=capture_preset) - - preset["camera_options"].update({ - "displayGateMask": False, - "displayResolution": False, - "displayFilmGate": False, - "displayFieldChart": False, - "displaySafeAction": False, - "displaySafeTitle": False, - "displayFilmPivot": False, - "displayFilmOrigin": False, - "overscan": 1.0, - }) - path = lib.render_capture_preset(preset) - - playblast = self._fix_playblast_output_path(path) - - _, thumbnail = os.path.split(playblast) - - self.log.debug("file list {}".format(thumbnail)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "thumbnail", - "ext": "jpg", - "files": thumbnail, - "stagingDir": dst_staging, - "thumbnail": True - } - instance.data["representations"].append(representation) - - def _fix_playblast_output_path(self, filepath): - """Workaround a bug in maya.cmds.playblast to return correct filepath. 
-
-        When the `viewer` argument is set to False and maya.cmds.playblast
-        does not automatically open the playblasted file, the returned
-        filepath does not have the file's extension added correctly.
-
-        To work around this we just glob.glob() for any file extensions and
-        assume the latest modified file is the correct file and return it.
-
-        """
-        # Catch cancelled playblast
-        if filepath is None:
-            self.log.warning("Playblast did not result in output path. "
-                             "Playblast is probably interrupted.")
-            return None
-
-        # Fix: playblast not returning correct filename (with extension)
-        # Let's assume the most recently modified file is the correct one.
-        if not os.path.exists(filepath):
-            directory = os.path.dirname(filepath)
-            filename = os.path.basename(filepath)
-            # check if the filepath has a frame-based filename
-            # example : capture.####.png
-            parts = filename.split(".")
-            if len(parts) == 3:
-                query = os.path.join(directory, "{}.*.{}".format(parts[0],
-                                                                 parts[-1]))
-                files = glob.glob(query)
-            else:
-                files = glob.glob("{}.*".format(filepath))
-
-            if not files:
-                raise RuntimeError("Couldn't find playblast from: "
-                                   "{0}".format(filepath))
-            filepath = max(files, key=os.path.getmtime)
-
-        return filepath
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_abc.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
deleted file mode 100644
index a5d9303052..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Create Unreal Skeletal Mesh data to be extracted as Alembic."""
-import os
-
-from ayon_maya.api.alembic import extract_alembic
-from ayon_maya.api.lib import maintained_selection, suspended_refresh
-from ayon_maya.api import plugin
-from maya import cmds  # noqa
-
-
-class ExtractUnrealSkeletalMeshAbc(plugin.MayaExtractorPlugin):
-    """Extract Unreal Skeletal Mesh as Alembic from Maya. 
""" - - label = "Extract Unreal Skeletal Mesh - Alembic" - families = ["skeletalMesh"] - optional = True - - def process(self, instance): - self.log.debug("Extracting pointcache..") - - geo = cmds.listRelatives( - instance.data.get("geometry"), allDescendents=True, fullPath=True) - joints = cmds.listRelatives( - instance.data.get("joints"), allDescendents=True, fullPath=True) - - nodes = geo + joints - - attrs = instance.data.get("attr", "").split(";") - attrs = [value for value in attrs if value.strip()] - attrs += ["cbId"] - - attr_prefixes = instance.data.get("attrPrefix", "").split(";") - attr_prefixes = [value for value in attr_prefixes if value.strip()] - - # Define output path - staging_dir = self.staging_dir(instance) - filename = "{0}.abc".format(instance.name) - path = os.path.join(staging_dir, filename) - - # The export requires forward slashes because we need - # to format it into a string in a mel expression - path = path.replace('\\', '/') - - self.log.debug("Extracting ABC to: {0}".format(path)) - self.log.debug("Members: {0}".format(nodes)) - self.log.debug("Instance: {0}".format(instance[:])) - - options = { - "step": instance.data.get("step", 1.0), - "attr": attrs, - "attrPrefix": attr_prefixes, - "writeVisibility": True, - "writeCreases": True, - "writeColorSets": instance.data.get("writeColorSets", False), - "writeFaceSets": instance.data.get("writeFaceSets", False), - "uvWrite": True, - "selection": True, - "worldSpace": instance.data.get("worldSpace", True) - } - - self.log.debug("Options: {}".format(options)) - - if int(cmds.about(version=True)) >= 2017: - # Since Maya 2017 alembic supports multiple uv sets - write them. - options["writeUVSets"] = True - - if not instance.data.get("includeParentHierarchy", True): - # Set the root nodes if we don't want to include parents - # The roots are to be considered the ones that are the actual - # direct members of the set - options["root"] = instance.data.get("setMembers") - - with suspended_refresh(suspend=instance.data.get("refresh", False)): - with maintained_selection(): - cmds.select(nodes, noExpand=True) - extract_alembic(file=path, - # startFrame=start, - # endFrame=end, - **options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extract ABC successful to: {0}".format(path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py deleted file mode 100644 index 36324d3511..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -"""Create Unreal Skeletal Mesh data to be extracted as FBX.""" -import os -from contextlib import contextmanager - -import pyblish.api -from ayon_maya.api import fbx -from ayon_maya.api import plugin -from maya import cmds # noqa - - -@contextmanager -def renamed(original_name, renamed_name): - # type: (str, str) -> None - try: - cmds.rename(original_name, renamed_name) - yield - finally: - cmds.rename(renamed_name, original_name) - - -class ExtractUnrealSkeletalMeshFbx(plugin.MayaExtractorPlugin): - """Extract Unreal Skeletal Mesh as FBX from Maya. 
""" - - order = pyblish.api.ExtractorOrder - 0.1 - label = "Extract Unreal Skeletal Mesh - FBX" - families = ["skeletalMesh"] - optional = True - - def process(self, instance): - fbx_exporter = fbx.FBXExtractor(log=self.log) - - # Define output path - staging_dir = self.staging_dir(instance) - filename = "{0}.fbx".format(instance.name) - path = os.path.join(staging_dir, filename) - - geo = instance.data.get("geometry") - joints = instance.data.get("joints") - - to_extract = geo + joints - - # The export requires forward slashes because we need - # to format it into a string in a mel expression - path = path.replace('\\', '/') - - self.log.debug("Extracting FBX to: {0}".format(path)) - self.log.debug("Members: {0}".format(to_extract)) - self.log.debug("Instance: {0}".format(instance[:])) - - fbx_exporter.set_options_from_instance(instance) - - # This magic is done for variants. To let Unreal merge correctly - # existing data, top node must have the same name. So for every - # variant we extract we need to rename top node of the rig correctly. - # It is finally done in context manager so it won't affect current - # scene. - - # we rely on hierarchy under one root. - original_parent = to_extract[0].split("|")[1] - - parent_node = instance.data.get("folderPath") - # this needs to be done for AYON - # WARNING: since AYON supports duplicity of asset names, - # this needs to be refactored throughout the pipeline. - parent_node = parent_node.split("/")[-1] - - renamed_to_extract = [] - for node in to_extract: - node_path = node.split("|") - node_path[1] = parent_node - renamed_to_extract.append("|".join(node_path)) - - with renamed(original_parent, parent_node): - self.log.debug("Extracting: {}".format(renamed_to_extract)) - fbx_exporter.export(renamed_to_extract, path) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extract FBX successful to: {0}".format(path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_staticmesh.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_staticmesh.py deleted file mode 100644 index 215f82b338..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_staticmesh.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -"""Create Unreal Static Mesh data to be extracted as FBX.""" -import os - -import pyblish.api -from ayon_maya.api import fbx -from ayon_maya.api.lib import maintained_selection, parent_nodes -from ayon_maya.api import plugin -from maya import cmds # noqa - - -class ExtractUnrealStaticMesh(plugin.MayaExtractorPlugin): - """Extract Unreal Static Mesh as FBX from Maya. 
""" - - order = pyblish.api.ExtractorOrder - 0.1 - label = "Extract Unreal Static Mesh" - families = ["staticMesh"] - - def process(self, instance): - members = instance.data.get("geometryMembers", []) - if instance.data.get("collisionMembers"): - members = members + instance.data.get("collisionMembers") - - fbx_exporter = fbx.FBXExtractor(log=self.log) - - # Define output path - staging_dir = self.staging_dir(instance) - filename = "{0}.fbx".format(instance.name) - path = os.path.join(staging_dir, filename) - - # The export requires forward slashes because we need - # to format it into a string in a mel expression - path = path.replace('\\', '/') - - self.log.debug("Extracting FBX to: {0}".format(path)) - self.log.debug("Members: {0}".format(members)) - self.log.debug("Instance: {0}".format(instance[:])) - - fbx_exporter.set_options_from_instance(instance) - - with maintained_selection(): - with parent_nodes(members): - self.log.debug("Un-parenting: {}".format(members)) - fbx_exporter.export(members, path) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extract FBX successful to: {0}".format(path)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_yeticache.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_yeticache.py deleted file mode 100644 index 79f47fbe9b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_unreal_yeticache.py +++ /dev/null @@ -1,59 +0,0 @@ -import os - -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractUnrealYetiCache(plugin.MayaExtractorPlugin): - """Producing Yeti cache files using scene time range. - - This will extract Yeti cache file sequence and fur settings. 
- """ - - label = "Extract Yeti Cache (Unreal)" - families = ["yeticacheUE"] - - def process(self, instance): - - yeti_nodes = cmds.ls(instance, type="pgYetiMaya") - if not yeti_nodes: - raise RuntimeError("No pgYetiMaya nodes found in the instance") - - # Define extract output file path - dirname = self.staging_dir(instance) - - # Collect information for writing cache - start_frame = instance.data["frameStartHandle"] - end_frame = instance.data["frameEndHandle"] - preroll = instance.data["preroll"] - if preroll > 0: - start_frame -= preroll - - kwargs = {} - samples = instance.data.get("samples", 0) - if samples == 0: - kwargs.update({"sampleTimes": "0.0 1.0"}) - else: - kwargs.update({"samples": samples}) - - self.log.debug(f"Writing out cache {start_frame} - {end_frame}") - filename = f"{instance.name}.abc" - path = os.path.join(dirname, filename) - cmds.pgYetiCommand(yeti_nodes, - writeAlembic=path, - range=(start_frame, end_frame), - asUnrealAbc=True, - **kwargs) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - 'stagingDir': dirname - } - instance.data["representations"].append(representation) - - self.log.debug(f"Extracted {instance} to {dirname}") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayproxy.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayproxy.py deleted file mode 100644 index d6f1fd6698..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayproxy.py +++ /dev/null @@ -1,70 +0,0 @@ -import os - -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractVRayProxy(plugin.MayaExtractorPlugin): - """Extract the content of the instance to a vrmesh file - - Things to pay attention to: - - If animation is toggled, are the frames correct - - - """ - - label = "VRay Proxy (.vrmesh)" - families = ["vrayproxy.vrmesh"] - - def process(self, instance): - - staging_dir = self.staging_dir(instance) - file_name = "{}.vrmesh".format(instance.name) - file_path = os.path.join(staging_dir, file_name) - - anim_on = instance.data["animation"] - if not anim_on: - # Remove animation information because it is not required for - # non-animated products - keys = ["frameStart", "frameEnd", - "handleStart", "handleEnd", - "frameStartHandle", "frameEndHandle"] - for key in keys: - instance.data.pop(key, None) - - start_frame = 1 - end_frame = 1 - else: - start_frame = instance.data["frameStartHandle"] - end_frame = instance.data["frameEndHandle"] - - vertex_colors = instance.data.get("vertexColors", False) - - # Write out vrmesh file - self.log.debug("Writing: '%s'" % file_path) - with maintained_selection(): - cmds.select(instance.data["setMembers"], noExpand=True) - cmds.vrayCreateProxy(exportType=1, - dir=staging_dir, - fname=file_name, - animOn=anim_on, - animType=3, - startFrame=start_frame, - endFrame=end_frame, - vertexColorsOn=vertex_colors, - ignoreHiddenObjects=True, - createProxyNode=False) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'vrmesh', - 'ext': 'vrmesh', - 'files': file_name, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s" - % (instance.name, staging_dir)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayscene.py 
b/server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayscene.py deleted file mode 100644 index 785cb4c37c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_vrayscene.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract vrayscene from specified families.""" -import os -import re - -from ayon_maya.api.lib import maintained_selection -from ayon_maya.api import plugin -from ayon_maya.api.render_setup_tools import export_in_rs_layer -from maya import cmds - - -class ExtractVrayscene(plugin.MayaExtractorPlugin): - """Extractor for vrscene.""" - - label = "VRay Scene (.vrscene)" - families = ["vrayscene_layer"] - - def process(self, instance): - """Plugin entry point.""" - if instance.data.get("exportOnFarm"): - self.log.debug("vrayscenes will be exported on farm.") - raise NotImplementedError( - "exporting vrayscenes is not implemented") - - # handle sequence - if instance.data.get("vraySceneMultipleFiles"): - self.log.debug("vrayscenes will be exported on farm.") - raise NotImplementedError( - "exporting vrayscene sequences not implemented yet") - - vray_settings = cmds.ls(type="VRaySettingsNode") - if not vray_settings: - node = cmds.createNode("VRaySettingsNode") - else: - node = vray_settings[0] - - # setMembers on vrayscene_layer should contain layer name. - layer_name = instance.data.get("layer") - - staging_dir = self.staging_dir(instance) - template = cmds.getAttr("{}.vrscene_filename".format(node)) - start_frame = instance.data.get( - "frameStartHandle") if instance.data.get( - "vraySceneMultipleFiles") else None - formatted_name = self.format_vray_output_filename( - os.path.basename(instance.data.get("source")), - layer_name, - template, - start_frame - ) - - file_path = os.path.join( - staging_dir, "vrayscene", *formatted_name.split("/")) - - # Write out vrscene file - self.log.debug("Writing: '%s'" % file_path) - with maintained_selection(): - if "*" not in instance.data["setMembers"]: - self.log.debug( - "Exporting: {}".format(instance.data["setMembers"])) - set_members = instance.data["setMembers"] - cmds.select(set_members, noExpand=True) - else: - self.log.debug("Exporting all ...") - set_members = cmds.ls( - long=True, objectsOnly=True, - geometry=True, lights=True, cameras=True) - cmds.select(set_members, noExpand=True) - - self.log.debug("Appending layer name {}".format(layer_name)) - set_members.append(layer_name) - - export_in_rs_layer( - file_path, - set_members, - export=lambda: cmds.file( - file_path, type="V-Ray Scene", - pr=True, es=True, force=True)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - files = file_path - - representation = { - 'name': 'vrscene', - 'ext': 'vrscene', - 'files': os.path.basename(files), - "stagingDir": os.path.dirname(files), - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s" - % (instance.name, staging_dir)) - - @staticmethod - def format_vray_output_filename( - filename, layer, template, start_frame=None): - """Format the expected output file of the Export job. - - Example: - filename: /mnt/projects/foo/shot010_v006.mb - template: // - result: "shot010_v006/CHARS/CHARS.vrscene" - - Args: - filename (str): path to scene file. - layer (str): layer name. - template (str): token template. - start_frame (int, optional): start frame - if set we use - multiple files export mode. - - Returns: - str: formatted path. 
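In the Example above the template tokens were lost in formatting; judging
from the result, the template reads <Scene>/<Layer>/<Layer>. A worked run
under that assumption (the caller passes `os.path.basename(...)`, so only
the base file name goes in; the staticmethod is called unqualified here
for brevity):

    format_vray_output_filename(
        "shot010_v006.mb", "CHARS", "<Scene>/<Layer>/<Layer>")
    # -> "shot010_v006/CHARS/CHARS.vrscene"

    format_vray_output_filename(
        "shot010_v006.mb", "CHARS", "<Scene>/<Layer>/<Layer>",
        start_frame=1001)
    # -> "shot010_v006/CHARS/CHARS_1001.vrscene"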
-
-        """
-        # format template to match pythons format specs
-        template = re.sub(r"<(\w+?)>", r"{\1}", template.lower())
-
-        # Ensure filename has no extension
-        file_name, _ = os.path.splitext(filename)
-        mapping = {
-            "scene": file_name,
-            "layer": layer
-        }
-
-        output_path = template.format(**mapping)
-
-        if start_frame:
-            filename_zero = "{}_{:04d}.vrscene".format(
-                output_path, start_frame)
-        else:
-            filename_zero = "{}.vrscene".format(output_path)
-
-        result = filename_zero.replace("\\", "/")
-
-        return result
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_workfile_xgen.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_workfile_xgen.py
deleted file mode 100644
index e6df19c7f1..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_workfile_xgen.py
+++ /dev/null
@@ -1,249 +0,0 @@
-import copy
-import os
-import shutil
-
-import pyblish.api
-from ayon_maya.api.alembic import extract_alembic
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ExtractWorkfileXgen(plugin.MayaExtractorPlugin):
-    """Extract Workfile Xgen.
-
-    When submitting a render, we need to prep Xgen side car files.
-    """
-
-    # Offset to run before workfile scene save.
-    order = pyblish.api.ExtractorOrder - 0.499
-    label = "Extract Workfile Xgen"
-    families = ["workfile"]
-
-    def get_render_max_frame_range(self, context):
-        """Return start to end frame range including all renderlayers in
-        context.
-
-        This will return the full frame range which includes all frames of the
-        renderlayer instances to be published/submitted.
-
-        Args:
-            context (pyblish.api.Context): Current publishing context.
-
-        Returns:
-            tuple or None: Start frame, end frame tuple if any renderlayers
-                found. Otherwise None is returned.
-
-        """
-
-        def _is_active_renderlayer(i):
-            """Return whether instance is active renderlayer"""
-            if not i.data.get("publish", True):
-                return False
-
-            is_renderlayer = (
-                "renderlayer" in i.data.get("families", []) or
-                i.data["productType"] == "renderlayer"
-            )
-            return is_renderlayer
-
-        start_frame = None
-        end_frame = None
-        for instance in context:
-            if not _is_active_renderlayer(instance):
-                # Only consider renderlayer instances
-                continue
-
-            render_start_frame = instance.data["frameStart"]
-            render_end_frame = instance.data["frameEnd"]
-
-            if start_frame is None:
-                start_frame = render_start_frame
-            else:
-                start_frame = min(start_frame, render_start_frame)
-
-            if end_frame is None:
-                end_frame = render_end_frame
-            else:
-                end_frame = max(end_frame, render_end_frame)
-
-        if start_frame is None or end_frame is None:
-            return
-
-        return start_frame, end_frame
-
-    def process(self, instance):
-        transfers = []
-
-        # Validate there are any palettes in the scene.
-        if not cmds.ls(type="xgmPalette"):
-            self.log.debug(
-                "No collections found in the scene. Skipping Xgen extraction."
-            )
-            return
-
-        import xgenm
-
-        # Validate to extract only when we are publishing a renderlayer as
-        # well.
-        render_range = self.get_render_max_frame_range(instance.context)
-        if not render_range:
-            self.log.debug(
-                "No publishable renderlayers found in context. Skipping Xgen"
-                " extraction."
-            )
-            return
-
-        start_frame, end_frame = render_range
-
-        # We decrement start frame and increment end frame so motion blur will
-        # render correctly.
-        start_frame -= 1
-        end_frame += 1
-
-        # Extract patches alembic.
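The range-union logic in get_render_max_frame_range above, reduced to a
framework-free sketch over plain (start, end) tuples (hypothetical
values):

    def max_frame_range(frame_ranges):
        """Union of (start, end) tuples; None when there is nothing."""
        if not frame_ranges:
            return None
        starts, ends = zip(*frame_ranges)
        return min(starts), max(ends)

    # max_frame_range([(1001, 1050), (990, 1020)]) -> (990, 1050)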
- path_no_ext, _ = os.path.splitext(instance.context.data["currentFile"]) - kwargs = {"attrPrefix": ["xgen"], "stripNamespaces": True} - alembic_files = [] - for palette in cmds.ls(type="xgmPalette"): - patch_names = [] - for description in xgenm.descriptions(palette): - for name in xgenm.boundGeometry(palette, description): - patch_names.append(name) - - alembic_file = "{}__{}.abc".format( - path_no_ext, palette.replace(":", "__ns__") - ) - extract_alembic( - alembic_file, - root=patch_names, - selection=False, - startFrame=float(start_frame), - endFrame=float(end_frame), - verbose=True, - **kwargs - ) - alembic_files.append(alembic_file) - - template_data = copy.deepcopy(instance.data["anatomyData"]) - anatomy = instance.context.data["anatomy"] - publish_template = anatomy.get_template_item( - "publish", "default", "file" - ) - published_maya_path = publish_template.format(template_data) - published_basename, _ = os.path.splitext(published_maya_path) - - for source in alembic_files: - destination = os.path.join( - os.path.dirname(instance.data["resourcesDir"]), - os.path.basename( - source.replace(path_no_ext, published_basename) - ) - ) - transfers.append((source, destination)) - - # Validate that we are using the published workfile. - deadline_settings = instance.context.get("deadline") - if deadline_settings: - publish_settings = deadline_settings["publish"] - if not publish_settings["MayaSubmitDeadline"]["use_published"]: - self.log.debug( - "Not using the published workfile. Abort Xgen extraction." - ) - return - - # Collect Xgen and Delta files. - xgen_files = [] - sources = [] - current_dir = os.path.dirname(instance.context.data["currentFile"]) - attrs = ["xgFileName", "xgBaseFile"] - for palette in cmds.ls(type="xgmPalette"): - for attr in attrs: - source = os.path.join( - current_dir, cmds.getAttr(palette + "." + attr) - ) - if not os.path.exists(source): - continue - - ext = os.path.splitext(source)[1] - if ext == ".xgen": - xgen_files.append(source) - if ext == ".xgd": - sources.append(source) - - # Copy .xgen file to temporary location and modify. - staging_dir = self.staging_dir(instance) - for source in xgen_files: - destination = os.path.join(staging_dir, os.path.basename(source)) - shutil.copy(source, destination) - - lines = [] - with open(destination, "r") as f: - for line in [line.rstrip() for line in f]: - if line.startswith("\txgProjectPath"): - path = os.path.dirname(instance.data["resourcesDir"]) - line = "\txgProjectPath\t\t{}/".format( - path.replace("\\", "/") - ) - - lines.append(line) - - with open(destination, "w") as f: - f.write("\n".join(lines)) - - sources.append(destination) - - # Add resource files to workfile instance. 
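The .xgen rewrite above, isolated into a reusable sketch; it assumes the
same plain-text layout with tab-separated xgProjectPath entries:

    def rewrite_xg_project_path(xgen_file, project_path):
        """Point the xgProjectPath entry of a .xgen file elsewhere."""
        lines = []
        with open(xgen_file, "r") as f:
            for line in f.read().splitlines():
                if line.startswith("\txgProjectPath"):
                    line = "\txgProjectPath\t\t{}/".format(
                        project_path.replace("\\", "/"))
                lines.append(line)
        with open(xgen_file, "w") as f:
            f.write("\n".join(lines))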
- for source in sources: - basename = os.path.basename(source) - destination = os.path.join( - os.path.dirname(instance.data["resourcesDir"]), basename - ) - transfers.append((source, destination)) - - destination_dir = os.path.join( - instance.data["resourcesDir"], "collections" - ) - for palette in cmds.ls(type="xgmPalette"): - project_path = xgenm.getAttr("xgProjectPath", palette) - data_path = xgenm.getAttr("xgDataPath", palette) - data_path = data_path.replace("${PROJECT}", project_path) - for path in data_path.split(";"): - for root, _, files in os.walk(path): - for f in files: - source = os.path.join(root, f) - destination = "{}/{}{}".format( - destination_dir, - palette.replace(":", "__ns__"), - source.replace(path, "") - ) - transfers.append((source, destination)) - - for source, destination in transfers: - self.log.debug("Transfer: {} > {}".format(source, destination)) - - instance.data["transfers"] = transfers - - # Set palette attributes in preparation for workfile publish. - attrs = {"xgFileName": None, "xgBaseFile": ""} - data = {} - for palette in cmds.ls(type="xgmPalette"): - attrs["xgFileName"] = "resources/{}.xgen".format( - palette.replace(":", "__ns__") - ) - for attr, value in attrs.items(): - node_attr = palette + "." + attr - - old_value = cmds.getAttr(node_attr) - try: - data[palette][attr] = old_value - except KeyError: - data[palette] = {attr: old_value} - - cmds.setAttr(node_attr, value, type="string") - self.log.debug( - "Setting \"{}\" on \"{}\"".format(value, node_attr) - ) - - cmds.setAttr(palette + "." + "xgExportAsDelta", False) - - instance.data["xgenAttributes"] = data diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_xgen.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_xgen.py deleted file mode 100644 index bb700bbdec..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_xgen.py +++ /dev/null @@ -1,154 +0,0 @@ -import copy -import os -import tempfile - -import xgenm -from ayon_maya.api.lib import ( - attribute_values, - delete_after, - maintained_selection, - write_xgen_file, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractXgen(plugin.MayaExtractorPlugin): - """Extract Xgen - - Workflow: - - Duplicate nodes used for patches. - - Export palette and import onto duplicate nodes. - - Export/Publish duplicate nodes and palette. - - Export duplicate palette to .xgen file and add to publish. - - Publish all xgen files as resources. - """ - - label = "Extract Xgen" - families = ["xgen"] - scene_type = "ma" - - def process(self, instance): - if "representations" not in instance.data: - instance.data["representations"] = [] - - staging_dir = self.staging_dir(instance) - maya_filename = "{}.{}".format(instance.data["name"], self.scene_type) - maya_filepath = os.path.join(staging_dir, maya_filename) - - # Get published xgen file name. - template_data = copy.deepcopy(instance.data["anatomyData"]) - template_data.update({"ext": "xgen"}) - anatomy = instance.context.data["anatomy"] - file_template = anatomy.get_template_item("publish", "default", "file") - xgen_filename = file_template.format(template_data) - - xgen_path = os.path.join( - self.staging_dir(instance), xgen_filename - ).replace("\\", "/") - type = "mayaAscii" if self.scene_type == "ma" else "mayaBinary" - - # Duplicate xgen setup. - with delete_after() as delete_bin: - duplicate_nodes = [] - # Collect nodes to export. - for node in instance.data["xgenConnections"]: - # Duplicate_transform subd patch geometry. 
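`delete_after` above is used as a "delete bin". A minimal sketch of such a
context manager, assuming it only collects nodes for deletion on exit (the
shipped helper in ayon_maya.api.lib may differ in detail):

    import contextlib

    from maya import cmds


    @contextlib.contextmanager
    def delete_after():
        """Yield a list; whatever gets appended is deleted on exit."""
        node_bin = []
        try:
            yield node_bin
        finally:
            if node_bin:
                cmds.delete(node_bin)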
- duplicate_transform = cmds.duplicate(node)[0] - delete_bin.append(duplicate_transform) - - # Discard the children. - shapes = cmds.listRelatives(duplicate_transform, shapes=True) - children = cmds.listRelatives( - duplicate_transform, children=True - ) - cmds.delete(set(children) - set(shapes)) - - if cmds.listRelatives(duplicate_transform, parent=True): - duplicate_transform = cmds.parent( - duplicate_transform, world=True - )[0] - - duplicate_nodes.append(duplicate_transform) - - # Export temp xgen palette files. - temp_xgen_path = os.path.join( - tempfile.gettempdir(), "temp.xgen" - ).replace("\\", "/") - xgenm.exportPalette( - instance.data["xgmPalette"].replace("|", ""), temp_xgen_path - ) - self.log.debug("Extracted to {}".format(temp_xgen_path)) - - # Import xgen onto the duplicate. - with maintained_selection(): - cmds.select(duplicate_nodes) - palette = xgenm.importPalette(temp_xgen_path, []) - - delete_bin.append(palette) - - # Copy shading assignments. - nodes = ( - instance.data["xgmDescriptions"] + - instance.data["xgmSubdPatches"] - ) - for node in nodes: - target_node = node.split(":")[-1] - shading_engine = cmds.listConnections( - node, type="shadingEngine" - )[0] - cmds.sets(target_node, edit=True, forceElement=shading_engine) - - # Export duplicated palettes. - xgenm.exportPalette(palette, xgen_path) - - # Export Maya file. - attribute_data = {"{}.xgFileName".format(palette): xgen_filename} - with attribute_values(attribute_data): - with maintained_selection(): - cmds.select(duplicate_nodes + [palette]) - cmds.file( - maya_filepath, - force=True, - type=type, - exportSelected=True, - preserveReferences=False, - constructionHistory=True, - shader=True, - constraints=True, - expressions=True - ) - - self.log.debug("Extracted to {}".format(maya_filepath)) - - if os.path.exists(temp_xgen_path): - os.remove(temp_xgen_path) - - data = { - "xgDataPath": os.path.join( - instance.data["resourcesDir"], - "collections", - palette.replace(":", "__ns__") - ).replace("\\", "/"), - "xgProjectPath": os.path.dirname( - instance.data["resourcesDir"] - ).replace("\\", "/") - } - write_xgen_file(data, xgen_path) - - # Adding representations. - representation = { - "name": "xgen", - "ext": "xgen", - "files": xgen_filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - representation = { - "name": self.scene_type, - "ext": self.scene_type, - "files": maya_filename, - "stagingDir": staging_dir - } - instance.data["representations"].append(representation) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_cache.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_cache.py deleted file mode 100644 index b84867316c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_cache.py +++ /dev/null @@ -1,88 +0,0 @@ -import json -import os - -from ayon_maya.api import plugin -from maya import cmds - - -class ExtractYetiCache(plugin.MayaExtractorPlugin): - """Producing Yeti cache files using scene time range. - - This will extract Yeti cache file sequence and fur settings. 
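A note on the cache path written below: the writeCache pattern appears to
have lost a token in formatting. Judging from the adjacent comment ("will
be replace[d] by the Yeti node name"), the intended pattern is presumably
of the form:

    # Hypothetical reconstruction; "<NAME>" is expanded by Yeti to each
    # pgYetiMaya node's name, "%04d" to the frame number.
    path = os.path.join(dirname, "<NAME>.%04d.fur")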
- """ - - label = "Extract Yeti Cache" - families = ["yetiRig", "yeticache"] - - def process(self, instance): - - yeti_nodes = cmds.ls(instance, type="pgYetiMaya") - if not yeti_nodes: - raise RuntimeError("No pgYetiMaya nodes found in the instance") - - # Define extract output file path - dirname = self.staging_dir(instance) - - # Collect information for writing cache - start_frame = instance.data["frameStartHandle"] - end_frame = instance.data["frameEndHandle"] - preroll = instance.data["preroll"] - if preroll > 0: - start_frame -= preroll - - kwargs = {} - samples = instance.data.get("samples", 0) - if samples == 0: - kwargs.update({"sampleTimes": "0.0 1.0"}) - else: - kwargs.update({"samples": samples}) - - self.log.debug( - "Writing out cache {} - {}".format(start_frame, end_frame)) - # Start writing the files for snap shot - # will be replace by the Yeti node name - path = os.path.join(dirname, ".%04d.fur") - cmds.pgYetiCommand(yeti_nodes, - writeCache=path, - range=(start_frame, end_frame), - updateViewport=False, - generatePreview=False, - **kwargs) - - cache_files = [x for x in os.listdir(dirname) if x.endswith(".fur")] - - self.log.debug("Writing metadata file") - settings = instance.data["fursettings"] - fursettings_path = os.path.join(dirname, "yeti.fursettings") - with open(fursettings_path, "w") as fp: - json.dump(settings, fp, ensure_ascii=False) - - # build representations - if "representations" not in instance.data: - instance.data["representations"] = [] - - self.log.debug("cache files: {}".format(cache_files[0])) - - # Workaround: We do not explicitly register these files with the - # representation solely so that we can write multiple sequences - # a single Subset without renaming - it's a bit of a hack - # TODO: Implement better way to manage this sort of integration - if 'transfers' not in instance.data: - instance.data['transfers'] = [] - - publish_dir = instance.data["publishDir"] - for cache_filename in cache_files: - src = os.path.join(dirname, cache_filename) - dst = os.path.join(publish_dir, os.path.basename(cache_filename)) - instance.data['transfers'].append([src, dst]) - - instance.data["representations"].append( - { - 'name': 'fur', - 'ext': 'fursettings', - 'files': os.path.basename(fursettings_path), - 'stagingDir': dirname - } - ) - - self.log.debug("Extracted {} to {}".format(instance, dirname)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_rig.py b/server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_rig.py deleted file mode 100644 index 640b37b667..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/extract_yeti_rig.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract Yeti rig.""" - -import contextlib -import json -import os - -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -@contextlib.contextmanager -def disconnect_plugs(settings, members): - """Disconnect and store attribute connections.""" - members = cmds.ls(members, long=True) - original_connections = [] - try: - for input in settings["inputs"]: - - # Get source shapes - source_nodes = lib.lsattr("cbId", input["sourceID"]) - if not source_nodes: - continue - - source = next(s for s in source_nodes if s not in members) - - # Get destination shapes (the shapes used as hook up) - destination_nodes = lib.lsattr("cbId", input["destinationID"]) - destination = next(i for i in destination_nodes if i in members) - - # Create full connection - connections = input["connections"] - 
src_attribute = "%s.%s" % (source, connections[0])
-            dst_attribute = "%s.%s" % (destination, connections[1])
-
-            # Check if there is an actual connection
-            if not cmds.isConnected(src_attribute, dst_attribute):
-                print("No connection between %s and %s" % (
-                    src_attribute, dst_attribute))
-                continue
-
-            # Break and store connection
-            cmds.disconnectAttr(src_attribute, dst_attribute)
-            original_connections.append([src_attribute, dst_attribute])
-        yield
-    finally:
-        # Restore previous connections
-        for connection in original_connections:
-            try:
-                cmds.connectAttr(connection[0], connection[1])
-            except Exception as e:
-                print(e)
-                continue
-
-
-@contextlib.contextmanager
-def yetigraph_attribute_values(assumed_destination, resources):
-    """Get values from Yeti attributes in graph."""
-    try:
-        for resource in resources:
-            if "graphnode" not in resource:
-                continue
-
-            fname = os.path.basename(resource["source"])
-            new_fpath = os.path.join(assumed_destination, fname)
-            new_fpath = new_fpath.replace("\\", "/")
-
-            try:
-                cmds.pgYetiGraph(resource["node"],
-                                 node=resource["graphnode"],
-                                 param=resource["param"],
-                                 setParamValueString=new_fpath)
-            except Exception as exc:
-                print(">>> Exception:", exc)
-        yield
-
-    finally:
-        for resource in resources:
-            # Note: this must test the single resource dict, not the full
-            # `resources` list, otherwise no parameter is ever restored.
-            if "graphnode" not in resource:
-                continue
-
-            try:
-                cmds.pgYetiGraph(resource["node"],
-                                 node=resource["graphnode"],
-                                 param=resource["param"],
-                                 setParamValue=resource["source"])
-            except RuntimeError:
-                pass
-
-
-class ExtractYetiRig(plugin.MayaExtractorPlugin):
-    """Extract the Yeti rig to a Maya Scene and write the Yeti rig data."""
-
-    label = "Extract Yeti Rig"
-    families = ["yetiRig"]
-    scene_type = "ma"
-
-    def process(self, instance):
-        """Plugin entry point."""
-        maya_settings = instance.context.data["project_settings"]["maya"]
-        ext_mapping = {
-            item["name"]: item["value"]
-            for item in maya_settings["ext_mapping"]
-        }
-        if ext_mapping:
-            self.log.debug("Looking in settings for scene type ...")
-            # use extension mapping for first family found
-            for family in self.families:
-                try:
-                    self.scene_type = ext_mapping[family]
-                    self.log.debug(
-                        "Using {} as scene type".format(self.scene_type))
-                    break
-                except KeyError:
-                    # no preset found
-                    pass
-        yeti_nodes = cmds.ls(instance, type="pgYetiMaya")
-        if not yeti_nodes:
-            raise RuntimeError("No pgYetiMaya nodes found in the instance")
-
-        # Define extract output file path
-        dirname = self.staging_dir(instance)
-        settings_path = os.path.join(dirname, "yeti.rigsettings")
-
-        # Yeti related staging dirs
-        maya_path = os.path.join(dirname,
-                                 "yeti_rig.{}".format(self.scene_type))
-
-        self.log.debug("Writing metadata file: {}".format(settings_path))
-
-        image_search_path = resources_dir = instance.data["resourcesDir"]
-
-        settings = instance.data.get("rigsettings", None)
-        assert settings, "Yeti rig settings were not collected."
-        settings["imageSearchPath"] = image_search_path
-        with open(settings_path, "w") as fp:
-            json.dump(settings, fp, ensure_ascii=False)
-
-        # add textures to transfers
-        if 'transfers' not in instance.data:
-            instance.data['transfers'] = []
-
-        for resource in instance.data.get('resources', []):
-            for file in resource['files']:
-                src = file
-                dst = os.path.join(image_search_path, os.path.basename(file))
-                instance.data['transfers'].append([src, dst])
-
-                self.log.debug("adding transfer {} -> {}".
format(src, dst)) - - # Ensure the imageSearchPath is being remapped to the publish folder - attr_value = {"%s.imageSearchPath" % n: str(image_search_path) for - n in yeti_nodes} - - # Get input_SET members - input_set = next(i for i in instance if i == "input_SET") - - # Get all items - set_members = cmds.sets(input_set, query=True) or [] - set_members += cmds.listRelatives(set_members, - allDescendents=True, - fullPath=True) or [] - members = cmds.ls(set_members, long=True) - - nodes = instance.data["setMembers"] - resources = instance.data.get("resources", {}) - with disconnect_plugs(settings, members): - with yetigraph_attribute_values(resources_dir, resources): - with lib.attribute_values(attr_value): - cmds.select(nodes, noExpand=True) - cmds.file(maya_path, - force=True, - exportSelected=True, - typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 - preserveReferences=False, - constructionHistory=True, - shader=False) - - # Ensure files can be stored - # build representations - if "representations" not in instance.data: - instance.data["representations"] = [] - - self.log.debug("rig file: {}".format(maya_path)) - instance.data["representations"].append( - { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': os.path.basename(maya_path), - 'stagingDir': dirname - } - ) - self.log.debug("settings file: {}".format(settings_path)) - instance.data["representations"].append( - { - 'name': 'rigsettings', - 'ext': 'rigsettings', - 'files': os.path.basename(settings_path), - 'stagingDir': dirname - } - ) - - self.log.debug("Extracted {} to {}".format(instance, dirname)) - - cmds.select(clear=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml b/server_addon/maya/client/ayon_maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml deleted file mode 100644 index fa908fe425..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - -Errors found - -## Publish process has errors - -At least one plugin failed before this plugin, job won't be sent to Deadline for processing before all issues are fixed. - -### How to repair? - -Check all failing plugins (should be highlighted in red) and fix issues if possible. - - - - diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_animation_out_set_related_node_ids.xml b/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_animation_out_set_related_node_ids.xml deleted file mode 100644 index cdaf97b8f4..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_animation_out_set_related_node_ids.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - -Shape IDs mismatch original shape -## Shapes mismatch IDs with original shape - -Meshes are detected where the (deformed) mesh has a different `cbId` than -the same mesh in its deformation history. -These should normally be the same. - -### How to repair? - -By using the repair action the IDs from the shape in history will be -copied to the deformed shape. For **animation** instances using the -repair action is usually the correct fix. - - - -### How does this happen? - -When a deformer is applied in the scene on a referenced mesh that had no -deformers then Maya will create a new shape node for the mesh that -does not have the original id. 
Then on scene save new ids get created for the -meshes lacking a `cbId` and thus the mesh then has a different `cbId` than -the mesh in the deformation history. - - - - diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_maya_units.xml b/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_maya_units.xml deleted file mode 100644 index 40169b28f9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_maya_units.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - -Maya scene units -## Invalid maya scene units - -Detected invalid maya scene units: - -{issues} - - - -### How to repair? - -You can automatically repair the scene units by clicking the Repair action on -the right. - -After that restart publishing with Reload button. - - - diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_mesh_non_manifold.xml b/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_mesh_non_manifold.xml deleted file mode 100644 index 5aec3009a7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_mesh_non_manifold.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - -Non-Manifold Edges/Vertices -## Non-Manifold Edges/Vertices - -Meshes found with non-manifold edges or vertices. - -### How to repair? - -Run select invalid to select the invalid components. - -You can also try the _cleanup matching polygons_ action which will perform a -cleanup like Maya's `Mesh > Cleanup...` modeling tool. - -It is recommended to always select the invalid to see where the issue is -because if you run any repair on it you will need to double check the topology -is still like you wanted. - - - -### What is non-manifold topology? - -_Non-manifold topology_ polygons have a configuration that cannot be unfolded -into a continuous flat piece, for example: - -- Three or more faces share an edge -- Two or more faces share a single vertex but no edge. -- Adjacent faces have opposite normals - - - - diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_node_ids.xml b/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_node_ids.xml deleted file mode 100644 index 2ef4bc95c2..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_node_ids.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - -Missing node ids -## Nodes found with missing `cbId` - -Nodes were detected in your scene which are missing required `cbId` -attributes for identification further in the pipeline. - -### How to repair? - -The node ids are auto-generated on scene save, and thus the easiest fix is to -save your scene again. - -After that restart publishing with Reload button. - - -### Invalid nodes - -{nodes} - - -### How could this happen? - -This often happens if you've generated new nodes but haven't saved your scene -after creating the new nodes. - - - diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_rig_out_set_node_ids.xml b/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_rig_out_set_node_ids.xml deleted file mode 100644 index 956a7adb3b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_rig_out_set_node_ids.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - -Shape IDs mismatch original shape -## Shapes mismatch IDs with original shape - -Meshes are detected in the **rig** where the (deformed) mesh has a different -`cbId` than the same mesh in its deformation history. -These should normally be the same. - -### How to repair? 
- -By using the repair action the IDs from the shape in history will be -copied to the deformed shape. For rig instances, in many cases the -correct fix is to use the repair action **unless** you explicitly tried -to update the `cbId` values on the meshes - in that case you actually want -to do to the reverse and copy the IDs from the deformed mesh to the history -mesh instead. - - - -### How does this happen? - -When a deformer is applied in the scene on a referenced mesh that had no -deformers then Maya will create a new shape node for the mesh that -does not have the original id. Then on scene save new ids get created for the -meshes lacking a `cbId` and thus the mesh then has a different `cbId` than -the mesh in the deformation history. - - - - diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml b/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml deleted file mode 100644 index d30c4cb69d..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - -Skeletal Mesh Top Node -## Skeletal meshes needs common root - -Skeletal meshes and their joints must be under one common root. - -### How to repair? - -Make sure all geometry and joints resides under same root. - - - diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/increment_current_file_deadline.py b/server_addon/maya/client/ayon_maya/plugins/publish/increment_current_file_deadline.py deleted file mode 100644 index 66019c4837..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/increment_current_file_deadline.py +++ /dev/null @@ -1,39 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin - - -class IncrementCurrentFileDeadline(plugin.MayaContextPlugin): - """Increment the current file. - - Saves the current maya scene with an increased version number. - - """ - - label = "Increment current file" - order = pyblish.api.IntegratorOrder + 9.0 - families = ["workfile"] - optional = True - - def process(self, context): - - from ayon_core.lib import version_up - from ayon_core.pipeline.publish import get_errored_plugins_from_context - from maya import cmds - - errored_plugins = get_errored_plugins_from_context(context) - if any(plugin.__name__ == "MayaSubmitDeadline" - for plugin in errored_plugins): - raise RuntimeError("Skipping incrementing current file because " - "submission to deadline failed.") - - current_filepath = context.data["currentFile"] - new_filepath = version_up(current_filepath) - - # # Ensure the suffix is .ma because we're saving to `mayaAscii` type - if new_filepath.endswith(".ma"): - fileType = "mayaAscii" - elif new_filepath.endswith(".mb"): - fileType = "mayaBinary" - - cmds.file(rename=new_filepath) - cmds.file(save=True, force=True, type=fileType) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/reset_xgen_attributes.py b/server_addon/maya/client/ayon_maya/plugins/publish/reset_xgen_attributes.py deleted file mode 100644 index ac9e1beeec..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/reset_xgen_attributes.py +++ /dev/null @@ -1,36 +0,0 @@ -import pyblish.api -from ayon_maya.api import plugin -from maya import cmds - - -class ResetXgenAttributes(plugin.MayaInstancePlugin): - """Reset Xgen attributes. - - When the incremental save of the workfile triggers, the Xgen attributes - changes so this plugin will change it back to the values before publishing. 
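`version_up` (ayon_core.lib), used by the increment plugin above, bumps
the trailing version token in a file name. A minimal sketch of the idea,
assuming a hypothetical "name_v###.ext" naming scheme (the real helper
handles padding and collisions more carefully):

    import re


    def version_up(filepath):
        """Bump the first v### token found in the path."""
        def _bump(match):
            digits = match.group(1)
            return "v{:0{}d}".format(int(digits) + 1, len(digits))
        return re.sub(r"v(\d+)", _bump, filepath, count=1)


    # version_up("/work/shot010_v006.ma") -> "/work/shot010_v007.ma"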
- """ - - label = "Reset Xgen Attributes." - # Offset to run after workfile increment plugin. - order = pyblish.api.IntegratorOrder + 10.0 - families = ["workfile"] - - def process(self, instance): - xgen_attributes = instance.data.get("xgenAttributes", {}) - if not xgen_attributes: - return - - for palette, data in xgen_attributes.items(): - for attr, value in data.items(): - node_attr = "{}.{}".format(palette, attr) - self.log.debug( - "Setting \"{}\" on \"{}\"".format(value, node_attr) - ) - cmds.setAttr(node_attr, value, type="string") - cmds.setAttr(palette + ".xgExportAsDelta", True) - - # Need to save the scene, cause the attribute changes above does not - # mark the scene as modified so user can exit without committing the - # changes. - self.log.debug("Saving changes.") - cmds.file(save=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/save_scene.py b/server_addon/maya/client/ayon_maya/plugins/publish/save_scene.py deleted file mode 100644 index 9c23fcff85..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/save_scene.py +++ /dev/null @@ -1,33 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.workfile.lock_workfile import ( - is_workfile_lock_enabled, - remove_workfile_lock, -) -from ayon_maya.api import plugin - - -class SaveCurrentScene(plugin.MayaContextPlugin): - """Save current scene.""" - - label = "Save current file" - order = pyblish.api.ExtractorOrder - 0.49 - families = ["renderlayer", "workfile"] - - def process(self, context): - import maya.cmds as cmds - - current = cmds.file(query=True, sceneName=True) - assert context.data['currentFile'] == current - - # If file has no modifications, skip forcing a file save - if not cmds.file(query=True, modified=True): - self.log.debug("Skipping file save as there " - "are no modifications..") - return - project_name = context.data["projectName"] - project_settings = context.data["project_settings"] - # remove lockfile before saving - if is_workfile_lock_enabled("maya", project_name, project_settings): - remove_workfile_lock(current) - self.log.info("Saving current file: {}".format(current)) - cmds.file(save=True, force=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_alembic_options_defaults.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_alembic_options_defaults.py deleted file mode 100644 index fd4e2254a7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_alembic_options_defaults.py +++ /dev/null @@ -1,130 +0,0 @@ -import inspect - -import pyblish.api -from ayon_core.pipeline import OptionalPyblishPluginMixin -from ayon_core.pipeline.publish import PublishValidationError, RepairAction -from ayon_maya.api import plugin - - -class ValidateAlembicDefaultsPointcache( - plugin.MayaInstancePlugin, OptionalPyblishPluginMixin -): - """Validate the attributes on the instance are defaults. - - The defaults are defined in the project settings. 
- """ - - order = pyblish.api.ValidatorOrder - families = ["pointcache"] - label = "Validate Alembic Options Defaults" - actions = [RepairAction] - optional = True - - plugin_name = "ExtractAlembic" - - @classmethod - def _get_settings(cls, context): - maya_settings = context.data["project_settings"]["maya"] - settings = maya_settings["publish"]["ExtractAlembic"] - return settings - - @classmethod - def _get_publish_attributes(cls, instance): - return instance.data["publish_attributes"][cls.plugin_name] - - def process(self, instance): - if not self.is_active(instance.data): - return - - settings = self._get_settings(instance.context) - attributes = self._get_publish_attributes(instance) - - invalid = {} - for key, value in attributes.items(): - if key not in settings: - # This may occur if attributes have changed over time and an - # existing instance has older legacy attributes that do not - # match the current settings definition. - self.log.warning( - "Publish attribute %s not found in Alembic Export " - "default settings. Ignoring validation for attribute.", - key - ) - continue - - default_value = settings[key] - - # Lists are best to compared sorted since we can't rely on - # the order of the items. - if isinstance(value, list): - value = sorted(value) - default_value = sorted(default_value) - - if value != default_value: - invalid[key] = value, default_value - - if invalid: - non_defaults = "\n".join( - f"- {key}: {value} \t(default: {default_value})" - for key, (value, default_value) in invalid.items() - ) - - raise PublishValidationError( - "Alembic extract options differ from default values:\n" - f"{non_defaults}", - description=self.get_description() - ) - - @staticmethod - def get_description(): - return inspect.cleandoc( - """### Alembic Extract settings differ from defaults - - The alembic export options differ from the project default values. - - If this is intentional you can disable this validation by - disabling **Validate Alembic Options Default**. - - If not you may use the "Repair" action to revert all the options to - their default values. - - """ - ) - - @classmethod - def repair(cls, instance): - # Find create instance twin. - create_context = instance.context.data["create_context"] - create_instance = create_context.get_instance_by_id( - instance.data["instance_id"] - ) - - # Set the settings values on the create context then save to workfile. - settings = cls._get_settings(instance.context) - attributes = cls._get_publish_attributes(create_instance) - for key in attributes: - if key not in settings: - # This may occur if attributes have changed over time and an - # existing instance has older legacy attributes that do not - # match the current settings definition. - cls.log.warning( - "Publish attribute %s not found in Alembic Export " - "default settings. Ignoring repair for attribute.", - key - ) - continue - attributes[key] = settings[key] - - create_context.save_changes() - - -class ValidateAlembicDefaultsAnimation( - ValidateAlembicDefaultsPointcache -): - """Validate the attributes on the instance are defaults. - - The defaults are defined in the project settings. 
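The comparison rule used by the validator above, as a standalone sketch:
list-valued settings are compared order-insensitively, everything else
directly (hypothetical values in the examples):

    def differs_from_default(value, default):
        """True when a publish attribute deviates from its default."""
        if isinstance(value, list) and isinstance(default, list):
            return sorted(value) != sorted(default)
        return value != default

    # differs_from_default(["uv", "normals"], ["normals", "uv"]) -> False
    # differs_from_default(True, False) -> True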
- """ - label = "Validate Alembic Options Defaults" - families = ["animation"] - plugin_name = "ExtractAnimation" diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_content.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_content.py deleted file mode 100644 index b10a1a2bb7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_content.py +++ /dev/null @@ -1,58 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -class ValidateAnimationContent(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Adheres to the content of 'animation' product type - - - Must have collected `out_hierarchy` data. - - All nodes in `out_hierarchy` must be in the instance. - - """ - - order = ValidateContentsOrder - families = ["animation"] - label = "Animation Content" - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - @classmethod - def get_invalid(cls, instance): - - out_set = next((i for i in instance.data["setMembers"] if - i.endswith("out_SET")), None) - - assert out_set, ("Instance '%s' has no objectSet named: `OUT_set`. " - "If this instance is an unloaded reference, " - "please deactivate by toggling the 'Active' attribute" - % instance.name) - - assert 'out_hierarchy' in instance.data, "Missing `out_hierarchy` data" - - out_sets = [node for node in instance if node.endswith("out_SET")] - msg = "Couldn't find exactly one out_SET: {0}".format(out_sets) - assert len(out_sets) == 1, msg - - # All nodes in the `out_hierarchy` must be among the nodes that are - # in the instance. The nodes in the instance are found from the top - # group, as such this tests whether all nodes are under that top group. - - lookup = set(instance[:]) - invalid = [node for node in instance.data['out_hierarchy'] if - node not in lookup] - - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Animation content is invalid. See log.") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_out_set_related_node_ids.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_out_set_related_node_ids.py deleted file mode 100644 index 40f03b2690..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_animation_out_set_related_node_ids.py +++ /dev/null @@ -1,108 +0,0 @@ -import ayon_maya.api.action -import maya.cmds as cmds -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishXmlValidationError, - RepairAction, - ValidateContentsOrder, - apply_plugin_settings_automatically, - get_plugin_settings, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class ValidateOutRelatedNodeIds(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate if deformed shapes have related IDs to the original shapes - - When a deformer is applied in the scene on a referenced mesh that already - had deformers then Maya will create a new shape node for the mesh that - does not have the original id. This validator checks whether the ids are - valid on all the shape nodes in the instance. 
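`lib.get_id` and `lib.set_id`, used by this validator and its repair
action, are assumed to read and write a string "cbId" attribute on the
node. A minimal sketch (the real helpers in ayon_maya.api.lib handle more
edge cases, such as generating new ids):

    from maya import cmds


    def get_id(node):
        """Return the node's cbId value, or None when absent."""
        if not cmds.attributeQuery("cbId", node=node, exists=True):
            return None
        return cmds.getAttr("{}.cbId".format(node))


    def set_id(node, value, overwrite=False):
        """Write cbId, optionally replacing an existing id."""
        plug = "{}.cbId".format(node)
        if not cmds.attributeQuery("cbId", node=node, exists=True):
            cmds.addAttr(node, longName="cbId", dataType="string")
        elif not overwrite and cmds.getAttr(plug):
            return
        cmds.setAttr(plug, value, type="string")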
- - """ - - order = ValidateContentsOrder - families = ['animation', "pointcache", "proxyAbc"] - hosts = ['maya'] - label = 'Animation Out Set Related Node Ids' - actions = [ - ayon_maya.api.action.SelectInvalidAction, - RepairAction - ] - optional = False - - @classmethod - def apply_settings(cls, project_settings): - # Preserve automatic settings applying logic - settings = get_plugin_settings(plugin=cls, - project_settings=project_settings, - log=cls.log, - category="maya") - apply_plugin_settings_automatically(cls, settings, logger=cls.log) - - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - """Process all meshes""" - if not self.is_active(instance.data): - return - # Ensure all nodes have a cbId and a related ID to the original shapes - # if a deformer has been created on the shape - invalid = self.get_invalid(instance) - if invalid: - - # Use the short names - invalid = cmds.ls(invalid) - invalid.sort() - - # Construct a human-readable list - invalid = "\n".join("- {}".format(node) for node in invalid) - - raise PublishXmlValidationError( - plugin=self, - message=( - "Nodes have different IDs than their input " - "history: \n{0}".format(invalid) - ) - ) - - @classmethod - def get_invalid(cls, instance): - """Get all nodes which do not match the criteria""" - - invalid = [] - types = ["mesh", "nurbsCurve", "nurbsSurface"] - - # get asset id - nodes = instance.data.get("out_hierarchy", instance[:]) - for node in cmds.ls(nodes, type=types, long=True): - - # We only check when the node is *not* referenced - if cmds.referenceQuery(node, isNodeReferenced=True): - continue - - # Get the current id of the node - node_id = lib.get_id(node) - - history_id = lib.get_id_from_sibling(node) - if history_id is not None and node_id != history_id: - invalid.append(node) - - return invalid - - @classmethod - def repair(cls, instance): - - for node in cls.get_invalid(instance): - # Get the original id from history - history_id = lib.get_id_from_sibling(node) - if not history_id: - cls.log.error("Could not find ID in history for '%s'", node) - continue - - lib.set_id(node, history_id, overwrite=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source.py deleted file mode 100644 index edc4161dff..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source.py +++ /dev/null @@ -1,127 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api.lib import is_visible -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateArnoldSceneSource(plugin.MayaInstancePlugin): - """Validate Arnold Scene Source. - - Ensure no nodes are hidden. - """ - - order = ValidateContentsOrder - families = ["ass", "assProxy"] - label = "Validate Arnold Scene Source" - - def process(self, instance): - # Validate against having nodes hidden, which will result in the - # extraction to ignore the node. 
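`is_visible` from ayon_maya.api.lib is assumed to test effective DAG
visibility. A rough sketch of the idea; the shipped helper also honours
intermediate objects, lodVisibility and display layers:

    from maya import cmds


    def is_visible(node):
        """Fail on the first ancestor with visibility turned off."""
        path = cmds.ls(node, long=True)[0]
        while path:
            if not cmds.getAttr("{}.visibility".format(path)):
                return False
            path = path.rsplit("|", 1)[0]
        return True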
- nodes = instance.data["members"] + instance.data.get("proxy", []) - nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')] - hidden_nodes = [ - x for x in nodes if not is_visible(x, intermediateObject=False) - ] - if hidden_nodes: - raise PublishValidationError( - "Found hidden nodes:\n\n{}\n\nPlease unhide for" - " publishing.".format("\n".join(hidden_nodes)) - ) - - -class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin): - """Validate Arnold Scene Source Proxy. - - When using proxies we need the nodes to share the same names and not be - parent to the world. This ends up needing at least two groups with content - nodes and proxy nodes in another. - """ - - order = ValidateContentsOrder - hosts = ["maya"] - families = ["assProxy"] - label = "Validate Arnold Scene Source Proxy" - - def _get_nodes_by_name(self, nodes): - ungrouped_nodes = [] - nodes_by_name = {} - parents = [] - for node in nodes: - node_split = node.split("|") - if len(node_split) == 2: - ungrouped_nodes.append(node) - - parent = "|".join(node_split[:-1]) - if parent: - parents.append(parent) - - node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - nodes_by_name[node_name] = node - - return ungrouped_nodes, nodes_by_name, parents - - def process(self, instance): - # Validate against nodes directly parented to world. - ungrouped_nodes = [] - - nodes, content_nodes_by_name, content_parents = ( - self._get_nodes_by_name(instance.data["members"]) - ) - ungrouped_nodes.extend(nodes) - - nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_by_name( - instance.data.get("proxy", []) - ) - ungrouped_nodes.extend(nodes) - - if ungrouped_nodes: - raise PublishValidationError( - "Found nodes parented to the world: {}\n" - "All nodes need to be grouped.".format(ungrouped_nodes) - ) - - # Validate for content and proxy nodes amount being the same. - if len(instance.data["members"]) != len(instance.data["proxy"]): - raise PublishValidationError( - "Amount of content nodes ({}) and proxy nodes ({}) needs to " - "be the same.\nContent nodes: {}\nProxy nodes:{}".format( - len(instance.data["members"]), - len(instance.data["proxy"]), - instance.data["members"], - instance.data["proxy"] - ) - ) - - # Validate against content and proxy nodes sharing same parent. - if list(set(content_parents) & set(proxy_parents)): - raise PublishValidationError( - "Content and proxy nodes cannot share the same parent." - ) - - # Validate for content and proxy nodes sharing same names. 
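The name comparison below in miniature, with hypothetical node names:

    content = {"body_GEO", "hair_GEO"}   # short names of content nodes
    proxy = {"body_GEO"}                 # short names of proxy nodes

    missing_in_proxy = sorted(content - proxy)     # -> ["hair_GEO"]
    unexpected_in_proxy = sorted(proxy - content)  # -> []
    names_match = not missing_in_proxy and not unexpected_in_proxy  # False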
- sorted_content_names = sorted(content_nodes_by_name.keys()) - sorted_proxy_names = sorted(proxy_nodes_by_name.keys()) - odd_content_names = list( - set(sorted_content_names) - set(sorted_proxy_names) - ) - odd_content_nodes = [ - content_nodes_by_name[x] for x in odd_content_names - ] - odd_proxy_names = list( - set(sorted_proxy_names) - set(sorted_content_names) - ) - odd_proxy_nodes = [ - proxy_nodes_by_name[x] for x in odd_proxy_names - ] - if not sorted_content_names == sorted_proxy_names: - raise PublishValidationError( - "Content and proxy nodes need to share the same names.\n" - "Content nodes not matching: {}\n" - "Proxy nodes not matching: {}".format( - odd_content_nodes, odd_proxy_nodes - ) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source_cbid.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source_cbid.py deleted file mode 100644 index 8da8813b0d..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_arnold_scene_source_cbid.py +++ /dev/null @@ -1,83 +0,0 @@ -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class ValidateArnoldSceneSourceCbid(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate Arnold Scene Source Cbid. - - It is required for the proxy and content nodes to share the same cbid. - """ - - order = ValidateContentsOrder - families = ["assProxy"] - label = "Validate Arnold Scene Source CBID" - actions = [RepairAction] - optional = False - - @classmethod - def apply_settings(cls, project_settings): - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - @staticmethod - def _get_nodes_by_name(nodes): - nodes_by_name = {} - for node in nodes: - node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - nodes_by_name[node_name] = node - - return nodes_by_name - - @classmethod - def get_invalid_couples(cls, instance): - nodes_by_name = cls._get_nodes_by_name(instance.data["members"]) - proxy_nodes_by_name = cls._get_nodes_by_name(instance.data["proxy"]) - - invalid_couples = [] - for content_name, content_node in nodes_by_name.items(): - proxy_node = proxy_nodes_by_name.get(content_name, None) - - if not proxy_node: - cls.log.debug( - "Content node '{}' has no matching proxy node.".format( - content_node - ) - ) - continue - - content_id = lib.get_id(content_node) - proxy_id = lib.get_id(proxy_node) - if content_id != proxy_id: - invalid_couples.append((content_node, proxy_node)) - - return invalid_couples - - def process(self, instance): - if not self.is_active(instance.data): - return - # Proxy validation. - if not instance.data["proxy"]: - return - - # Validate for proxy nodes sharing the same cbId as content nodes. 
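The short-name normalisation used by _get_nodes_by_name above, worked
through on a hypothetical DAG path:

    node = "|assets|char_01_:body_GRP|char_01_:body_GEO"
    short = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
    # rsplit("|", 1)[-1] -> "char_01_:body_GEO"
    # rsplit(":", 1)[-1] -> "body_GEO"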
- invalid_couples = self.get_invalid_couples(instance) - if invalid_couples: - raise PublishValidationError( - "Found proxy nodes with mismatching cbid:\n{}".format( - invalid_couples - ) - ) - - @classmethod - def repair(cls, instance): - for content_node, proxy_node in cls.get_invalid_couples(instance): - lib.set_id(proxy_node, lib.get_id(content_node), overwrite=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_ass_relative_paths.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_ass_relative_paths.py deleted file mode 100644 index 36c220f862..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_ass_relative_paths.py +++ /dev/null @@ -1,137 +0,0 @@ -import os -import types - -import maya.cmds as cmds -from mtoa.core import createOptions - -from ayon_core.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_maya.api import plugin - - -class ValidateAssRelativePaths(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure exporting ass file has set relative texture paths""" - - order = ValidateContentsOrder - families = ['ass'] - label = "ASS has relative texture paths" - actions = [RepairAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - # we cannot ask this until user open render settings as - # `defaultArnoldRenderOptions` doesn't exist - errors = [] - - try: - absolute_texture = cmds.getAttr( - "defaultArnoldRenderOptions.absolute_texture_paths") - absolute_procedural = cmds.getAttr( - "defaultArnoldRenderOptions.absolute_procedural_paths") - texture_search_path = cmds.getAttr( - "defaultArnoldRenderOptions.tspath" - ) - procedural_search_path = cmds.getAttr( - "defaultArnoldRenderOptions.pspath" - ) - except ValueError: - raise PublishValidationError( - "Default Arnold options has not been created yet." - ) - - scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True)) - scene_name, _ = os.path.splitext(scene_basename) - - if self.maya_is_true(absolute_texture): - errors.append("Texture path is set to be absolute") - if self.maya_is_true(absolute_procedural): - errors.append("Procedural path is set to be absolute") - - anatomy = instance.context.data["anatomy"] - - # Use project root variables for multiplatform support, see: - # https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path - # ':' as path separator is supported by Arnold for all platforms. - keys = anatomy.root_environments().keys() - paths = [] - for k in keys: - paths.append("[{}]".format(k)) - - self.log.debug("discovered roots: {}".format(":".join(paths))) - - if ":".join(paths) not in texture_search_path: - errors.append(( - "Project roots {} are not in texture_search_path: {}" - ).format(paths, texture_search_path)) - - if ":".join(paths) not in procedural_search_path: - errors.append(( - "Project roots {} are not in procedural_search_path: {}" - ).format(paths, procedural_search_path)) - - if errors: - raise PublishValidationError("\n".join(errors)) - - @classmethod - def repair(cls, instance): - createOptions() - - texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath") - procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath") - - # Use project root variables for multiplatform support, see: - # https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path - # ':' as path separator is supported by Arnold for all platforms. 
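A worked example of the search-path construction below, assuming two
hypothetical project roots named "work" and "publish":

    keys = ["work", "publish"]
    paths = ["[{}]".format(k) for k in keys]
    # ":".join(paths) -> "[work]:[publish]"

    # Prepending the tokens keeps any user-defined entries intact:
    texture_path = "/some/legacy/path"   # hypothetical existing tspath
    new_tspath = ":".join([p for p in paths + [texture_path] if p])
    # -> "[work]:[publish]:/some/legacy/path"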
- anatomy = instance.context.data["anatomy"] - keys = anatomy.root_environments().keys() - paths = [] - for k in keys: - paths.append("[{}]".format(k)) - - cmds.setAttr( - "defaultArnoldRenderOptions.tspath", - ":".join([p for p in paths + [texture_path] if p]), - type="string" - ) - cmds.setAttr( - "defaultArnoldRenderOptions.absolute_texture_paths", - False - ) - - cmds.setAttr( - "defaultArnoldRenderOptions.pspath", - ":".join([p for p in paths + [procedural_path] if p]), - type="string" - ) - cmds.setAttr( - "defaultArnoldRenderOptions.absolute_procedural_paths", - False - ) - - @staticmethod - def find_absolute_path(relative_path, all_root_paths): - for root_path in all_root_paths: - possible_path = os.path.join(root_path, relative_path) - if os.path.exists(possible_path): - return possible_path - - def maya_is_true(self, attr_val): - """ - Whether a Maya attr evaluates to True. - When querying an attribute value from an ambiguous object the - Maya API will return a list of values, which need to be properly - handled to evaluate properly. - """ - if isinstance(attr_val, bool): - return attr_val - elif isinstance(attr_val, (list, types.GeneratorType)): - return any(attr_val) - else: - return bool(attr_val) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_name.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_name.py deleted file mode 100644 index 4dfe7214bf..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_name.py +++ /dev/null @@ -1,59 +0,0 @@ -import ayon_maya.api.action -import maya.cmds as cmds -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_maya.api import plugin - - -class ValidateAssemblyName(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """ Ensure Assembly name ends with `GRP` - - Check if assembly name ends with `_GRP` string. 
- """ - - label = "Validate Assembly Name" - order = pyblish.api.ValidatorOrder - families = ["assembly"] - actions = [ayon_maya.api.action.SelectInvalidAction] - active = False - optional = True - - @classmethod - def get_invalid(cls, instance): - cls.log.debug("Checking name of {}".format(instance.name)) - - content_instance = instance.data.get("setMembers", None) - if not content_instance: - cls.log.error("Instance has no nodes!") - return True - - # All children will be included in the extracted export so we also - # validate *all* descendents of the set members and we skip any - # intermediate shapes - descendants = cmds.listRelatives(content_instance, - allDescendents=True, - fullPath=True) or [] - descendants = cmds.ls( - descendants, noIntermediate=True, type="transform") - content_instance = list(set(content_instance + descendants)) - assemblies = cmds.ls(content_instance, assemblies=True, long=True) - - invalid = [] - for cr in assemblies: - if not cr.endswith('_GRP'): - cls.log.error("{} doesn't end with _GRP".format(cr)) - invalid.append(cr) - - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Found {} invalid named assembly " - "items".format(len(invalid))) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_namespaces.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_namespaces.py deleted file mode 100644 index 324b12a207..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_namespaces.py +++ /dev/null @@ -1,47 +0,0 @@ -import ayon_maya.api.action -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_maya.api import plugin - - -class ValidateAssemblyNamespaces(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure namespaces are not nested. 
- - In the outliner an item in a normal namespace looks as following: - props_desk_01_:modelDefault - - Any namespace which diverts from that is illegal, example of an illegal - namespace: - room_study_01_:props_desk_01_:modelDefault - - """ - - label = "Validate Assembly Namespaces" - order = pyblish.api.ValidatorOrder - families = ["assembly"] - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - self.log.debug("Checking namespace for %s" % instance.name) - if self.get_invalid(instance): - raise PublishValidationError("Nested namespaces found") - - @classmethod - def get_invalid(cls, instance): - - from maya import cmds - - invalid = [] - for item in cmds.ls(instance): - item_parts = item.split("|", 1)[0].rsplit(":") - if len(item_parts[:-1]) > 1: - invalid.append(item) - - return invalid diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_transforms.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_transforms.py deleted file mode 100644 index 7fc14560f7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_assembly_transforms.py +++ /dev/null @@ -1,118 +0,0 @@ -import ayon_maya.api.action -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateAssemblyModelTransforms(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Verify only root nodes of the loaded asset have transformations. - - Note: This check is temporary and is subject to change. - - Example outliner: - <> means referenced - =================================================================== - - setdress_GRP| - props_GRP| - barrel_01_:modelDefault| [can have transforms] - <> barrel_01_:barrel_GRP [CAN'T have transforms] - - fence_01_:modelDefault| [can have transforms] - <> fence_01_:fence_GRP [CAN'T have transforms] - - """ - - order = pyblish.api.ValidatorOrder + 0.49 - label = "Assembly Model Transforms" - families = ["assembly"] - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - - prompt_message = ("You are about to reset the matrix to the default values." - " This can alter the look of your scene. 
" - "Are you sure you want to continue?") - - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Found {} invalid transforms of assembly " - "items").format(len(invalid))) - - @classmethod - def get_invalid(cls, instance): - - from ayon_maya.api import lib - - # Get all transforms in the loaded containers - container_roots = cmds.listRelatives(instance.data["nodesHierarchy"], - children=True, - type="transform", - fullPath=True) - - transforms_in_container = cmds.listRelatives(container_roots, - allDescendents=True, - type="transform", - fullPath=True) - - # Extra check due to the container roots still being passed through - transforms_in_container = [i for i in transforms_in_container if i - not in container_roots] - - # Ensure all are identity matrix - invalid = [] - for transform in transforms_in_container: - node_matrix = cmds.xform(transform, - query=True, - matrix=True, - objectSpace=True) - if not lib.matrix_equals(node_matrix, lib.DEFAULT_MATRIX): - invalid.append(transform) - - return invalid - - @classmethod - def repair(cls, instance): - """Reset matrix for illegally transformed nodes - - We want to ensure the user knows the reset will alter the look of - the current scene because the transformations were done on asset - nodes instead of the asset top node. - - Args: - instance: - - Returns: - None - - """ - - from ayon_maya.api import lib - from qtpy import QtWidgets - - # Store namespace in variable, cosmetics thingy - choice = QtWidgets.QMessageBox.warning( - None, - "Matrix reset", - cls.prompt_message, - QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel - ) - - invalid = cls.get_invalid(instance) - if not invalid: - cls.log.info("No invalid nodes") - return - - if choice: - cmds.xform(invalid, matrix=lib.DEFAULT_MATRIX, objectSpace=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_attributes.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_attributes.py deleted file mode 100644 index 8d4d8323ce..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_attributes.py +++ /dev/null @@ -1,119 +0,0 @@ -import json -from collections import defaultdict - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api.lib import set_attribute -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateAttributes(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure attributes are consistent. - - Attributes to validate and their values comes from the - "maya/attributes.json" preset, which needs this structure: - { - "family": { - "node_name.attribute_name": attribute_value - } - } - """ - - order = ValidateContentsOrder - label = "Validate Attributes" - actions = [RepairAction] - optional = True - - attributes = "{}" - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Check for preset existence. 
-        if not self.get_attributes_data():
-            return
-
-        invalid = self.get_invalid(instance, compute=True)
-        if invalid:
-            raise PublishValidationError(
-                "Found attributes with invalid values: {}".format(invalid)
-            )
-
-    @classmethod
-    def get_attributes_data(cls):
-        return json.loads(cls.attributes)
-
-    @classmethod
-    def get_invalid(cls, instance, compute=False):
-        if compute:
-            return cls.get_invalid_attributes(instance)
-        else:
-            return instance.data.get("invalid_attributes", [])
-
-    @classmethod
-    def get_invalid_attributes(cls, instance):
-        invalid_attributes = []
-
-        attributes_data = cls.get_attributes_data()
-        # Filter families.
-        families = [instance.data["productType"]]
-        families += instance.data.get("families", [])
-        families = set(families) & set(attributes_data.keys())
-        if not families:
-            return []
-
-        # Get all attributes to validate.
-        attributes = defaultdict(dict)
-        for family in families:
-            if family not in attributes_data:
-                # No attributes to validate for family
-                continue
-
-            for preset_attr, preset_value in attributes_data[family].items():
-                node_name, attribute_name = preset_attr.split(".", 1)
-                attributes[node_name][attribute_name] = preset_value
-
-        if not attributes:
-            return []
-
-        # Get invalid attributes.
-        nodes = cmds.ls(long=True)
-        for node in nodes:
-            node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
-            if node_name not in attributes:
-                continue
-
-            for attr_name, expected in attributes[node_name].items():
-
-                # Skip if attribute does not exist
-                if not cmds.attributeQuery(attr_name, node=node, exists=True):
-                    continue
-
-                plug = "{}.{}".format(node, attr_name)
-                value = cmds.getAttr(plug)
-                if value != expected:
-                    invalid_attributes.append(
-                        {
-                            "attribute": plug,
-                            "expected": expected,
-                            "current": value
-                        }
-                    )
-
-        instance.data["invalid_attributes"] = invalid_attributes
-        return invalid_attributes
-
-    @classmethod
-    def repair(cls, instance):
-        invalid = cls.get_invalid(instance)
-        for data in invalid:
-            node, attr = data["attribute"].split(".", 1)
-            value = data["expected"]
-            set_attribute(node=node, attribute=attr, value=value)
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_attributes.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_attributes.py
deleted file mode 100644
index 8c3f3800cc..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_attributes.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    ValidateContentsOrder,
-)
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateCameraAttributes(plugin.MayaInstancePlugin,
-                               OptionalPyblishPluginMixin):
-    """Validates Camera has no invalid attribute keys or values.
-
-    The Alembic file format does not support a specific subset of attributes,
-    so we validate that no values are set on those, as the output would not
-    match the current scene. Examples are the preScale, film offsets and
-    film roll.
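-
-    The expected defaults (see DEFAULTS below) are 0.0 for the film offset,
-    translate and roll attributes, and 1.0 for preScale.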
-
-    """
-
-    order = ValidateContentsOrder
-    families = ['camera']
-    hosts = ['maya']
-    label = 'Camera Attributes'
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    optional = True
-
-    DEFAULTS = [
-        ("filmFitOffset", 0.0),
-        ("horizontalFilmOffset", 0.0),
-        ("verticalFilmOffset", 0.0),
-        ("preScale", 1.0),
-        ("filmTranslateH", 0.0),
-        ("filmTranslateV", 0.0),
-        ("filmRollValue", 0.0)
-    ]
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        # get cameras
-        members = instance.data['setMembers']
-        shapes = cmds.ls(members, dag=True, shapes=True, long=True)
-        cameras = cmds.ls(shapes, type='camera', long=True)
-
-        invalid = set()
-        for cam in cameras:
-
-            for attr, default_value in cls.DEFAULTS:
-                plug = "{}.{}".format(cam, attr)
-                value = cmds.getAttr(plug)
-
-                # Check if the value is the default
-                if value != default_value:
-                    cls.log.warning("Invalid attribute value: {0} "
-                                    "(should be: {1})".format(plug,
-                                                              default_value))
-                    invalid.add(cam)
-
-                if cmds.listConnections(plug, source=True, destination=False):
-                    # TODO: Validate correctly whether the value is correct
-                    cls.log.warning("%s has incoming connections, validation "
-                                    "is unpredictable." % plug)
-
-        return list(invalid)
-
-    def process(self, instance):
-        """Process all the nodes in the instance"""
-        if not self.is_active(instance.data):
-            return
-        invalid = self.get_invalid(instance)
-
-        if invalid:
-            raise PublishValidationError(
-                "Invalid camera attributes: {}".format(invalid))
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_contents.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_contents.py
deleted file mode 100644
index 42a5ef1769..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_camera_contents.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from maya import cmds
-
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    PublishValidationError,
-    ValidateContentsOrder,
-    OptionalPyblishPluginMixin)
-from ayon_maya.api import plugin
-
-
-class ValidateCameraContents(plugin.MayaInstancePlugin,
-                             OptionalPyblishPluginMixin):
-    """Validates Camera instance contents.
-
-    A Camera instance may only hold a SINGLE camera's transform, nothing else.
-
-    It may hold a "locator" as shape, but no other shape types may exist
-    anywhere down the hierarchy.
-
-    """
-
-    order = ValidateContentsOrder
-    families = ['camera']
-    label = 'Camera Contents'
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    validate_shapes = True
-    optional = False
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        # get cameras
-        members = instance.data['setMembers']
-        shapes = cmds.ls(members, dag=True, shapes=True, long=True)
-
-        # single camera
-        invalid = []
-        cameras = cmds.ls(shapes, type='camera', long=True)
-        if len(cameras) != 1:
-            cls.log.error("Camera instance must have a single camera. "
" - "Found {0}: {1}".format(len(cameras), cameras)) - invalid.extend(cameras) - - # We need to check this edge case because returning an extended - # list when there are no actual cameras results in - # still an empty 'invalid' list - if len(cameras) < 1: - if members: - # If there are members in the instance return all of - # them as 'invalid' so the user can still select invalid - cls.log.error("No cameras found in instance " - "members: {}".format(members)) - return members - - raise PublishValidationError( - "No cameras found in empty instance.") - - if not cls.validate_shapes: - cls.log.debug("Not validating shapes in the camera content" - " because 'validate shapes' is disabled") - return invalid - - # non-camera shapes - valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True) - shapes = set(shapes) - set(valid_shapes) - if shapes: - shapes = list(shapes) - cls.log.error("Camera instance should only contain camera " - "shapes. Found: {0}".format(shapes)) - invalid.extend(shapes) - - invalid = list(set(invalid)) - return invalid - - def process(self, instance): - """Process all the nodes in the instance""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Invalid camera contents: " - "{0}".format(invalid)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_color_sets.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_color_sets.py deleted file mode 100644 index f95e27def1..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_color_sets.py +++ /dev/null @@ -1,61 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateColorSets(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate all meshes in the instance have unlocked normals - - These can be removed manually through: - Modeling > Mesh Display > Color Sets Editor - - """ - - order = ValidateMeshOrder - families = ['model'] - label = 'Mesh ColorSets' - actions = [ - ayon_maya.api.action.SelectInvalidAction, RepairAction - ] - optional = True - - @staticmethod - def has_color_sets(mesh): - """Return whether a mesh node has locked normals""" - return cmds.polyColorSet(mesh, - allColorSets=True, - query=True) - - @classmethod - def get_invalid(cls, instance): - """Return the meshes with ColorSets in instance""" - - meshes = cmds.ls(instance, type='mesh', long=True) - return [mesh for mesh in meshes if cls.has_color_sets(mesh)] - - def process(self, instance): - """Raise invalid when any of the meshes have ColorSets""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - message="Meshes found with Color Sets: {0}".format(invalid) - ) - - @classmethod - def repair(cls, instance): - """Remove all Color Sets on the meshes in this instance.""" - invalid = cls.get_invalid(instance) - for mesh in invalid: - for set in cmds.polyColorSet(mesh, acs=True, q=True): - cmds.polyColorSet(mesh, colorSet=set, delete=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_current_renderlayer_renderable.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_current_renderlayer_renderable.py deleted file mode 100644 index 6c599d398d..0000000000 --- 
+++ /dev/null
@@ -1,73 +0,0 @@
-import inspect
-
-import pyblish.api
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    context_plugin_should_run,
-)
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateCurrentRenderLayerIsRenderable(plugin.MayaContextPlugin,
-                                             OptionalPyblishPluginMixin):
-    """Validate if current render layer has a renderable camera.
-
-    There is a bug in Redshift which occurs when the current render layer
-    at file open has no renderable camera. The error raised is as follows:
-
-    "No renderable cameras found. Aborting render"
-
-    This error is raised even if that render layer will not be rendered.
-
-    """
-
-    label = "Current Render Layer Has Renderable Camera"
-    order = pyblish.api.ValidatorOrder
-    families = ["renderlayer"]
-    optional = False
-
-    def process(self, context):
-        if not self.is_active(context.data):
-            return
-        # Workaround bug pyblish-base#250
-        if not context_plugin_should_run(self, context):
-            return
-
-        # This validator only makes sense when publishing renderlayer
-        # instances with Redshift. We skip validation if there isn't any.
-        if not any(self.is_active_redshift_render_instance(instance)
-                   for instance in context):
-            return
-
-        cameras = cmds.ls(type="camera", long=True)
-        renderable = any(c for c in cameras if cmds.getAttr(c + ".renderable"))
-        if not renderable:
-            layer = cmds.editRenderLayerGlobals(query=True,
-                                                currentRenderLayer=True)
-            raise PublishValidationError(
-                "Current render layer '{}' has no renderable camera".format(
-                    layer
-                ),
-                description=inspect.getdoc(self)
-            )
-
-    @staticmethod
-    def is_active_redshift_render_instance(instance) -> bool:
-        """Return whether instance is an active renderlayer instance set to
-        render with Redshift renderer."""
-        if not instance.data.get("active", True):
-            return False
-
-        # Check this before families just because it's a faster check
-        if not instance.data.get("renderer") == "redshift":
-            return False
-
-        families = set()
-        families.add(instance.data.get("family"))
-        families.update(instance.data.get("families", []))
-        if "renderlayer" not in families:
-            return False
-
-        return True
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_cycle_error.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_cycle_error.py
deleted file mode 100644
index 0b870993e9..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_cycle_error.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    ValidateContentsOrder,
-)
-from ayon_maya.api.lib import maintained_selection
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateCycleError(plugin.MayaInstancePlugin,
-                         OptionalPyblishPluginMixin):
-    """Validate nodes produce no cycle errors."""
-
-    order = ValidateContentsOrder + 0.05
-    label = "Cycle Errors"
-    hosts = ["maya"]
-    families = ["rig"]
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    optional = True
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(
-                "Nodes produce a cycle error: {}".format(invalid))
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        with maintained_selection():
-            cmds.select(instance[:], noExpand=True)
-            plugs = cmds.cycleCheck(all=False,  # check selection only
-                                    list=True)
-            invalid = cmds.ls(plugs, objectsOnly=True, long=True)
-        return invalid
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_frame_range.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_frame_range.py
deleted file mode 100644
index 90bdef4107..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_frame_range.py
+++ /dev/null
@@ -1,202 +0,0 @@
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    RepairAction,
-    ValidateContentsOrder,
-)
-from ayon_maya.api.lib_rendersetup import get_attr_in_layer, get_attr_overrides
-from ayon_maya.api import plugin
-from maya import cmds
-from maya.app.renderSetup.model.override import AbsOverride
-
-
-class ValidateFrameRange(plugin.MayaInstancePlugin,
-                         OptionalPyblishPluginMixin):
-    """Validates the frame ranges.
-
-    This is an optional validator checking if the frame range on the
-    instance matches the frame range specified for the asset.
-
-    It also validates render frame ranges of render layers.
-
-    Repair action will change everything to match the asset frame range.
-
-    This can be turned off by the artist to allow custom ranges.
-    """
-
-    label = "Validate Frame Range"
-    order = ValidateContentsOrder
-    families = ["animation",
-                "pointcache",
-                "camera",
-                "proxyAbc",
-                "renderlayer",
-                "review",
-                "yeticache"]
-    optional = True
-    actions = [RepairAction]
-    exclude_product_types = []
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-
-        context = instance.context
-        if instance.data.get("tileRendering"):
-            self.log.debug(
-                "Skipping frame range validation because "
-                "tile rendering is enabled."
-            )
-            return
-
-        frame_start_handle = int(context.data.get("frameStartHandle"))
-        frame_end_handle = int(context.data.get("frameEndHandle"))
-        handle_start = int(context.data.get("handleStart"))
-        handle_end = int(context.data.get("handleEnd"))
-        frame_start = int(context.data.get("frameStart"))
-        frame_end = int(context.data.get("frameEnd"))
-
-        inst_start = int(instance.data.get("frameStartHandle"))
-        inst_end = int(instance.data.get("frameEndHandle"))
-        inst_frame_start = int(instance.data.get("frameStart"))
-        inst_frame_end = int(instance.data.get("frameEnd"))
-        inst_handle_start = int(instance.data.get("handleStart"))
-        inst_handle_end = int(instance.data.get("handleEnd"))
-
-        # basic sanity checks
-        assert frame_start_handle <= frame_end_handle, (
-            "start frame is higher than end frame")
-
-        # compare with data on instance
-        errors = []
-        # QUESTION shouldn't this be just:
-        #   'if instance.data["productType"] in self.exclude_product_types:'
-        if [ef for ef in self.exclude_product_types
-                if instance.data["productType"] in ef]:
-            return
-        if (inst_start != frame_start_handle):
-            errors.append("Instance start frame [ {} ] doesn't "
-                          "match the one set on folder [ {} ]: "
-                          "{}/{}/{}/{} (handle/start/end/handle)".format(
-                              inst_start,
-                              frame_start_handle,
-                              handle_start, frame_start, frame_end, handle_end
-                          ))
-
-        if (inst_end != frame_end_handle):
-            errors.append("Instance end frame [ {} ] doesn't "
-                          "match the one set on folder [ {} ]: "
-                          "{}/{}/{}/{} (handle/start/end/handle)".format(
-                              inst_end,
-                              frame_end_handle,
-                              handle_start, frame_start, frame_end, handle_end
-                          ))
-
-        checks = {
-            "frame start": (frame_start, inst_frame_start),
-            "frame end": (frame_end, inst_frame_end),
-            "handle start": (handle_start, inst_handle_start),
-            "handle end":
(handle_end, inst_handle_end) - } - for label, values in checks.items(): - if values[0] != values[1]: - errors.append( - "{} on instance ({}) does not match with the folder " - "({}).".format(label.title(), values[1], values[0]) - ) - - if errors: - report = "Frame range settings are incorrect.\n\n" - for error in errors: - report += "- {}\n\n".format(error) - - raise PublishValidationError(report, title="Frame Range incorrect") - - @classmethod - def repair(cls, instance): - """ - Repair instance container to match folder data. - """ - - if "renderlayer" in instance.data.get("families"): - # Special behavior for renderlayers - cls.repair_renderlayer(instance) - return - - node = instance.data["name"] - context = instance.context - - frame_start_handle = int(context.data.get("frameStartHandle")) - frame_end_handle = int(context.data.get("frameEndHandle")) - handle_start = int(context.data.get("handleStart")) - handle_end = int(context.data.get("handleEnd")) - frame_start = int(context.data.get("frameStart")) - frame_end = int(context.data.get("frameEnd")) - - # Start - if cmds.attributeQuery("handleStart", node=node, exists=True): - cmds.setAttr("{}.handleStart".format(node), handle_start) - cmds.setAttr("{}.frameStart".format(node), frame_start) - else: - # Include start handle in frame start if no separate handleStart - # attribute exists on the node - cmds.setAttr("{}.frameStart".format(node), frame_start_handle) - - # End - if cmds.attributeQuery("handleEnd", node=node, exists=True): - cmds.setAttr("{}.handleEnd".format(node), handle_end) - cmds.setAttr("{}.frameEnd".format(node), frame_end) - else: - # Include end handle in frame end if no separate handleEnd - # attribute exists on the node - cmds.setAttr("{}.frameEnd".format(node), frame_end_handle) - - @classmethod - def repair_renderlayer(cls, instance): - """Apply frame range in render settings""" - - layer = instance.data["renderlayer"] - context = instance.context - - start_attr = "defaultRenderGlobals.startFrame" - end_attr = "defaultRenderGlobals.endFrame" - - frame_start_handle = int(context.data.get("frameStartHandle")) - frame_end_handle = int(context.data.get("frameEndHandle")) - - cls._set_attr_in_layer(start_attr, layer, frame_start_handle) - cls._set_attr_in_layer(end_attr, layer, frame_end_handle) - - @classmethod - def _set_attr_in_layer(cls, node_attr, layer, value): - - if get_attr_in_layer(node_attr, layer=layer) == value: - # Already ok. This can happen if you have multiple renderlayers - # validated and there are no frame range overrides. 
The first - # layer's repair would have fixed the global value already - return - - overrides = list(get_attr_overrides(node_attr, layer=layer)) - if overrides: - # We set the last absolute override if it is an absolute override - # otherwise we'll add an Absolute override - last_override = overrides[-1][1] - if not isinstance(last_override, AbsOverride): - collection = last_override.parent() - node, attr = node_attr.split(".", 1) - last_override = collection.createAbsoluteOverride(node, attr) - - cls.log.debug("Setting {attr} absolute override in " - "layer '{layer}': {value}".format(layer=layer, - attr=node_attr, - value=value)) - cmds.setAttr(last_override.name() + ".attrValue", value) - - else: - # Set the attribute directly - # (Note that this will set the global attribute) - cls.log.debug("Setting global {attr}: {value}".format( - attr=node_attr, - value=value - )) - cmds.setAttr(node_attr, value) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_material.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_material.py deleted file mode 100644 index e94cb3e663..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_material.py +++ /dev/null @@ -1,209 +0,0 @@ -import os - -from ayon_core.pipeline import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_core.pipeline.publish import RepairAction, ValidateContentsOrder -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateGLSLMaterial(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """ - Validate if the asset uses GLSL Shader - """ - - order = ValidateContentsOrder + 0.1 - families = ['gltf'] - label = 'GLSL Shader for GLTF' - actions = [RepairAction] - optional = True - active = True - - def process(self, instance): - if not self.is_active(instance.data): - return - shading_grp = self.get_material_from_shapes(instance) - if not shading_grp: - raise PublishValidationError("No shading group found") - invalid = self.get_texture_shader_invalid(instance) - if invalid: - raise PublishValidationError("Non GLSL Shader found: " - "{0}".format(invalid)) - - def get_material_from_shapes(self, instance): - shapes = cmds.ls(instance, type="mesh", long=True) - for shape in shapes: - shading_grp = cmds.listConnections(shape, - destination=True, - type="shadingEngine") - - return shading_grp or [] - - def get_texture_shader_invalid(self, instance): - - invalid = set() - shading_grp = self.get_material_from_shapes(instance) - for shading_group in shading_grp: - material_name = "{}.surfaceShader".format(shading_group) - material = cmds.listConnections(material_name, - source=True, - destination=False, - type="GLSLShader") - - if not material: - # add material name - material = cmds.listConnections(material_name)[0] - invalid.add(material) - - return list(invalid) - - @classmethod - def repair(cls, instance): - """ - Repair instance by assigning GLSL Shader - to the material - """ - cls.assign_glsl_shader(instance) - return - - @classmethod - def assign_glsl_shader(cls, instance): - """ - Converting StingrayPBS material to GLSL Shaders - for the glb export through Maya2GLTF plugin - """ - - meshes = cmds.ls(instance, type="mesh", long=True) - cls.log.debug("meshes: {}".format(meshes)) - # load the glsl shader plugin - cmds.loadPlugin("glslShader", quiet=True) - - for mesh in meshes: - # create glsl shader - glsl = cmds.createNode('GLSLShader') - glsl_shading_grp = cmds.sets(name=glsl + "SG", empty=True, - renderable=True, 
noSurfaceShader=True) - cmds.connectAttr(glsl + ".outColor", - glsl_shading_grp + ".surfaceShader") - - # load the maya2gltf shader - ogsfx_path = instance.context.data["project_settings"]["maya"]["publish"]["ExtractGLB"]["ogsfx_path"] # noqa - if not os.path.exists(ogsfx_path): - if ogsfx_path: - # if custom ogsfx path is not specified - # the log below is the warning for the user - cls.log.warning("ogsfx shader file " - "not found in {}".format(ogsfx_path)) - - cls.log.debug("Searching the ogsfx shader file in " - "default maya directory...") - # re-direct to search the ogsfx path in maya_dir - ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path - if not os.path.exists(ogsfx_path): - raise PublishValidationError("The ogsfx shader file does not " # noqa - "exist: {}".format(ogsfx_path)) # noqa - - cmds.setAttr(glsl + ".shader", ogsfx_path, typ="string") - # list the materials used for the assets - shading_grp = cmds.listConnections(mesh, - destination=True, - type="shadingEngine") - - # get the materials related to the selected assets - for material in shading_grp: - pbs_shader = cmds.listConnections(material, - destination=True, - type="StingrayPBS") - if pbs_shader: - cls.pbs_shader_conversion(pbs_shader, glsl) - # setting up to relink the texture if - # the mesh is with aiStandardSurface - arnold_shader = cmds.listConnections(material, - destination=True, - type="aiStandardSurface") - if arnold_shader: - cls.arnold_shader_conversion(arnold_shader, glsl) - - cmds.sets(mesh, forceElement=str(glsl_shading_grp)) - - @classmethod - def pbs_shader_conversion(cls, main_shader, glsl): - - cls.log.debug("StringrayPBS detected " - "-> Can do texture conversion") - - for shader in main_shader: - # get the file textures related to the PBS Shader - albedo = cmds.listConnections(shader + - ".TEX_color_map") - if albedo: - dif_output = albedo[0] + ".outColor" - # get the glsl_shader input - # reconnect the file nodes to maya2gltf shader - glsl_dif = glsl + ".u_BaseColorTexture" - cmds.connectAttr(dif_output, glsl_dif) - - # connect orm map if there is one - orm_packed = cmds.listConnections(shader + - ".TEX_ao_map") - if orm_packed: - orm_output = orm_packed[0] + ".outColor" - - mtl = glsl + ".u_MetallicTexture" - ao = glsl + ".u_OcclusionTexture" - rough = glsl + ".u_RoughnessTexture" - - cmds.connectAttr(orm_output, mtl) - cmds.connectAttr(orm_output, ao) - cmds.connectAttr(orm_output, rough) - - # connect nrm map if there is one - nrm = cmds.listConnections(shader + - ".TEX_normal_map") - if nrm: - nrm_output = nrm[0] + ".outColor" - glsl_nrm = glsl + ".u_NormalTexture" - cmds.connectAttr(nrm_output, glsl_nrm) - - @classmethod - def arnold_shader_conversion(cls, main_shader, glsl): - cls.log.debug("aiStandardSurface detected " - "-> Can do texture conversion") - - for shader in main_shader: - # get the file textures related to the PBS Shader - albedo = cmds.listConnections(shader + ".baseColor") - if albedo: - dif_output = albedo[0] + ".outColor" - # get the glsl_shader input - # reconnect the file nodes to maya2gltf shader - glsl_dif = glsl + ".u_BaseColorTexture" - cmds.connectAttr(dif_output, glsl_dif) - - orm_packed = cmds.listConnections(shader + - ".specularRoughness") - if orm_packed: - orm_output = orm_packed[0] + ".outColor" - - mtl = glsl + ".u_MetallicTexture" - ao = glsl + ".u_OcclusionTexture" - rough = glsl + ".u_RoughnessTexture" - - cmds.connectAttr(orm_output, mtl) - cmds.connectAttr(orm_output, ao) - cmds.connectAttr(orm_output, rough) - - # connect nrm map if there is one - 
bump_node = cmds.listConnections(shader + - ".normalCamera") - if bump_node: - for bump in bump_node: - nrm = cmds.listConnections(bump + - ".bumpValue") - if nrm: - nrm_output = nrm[0] + ".outColor" - glsl_nrm = glsl + ".u_NormalTexture" - cmds.connectAttr(nrm_output, glsl_nrm) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_plugin.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_plugin.py deleted file mode 100644 index aaea616631..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_glsl_plugin.py +++ /dev/null @@ -1,35 +0,0 @@ -from maya import cmds - -from ayon_core.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_maya.api import plugin - - -class ValidateGLSLPlugin(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """ - Validate if the asset uses GLSL Shader - """ - - order = ValidateContentsOrder + 0.15 - families = ['gltf'] - label = 'maya2glTF plugin' - actions = [RepairAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - if not cmds.pluginInfo("maya2glTF", query=True, loaded=True): - raise PublishValidationError("maya2glTF is not loaded") - - @classmethod - def repair(cls, instance): - """ - Repair instance by enabling the plugin - """ - return cmds.loadPlugin("maya2glTF", quiet=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_has_members.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_has_members.py deleted file mode 100644 index baca2a9008..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_has_members.py +++ /dev/null @@ -1,38 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -class ValidateInstanceHasMembers(plugin.MayaInstancePlugin): - """Validates instance objectSet has *any* members.""" - - order = ValidateContentsOrder - label = 'Instance has members' - actions = [ayon_maya.api.action.SelectInvalidAction] - - @classmethod - def get_invalid(cls, instance): - invalid = list() - if not instance.data.get("setMembers"): - objectset_name = instance.data['name'] - invalid.append(objectset_name) - - return invalid - - def process(self, instance): - # Allow renderlayer, rendersetup and workfile to be empty - skip_families = {"workfile", "renderlayer", "rendersetup"} - if instance.data.get("productType") in skip_families: - return - - invalid = self.get_invalid(instance) - if invalid: - # Invalid will always be a single entry, we log the single name - name = invalid[0] - raise PublishValidationError( - title="Empty instance", - message="Instance '{0}' is empty".format(name) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_in_context.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_in_context.py deleted file mode 100644 index 5168c8496c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_in_context.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validate if instance asset is the same as context asset.""" -from __future__ import absolute_import - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from 
ayon_maya.api import plugin - - -class ValidateInstanceInContext(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validator to check if instance asset match context asset. - - When working in per-shot style you always publish data in context of - current asset (shot). This validator checks if this is so. It is optional - so it can be disabled when needed. - - Action on this validator will select invalid instances in Outliner. - """ - - order = ValidateContentsOrder - label = "Instance in same Context" - optional = True - actions = [ - ayon_maya.api.action.SelectInvalidAction, RepairAction - ] - - def process(self, instance): - if not self.is_active(instance.data): - return - - folder_path = instance.data.get("folderPath") - task = instance.data.get("task") - context = self.get_context(instance) - if (folder_path, task) != context: - context_label = "{} > {}".format(*context) - instance_label = "{} > {}".format(folder_path, task) - raise PublishValidationError( - message=( - "Instance '{}' publishes to different context than current" - " context: {}. Current context: {}".format( - instance.name, instance_label, context_label - ) - ), - description=( - "## Publishing to a different context data\n" - "There are publish instances present which are publishing " - "into a different folder than your current context.\n\n" - "Usually this is not what you want but there can be cases " - "where you might want to publish into another folder or " - "shot. If that's the case you can disable the validation " - "on the instance to ignore it." - ) - ) - - @classmethod - def get_invalid(cls, instance): - return [instance.data["instance_node"]] - - @classmethod - def repair(cls, instance): - context_folder_path, context_task = cls.get_context( - instance) - - create_context = instance.context.data["create_context"] - instance_id = instance.data["instance_id"] - created_instance = create_context.get_instance_by_id( - instance_id - ) - created_instance["folderPath"] = context_folder_path - created_instance["task"] = context_task - create_context.save_changes() - - @staticmethod - def get_context(instance): - """Return asset, task from publishing context data""" - context = instance.context - return context.data["folderPath"], context.data["task"] diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_subset.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_subset.py deleted file mode 100644 index 4c876079ff..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_instance_subset.py +++ /dev/null @@ -1,53 +0,0 @@ -import string - -import six -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - -# Allow only characters, numbers and underscore -allowed = set(string.ascii_lowercase + - string.ascii_uppercase + - string.digits + - '_') - - -def validate_name(product_name): - return all(x in allowed for x in product_name) - - -class ValidateSubsetName(plugin.MayaInstancePlugin): - """Validates product name has only valid characters""" - - order = ValidateContentsOrder - families = ["*"] - label = "Product Name" - - def process(self, instance): - - product_name = instance.data.get("productName", None) - - # Ensure product data - if product_name is None: - raise PublishValidationError( - "Instance is missing product name: {0}".format(product_name) - ) - - if not isinstance(product_name, six.string_types): - raise PublishValidationError(( - "Instance 
product name must be string, got: {0} ({1})" - ).format(product_name, type(product_name))) - - # Ensure is not empty product - if not product_name: - raise PublishValidationError( - "Instance product name is empty: {0}".format(product_name) - ) - - # Validate product characters - if not validate_name(product_name): - raise PublishValidationError(( - "Instance product name contains invalid characters: {0}" - ).format(product_name)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_loaded_plugin.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_loaded_plugin.py deleted file mode 100644 index 60af00186e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_loaded_plugin.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import pyblish.api -import maya.cmds as cmds - -from ayon_core.pipeline.publish import ( - RepairContextAction, - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_maya.api import plugin - - -class ValidateLoadedPlugin(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure there are no unauthorized loaded plugins""" - - label = "Loaded Plugin" - order = pyblish.api.ValidatorOrder - actions = [RepairContextAction] - optional = True - - @classmethod - def get_invalid(cls, context): - - invalid = [] - loaded_plugins = cmds.pluginInfo(query=True, listPlugins=True) - # get variable from AYON settings - whitelist_native_plugins = cls.whitelist_native_plugins - authorized_plugins = cls.authorized_plugins or [] - - for maya_plugin in loaded_plugins: - if not whitelist_native_plugins and os.getenv('MAYA_LOCATION') \ - in cmds.pluginInfo(maya_plugin, query=True, path=True): - continue - if maya_plugin not in authorized_plugins: - invalid.append(maya_plugin) - - return invalid - - def process(self, context): - if not self.is_active(context.data): - return - invalid = self.get_invalid(context) - if invalid: - raise PublishValidationError( - "Found forbidden plugin name: {}".format(", ".join(invalid)) - ) - - @classmethod - def repair(cls, context): - """Unload forbidden plugins""" - - for maya_plugin in cls.get_invalid(context): - cmds.pluginInfo(maya_plugin, edit=True, autoload=False) - cmds.unloadPlugin(maya_plugin, force=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_contents.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_contents.py deleted file mode 100644 index 722f92b1b5..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_contents.py +++ /dev/null @@ -1,135 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds # noqa - - -class ValidateLookContents(plugin.MayaInstancePlugin): - """Validate look instance contents - - Rules: - * Look data must have `relationships` and `attributes` keys. - * At least one relationship must be collection. - * All relationship object sets at least have an ID value - - Tip: - * When no node IDs are found on shadingEngines please save your scene - and try again. 
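-
-    A minimal lookData layout satisfying these rules could look like
-    (hypothetical names, for illustration):
-        {
-            "relationships": {"lambert2SG": {"uuid": "...", "members": [...]}},
-            "attributes": [{"name": "...", "uuid": "...", "attributes": {...}}]
-        }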
- - """ - - order = ValidateContentsOrder - families = ['look'] - label = 'Look Data Contents' - actions = [ayon_maya.api.action.SelectInvalidAction] - - def process(self, instance): - """Process all the nodes in the instance""" - - if not instance[:]: - raise PublishValidationError("Instance is empty") - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("'{}' has invalid look " - "content".format(instance.name)) - - @classmethod - def get_invalid(cls, instance): - """Get all invalid nodes""" - - # check if data has the right attributes and content - attributes = cls.validate_lookdata_attributes(instance) - # check the looks for ID - looks = cls.validate_looks(instance) - # check if file nodes have valid files - files = cls.validate_files(instance) - - invalid = looks + attributes + files - - return invalid - - @classmethod - def validate_lookdata_attributes(cls, instance): - """Check if the lookData has the required attributes - - Args: - instance - - """ - - invalid = set() - - keys = ["relationships", "attributes"] - lookdata = instance.data["lookData"] - for key in keys: - if key not in lookdata: - cls.log.error("Look Data has no key " - "'{}'".format(key)) - invalid.add(instance.name) - - # Validate at least one single relationship is collected - if not lookdata["relationships"]: - cls.log.error("Look '%s' has no " - "`relationships`" % instance.name) - invalid.add(instance.name) - - # Check if attributes are on a node with an ID, crucial for rebuild! - for attr_changes in lookdata["attributes"]: - if not attr_changes["uuid"] and not attr_changes["attributes"]: - cls.log.error("Node '%s' has no cbId, please set the " - "attributes to its children if it has any" - % attr_changes["name"]) - invalid.add(instance.name) - - return list(invalid) - - @classmethod - def validate_looks(cls, instance): - - looks = instance.data["lookData"]["relationships"] - invalid = [] - for name, data in looks.items(): - if not data["uuid"]: - cls.log.error("Look '{}' has no UUID".format(name)) - invalid.append(name) - - return invalid - - @classmethod - def validate_files(cls, instance): - - invalid = [] - - resources = instance.data.get("resources", []) - for resource in resources: - files = resource["files"] - if len(files) == 0: - node = resource["node"] - cls.log.error("File node '%s' uses no or non-existing " - "files" % node) - invalid.append(node) - - return invalid - - @classmethod - def validate_renderer(cls, instance): - # TODO: Rewrite this to be more specific and configurable - renderer = cmds.getAttr( - 'defaultRenderGlobals.currentRenderer').lower() - do_maketx = instance.data.get("maketx", False) - do_rstex = instance.data.get("rstex", False) - processors = [] - - if do_maketx: - processors.append('arnold') - if do_rstex: - processors.append('redshift') - - for processor in processors: - if processor == renderer: - continue - else: - cls.log.error("Converted texture does not match current renderer.") # noqa diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_default_shaders_connections.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_default_shaders_connections.py deleted file mode 100644 index ac936b36c7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_default_shaders_connections.py +++ /dev/null @@ -1,76 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import ( - PublishValidationError, - RepairContextAction, -) -from ayon_maya.api import plugin -from maya import cmds 
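-
-# A single default connection can be inspected manually, for example
-# (hypothetical interactive session):
-#     cmds.listConnections("initialShadingGroup.surfaceShader",
-#                          source=True, destination=False, plugs=True)
-# is expected to return ["standardSurface1.outColor"] (or
-# ["lambert1.outColor"] in Maya 2023 and earlier), matching the
-# DEFAULTS mapping below.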
- - -class ValidateLookDefaultShadersConnections(plugin.MayaContextPlugin): - """Validate default shaders in the scene have their default connections. - - For example the standardSurface1 or lambert1 (maya 2023 and before) could - potentially be disconnected from the initialShadingGroup. As such it's not - lambert1 that will be identified as the default shader which can have - unpredictable results. - - To fix the default connections need to be made again. See the logs for - more details on which connections are missing. - - """ - - order = pyblish.api.ValidatorOrder - 0.4999 - families = ['look'] - label = 'Look Default Shader Connections' - actions = [RepairContextAction] - - # The default connections to check - DEFAULTS = { - "initialShadingGroup.surfaceShader": ["standardSurface1.outColor", - "lambert1.outColor"], - "initialParticleSE.surfaceShader": ["standardSurface1.outColor", - "lambert1.outColor"], - "initialParticleSE.volumeShader": ["particleCloud1.outColor"] - } - - def process(self, context): - - if self.get_invalid(): - raise PublishValidationError( - "Default shaders in your scene do not have their " - "default shader connections. Please repair them to continue." - ) - - @classmethod - def get_invalid(cls): - - # Process as usual - invalid = list() - for plug, valid_inputs in cls.DEFAULTS.items(): - inputs = cmds.listConnections(plug, - source=True, - destination=False, - plugs=True) or None - if not inputs or inputs[0] not in valid_inputs: - cls.log.error( - "{0} is not connected to {1}. This can result in " - "unexpected behavior. Please reconnect to continue." - "".format(plug, " or ".join(valid_inputs)) - ) - invalid.append(plug) - - return invalid - - @classmethod - def repair(cls, context): - invalid = cls.get_invalid() - for plug in invalid: - valid_inputs = cls.DEFAULTS[plug] - for valid_input in valid_inputs: - if cmds.objExists(valid_input): - cls.log.info( - "Connecting {} -> {}".format(valid_input, plug) - ) - cmds.connectAttr(valid_input, plug, force=True) - break diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_id_reference_edits.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_id_reference_edits.py deleted file mode 100644 index 4763128f3f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_id_reference_edits.py +++ /dev/null @@ -1,108 +0,0 @@ -from collections import defaultdict - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateLookIdReferenceEdits(plugin.MayaInstancePlugin): - """Validate nodes in look have no reference edits to cbId. - - Note: - This only validates the cbId edits on the referenced nodes that are - used in the look. For example, a transform can have its cbId changed - without being invalidated when it is not used in the look's assignment. 
-
-    """
-
-    order = ValidateContentsOrder
-    families = ['look']
-    label = 'Look Id Reference Edits'
-    actions = [ayon_maya.api.action.SelectInvalidAction,
-               RepairAction]
-
-    @classmethod
-    def apply_settings(cls, project_settings):
-        # Disable plug-in if cbId workflow is disabled
-        if not project_settings["maya"].get("use_cbid_workflow", True):
-            cls.enabled = False
-            return
-
-    def process(self, instance):
-        invalid = self.get_invalid(instance)
-
-        if invalid:
-            raise PublishValidationError("Invalid nodes %s" % (invalid,))
-
-    @staticmethod
-    def get_invalid(instance):
-
-        # Collect all referenced members
-        references = defaultdict(set)
-        relationships = instance.data["lookData"]["relationships"]
-        for relationship in relationships.values():
-            for member in relationship['members']:
-                node = member["name"]
-
-                if cmds.referenceQuery(node, isNodeReferenced=True):
-                    ref = cmds.referenceQuery(node, referenceNode=True)
-                    references[ref].add(node)
-
-        # Validate whether any node has changes to its 'cbId' attribute
-        invalid = list()
-        for ref, nodes in references.items():
-            edits = cmds.referenceQuery(editAttrs=True,
-                                        editNodes=True,
-                                        showDagPath=True,
-                                        showNamespace=True,
-                                        onReferenceNode=ref)
-            for edit in edits:
-
-                # Ensure it is an attribute ending with .cbId
-                # thus also ignore just node edits (like parenting)
-                if not edit.endswith(".cbId"):
-                    continue
-
-                # Ensure the attribute is 'cbId' (and not a nested attribute)
-                node, attr = edit.split(".", 1)
-                if attr != "cbId":
-                    continue
-
-                if node in nodes:
-                    invalid.append(node)
-
-        return invalid
-
-    @classmethod
-    def repair(cls, instance):
-
-        invalid = cls.get_invalid(instance)
-
-        # Group invalid nodes by reference node
-        references = defaultdict(set)
-        for node in invalid:
-            ref = cmds.referenceQuery(node, referenceNode=True)
-            references[ref].add(node)
-
-        # Remove the reference edits on the nodes per reference node
-        for ref, nodes in references.items():
-            for node in nodes:
-
-                # Somehow this only works if you run the removal
-                # per edit command.
-                for command in ["addAttr",
-                                "connectAttr",
-                                "deleteAttr",
-                                "disconnectAttr",
-                                "setAttr"]:
-                    cmds.referenceEdit("{}.cbId".format(node),
-                                       removeEdits=True,
-                                       successfulEdits=True,
-                                       failedEdits=True,
-                                       editCommand=command,
-                                       onReferenceNode=ref)
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_no_default_shaders.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_no_default_shaders.py
deleted file mode 100644
index e4662dd498..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_no_default_shaders.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    PublishValidationError,
-    ValidateContentsOrder,
-)
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateLookNoDefaultShaders(plugin.MayaInstancePlugin):
-    """Validate if any node has a connection to a default shader.
-
-    This checks whether the look has any members of:
-    - lambert1
-    - initialShadingGroup
-    - initialParticleSE
-    - particleCloud1
-
-    If any of those is present it will raise an error. A look is not allowed
-    to have any of the "default" shaders present in a scene as they can
-    introduce problems when referenced (overriding local scene shaders).
-
-    To fix this, none of the shape nodes in the look may have any of the
-    default shaders applied.
- - """ - - order = ValidateContentsOrder + 0.01 - families = ['look'] - label = 'Look No Default Shaders' - actions = [ayon_maya.api.action.SelectInvalidAction] - - DEFAULT_SHADERS = {"lambert1", "initialShadingGroup", - "initialParticleSE", "particleCloud1"} - - def process(self, instance): - """Process all the nodes in the instance""" - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Invalid node relationships found: " - "{0}".format(invalid)) - - @classmethod - def get_invalid(cls, instance): - - invalid = set() - for node in instance: - # Get shading engine connections - shaders = cmds.listConnections(node, type="shadingEngine") or [] - - # Check for any disallowed connections on *all* nodes - if any(s in cls.DEFAULT_SHADERS for s in shaders): - - # Explicitly log each individual "wrong" connection. - for s in shaders: - if s in cls.DEFAULT_SHADERS: - cls.log.error("Node has unallowed connection to " - "'{}': {}".format(s, node)) - - invalid.add(node) - - return list(invalid) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_sets.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_sets.py deleted file mode 100644 index eae1664114..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_sets.py +++ /dev/null @@ -1,102 +0,0 @@ -import ayon_maya.api.action -from ayon_maya.api import lib -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - PublishValidationError -) -from ayon_maya.api import plugin - - -class ValidateLookSets(plugin.MayaInstancePlugin): - """Validate if any sets relationships are not being collected. - - A shader can be assigned to a node that is missing a Colorbleed ID. - Because it is missing the ID it has not been collected in the instance. - This validator ensures those relationships and thus considers it invalid - if a relationship was not collected. - - When the relationship needs to be maintained the artist might need to - create a different* relationship or ensure the node has the Colorbleed ID. - - *The relationship might be too broad (assigned to top node of hierarchy). - This can be countered by creating the relationship on the shape or its - transform. In essence, ensure item the shader is assigned to has the - Colorbleed ID! - - Examples: - - - Displacement objectSets (like V-Ray): - - It is best practice to add the transform of the shape to the - displacement objectSet. Any parent groups will not work as groups - do not receive a Colorbleed Id. As such the assignments need to be - made to the shapes and their transform. 
-
-        Example content:
-        [asset_GRP|geometry_GRP|body_GES,
-         asset_GRP|geometry_GRP|L_eye_GES,
-         asset_GRP|geometry_GRP|R_eye_GES,
-         asset_GRP|geometry_GRP|wings_GEO]
-
-    """
-
-    order = ValidateContentsOrder
-    families = ['look']
-    label = 'Look Sets'
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-
-    def process(self, instance):
-        """Process all the nodes in the instance"""
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError("'{}' has invalid look "
-                                         "content".format(instance.name))
-
-    @classmethod
-    def get_invalid(cls, instance):
-        """Get all invalid nodes"""
-
-        relationships = instance.data["lookData"]["relationships"]
-        invalid = []
-
-        renderlayer = instance.data.get("renderlayer", "defaultRenderLayer")
-        with lib.renderlayer(renderlayer):
-            for node in instance:
-                # get the connected objectSets of the node
-                sets = lib.get_related_sets(node)
-                if not sets:
-                    continue
-
-                # check if any objectSets are not present in the relationships
-                missing_sets = [s for s in sets if s not in relationships]
-                if missing_sets:
-                    for missing_set in missing_sets:
-                        cls.log.debug(missing_set)
-
-                    if '_SET' not in missing_set:
-                        # A set of this node is not coming along, this is wrong!
-                        cls.log.error("Missing sets '{}' for node "
-                                      "'{}'".format(missing_sets, node))
-                        invalid.append(node)
-                        continue
-
-                # Ensure the node is in the sets that are collected
-                for shader_set, data in relationships.items():
-                    if shader_set not in sets:
-                        # no need to check for a set if the node
-                        # isn't in it anyway
-                        continue
-
-                    member_nodes = [member['name'] for member in
-                                    data['members']]
-                    if node not in member_nodes:
-                        # The node is not found in the collected set
-                        # relationships
-                        cls.log.error("Missing '{}' in collected set node "
-                                      "'{}'".format(node, shader_set))
-                        invalid.append(node)
-
-                        continue
-
-        return invalid
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_shading_group.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_shading_group.py
deleted file mode 100644
index 2aecf64e01..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_shading_group.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    RepairAction,
-    ValidateContentsOrder,
-)
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateShadingEngine(plugin.MayaInstancePlugin,
-                            OptionalPyblishPluginMixin):
-    """Validate all shading engines are named after the surface material.
- - Shading engines should be named "{surface_shader}SG" - """ - - order = ValidateContentsOrder - families = ["look"] - label = "Look Shading Engine Naming" - actions = [ - ayon_maya.api.action.SelectInvalidAction, RepairAction - ] - optional = True - - # The default connections to check - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Found shading engines with incorrect naming:" - "\n{}".format(invalid) - ) - - @classmethod - def get_invalid(cls, instance): - shapes = cmds.ls(instance, type=["nurbsSurface", "mesh"], long=True) - invalid = [] - for shape in shapes: - shading_engines = cmds.listConnections( - shape, destination=True, type="shadingEngine" - ) or [] - for shading_engine in shading_engines: - materials = cmds.listConnections( - shading_engine + ".surfaceShader", - source=True, destination=False - ) - if not materials: - cls.log.warning( - "Shading engine '{}' has no material connected to its " - ".surfaceShader attribute.".format(shading_engine)) - continue - - material = materials[0] # there should only ever be one input - name = material + "SG" - if shading_engine != name: - invalid.append(shading_engine) - - return list(set(invalid)) - - @classmethod - def repair(cls, instance): - shading_engines = cls.get_invalid(instance) - for shading_engine in shading_engines: - name = ( - cmds.listConnections(shading_engine + ".surfaceShader")[0] - + "SG" - ) - cmds.rename(shading_engine, name) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_single_shader.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_single_shader.py deleted file mode 100644 index d48c050d97..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_look_single_shader.py +++ /dev/null @@ -1,59 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateSingleShader(plugin.MayaInstancePlugin): - """Validate all nurbsSurfaces and meshes have exactly one shader assigned. - - This will error if a shape has no shaders or more than one shader. - - """ - - order = ValidateContentsOrder - families = ['look'] - label = 'Look Single Shader Per Shape' - actions = [ayon_maya.api.action.SelectInvalidAction] - - # The default connections to check - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Found shapes which don't have a single shader " - "assigned:\n{}").format(invalid)) - - @classmethod - def get_invalid(cls, instance): - - # Get all shapes from the instance - shapes = cmds.ls(instance, type=["nurbsSurface", "mesh"], long=True) - - # Check the number of connected shadingEngines per shape - no_shaders = [] - more_than_one_shaders = [] - for shape in shapes: - shading_engines = cmds.listConnections(shape, - destination=True, - type="shadingEngine") or [] - - # Only interested in unique shading engines. 
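            # (Illustrative sketch, not part of the original file: a hedged
            # note on why the dedupe on the next line is needed.
            # `cmds.listConnections` can report the same shadingEngine once
            # per connection, e.g. with per-face assignments, so counts must
            # be taken over unique values, conceptually:
            #
            #     engines = cmds.listConnections(
            #         "pCubeShape1", destination=True,
            #         type="shadingEngine") or []
            #     unique_engines = set(engines)
            #
            # "pCubeShape1" is only an assumed example shape name.)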
-            shading_engines = list(set(shading_engines))
-
-            if not shading_engines:
-                no_shaders.append(shape)
-            elif len(shading_engines) > 1:
-                more_than_one_shaders.append(shape)
-
-        if no_shaders:
-            cls.log.error("No shaders found on: {}".format(no_shaders))
-        if more_than_one_shaders:
-            cls.log.error("More than one shader found on: "
-                          "{}".format(more_than_one_shaders))
-
-        return no_shaders + more_than_one_shaders
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_maya_units.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_maya_units.py
deleted file mode 100644
index cb4df4b5d5..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_maya_units.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import ayon_maya.api.lib as mayalib
-import maya.cmds as cmds
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishXmlValidationError,
-    RepairContextAction,
-    ValidateSceneOrder,
-)
-from ayon_maya.api import plugin
-
-
-class ValidateMayaUnits(plugin.MayaContextPlugin,
-                        OptionalPyblishPluginMixin):
-    """Check if the Maya units are set correctly"""
-
-    order = ValidateSceneOrder
-    label = "Maya Units"
-    actions = [RepairContextAction]
-
-    validate_linear_units = True
-    linear_units = "cm"
-
-    validate_angular_units = True
-    angular_units = "deg"
-
-    validate_fps = True
-
-    nice_message_format = (
-        "- {setting} must be {required_value}. "
-        "Your scene is set to {current_value}"
-    )
-    log_message_format = (
-        "Maya scene {setting} must be '{required_value}'. "
-        "Current value is '{current_value}'."
-    )
-    optional = False
-
-    @classmethod
-    def apply_settings(cls, project_settings):
-        """Apply project settings to this validator"""
-        settings = (
-            project_settings["maya"]["publish"]["ValidateMayaUnits"]
-        )
-
-        cls.validate_linear_units = settings.get("validate_linear_units",
-                                                 cls.validate_linear_units)
-        cls.linear_units = settings.get("linear_units", cls.linear_units)
-        cls.validate_angular_units = settings.get("validate_angular_units",
                                                  cls.validate_angular_units)
-        cls.angular_units = settings.get("angular_units", cls.angular_units)
-        cls.validate_fps = settings.get("validate_fps", cls.validate_fps)
-
-    def process(self, context):
-        if not self.is_active(context.data):
-            return
-        # Collected units
-        linearunits = context.data.get('linearUnits')
-        angularunits = context.data.get('angularUnits')
-
-        fps = context.data.get('fps')
-
-        folder_attributes = context.data["folderEntity"]["attrib"]
-        folder_fps = mayalib.convert_to_maya_fps(folder_attributes["fps"])
-
-        self.log.info('Units (linear): {0}'.format(linearunits))
-        self.log.info('Units (angular): {0}'.format(angularunits))
-        self.log.info('Units (time): {0} FPS'.format(fps))
-
-        invalid = []
-
-        # Check if units are correct
-        if (
-            self.validate_linear_units
-            and linearunits
-            and linearunits != self.linear_units
-        ):
-            invalid.append({
-                "setting": "Linear units",
-                "required_value": self.linear_units,
-                "current_value": linearunits
-            })
-
-        if (
-            self.validate_angular_units
-            and angularunits
-            and angularunits != self.angular_units
-        ):
-            invalid.append({
-                "setting": "Angular units",
-                "required_value": self.angular_units,
-                "current_value": angularunits
-            })
-
-        if self.validate_fps and fps and fps != folder_fps:
-            invalid.append({
-                "setting": "FPS",
-                "required_value": folder_fps,
-                "current_value": fps
-            })
-
-        if invalid:
-
-            issues = []
-            for data in invalid:
-                self.log.error(self.log_message_format.format(**data))
-                issues.append(self.nice_message_format.format(**data))
-            issues = "\n".join(issues)
-
-            raise PublishXmlValidationError(
-                plugin=self,
-                message="Invalid maya scene units",
-                formatting_data={"issues": issues}
-            )
-
-    @classmethod
-    def repair(cls, context):
-        """Fix the scene units and FPS to match the settings and project"""
-
-        cls.log.info("Setting angular unit to '{}'".format(cls.angular_units))
-        cmds.currentUnit(angle=cls.angular_units)
-        current_angle = cmds.currentUnit(query=True, angle=True)
-        cls.log.debug(current_angle)
-
-        cls.log.info("Setting linear unit to '{}'".format(cls.linear_units))
-        cmds.currentUnit(linear=cls.linear_units)
-        current_linear = cmds.currentUnit(query=True, linear=True)
-        cls.log.debug(current_linear)
-
-        cls.log.info("Setting time unit to match project")
-        folder_entity = context.data["folderEntity"]
-        mayalib.set_scene_fps(folder_entity["attrib"]["fps"])
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_arnold_attributes.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_arnold_attributes.py
deleted file mode 100644
index 9729c8863d..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_arnold_attributes.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    RepairAction,
-    ValidateMeshOrder,
-)
-from ayon_maya.api.lib import (
-    delete_after,
-    get_attribute,
-    maintained_selection,
-    set_attribute,
-    undo_chunk,
-)
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateMeshArnoldAttributes(plugin.MayaInstancePlugin,
-                                   OptionalPyblishPluginMixin):
-    """Validate the mesh has default Arnold attributes.
-
-    It compares all Arnold attributes from a default mesh. This is to ensure
-    later published looks can discover non-default Arnold attributes.
-    """
-
-    order = ValidateMeshOrder
-    hosts = ["maya"]
-    families = ["model"]
-    label = "Mesh Arnold Attributes"
-    actions = [
-        ayon_maya.api.action.SelectInvalidAction,
-        RepairAction
-    ]
-
-    optional = True
-
-    # cache (will be `dict` when cached)
-    arnold_mesh_defaults = None
-
-    @classmethod
-    def get_default_attributes(cls):
-
-        if cls.arnold_mesh_defaults is not None:
-            # Use from cache
-            return cls.arnold_mesh_defaults
-
-        # Get default arnold attribute values for mesh type.
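        # (Illustrative sketch, not from the original file: the idea below is
        # to build a throwaway default mesh and record every "ai*" attribute
        # value on it, conceptually:
        #
        #     tmp = cmds.createNode("mesh", skipSelect=True)
        #     for attr in cmds.listAttr(tmp, string="ai*",
        #                               fromPlugin=True) or []:
        #         value = cmds.getAttr("{}.{}".format(tmp, attr))
        #
        # `cmds.getAttr` stands in here for the addon's `get_attribute`
        # helper from `ayon_maya.api.lib`.)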
- defaults = {} - with delete_after() as tmp: - transform = cmds.createNode("transform", skipSelect=True) - tmp.append(transform) - - mesh = cmds.createNode("mesh", parent=transform, skipSelect=True) - arnold_attributes = cmds.listAttr(mesh, - string="ai*", - fromPlugin=True) or [] - for attr in arnold_attributes: - plug = "{}.{}".format(mesh, attr) - try: - defaults[attr] = get_attribute(plug) - except PublishValidationError: - cls.log.debug("Ignoring arnold attribute: {}".format(attr)) - - cls.arnold_mesh_defaults = defaults # assign cache - return defaults - - @classmethod - def get_invalid_attributes(cls, instance, compute=False): - invalid = [] - - if compute: - - meshes = cmds.ls(instance, type="mesh", long=True) - if not meshes: - return [] - - # Compare the values against the defaults - defaults = cls.get_default_attributes() - for mesh in meshes: - for attr_name, default_value in defaults.items(): - plug = "{}.{}".format(mesh, attr_name) - if get_attribute(plug) != default_value: - invalid.append(plug) - - instance.data["nondefault_arnold_attributes"] = invalid - - return instance.data.get("nondefault_arnold_attributes", []) - - @classmethod - def get_invalid(cls, instance): - invalid_attrs = cls.get_invalid_attributes(instance, compute=False) - invalid_nodes = set(attr.split(".", 1)[0] for attr in invalid_attrs) - return sorted(invalid_nodes) - - @classmethod - def repair(cls, instance): - with maintained_selection(): - with undo_chunk(): - defaults = cls.get_default_attributes() - attributes = cls.get_invalid_attributes( - instance, compute=False - ) - for attr in attributes: - node, attr_name = attr.split(".", 1) - value = defaults[attr_name] - set_attribute( - node=node, - attribute=attr_name, - value=value - ) - - def process(self, instance): - if not self.is_active(instance.data): - return - - if not cmds.pluginInfo("mtoa", query=True, loaded=True): - # Arnold attributes only exist if plug-in is loaded - return - - invalid = self.get_invalid_attributes(instance, compute=True) - if invalid: - raise PublishValidationError( - "Non-default Arnold attributes found in instance:" - " {0}".format(invalid) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_empty.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_empty.py deleted file mode 100644 index 12b2252eb8..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_empty.py +++ /dev/null @@ -1,53 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateMeshEmpty(plugin.MayaInstancePlugin): - """Validate meshes have some vertices. - - Its possible to have meshes without any vertices. To replicate - this issue, delete all faces/polygons then all edges. 
-
-    """
-
-    order = ValidateMeshOrder
-    families = ["model"]
-    label = "Mesh Empty"
-    actions = [
-        ayon_maya.api.action.SelectInvalidAction, RepairAction
-    ]
-
-    @classmethod
-    def repair(cls, instance):
-        invalid = cls.get_invalid(instance)
-        for node in invalid:
-            cmds.delete(node)
-
-    @classmethod
-    def get_invalid(cls, instance):
-        invalid = []
-
-        meshes = cmds.ls(instance, type="mesh", long=True)
-        for mesh in meshes:
-            num_vertices = cmds.polyEvaluate(mesh, vertex=True)
-
-            if num_vertices == 0:
-                cls.log.warning(
-                    "\"{}\" does not have any vertices.".format(mesh)
-                )
-                invalid.append(mesh)
-
-        return invalid
-
-    def process(self, instance):
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(
-                "Meshes found without any vertices: %s" % invalid
-            )
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_has_uv.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_has_uv.py
deleted file mode 100644
index c7576d2a78..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_has_uv.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    ValidateMeshOrder,
-)
-from ayon_maya.api.lib import len_flattened
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateMeshHasUVs(plugin.MayaInstancePlugin,
-                         OptionalPyblishPluginMixin):
-    """Validate the current mesh has UVs.
-
-    It validates whether the current UV set has non-zero UVs and
-    at least as many UVs as vertices. It's not really bulletproof,
-    but a simple, quick validation to check if there are likely
-    UVs for every face.
-    """
-
-    order = ValidateMeshOrder
-    families = ['model']
-    label = 'Mesh Has UVs'
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    optional = True
-
-    @classmethod
-    def get_invalid(cls, instance):
-        invalid = []
-
-        for node in cmds.ls(instance, type='mesh'):
-            num_vertices = cmds.polyEvaluate(node, vertex=True)
-
-            if num_vertices == 0:
-                cls.log.warning(
-                    "Skipping \"{}\", because it does not have any "
-                    "vertices.".format(node)
-                )
-                continue
-
-            uv = cmds.polyEvaluate(node, uv=True)
-
-            if uv == 0:
-                invalid.append(node)
-                continue
-
-            vertex = cmds.polyEvaluate(node, vertex=True)
-            if uv < vertex:
-                # Workaround:
-                # Maya can have instanced UVs in a single mesh, for example
-                # imported from an Alembic. With instanced UVs the UV count
-                # from `maya.cmds.polyEvaluate(uv=True)` will only result in
-                # the unique UV count instead of for all vertices.
-                #
-                # Note: Maya can save instanced UVs to `mayaAscii` but cannot
-                #       load this as instanced. So saving, opening and saving
-                #       again will lose this information.
-                map_attr = "{}.map[*]".format(node)
-                uv_to_vertex = cmds.polyListComponentConversion(map_attr,
-                                                                toVertex=True)
-                uv_vertex_count = len_flattened(uv_to_vertex)
-                if uv_vertex_count < vertex:
-                    invalid.append(node)
-                else:
-                    cls.log.warning("Node has instanced UV points: "
-                                    "{0}".format(node))
-
-        return invalid
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-
-            names = "\n".join(
-                " - {}".format(node) for node in invalid
-            )
-
-            raise PublishValidationError(
-                title="Mesh has missing UVs",
-                message="Model meshes are required to have UVs.\n\n"
-                        "Meshes detected with invalid or missing UVs:\n"
-                        "{0}".format(names)
-            )
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_lamina_faces.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_lamina_faces.py
deleted file mode 100644
index 50e3b5b53a..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_lamina_faces.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    ValidateMeshOrder,
-)
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateMeshLaminaFaces(plugin.MayaInstancePlugin,
-                              OptionalPyblishPluginMixin):
-    """Validate meshes don't have lamina faces.
-
-    Lamina faces share all of their edges.
-
-    """
-
-    order = ValidateMeshOrder
-    families = ['model']
-    label = 'Mesh Lamina Faces'
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    optional = True
-
-    description = (
-        "## Meshes with Lamina Faces\n"
-        "Detected meshes with lamina faces. Lamina faces are faces "
-        "that share all of their edges and thus are merged together on top of "
-        "each other.\n\n"
-        "### How to repair?\n"
-        "You can repair them by using Maya's modeling tool `Mesh > Cleanup..` "
-        "and selecting to clean up matching polygons for lamina faces."
-    )
-
-    @staticmethod
-    def get_invalid(instance):
-        meshes = cmds.ls(instance, type='mesh', long=True)
-        invalid = [mesh for mesh in meshes if
-                   cmds.polyInfo(mesh, laminaFaces=True)]
-
-        return invalid
-
-    def process(self, instance):
-        """Process all the nodes in the instance 'objectSet'"""
-        if not self.is_active(instance.data):
-            return
-
-        invalid = self.get_invalid(instance)
-
-        if invalid:
-            raise PublishValidationError(
-                "Meshes found with lamina faces: {0}".format(invalid),
-                description=self.description)
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_ngons.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_ngons.py
deleted file mode 100644
index c73c8d27e8..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_ngons.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    ValidateContentsOrder,
-)
-from ayon_maya.api import lib
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-class ValidateMeshNgons(plugin.MayaInstancePlugin,
-                        OptionalPyblishPluginMixin):
-    """Ensure that meshes don't have ngons
-
-    Ngons are faces with more than 4 sides.
-
-    To debug the problem on the meshes you can use Maya's modeling
-    tool: "Mesh > Cleanup..."
-
-    """
-
-    order = ValidateContentsOrder
-    families = ["model"]
-    label = "Mesh ngons"
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    optional = True
-
-    description = (
-        "## Meshes with NGON Faces\n"
-        "Detected meshes with NGON faces. **NGONS** are faces "
-        "with more than four sides.\n\n"
-        "### How to repair?\n"
-        "You can repair them by using Maya's modeling tool Mesh > Cleanup... "
-        "and select to clean up matching polygons for n-sided faces."
-    )
-
-    @staticmethod
-    def get_invalid(instance):
-
-        meshes = cmds.ls(instance, type='mesh', long=True)
-
-        # Get all faces
-        faces = ['{0}.f[*]'.format(node) for node in meshes]
-
-        # Skip meshes that for some reason have no faces, e.g. empty meshes
-        faces = cmds.ls(faces)
-        if not faces:
-            return []
-
-        # Filter to n-sided polygon faces (ngons)
-        invalid = lib.polyConstraint(faces,
-                                     t=0x0008,  # type=face
-                                     size=3)  # size=nsided
-
-        return invalid
-
-    def process(self, instance):
-        """Process all the nodes in the instance 'objectSet'"""
-        if not self.is_active(instance.data):
-            return
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(
-                "Meshes found with n-gons: {0}".format(invalid),
-                description=self.description)
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_no_negative_scale.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_no_negative_scale.py
deleted file mode 100644
index 7a77a2a4f6..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_no_negative_scale.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    ValidateMeshOrder,
-)
-from ayon_maya.api import plugin
-from maya import cmds
-
-
-def _as_report_list(values, prefix="- ", suffix="\n"):
-    """Return list as bullet point list for a report"""
-    if not values:
-        return ""
-    return prefix + (suffix + prefix).join(values)
-
-
-class ValidateMeshNoNegativeScale(plugin.MayaInstancePlugin,
-                                  OptionalPyblishPluginMixin):
-    """Ensure that meshes don't have a negative scale.
-
-    Using negatively scaled proxies in a VRayMesh results in inverted
-    normals. As such we want to avoid this.
-
-    We also avoid this on the rig or model because these are often the
-    previous steps for those that are cached to proxies so we can catch this
-    issue early.
-
-    """
-
-    order = ValidateMeshOrder
-    families = ['model']
-    label = 'Mesh No Negative Scale'
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    optional = False
-
-    @staticmethod
-    def get_invalid(instance):
-        meshes = cmds.ls(instance,
-                         type='mesh',
-                         long=True,
-                         noIntermediate=True)
-
-        invalid = []
-        for mesh in meshes:
-            transform = cmds.listRelatives(mesh, parent=True, fullPath=True)[0]
-            scale = cmds.getAttr("{0}.scale".format(transform))[0]
-
-            if any(x < 0 for x in scale):
-                invalid.append(mesh)
-
-        return invalid
-
-    def process(self, instance):
-        """Process all the nodes in the instance 'objectSet'"""
-        if not self.is_active(instance.data):
-            return
-        invalid = self.get_invalid(instance)
-
-        if invalid:
-            raise PublishValidationError(
-                "Meshes found with negative scale:\n\n{0}".format(
-                    _as_report_list(sorted(invalid))
-                ),
-                title="Negative scale"
-            )
diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_manifold.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_manifold.py
deleted file mode 100644
index 8288e8a3b3..0000000000
--- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_manifold.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishXmlValidationError,
-    RepairAction,
-    ValidateMeshOrder,
-)
-from ayon_maya.api import plugin
-from maya import cmds, mel
-
-
-def poly_cleanup(version=4,
-                 meshes=None,
-                 # Version 1
-                 all_meshes=False,
-                 select_only=False,
-                 history_on=True,
-                 quads=False,
-                 nsided=False,
-                 concave=False,
-                 holed=False,
-                 nonplanar=False,
-                 zeroGeom=False,
-                 zeroGeomTolerance=1e-05,
-                 zeroEdge=False,
-                 zeroEdgeTolerance=1e-05,
-                 zeroMap=False,
-                 zeroMapTolerance=1e-05,
-                 # Version 2
-                 shared_uvs=False,
- non_manifold=False, - # Version 3 - lamina=False, - # Version 4 - invalid_components=False): - """Wrapper around `polyCleanupArgList` mel command""" - - # Get all inputs named as `dict` to easily do conversions and formatting - values = locals() - - # Convert booleans to 1 or 0 - for key in [ - "all_meshes", - "select_only", - "history_on", - "quads", - "nsided", - "concave", - "holed", - "nonplanar", - "zeroGeom", - "zeroEdge", - "zeroMap", - "shared_uvs", - "non_manifold", - "lamina", - "invalid_components", - ]: - values[key] = 1 if values[key] else 0 - - cmd = ( - 'polyCleanupArgList {version} {{ ' - '"{all_meshes}",' # 0: All selectable meshes - '"{select_only}",' # 1: Only perform a selection - '"{history_on}",' # 2: Keep construction history - '"{quads}",' # 3: Check for quads polys - '"{nsided}",' # 4: Check for n-sides polys - '"{concave}",' # 5: Check for concave polys - '"{holed}",' # 6: Check for holed polys - '"{nonplanar}",' # 7: Check for non-planar polys - '"{zeroGeom}",' # 8: Check for 0 area faces - '"{zeroGeomTolerance}",' # 9: Tolerance for face areas - '"{zeroEdge}",' # 10: Check for 0 length edges - '"{zeroEdgeTolerance}",' # 11: Tolerance for edge length - '"{zeroMap}",' # 12: Check for 0 uv face area - '"{zeroMapTolerance}",' # 13: Tolerance for uv face areas - '"{shared_uvs}",' # 14: Unshare uvs that are shared - # across vertices - '"{non_manifold}",' # 15: Check for nonmanifold polys - '"{lamina}",' # 16: Check for lamina polys - '"{invalid_components}"' # 17: Remove invalid components - ' }};'.format(**values) - ) - - mel.eval("source polyCleanupArgList") - if not all_meshes and meshes: - # Allow to specify meshes to run over by selecting them - cmds.select(meshes, replace=True) - mel.eval(cmd) - - -class CleanupMatchingPolygons(RepairAction): - label = "Cleanup matching polygons" - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class ValidateMeshNonManifold(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure that meshes don't have non-manifold edges or vertices - - To debug the problem on the meshes you can use Maya's modeling - tool: "Mesh > Cleanup..." 
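    An illustrative query (an editor's sketch, not text from the original
    docstring), showing how such components can be found with `maya.cmds`:

        import maya.cmds as cmds
        components = cmds.polyInfo("pSphereShape1",
                                   nonManifoldEdges=True,
                                   nonManifoldVertices=True) or []

    "pSphereShape1" is only an assumed example shape name; the returned
    strings name the offending edge/vertex components.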
- - """ - - order = ValidateMeshOrder - families = ['model'] - label = 'Mesh Non-Manifold Edges/Vertices' - actions = [ayon_maya.api.action.SelectInvalidAction, - CleanupMatchingPolygons] - optional = True - - @staticmethod - def get_invalid(instance): - - meshes = cmds.ls(instance, type='mesh', long=True) - - invalid = [] - for mesh in meshes: - components = cmds.polyInfo(mesh, - nonManifoldVertices=True, - nonManifoldEdges=True) - if components: - invalid.extend(components) - - return invalid - - def process(self, instance): - """Process all the nodes in the instance 'objectSet'""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - - if invalid: - # Report only the meshes instead of all component indices - invalid_meshes = { - component.split(".", 1)[0] for component in invalid - } - invalid_meshes = _as_report_list(sorted(invalid_meshes)) - - raise PublishXmlValidationError( - plugin=self, - message=( - "Meshes found with non-manifold " - "edges/vertices:\n\n{0}".format(invalid_meshes) - ) - ) - - @classmethod - def repair(cls, instance): - invalid_components = cls.get_invalid(instance) - if not invalid_components: - cls.log.info("No invalid components found to cleanup.") - return - - invalid_meshes = { - component.split(".", 1)[0] for component in invalid_components - } - poly_cleanup(meshes=list(invalid_meshes), - select_only=True, - non_manifold=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_zero_edge.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_zero_edge.py deleted file mode 100644 index bd11ca6488..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_non_zero_edge.py +++ /dev/null @@ -1,81 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateMeshOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateMeshNonZeroEdgeLength(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate meshes don't have edges with a zero length. - - Based on Maya's polyCleanup 'Edges with zero length'. - - Note: - This can be slow for high-res meshes. - - """ - - order = ValidateMeshOrder - families = ['model'] - label = 'Mesh Edge Length Non Zero' - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = True - - __tolerance = 1e-5 - - @classmethod - def get_invalid(cls, instance): - """Return the invalid edges. 
- - Also see: - - http://help.autodesk.com/view/MAYAUL/2015/ENU/?guid=Mesh__Cleanup - - """ - - meshes = cmds.ls(instance, type='mesh', long=True) - if not meshes: - return list() - - valid_meshes = [] - for mesh in meshes: - num_vertices = cmds.polyEvaluate(mesh, vertex=True) - - if num_vertices == 0: - cls.log.warning( - "Skipping \"{}\", cause it does not have any " - "vertices.".format(mesh) - ) - continue - - valid_meshes.append(mesh) - - # Get all edges - edges = ['{0}.e[*]'.format(node) for node in valid_meshes] - - # Filter by constraint on edge length - invalid = lib.polyConstraint(edges, - t=0x8000, # type=edge - length=1, - lengthbound=(0, cls.__tolerance)) - - return invalid - - def process(self, instance): - """Process all meshes""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - label = "Meshes found with zero edge length" - raise PublishValidationError( - message="{}: {}".format(label, invalid), - title=label, - description="{}:\n- ".format(label) + "\n- ".join(invalid) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_normals_unlocked.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_normals_unlocked.py deleted file mode 100644 index dd8c523082..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_normals_unlocked.py +++ /dev/null @@ -1,76 +0,0 @@ -import ayon_maya.api.action -import maya.api.OpenMaya as om2 -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class ValidateMeshNormalsUnlocked(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate all meshes in the instance have unlocked normals - - These can be unlocked manually through: - Modeling > Mesh Display > Unlock Normals - - """ - - order = ValidateMeshOrder - families = ['model'] - label = 'Mesh Normals Unlocked' - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - optional = True - - @staticmethod - def has_locked_normals(mesh): - """Return whether mesh has at least one locked normal""" - - sel = om2.MGlobal.getSelectionListByName(mesh) - node = sel.getDependNode(0) - fn_mesh = om2.MFnMesh(node) - _, normal_ids = fn_mesh.getNormalIds() - for normal_id in normal_ids: - if fn_mesh.isNormalLocked(normal_id): - return True - return False - - @classmethod - def get_invalid(cls, instance): - """Return the meshes with locked normals in instance""" - - meshes = cmds.ls(instance, type='mesh', long=True) - return [mesh for mesh in meshes if cls.has_locked_normals(mesh)] - - def process(self, instance): - """Raise invalid when any of the meshes have locked normals""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - "Meshes found with locked normals:\n\n{0}".format( - _as_report_list(sorted(invalid)) - ), - title="Locked normals" - ) - - @classmethod - def repair(cls, instance): - """Unlocks all normals on the meshes in this instance.""" - invalid = cls.get_invalid(instance) - for mesh in invalid: - cmds.polyNormalPerVertex(mesh, unFreezeNormal=True) diff --git 
a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_overlapping_uvs.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_overlapping_uvs.py deleted file mode 100644 index 0ef6c2732e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_overlapping_uvs.py +++ /dev/null @@ -1,304 +0,0 @@ -import math - -import ayon_maya.api.action -import maya.api.OpenMaya as om -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds -from six.moves import xrange - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class GetOverlappingUVs(object): - - def _createBoundingCircle(self, meshfn): - """ Represent a face by center and radius - - :param meshfn: MFnMesh class - :type meshfn: :class:`maya.api.OpenMaya.MFnMesh` - :returns: (center, radius) - :rtype: tuple - """ - center = [] - radius = [] - for i in xrange(meshfn.numPolygons): # noqa: F821 - # get uvs from face - uarray = [] - varray = [] - for j in range(len(meshfn.getPolygonVertices(i))): - uv = meshfn.getPolygonUV(i, j) - uarray.append(uv[0]) - varray.append(uv[1]) - - # loop through all vertices to construct edges/rays - cu = 0.0 - cv = 0.0 - for j in range(len(uarray)): - cu += uarray[j] - cv += varray[j] - - cu /= len(uarray) - cv /= len(varray) - rsqr = 0.0 - for j in range(len(varray)): - du = uarray[j] - cu - dv = varray[j] - cv - dsqr = du * du + dv * dv - rsqr = dsqr if dsqr > rsqr else rsqr - - center.append(cu) - center.append(cv) - radius.append(math.sqrt(rsqr)) - - return center, radius - - def _createRayGivenFace(self, meshfn, faceId): - """ Represent a face by a series of edges(rays), i.e. - - :param meshfn: MFnMesh class - :type meshfn: :class:`maya.api.OpenMaya.MFnMesh` - :param faceId: face id - :type faceId: int - :returns: False if no valid uv's. - ""(True, orig, vec)"" or ""(False, None, None)"" - :rtype: tuple - - .. code-block:: python - - orig = [orig1u, orig1v, orig2u, orig2v, ... ] - vec = [vec1u, vec1v, vec2u, vec2v, ... ] - """ - orig = [] - vec = [] - # get uvs - uarray = [] - varray = [] - for i in range(len(meshfn.getPolygonVertices(faceId))): - uv = meshfn.getPolygonUV(faceId, i) - uarray.append(uv[0]) - varray.append(uv[1]) - - if len(uarray) == 0 or len(varray) == 0: - return (False, None, None) - - # loop through all vertices to construct edges/rays - u = uarray[-1] - v = varray[-1] - for i in xrange(len(uarray)): # noqa: F821 - orig.append(uarray[i]) - orig.append(varray[i]) - vec.append(u - uarray[i]) - vec.append(v - varray[i]) - u = uarray[i] - v = varray[i] - - return (True, orig, vec) - - def _checkCrossingEdges(self, - face1Orig, - face1Vec, - face2Orig, - face2Vec): - """ Check if there are crossing edges between two faces. - Return True if there are crossing edges and False otherwise. - - :param face1Orig: origin of face 1 - :type face1Orig: tuple - :param face1Vec: face 1 edges - :type face1Vec: list - :param face2Orig: origin of face 2 - :type face2Orig: tuple - :param face2Vec: face 2 edges - :type face2Vec: list - - A face is represented by a series of edges(rays), i.e. - .. code-block:: python - - faceOrig[] = [orig1u, orig1v, orig2u, orig2v, ... ] - faceVec[] = [vec1u, vec1v, vec2u, vec2v, ... 
] - """ - face1Size = len(face1Orig) - face2Size = len(face2Orig) - for i in xrange(0, face1Size, 2): # noqa: F821 - o1x = face1Orig[i] - o1y = face1Orig[i+1] - v1x = face1Vec[i] - v1y = face1Vec[i+1] - n1x = v1y - n1y = -v1x - for j in xrange(0, face2Size, 2): # noqa: F821 - # Given ray1(O1, V1) and ray2(O2, V2) - # Normal of ray1 is (V1.y, V1.x) - o2x = face2Orig[j] - o2y = face2Orig[j+1] - v2x = face2Vec[j] - v2y = face2Vec[j+1] - n2x = v2y - n2y = -v2x - - # Find t for ray2 - # t = [(o1x-o2x)n1x + (o1y-o2y)n1y] / - # (v2x * n1x + v2y * n1y) - denum = v2x * n1x + v2y * n1y - # Edges are parallel if denum is close to 0. - if math.fabs(denum) < 0.000001: - continue - t2 = ((o1x-o2x) * n1x + (o1y-o2y) * n1y) / denum - if (t2 < 0.00001 or t2 > 0.99999): - continue - - # Find t for ray1 - # t = [(o2x-o1x)n2x - # + (o2y-o1y)n2y] / (v1x * n2x + v1y * n2y) - denum = v1x * n2x + v1y * n2y - # Edges are parallel if denum is close to 0. - if math.fabs(denum) < 0.000001: - continue - t1 = ((o2x-o1x) * n2x + (o2y-o1y) * n2y) / denum - - # Edges intersect - if (t1 > 0.00001 and t1 < 0.99999): - return 1 - - return 0 - - def _getOverlapUVFaces(self, meshName): - """ Return overlapping faces - - :param meshName: name of mesh - :type meshName: str - :returns: list of overlapping faces - :rtype: list - """ - faces = [] - # find polygon mesh node - selList = om.MSelectionList() - selList.add(meshName) - mesh = selList.getDependNode(0) - if mesh.apiType() == om.MFn.kTransform: - dagPath = selList.getDagPath(0) - dagFn = om.MFnDagNode(dagPath) - child = dagFn.child(0) - if child.apiType() != om.MFn.kMesh: - raise Exception("Can't find polygon mesh") - mesh = child - meshfn = om.MFnMesh(mesh) - - center, radius = self._createBoundingCircle(meshfn) - for i in xrange(meshfn.numPolygons): # noqa: F821 - rayb1, face1Orig, face1Vec = self._createRayGivenFace(meshfn, i) - if not rayb1: - continue - cui = center[2*i] - cvi = center[2*i+1] - ri = radius[i] - # Exclude the degenerate face - # if(area(face1Orig) < 0.000001) continue; - # Loop through face j where j != i - for j in range(i+1, meshfn.numPolygons): - cuj = center[2*j] - cvj = center[2*j+1] - rj = radius[j] - du = cuj - cui - dv = cvj - cvi - dsqr = du * du + dv * dv - # Quick rejection if bounding circles don't overlap - if (dsqr >= (ri + rj) * (ri + rj)): - continue - - rayb2, face2Orig, face2Vec = self._createRayGivenFace(meshfn, - j) - if not rayb2: - continue - # Exclude the degenerate face - # if(area(face2Orig) < 0.000001): continue; - if self._checkCrossingEdges(face1Orig, - face1Vec, - face2Orig, - face2Vec): - face1 = '%s.f[%d]' % (meshfn.name(), i) - face2 = '%s.f[%d]' % (meshfn.name(), j) - if face1 not in faces: - faces.append(face1) - if face2 not in faces: - faces.append(face2) - return faces - - -class ValidateMeshHasOverlappingUVs(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """ Validate the current mesh overlapping UVs. - - It validates whether the current UVs are overlapping or not. - It is optional to warn publisher about it. - """ - - order = ValidateMeshOrder - families = ['model'] - label = 'Mesh Has Overlapping UVs' - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = True - - @classmethod - def _get_overlapping_uvs(cls, mesh): - """Return overlapping UVs of mesh. - - Args: - mesh (str): Mesh node name - - Returns: - list: Overlapping uvs for the input mesh in all uv sets. 
- - """ - ovl = GetOverlappingUVs() - - # Store original uv set - original_current_uv_set = cmds.polyUVSet(mesh, - query=True, - currentUVSet=True)[0] - - overlapping_faces = [] - for uv_set in cmds.polyUVSet(mesh, query=True, allUVSets=True): - cmds.polyUVSet(mesh, currentUVSet=True, uvSet=uv_set) - overlapping_faces.extend(ovl._getOverlapUVFaces(mesh)) - - # Restore original uv set - cmds.polyUVSet(mesh, currentUVSet=True, uvSet=original_current_uv_set) - - return overlapping_faces - - @classmethod - def get_invalid(cls, instance, compute=False): - - if compute: - invalid = [] - for node in cmds.ls(instance, type="mesh"): - faces = cls._get_overlapping_uvs(node) - invalid.extend(faces) - - instance.data["overlapping_faces"] = invalid - - return instance.data.get("overlapping_faces", []) - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance, compute=True) - if invalid: - raise PublishValidationError( - "Meshes found with overlapping UVs:\n\n{0}".format( - _as_report_list(sorted(invalid)) - ), - title="Overlapping UVs" - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_shader_connections.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_shader_connections.py deleted file mode 100644 index e3fff157ac..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_shader_connections.py +++ /dev/null @@ -1,128 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -def pairs(iterable): - """Iterate over iterable per group of two""" - a = iter(iterable) - for i, y in zip(a, a): - yield i, y - - -def get_invalid_sets(shapes): - """Return invalid sets for the given shapes. - - This takes a list of shape nodes to cache the set members for overlapping - sets in the queries. This avoids many Maya set member queries. - - Returns: - dict: Dictionary of shapes and their invalid sets, e.g. - {"pCubeShape": ["set1", "set2"]} - - """ - - cache = dict() - invalid = dict() - - # Collect the sets from the shape - for shape in shapes: - invalid_sets = [] - sets = cmds.listSets(object=shape, t=1, extendToShape=False) or [] - for set_ in sets: - - members = cache.get(set_, None) - if members is None: - members = set(cmds.ls(cmds.sets(set_, - query=True, - nodesOnly=True), long=True)) - cache[set_] = members - - # If the shape is not actually present as a member of the set - # consider it invalid - if shape not in members: - invalid_sets.append(set_) - - if invalid_sets: - invalid[shape] = invalid_sets - - return invalid - - -def disconnect(node_a, node_b): - """Remove all connections between node a and b.""" - - # Disconnect outputs - outputs = cmds.listConnections(node_a, - plugs=True, - connections=True, - source=False, - destination=True) - for output, destination in pairs(outputs): - if destination.split(".", 1)[0] == node_b: - cmds.disconnectAttr(output, destination) - - # Disconnect inputs - inputs = cmds.listConnections(node_a, - plugs=True, - connections=True, - source=True, - destination=False) - for input, source in pairs(inputs): - if source.split(".", 1)[0] == node_b: - cmds.disconnectAttr(source, input) - - -class ValidateMeshShaderConnections(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure mesh shading engine connections are valid. 
- - In some scenarios Maya keeps connections to multiple shaders even if just - a single one is assigned on the shape. - - These are related sets returned by `maya.cmds.listSets` that don't - actually have the shape as member. - - """ - - order = ValidateMeshOrder - families = ['model'] - label = "Mesh Shader Connections" - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - optional = True - - def process(self, instance): - """Process all the nodes in the instance 'objectSet'""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - "Shapes found with invalid shader connections: " - "{0}".format(invalid)) - - @staticmethod - def get_invalid(instance): - - nodes = instance[:] - shapes = cmds.ls(nodes, noIntermediate=True, long=True, type="mesh") - invalid = get_invalid_sets(shapes).keys() - - return invalid - - @classmethod - def repair(cls, instance): - - shapes = cls.get_invalid(instance) - invalid = get_invalid_sets(shapes) - for shape, invalid_sets in invalid.items(): - for set_node in invalid_sets: - disconnect(shape, set_node) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_single_uv_set.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_single_uv_set.py deleted file mode 100644 index a254cbf8a6..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_single_uv_set.py +++ /dev/null @@ -1,73 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateMeshSingleUVSet(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Warn on multiple UV sets existing for each polygon mesh. - - On versions prior to Maya 2017 this will force no multiple uv sets because - the Alembic exports in Maya prior to 2017 don't support writing multiple - UV sets. - - """ - - order = ValidateMeshOrder - families = ['model', 'pointcache'] - optional = True - label = "Mesh Single UV Set" - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - - @staticmethod - def get_invalid(instance): - - meshes = cmds.ls(instance, type='mesh', long=True) - - invalid = [] - for mesh in meshes: - uvSets = cmds.polyUVSet(mesh, - query=True, - allUVSets=True) or [] - - # ensure unique (sometimes maya will list 'map1' twice) - uvSets = set(uvSets) - - if len(uvSets) != 1: - invalid.append(mesh) - - return invalid - - def process(self, instance): - """Process all the nodes in the instance 'objectSet'""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - - if invalid: - - message = "Nodes found with multiple UV sets: {0}".format(invalid) - - # Maya 2017 and up allows multiple UV sets in Alembic exports - # so we allow it, yet just warn the user to ensure they know about - # the other UV sets. 
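        # (Illustrative aside, not from the original file: the version gate
        # below relies on `cmds.about(version=True)` returning a string such
        # as "2017" that casts cleanly to int. A variant could compare the
        # integer API version instead, e.g.:
        #
        #     allowed = cmds.about(apiVersion=True) >= 201700
        #
        # where 201700 is the assumed API code for Maya 2017.)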
- allowed = int(cmds.about(version=True)) >= 2017 - - if allowed: - self.log.warning(message) - else: - raise PublishValidationError(message) - - @classmethod - def repair(cls, instance): - for mesh in cls.get_invalid(instance): - lib.remove_other_uv_sets(mesh) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_uv_set_map1.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_uv_set_map1.py deleted file mode 100644 index a749edb35a..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_uv_set_map1.py +++ /dev/null @@ -1,136 +0,0 @@ -import inspect - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateMeshUVSetMap1(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate model's default set exists and is named 'map1'. - - In Maya meshes by default have a uv set named "map1" that cannot be - deleted. It can be renamed however, introducing some issues with some - renderers. As such we ensure the first (default) UV set index is named - "map1". - - """ - - order = ValidateMeshOrder - families = ['model'] - optional = True - label = "Mesh has map1 UV Set" - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - - @classmethod - def get_invalid(cls, instance): - - meshes = cmds.ls(instance, type='mesh', long=True) - - invalid = [] - for mesh in meshes: - - # Get existing mapping of uv sets by index - indices = cmds.polyUVSet(mesh, query=True, allUVSetsIndices=True) - maps = cmds.polyUVSet(mesh, query=True, allUVSets=True) - if not indices or not maps: - cls.log.warning("Mesh has no UV set: %s", mesh) - invalid.append(mesh) - continue - - mapping = dict(zip(indices, maps)) - - # Get the uv set at index zero. 
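        # (Illustrative aside, not from the original file: for a default cube
        # the `mapping` built above would look like {0: "map1"}, and for a
        # mesh with one extra set e.g. {0: "map1", 1: "uvSet1"}, because
        # `cmds.polyUVSet` returns indices and names as parallel lists:
        #
        #     indices = cmds.polyUVSet(mesh, query=True,
        #                              allUVSetsIndices=True)
        #     names = cmds.polyUVSet(mesh, query=True, allUVSets=True)
        #     mapping = dict(zip(indices, names))
        # )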
- name = mapping[0] - if name != "map1": - invalid.append(mesh) - - return invalid - - def process(self, instance): - """Process all the nodes in the instance 'objectSet'""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - - invalid_list = "\n".join(f"- {node}" for node in invalid) - - raise PublishValidationError( - "Meshes found without 'map1' UV set:\n" - "{0}".format(invalid_list), - description=self.get_description() - ) - - @classmethod - def repair(cls, instance): - """Rename uv map at index zero to map1""" - - for mesh in cls.get_invalid(instance): - - # Get existing mapping of uv sets by index - indices = cmds.polyUVSet(mesh, query=True, allUVSetsIndices=True) - maps = cmds.polyUVSet(mesh, query=True, allUVSets=True) - if not indices or not maps: - # No UV set exist at all, create a `map1` uv set - # This may fail silently if the mesh has no geometry at all - cmds.polyUVSet(mesh, create=True, uvSet="map1") - continue - - mapping = dict(zip(indices, maps)) - - # Ensure there is no uv set named map1 to avoid - # a clash on renaming the "default uv set" to map1 - existing = set(maps) - if "map1" in existing: - - # Find a unique name index - i = 2 - while True: - name = "map{0}".format(i) - if name not in existing: - break - i += 1 - - cls.log.warning("Renaming clashing uv set name on mesh" - " %s to '%s'", mesh, name) - - cmds.polyUVSet(mesh, - rename=True, - uvSet="map1", - newUVSet=name) - - # Rename the initial index to map1 - original = mapping[0] - cmds.polyUVSet(mesh, - rename=True, - uvSet=original, - newUVSet="map1") - - @staticmethod - def get_description(): - return inspect.cleandoc("""### Mesh found without map1 uv set - - A mesh must have a default UV set named `map1` to adhere to the default - mesh behavior of Maya meshes. - - There may be meshes that: - - Have no UV set - - Have no `map1` uv set but are using a different name - - Have a `map1` uv set, but it's not the default (first index) - - - #### Repair - - Using repair will try to make the first UV set the `map1` uv set. If it - does not exist yet it will be created or renames the current first - UV set to `map1`. - """) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_vertices_have_edges.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_vertices_have_edges.py deleted file mode 100644 index a10a275c44..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mesh_vertices_have_edges.py +++ /dev/null @@ -1,85 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api.lib import len_flattened -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateMeshVerticesHaveEdges(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate meshes have only vertices that are connected to edges. - - Maya can have invalid geometry with vertices that have no edges or - faces connected to them. - - In Maya 2016 EXT 2 and later there's a command to fix this: - `maya.cmds.polyClean(mesh, cleanVertices=True)` - - In older versions of Maya it works to select the invalid vertices - and merge the components. - - To find these invalid vertices select all vertices of the mesh - that are visible in the viewport (drag to select), afterwards - invert your selection (Ctrl + Shift + I). The remaining selection - contains the invalid vertices. 
- - """ - - order = ValidateMeshOrder - families = ['model'] - label = 'Mesh Vertices Have Edges' - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - optional = True - - @classmethod - def repair(cls, instance): - - # This fix only works in Maya 2016 EXT2 and newer - if float(cmds.about(version=True)) <= 2016.0: - raise PublishValidationError( - ("Repair not supported in Maya version below " - "2016 EXT 2")) - - invalid = cls.get_invalid(instance) - for node in invalid: - cmds.polyClean(node, cleanVertices=True) - - @classmethod - def get_invalid(cls, instance): - invalid = [] - - meshes = cmds.ls(instance, type="mesh", long=True) - for mesh in meshes: - num_vertices = cmds.polyEvaluate(mesh, vertex=True) - - if num_vertices == 0: - cls.log.warning( - "Skipping \"{}\", cause it does not have any " - "vertices.".format(mesh) - ) - continue - - # Vertices from all edges - edges = "%s.e[*]" % mesh - vertices = cmds.polyListComponentConversion(edges, toVertex=True) - num_vertices_from_edges = len_flattened(vertices) - - if num_vertices != num_vertices_from_edges: - invalid.append(mesh) - - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Meshes found in instance with vertices that " - "have no edges: {}").format(invalid)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_model_content.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_model_content.py deleted file mode 100644 index 5e100dca1c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_model_content.py +++ /dev/null @@ -1,135 +0,0 @@ -import inspect - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateModelContent(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Adheres to the content of 'model' product type - - See `get_description` for more details. - - """ - - order = ValidateContentsOrder - families = ["model"] - label = "Model Content" - actions = [ayon_maya.api.action.SelectInvalidAction] - - validate_top_group = True - optional = False - - allowed = ('mesh', 'transform', 'nurbsCurve', 'nurbsSurface', 'locator') - - @classmethod - def get_invalid(cls, instance): - - content_instance = instance.data.get("setMembers", None) - if not content_instance: - cls.log.error("Model instance has no nodes. 
" - "It is not allowed to be empty") - return [instance.data["instance_node"]] - - # All children will be included in the extracted export so we also - # validate *all* descendents of the set members and we skip any - # intermediate shapes - descendants = cmds.listRelatives(content_instance, - allDescendents=True, - fullPath=True) or [] - descendants = cmds.ls(descendants, noIntermediate=True, long=True) - content_instance = list(set(content_instance + descendants)) - - # Ensure only valid node types - nodes = cmds.ls(content_instance, long=True) - valid = cmds.ls(content_instance, long=True, type=cls.allowed) - invalid = set(nodes) - set(valid) - - if invalid: - # List as bullet points - invalid_bullets = "\n".join(f"- {node}" for node in invalid) - - cls.log.error( - "These nodes are not allowed:\n{}\n\n" - "The valid node types are: {}".format( - invalid_bullets, ", ".join(cls.allowed)) - ) - return list(invalid) - - if not valid: - cls.log.error( - "No valid nodes in the model instance.\n" - "The valid node types are: {}".format(", ".join(cls.allowed)) - ) - return [instance.data["instance_node"]] - - # Ensure it has shapes - shapes = cmds.ls(valid, long=True, shapes=True) - if not shapes: - cls.log.error("No shapes in the model instance") - return [instance.data["instance_node"]] - - # Ensure single top group - top_parents = {"|" + x.split("|", 2)[1] for x in content_instance} - if cls.validate_top_group and len(top_parents) != 1: - cls.log.error( - "A model instance must have exactly one top group. " - "Found top groups: {}".format(", ".join(top_parents)) - ) - return list(top_parents) - - def _is_visible(node): - """Return whether node is visible""" - return lib.is_visible(node, - displayLayer=False, - intermediateObject=True, - parentHidden=True, - visibility=True) - - # The roots must be visible (the assemblies) - for parent in top_parents: - if not _is_visible(parent): - cls.log.error("Invisible parent (root node) is not " - "allowed: {0}".format(parent)) - invalid.add(parent) - - # Ensure at least one shape is visible - if not any(_is_visible(shape) for shape in shapes): - cls.log.error("No visible shapes in the model instance") - invalid.update(shapes) - - return list(invalid) - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - title="Model content is invalid", - message="Model content is invalid. See log for more details.", - description=self.get_description() - ) - - @classmethod - def get_description(cls): - return inspect.cleandoc(f""" - ### Model content is invalid - - Your model instance does not adhere to the rules of a - model product type: - - - Must have at least one visible shape in it, like a mesh. - - Must have one root node. When exporting multiple meshes they - must be inside a group. 
- - May only contain the following node types: - {", ".join(cls.allowed)} - """) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mvlook_contents.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_mvlook_contents.py deleted file mode 100644 index b26a3f4cb7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_mvlook_contents.py +++ /dev/null @@ -1,103 +0,0 @@ -import os - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - -COLOUR_SPACES = ['sRGB', 'linear', 'auto'] -MIPMAP_EXTENSIONS = ['tdl'] - - -class ValidateMvLookContents(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - order = ValidateContentsOrder - families = ['mvLook'] - hosts = ['maya'] - label = 'Validate mvLook Data' - actions = [ayon_maya.api.action.SelectInvalidAction] - - # Allow this validation step to be skipped when you just need to - # get things pushed through. - optional = True - - # These intents get enforced checks, other ones get warnings. - enforced_intents = ['-', 'Final'] - - def process(self, instance): - if not self.is_active(instance.data): - return - - intent = instance.context.data['intent']['value'] - publishMipMap = instance.data["publishMipMap"] - enforced = True - if intent in self.enforced_intents: - self.log.debug("This validation will be enforced: '{}'" - .format(intent)) - else: - enforced = False - self.log.debug("This validation will NOT be enforced: '{}'" - .format(intent)) - - if not instance[:]: - raise PublishValidationError("Instance is empty") - - invalid = set() - - resources = instance.data.get("resources", []) - for resource in resources: - files = resource["files"] - self.log.debug( - "Resource '{}', files: [{}]".format(resource, files)) - node = resource["node"] - if len(files) == 0: - self.log.error("File node '{}' uses no or non-existing " - "files".format(node)) - invalid.add(node) - continue - for fname in files: - if not self.valid_file(fname): - self.log.error("File node '{}'/'{}' is not valid" - .format(node, fname)) - invalid.add(node) - - if publishMipMap and not self.is_or_has_mipmap(fname, files): - msg = "File node '{}'/'{}' does not have a mipmap".format( - node, fname) - if enforced: - invalid.add(node) - self.log.error(msg) - raise PublishValidationError(msg) - else: - self.log.warning(msg) - - if invalid: - raise PublishValidationError( - "'{}' has invalid look content".format(instance.name) - ) - - def valid_file(self, fname): - self.log.debug("Checking validity of '{}'".format(fname)) - if not os.path.exists(fname): - return False - if os.path.getsize(fname) == 0: - return False - return True - - def is_or_has_mipmap(self, fname, files): - ext = os.path.splitext(fname)[1][1:] - if ext in MIPMAP_EXTENSIONS: - self.log.debug(" - Is a mipmap '{}'".format(fname)) - return True - - for colour_space in COLOUR_SPACES: - for mipmap_ext in MIPMAP_EXTENSIONS: - mipmap_fname = '.'.join([fname, colour_space, mipmap_ext]) - if mipmap_fname in files: - self.log.debug( - " - Has a mipmap '{}'".format(mipmap_fname)) - return True - return False diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_animation.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_animation.py deleted file mode 100644 index 146dfda2ca..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_animation.py +++ /dev/null @@ -1,59 +0,0 @@ 
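# (Editor's illustrative sketch, not part of the patch hunks: the validator
# deleted below detects animation by asking Maya for anim curves on the
# instance members, conceptually:
#
#     import maya.cmds as cmds
#     curves = cmds.keyframe(nodes, query=True, name=True) or []
#     animated = set(cmds.listConnections(curves) or [])
#
# `nodes` is assumed to hold the instance's member nodes.)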
-import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class ValidateNoAnimation(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure no keyframes on nodes in the Instance. - - Even though a Model would extract without animCurves correctly this avoids - getting different output from a model when extracted from a different - frame than the first frame. (Might be overly restrictive though) - - """ - - order = ValidateContentsOrder - label = "No Animation" - hosts = ["maya"] - families = ["model"] - optional = True - actions = [ayon_maya.api.action.SelectInvalidAction] - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Keyframes found on:\n\n{0}".format( - _as_report_list(sorted(invalid)) - ), - title="Keyframes on model" - ) - - @staticmethod - def get_invalid(instance): - - nodes = instance[:] - if not nodes: - return [] - - curves = cmds.keyframe(nodes, query=True, name=True) - if curves: - return list(set(cmds.listConnections(curves))) - - return [] diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_default_camera.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_default_camera.py deleted file mode 100644 index a57c02a842..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_default_camera.py +++ /dev/null @@ -1,50 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class ValidateNoDefaultCameras(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure no default (startup) cameras are in the instance. - - This might be unnecessary. In the past there were some issues with - referencing/importing files that contained the start up cameras overriding - settings when being loaded and sometimes being skipped. 
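# For illustration, a minimal standalone sketch of the keyframe check in
# ValidateNoAnimation above, assuming a running Maya session; "pSphere1" is a
# hypothetical node name.
from maya import cmds

def get_animated_nodes(nodes):
    # cmds.keyframe in query mode returns the animCurve nodes driving `nodes`
    curves = cmds.keyframe(nodes, query=True, name=True)
    if not curves:
        return []
    # the nodes those curves connect to are the animated ones
    return sorted(set(cmds.listConnections(curves)))

print(get_animated_nodes(["pSphere1"]))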
- """ - - order = ValidateContentsOrder - families = ['camera'] - label = "No Default Cameras" - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - @staticmethod - def get_invalid(instance): - cameras = cmds.ls(instance, type='camera', long=True) - return [cam for cam in cameras if - cmds.camera(cam, query=True, startupCamera=True)] - - def process(self, instance): - """Process all the cameras in the instance""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Default cameras found:\n\n{0}".format( - _as_report_list(sorted(invalid)) - ), - title="Default cameras" - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_namespace.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_namespace.py deleted file mode 100644 index 9bd2d98e54..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_namespace.py +++ /dev/null @@ -1,75 +0,0 @@ -import ayon_maya.api.action -import maya.cmds as cmds -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -def get_namespace(node_name): - # ensure only node's name (not parent path) - node_name = node_name.rsplit("|", 1)[-1] - # ensure only namespace - return node_name.rpartition(":")[0] - - -class ValidateNoNamespace(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure the nodes don't have a namespace""" - - order = ValidateContentsOrder - families = ['model'] - label = 'No Namespaces' - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - optional = False - - @staticmethod - def get_invalid(instance): - nodes = cmds.ls(instance, long=True) - return [node for node in nodes if get_namespace(node)] - - def process(self, instance): - """Process all the nodes in the instance""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - - if invalid: - invalid_namespaces = {get_namespace(node) for node in invalid} - raise PublishValidationError( - message="Namespaces found:\n\n{0}".format( - _as_report_list(sorted(invalid_namespaces)) - ), - title="Namespaces in model", - description=( - "## Namespaces found in model\n" - "It is not allowed to publish a model that contains " - "namespaces." - ) - ) - - @classmethod - def repair(cls, instance): - """Remove all namespaces from the nodes in the instance""" - - invalid = cls.get_invalid(instance) - - # Iterate over the nodes by long to short names to iterate the lowest - # in hierarchy nodes first. 
This way we avoid having renamed parents - # before renaming children nodes - for node in sorted(invalid, key=len, reverse=True): - - node_name = node.rsplit("|", 1)[-1] - node_name_without_namespace = node_name.rsplit(":")[-1] - cmds.rename(node, node_name_without_namespace) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_null_transforms.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_null_transforms.py deleted file mode 100644 index 5c1baeb0e3..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_null_transforms.py +++ /dev/null @@ -1,90 +0,0 @@ -import ayon_maya.api.action -import maya.cmds as cmds -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -def has_shape_children(node): - # Check if any descendants - all_descendents = cmds.listRelatives(node, - allDescendents=True, - fullPath=True) - if not all_descendents: - return False - - # Check if there are any shapes at all - shapes = cmds.ls(all_descendents, shapes=True, noIntermediate=True) - if not shapes: - return False - - return True - - -class ValidateNoNullTransforms(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure no null transforms are in the scene. - - Warning: - Transforms with only intermediate shapes are also considered null - transforms. These transform nodes could potentially be used in your - construction history, so take care when automatically fixing this or - when deleting the empty transforms manually. - - """ - - order = ValidateContentsOrder - families = ['model'] - label = 'No Empty/Null Transforms' - actions = [RepairAction, - ayon_maya.api.action.SelectInvalidAction] - optional = False - - @staticmethod - def get_invalid(instance): - """Return invalid transforms in instance""" - - transforms = cmds.ls(instance, type='transform', long=True) - - invalid = [] - for transform in transforms: - if not has_shape_children(transform): - invalid.append(transform) - - return invalid - - def process(self, instance): - """Process all the transform nodes in the instance """ - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Empty transforms found without shapes:\n\n{0}".format( - _as_report_list(sorted(invalid)) - ), - title="Empty transforms" - ) - - @classmethod - def repair(cls, instance): - """Delete all null transforms. - - Note: If the node is used elsewhere (eg. connection to attributes or - in history) deletion might mess up things. 
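# A minimal pure-Python sketch of why the namespace repair above renames the
# deepest nodes first: full DAG paths of children stay valid until their
# parents are renamed. The paths below are hypothetical.
invalid = ["|root|ns:child", "|root|ns:child|ns:leaf"]
for node in sorted(invalid, key=len, reverse=True):
    node_name = node.rsplit("|", 1)[-1]           # strip the parent path
    print(node, "->", node_name.rsplit(":")[-1])  # name without namespace
# Prints the leaf first, then its parent.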
- - """ - invalid = cls.get_invalid(instance) - if invalid: - cmds.delete(invalid) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_unknown_nodes.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_unknown_nodes.py deleted file mode 100644 index 3662c94ba3..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_unknown_nodes.py +++ /dev/null @@ -1,53 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class ValidateNoUnknownNodes(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Checks to see if there are any unknown nodes in the instance. - - This often happens if nodes from plug-ins are used but are not available - on this machine. - - Note: Some studios use unknown nodes to store data on (as attributes) - because it's a lightweight node. - - """ - - order = ValidateContentsOrder - hosts = ['maya'] - families = ['model', 'rig'] - optional = True - label = "Unknown Nodes" - actions = [ayon_maya.api.action.SelectInvalidAction] - - @staticmethod - def get_invalid(instance): - return cmds.ls(instance, type='unknown') - - def process(self, instance): - """Process all the nodes in the instance""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Unknown nodes found:\n\n{0}".format( - _as_report_list(sorted(invalid)) - ), - title="Unknown nodes" - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_vraymesh.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_vraymesh.py deleted file mode 100644 index d14c335021..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_no_vraymesh.py +++ /dev/null @@ -1,48 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_maya.api import plugin -from maya import cmds - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class ValidateNoVRayMesh(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate there are no VRayMesh objects in the instance""" - - order = pyblish.api.ValidatorOrder - label = 'No V-Ray Proxies (VRayMesh)' - families = ["pointcache"] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - if not cmds.pluginInfo("vrayformaya", query=True, loaded=True): - return - - shapes = cmds.ls(instance, - shapes=True, - type="mesh") - - inputs = cmds.listConnections(shapes, - destination=False, - source=True) or [] - vray_meshes = cmds.ls(inputs, type='VRayMesh') - if vray_meshes: - raise PublishValidationError( - "Meshes that are V-Ray Proxies should not be in an Alembic " - "pointcache.\n" - "Found V-Ray proxies:\n\n{}".format( - _as_report_list(sorted(vray_meshes)) - ), - title="V-Ray Proxies in pointcache" - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids.py deleted 
file mode 100644 index f9dafa024f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids.py +++ /dev/null @@ -1,65 +0,0 @@ -from ayon_core.pipeline.publish import ( - ValidatePipelineOrder, - PublishXmlValidationError -) -import ayon_maya.api.action -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class ValidateNodeIDs(plugin.MayaInstancePlugin): - """Validate nodes have a Colorbleed Id. - - When IDs are missing from nodes *save your scene* and they should be - automatically generated because IDs are created on non-referenced nodes - in Maya upon scene save. - - """ - - order = ValidatePipelineOrder - label = 'Instance Nodes Have ID' - families = ["model", - "look", - "rig", - "pointcache", - "animation", - "yetiRig", - "assembly"] - - actions = [ayon_maya.api.action.SelectInvalidAction, - ayon_maya.api.action.GenerateUUIDsOnInvalidAction] - - @classmethod - def apply_settings(cls, project_settings): - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - """Process all meshes""" - - # Ensure all nodes have a cbId - invalid = self.get_invalid(instance) - if invalid: - names = "\n".join( - "- {}".format(node) for node in invalid - ) - raise PublishXmlValidationError( - plugin=self, - message="Nodes found without IDs: {}".format(invalid), - formatting_data={"nodes": names} - ) - - @classmethod - def get_invalid(cls, instance): - """Return the member nodes that are invalid""" - - # We do want to check the referenced nodes as it might be - # part of the end product. - id_nodes = lib.get_id_required_nodes(referenced_nodes=True, - nodes=instance[:], - # Exclude those with already - # existing ids - existing_ids=False) - return id_nodes diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_deformed_shapes.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_deformed_shapes.py deleted file mode 100644 index 8e3c4e5bff..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_deformed_shapes.py +++ /dev/null @@ -1,79 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateNodeIdsDeformedShape(plugin.MayaInstancePlugin): - """Validate if deformed shapes have related IDs to the original shapes. - - When a deformer is applied in the scene on a referenced mesh that already - had deformers then Maya will create a new shape node for the mesh that - does not have the original id. This validator checks whether the ids are - valid on all the shape nodes in the instance. 
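# A minimal sketch of the id comparison this validator performs (its methods
# follow below), assuming ayon_maya is importable inside Maya; the shape path
# is hypothetical.
from ayon_maya.api import lib

shape = "|char_GRP|body_GEO|body_GEOShapeDeformed"
history_id = lib.get_id_from_sibling(shape)
if history_id and lib.get_id(shape) != history_id:
    # same fix the repair action applies
    lib.set_id(shape, history_id, overwrite=True)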
- - """ - - order = ValidateContentsOrder - families = ['look'] - label = 'Deformed shape ids' - actions = [ - ayon_maya.api.action.SelectInvalidAction, - RepairAction - ] - - @classmethod - def apply_settings(cls, project_settings): - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - """Process all the nodes in the instance""" - - # Ensure all nodes have a cbId and a related ID to the original shapes - # if a deformer has been created on the shape - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Shapes found that are considered 'Deformed'" - "without object ids: {0}").format(invalid)) - - @classmethod - def get_invalid(cls, instance): - """Get all nodes which do not match the criteria""" - - shapes = cmds.ls(instance[:], - dag=True, - leaf=True, - shapes=True, - long=True, - noIntermediate=True) - - invalid = [] - for shape in shapes: - history_id = lib.get_id_from_sibling(shape) - if history_id: - current_id = lib.get_id(shape) - if current_id != history_id: - invalid.append(shape) - - return invalid - - @classmethod - def repair(cls, instance): - - for node in cls.get_invalid(instance): - # Get the original id from history - history_id = lib.get_id_from_sibling(node) - if not history_id: - cls.log.error("Could not find ID in history for '%s'", node) - continue - - lib.set_id(node, history_id, overwrite=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_in_database.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_in_database.py deleted file mode 100644 index 95e488d8e7..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_in_database.py +++ /dev/null @@ -1,101 +0,0 @@ -import ayon_api -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidatePipelineOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class ValidateNodeIdsInDatabase(plugin.MayaInstancePlugin): - """Validate if the CB Id is related to an folder in the database - - All nodes with the `cbId` attribute will be validated to ensure that - the loaded asset in the scene is related to the current project. 
- - Tip: If there is an asset which is being reused from a different project - please ensure the asset is republished in the new project - - """ - - order = ValidatePipelineOrder - label = 'Node Ids in Database' - families = ["*"] - - actions = [ayon_maya.api.action.SelectInvalidAction, - ayon_maya.api.action.GenerateUUIDsOnInvalidAction] - - @classmethod - def apply_settings(cls, project_settings): - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Found folder ids which are not related to " - "current project in instance: `{}`".format(instance.name)) - - @classmethod - def get_invalid(cls, instance): - - nodes = instance[:] - if not nodes: - return - - # Get all id required nodes - id_required_nodes = lib.get_id_required_nodes(referenced_nodes=False, - nodes=nodes) - if not id_required_nodes: - return - - # check ids against database ids - folder_ids = cls.get_project_folder_ids(context=instance.context) - - # Get all asset IDs - invalid = [] - for node in id_required_nodes: - cb_id = lib.get_id(node) - - # Ignore nodes without id, those are validated elsewhere - if not cb_id: - continue - - folder_id = cb_id.split(":", 1)[0] - if folder_id not in folder_ids: - cls.log.error("`%s` has unassociated folder id" % node) - invalid.append(node) - - return invalid - - @classmethod - def get_project_folder_ids(cls, context): - """Return all folder ids in the current project. - - Arguments: - context (pyblish.api.Context): The publish context. - - Returns: - set[str]: All folder ids in the current project. - - """ - # We query the database only for the first instance instead of - # per instance by storing a cache in the context - key = "__cache_project_folder_ids" - if key in context.data: - return context.data[key] - - # check ids against database - project_name = context.data["projectName"] - folder_entities = ayon_api.get_folders(project_name, fields={"id"}) - folder_ids = { - folder_entity["id"] - for folder_entity in folder_entities - } - - context.data[key] = folder_ids - return folder_ids diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_related.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_related.py deleted file mode 100644 index 7382653d35..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_related.py +++ /dev/null @@ -1,124 +0,0 @@ -import inspect -import uuid -from collections import defaultdict - -import ayon_maya.api.action -from ayon_api import get_folders -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidatePipelineOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -def is_valid_uuid(value) -> bool: - """Return whether value is a valid UUID""" - try: - uuid.UUID(value) - except ValueError: - return False - return True - - -class ValidateNodeIDsRelated(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate nodes have a related `cbId` to the instance.data[folderPath]""" - - order = ValidatePipelineOrder - label = 'Node Ids Related (ID)' - families = ["model", - "look", - "rig"] - optional = True - - actions = [ayon_maya.api.action.SelectInvalidAction, - ayon_maya.api.action.GenerateUUIDsOnInvalidAction] - - @classmethod - def apply_settings(cls, project_settings): - # Disable 
plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - """Process all nodes in instance (including hierarchy)""" - if not self.is_active(instance.data): - return - - # Ensure all nodes have a cbId - invalid = self.get_invalid(instance) - if invalid: - - invalid_list = "\n".join(f"- {node}" for node in sorted(invalid)) - - raise PublishValidationError(( - "Nodes IDs found that are not related to folder '{}':\n{}" - ).format(instance.data["folderPath"], invalid_list), - description=self.get_description() - ) - - @classmethod - def get_invalid(cls, instance): - """Return the member nodes that are invalid""" - folder_id = instance.data["folderEntity"]["id"] - - # We do want to check the referenced nodes as it might be - # part of the end product - invalid = list() - nodes_by_other_folder_ids = defaultdict(set) - for node in instance: - _id = lib.get_id(node) - if not _id: - continue - - node_folder_id = _id.split(":", 1)[0] - if node_folder_id != folder_id: - invalid.append(node) - nodes_by_other_folder_ids[node_folder_id].add(node) - - # Log what other assets were found. - if nodes_by_other_folder_ids: - project_name = instance.context.data["projectName"] - other_folder_ids = set(nodes_by_other_folder_ids.keys()) - - # Remove folder ids that are not valid UUID identifiers, these - # may be legacy OpenPype ids - other_folder_ids = {folder_id for folder_id in other_folder_ids - if is_valid_uuid(folder_id)} - if not other_folder_ids: - return invalid - - folder_entities = get_folders(project_name=project_name, - folder_ids=other_folder_ids, - fields=["path"]) - if folder_entities: - # Log names of other assets detected - # We disregard logging nodes/ids for asset ids where no asset - # was found in the database because ValidateNodeIdsInDatabase - # takes care of that. - folder_paths = {entity["path"] for entity in folder_entities} - cls.log.error( - "Found nodes related to other folders:\n{}".format( - "\n".join(f"- {path}" for path in sorted(folder_paths)) - ) - ) - - return invalid - - @staticmethod - def get_description(): - return inspect.cleandoc("""### Node IDs must match folder id - - The node ids must match the folder entity id you are publishing to. - - Usually these mismatch occurs if you are re-using nodes from another - folder or project. - - #### How to repair? - - The repair action will regenerate new ids for - the invalid nodes to match the instance's folder. 
- """) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_unique.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_unique.py deleted file mode 100644 index 27606baa55..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_ids_unique.py +++ /dev/null @@ -1,79 +0,0 @@ -from collections import defaultdict - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidatePipelineOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateNodeIdsUnique(plugin.MayaInstancePlugin): - """Validate the nodes in the instance have a unique Colorbleed Id - - Here we ensure that what has been added to the instance is unique - """ - - order = ValidatePipelineOrder - label = 'Non Duplicate Instance Members (ID)' - families = ["model", - "look", - "rig", - "yetiRig"] - - actions = [ayon_maya.api.action.SelectInvalidAction, - ayon_maya.api.action.GenerateUUIDsOnInvalidAction] - - @classmethod - def apply_settings(cls, project_settings): - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - """Process all meshes""" - - # Ensure all nodes have a cbId - invalid = self.get_invalid(instance) - if invalid: - label = "Nodes found with non-unique folder ids" - raise PublishValidationError( - message="{}, see log".format(label), - title="Non-unique folder ids on nodes", - description="{}\n- {}".format(label, - "\n- ".join(sorted(invalid))) - ) - - @classmethod - def get_invalid(cls, instance): - """Return the member nodes that are invalid""" - - # Check only non intermediate shapes - # todo: must the instance itself ensure to have no intermediates? - # todo: how come there are intermediates? - instance_members = cmds.ls(instance, noIntermediate=True, long=True) - - # Collect each id with their members - ids = defaultdict(list) - for member in instance_members: - object_id = lib.get_id(member) - if not object_id: - continue - ids[object_id].append(member) - - # Take only the ids with more than one member - invalid = list() - for members in ids.values(): - if len(members) > 1: - members_text = "\n".join( - "- {}".format(member) for member in sorted(members) - ) - cls.log.error( - "ID found on multiple nodes:\n{}".format(members_text) - ) - invalid.extend(members) - - return invalid diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_no_ghosting.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_no_ghosting.py deleted file mode 100644 index 184e4eaf15..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_node_no_ghosting.py +++ /dev/null @@ -1,58 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateNodeNoGhosting(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure nodes do not have ghosting enabled. - - If one would publish towards a non-Maya format it's likely that stats - like ghosting won't be exported, eg. exporting to Alembic. 
- - Instead of creating many micro-managing checks (like this one) to ensure - attributes have not been changed from their default it could be more - efficient to export to a format that will never hold such data anyway. - - """ - - order = ValidateContentsOrder - families = ['model', 'rig'] - label = "No Ghosting" - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - _attributes = {'ghosting': 0} - - @classmethod - def get_invalid(cls, instance): - - # Transforms and shapes seem to have ghosting - nodes = cmds.ls(instance, long=True, type=['transform', 'shape']) - invalid = [] - for node in nodes: - _iteritems = getattr( - cls._attributes, "iteritems", cls._attributes.items - ) - for attr, required_value in _iteritems(): - if cmds.attributeQuery(attr, node=node, exists=True): - - value = cmds.getAttr('{0}.{1}'.format(node, attr)) - if value != required_value: - invalid.append(node) - - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - "Nodes with ghosting enabled found: {0}".format(invalid)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_plugin_path_attributes.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_plugin_path_attributes.py deleted file mode 100644 index 506759516e..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_plugin_path_attributes.py +++ /dev/null @@ -1,79 +0,0 @@ -import os - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api.action import SelectInvalidAction -from ayon_maya.api.lib import pairwise -from ayon_maya.api import plugin -from maya import cmds - - -class ValidatePluginPathAttributes(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """ - Validate plug-in path attributes point to existing file paths. 
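# A minimal pure-Python sketch of the pairwise() idiom used below:
# cmds.ls(..., showType=True) returns a flat list alternating node and type,
# which pairwise() groups into (node, type) tuples. The plug-in imports its
# own pairwise from ayon_maya.api.lib; sample values here are made up.
def pairwise(iterable):
    it = iter(iterable)
    return zip(it, it)

flat = ["file1", "file", "ramp1", "ramp"]  # as returned by ls(showType=True)
print(list(pairwise(flat)))  # [('file1', 'file'), ('ramp1', 'ramp')]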
- """ - - order = ValidateContentsOrder - families = ["workfile"] - label = "Plug-in Path Attributes" - actions = [SelectInvalidAction] - optional = False - - # Attributes are defined in project settings - attribute = [] - - @classmethod - def get_invalid(cls, instance): - invalid = list() - - file_attrs = { - item["name"]: item["value"] - for item in cls.attribute - } - if not file_attrs: - return invalid - - # Consider only valid node types to avoid "Unknown object type" warning - all_node_types = set(cmds.allNodeTypes()) - node_types = [ - key - for key in file_attrs.keys() - if key in all_node_types - ] - - for node, node_type in pairwise(cmds.ls(type=node_types, - showType=True)): - # get the filepath - file_attr = "{}.{}".format(node, file_attrs[node_type]) - filepath = cmds.getAttr(file_attr) - - if filepath and not os.path.exists(filepath): - cls.log.error("{} '{}' uses non-existing filepath: {}" - .format(node_type, node, filepath)) - invalid.append(node) - - return invalid - - def process(self, instance): - """Process all directories Set as Filenames in Non-Maya Nodes""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - title="Plug-in Path Attributes", - message="Non-existent filepath found on nodes: {}".format( - ", ".join(invalid) - ), - description=( - "## Plug-in nodes use invalid filepaths\n" - "The workfile contains nodes from plug-ins that use " - "filepaths which do not exist.\n\n" - "Please make sure their filepaths are correct and the " - "files exist on disk." - ) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_image_rule.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_image_rule.py deleted file mode 100644 index fd1a9ad9e4..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_image_rule.py +++ /dev/null @@ -1,73 +0,0 @@ -import os - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateRenderImageRule(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates Maya Workpace "images" file rule matches project settings. - - This validates against the configured default render image folder: - Studio Settings > Project > Maya > - Render Settings > Default render image folder. - - """ - - order = ValidateContentsOrder - label = "Images File Rule (Workspace)" - families = ["renderlayer"] - actions = [RepairAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - required_images_rule = os.path.normpath( - self.get_default_render_image_folder(instance) - ) - current_images_rule = os.path.normpath( - cmds.workspace(fileRuleEntry="images") - ) - - if current_images_rule != required_images_rule: - raise PublishValidationError( - ( - "Invalid workspace `images` file rule value: '{}'. 
" - "Must be set to: '{}'" - ).format(current_images_rule, required_images_rule)) - - @classmethod - def repair(cls, instance): - - required_images_rule = cls.get_default_render_image_folder(instance) - current_images_rule = cmds.workspace(fileRuleEntry="images") - - if current_images_rule != required_images_rule: - cmds.workspace(fileRule=("images", required_images_rule)) - cmds.workspace(saveWorkspace=True) - - @classmethod - def get_default_render_image_folder(cls, instance): - staging_dir = instance.data.get("stagingDir") - if staging_dir: - cls.log.debug( - "Staging dir found: \"{}\". Ignoring setting from " - "`project_settings/maya/render_settings/" - "default_render_image_folder`.".format(staging_dir) - ) - return staging_dir - - return ( - instance.context.data - ["project_settings"] - ["maya"] - ["render_settings"] - ["default_render_image_folder"] - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_no_default_cameras.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_no_default_cameras.py deleted file mode 100644 index 083b21b819..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_no_default_cameras.py +++ /dev/null @@ -1,42 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateRenderNoDefaultCameras(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure no default (startup) cameras are to be rendered.""" - - order = ValidateContentsOrder - families = ['renderlayer'] - label = "No Default Cameras Renderable" - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - @staticmethod - def get_invalid(instance): - - renderable = set(instance.data["cameras"]) - - # Collect default cameras - cameras = cmds.ls(type='camera', long=True) - defaults = set(cam for cam in cameras if - cmds.camera(cam, query=True, startupCamera=True)) - - return [cam for cam in renderable if cam in defaults] - - def process(self, instance): - """Process all the cameras in the instance""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - title="Rendering default cameras", - message="Renderable default cameras " - "found: {0}".format(invalid)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_single_camera.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_single_camera.py deleted file mode 100644 index 6856b9a0f1..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_render_single_camera.py +++ /dev/null @@ -1,82 +0,0 @@ -import inspect -import re - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api.lib_rendersettings import RenderSettings -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateRenderSingleCamera(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate renderable camera count for layer and token. - - Pipeline is supporting multiple renderable cameras per layer, but image - prefix must contain token. 
- """ - - order = ValidateContentsOrder - label = "Render Single Camera" - families = ["renderlayer", - "vrayscene"] - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - R_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) - - def process(self, instance): - """Process all the cameras in the instance""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Invalid render cameras.", - description=self.get_description() - ) - - @classmethod - def get_invalid(cls, instance): - - cameras = instance.data.get("cameras", []) - renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() - # handle various renderman names - if renderer.startswith('renderman'): - renderer = 'renderman' - - file_prefix = cmds.getAttr( - RenderSettings.get_image_prefix_attr(renderer) - ) - - renderlayer = instance.data["renderlayer"] - if len(cameras) > 1: - if re.search(cls.R_CAMERA_TOKEN, file_prefix): - # if there is token in prefix and we have more then - # 1 camera, all is ok. - return - cls.log.error( - "Multiple renderable cameras found for %s: %s ", - renderlayer, ", ".join(cameras)) - return [renderlayer] + cameras - - elif len(cameras) < 1: - cls.log.error("No renderable cameras found for %s ", renderlayer) - return [renderlayer] - - def get_description(self): - return inspect.cleandoc( - """### Render Cameras Invalid - - Your render cameras are misconfigured. You may have no render - camera set or have multiple cameras with a render filename - prefix that does not include the `` token. - - See the logs for more details about the cameras. - - """ - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_renderlayer_aovs.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_renderlayer_aovs.py deleted file mode 100644 index 16350cfa08..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_renderlayer_aovs.py +++ /dev/null @@ -1,65 +0,0 @@ -import ayon_api -import ayon_maya.api.action -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_maya.api import plugin - - -class ValidateRenderLayerAOVs(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate created AOVs / RenderElement is registered in the database - - Each render element is registered as a product which is formatted based on - the render layer and the render element, example: - - . 
- - This translates to something like this: - - CHAR.diffuse - - This check is needed to ensure the render output is still complete - - """ - - order = pyblish.api.ValidatorOrder + 0.1 - label = "Render Passes / AOVs Are Registered" - families = ["renderlayer"] - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Found unregistered products: {}".format(invalid)) - - def get_invalid(self, instance): - invalid = [] - - project_name = instance.context.data["projectName"] - folder_entity = instance.data["folderEntity"] - render_passes = instance.data.get("renderPasses", []) - for render_pass in render_passes: - is_valid = self.validate_product_registered( - project_name, folder_entity, render_pass - ) - if not is_valid: - invalid.append(render_pass) - - return invalid - - def validate_product_registered( - self, project_name, folder_entity, product_name - ): - """Check if product is registered in the database under the folder""" - - return ayon_api.get_product_by_name( - project_name, product_name, folder_entity["id"], fields={"id"} - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rendersettings.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_rendersettings.py deleted file mode 100644 index 7b21bf6a14..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rendersettings.py +++ /dev/null @@ -1,446 +0,0 @@ -# -*- coding: utf-8 -*- -"""Maya validator for render settings.""" -import re -from collections import OrderedDict - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api.lib_rendersettings import RenderSettings -from ayon_maya.api import plugin -from maya import cmds, mel - - -def convert_to_int_or_float(string_value): - # Order of types are important here since float can convert string - # representation of integer. - types = [int, float] - for t in types: - try: - result = t(string_value) - except ValueError: - continue - else: - return result - - # Neither integer or float. - return string_value - - -def get_redshift_image_format_labels(): - """Return nice labels for Redshift image formats.""" - var = "$g_redshiftImageFormatLabels" - return mel.eval("{0}={0}".format(var)) - - -class ValidateRenderSettings(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates the global render settings - - * File Name Prefix must start with: `` - all other token are customizable but sane values for Arnold are: - - `//_` - - token is supported also, useful for multiple renderable - cameras per render layer. - - For Redshift omit token. Redshift will append it - automatically if AOVs are enabled and if you user Multipart EXR - it doesn't make much sense. - - * Frame Padding must be: - * default: 4 - - * Animation must be toggled on, in Render Settings - Common tab: - * vray: Animation on standard of specific - * arnold: Frame / Animation ext: Any choice without "(Single Frame)" - * redshift: Animation toggled on - - NOTE: - The repair function of this plugin does not repair the animation - setting of the render settings due to multiple possibilities. 
- - """ - - order = ValidateContentsOrder - label = "Validate Render Settings" - hosts = ["maya"] - families = ["renderlayer"] - actions = [RepairAction] - optional = True - - ImagePrefixes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'rmanGlobals.imageFileFormat', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', - } - - ImagePrefixTokens = { - 'mentalray': '//{aov_separator}', # noqa: E501 - 'arnold': '//{aov_separator}', # noqa: E501 - 'redshift': '//', - 'vray': '//', - 'renderman': '{aov_separator}..', - 'mayahardware2': '//', - } - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - - redshift_AOV_prefix = "/{aov_separator}" # noqa: E501 - - renderman_dir_prefix = "/" - - R_AOV_TOKEN = re.compile( - r'%a||', re.IGNORECASE) - R_LAYER_TOKEN = re.compile( - r'%l||', re.IGNORECASE) - R_CAMERA_TOKEN = re.compile(r'%c|Camera>') - R_SCENE_TOKEN = re.compile(r'%s|', re.IGNORECASE) - - DEFAULT_PADDING = 4 - VRAY_PREFIX = "//" - DEFAULT_PREFIX = "//_" - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - title="Invalid Render Settings", - message=("Invalid render settings found " - "for '{}'!".format(instance.name)) - ) - - @classmethod - def get_invalid(cls, instance): - - invalid = False - - renderer = instance.data['renderer'] - layer = instance.data['renderlayer'] - cameras = instance.data.get("cameras", []) - - # Prefix attribute can return None when a value was never set - prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer], - layer=layer) or "" - padding = lib.get_attr_in_layer( - attr=RenderSettings.get_padding_attr(renderer), - layer=layer - ) - - anim_override = lib.get_attr_in_layer("defaultRenderGlobals.animation", - layer=layer) - - prefix = prefix.replace( - "{aov_separator}", instance.data.get("aovSeparator", "_")) - - default_prefix = cls.ImagePrefixTokens[renderer] - - if not anim_override: - invalid = True - cls.log.error("Animation needs to be enabled. 
Use the same " - "frame for start and end to render single frame") - - if not re.search(cls.R_LAYER_TOKEN, prefix): - invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "doesn't have: '' or " - "'' token".format(prefix)) - - if len(cameras) > 1 and not re.search(cls.R_CAMERA_TOKEN, prefix): - invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "doesn't have: '' token".format(prefix)) - cls.log.error( - "Note that to needs to have capital 'C' at the beginning") - - # renderer specific checks - if renderer == "vray": - vray_settings = cmds.ls(type="VRaySettingsNode") - if not vray_settings: - node = cmds.createNode("VRaySettingsNode") - else: - node = vray_settings[0] - - scene_sep = cmds.getAttr( - "{}.fileNameRenderElementSeparator".format(node)) - if scene_sep != instance.data.get("aovSeparator", "_"): - cls.log.error("AOV separator is not set correctly.") - invalid = True - - if renderer == "redshift": - redshift_AOV_prefix = cls.redshift_AOV_prefix.replace( - "{aov_separator}", instance.data.get("aovSeparator", "_") - ) - if re.search(cls.R_AOV_TOKEN, prefix): - invalid = True - cls.log.error(("Do not use AOV token [ {} ] - " - "Redshift is using image prefixes per AOV so " - "it doesn't make much sense using it in global" - "image prefix").format(prefix)) - # get redshift AOVs - rs_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=False) - for aov in rs_aovs: - aov_prefix = cmds.getAttr("{}.filePrefix".format(aov)) - # check their image prefix - if aov_prefix != redshift_AOV_prefix: - cls.log.error(("AOV ({}) image prefix is not set " - "correctly {} != {}").format( - cmds.getAttr("{}.name".format(aov)), - aov_prefix, - redshift_AOV_prefix - )) - invalid = True - - # check aov file format - aov_ext = cmds.getAttr("{}.fileFormat".format(aov)) - default_ext = cmds.getAttr("redshiftOptions.imageFormat") - aov_type = cmds.getAttr("{}.aovType".format(aov)) - if aov_type == "Cryptomatte": - # redshift Cryptomatte AOV always uses "Cryptomatte (EXR)" - # so we ignore validating file format for it. 
- pass - - elif default_ext != aov_ext: - labels = get_redshift_image_format_labels() - cls.log.error( - "AOV file format {} does not match global file format " - "{}".format(labels[aov_ext], labels[default_ext]) - ) - invalid = True - - if renderer == "renderman": - file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat") - dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir") - - if file_prefix.lower() != prefix.lower(): - invalid = True - cls.log.error("Wrong image prefix [ {} ]".format(file_prefix)) - - if dir_prefix.lower() != cls.renderman_dir_prefix.lower(): - invalid = True - cls.log.error("Wrong directory prefix [ {} ]".format( - dir_prefix)) - - if renderer == "arnold": - multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs") - if multipart: - if re.search(cls.R_AOV_TOKEN, prefix): - invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "You can't use '' token " - "with merge AOVs turned on".format(prefix)) - default_prefix = re.sub( - cls.R_AOV_TOKEN, "", default_prefix) - # remove aov token from prefix to pass validation - default_prefix = default_prefix.split("{aov_separator}")[0] - elif not re.search(cls.R_AOV_TOKEN, prefix): - invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "doesn't have: '' or " - "token".format(prefix)) - - default_prefix = default_prefix.replace( - "{aov_separator}", instance.data.get("aovSeparator", "_")) - if prefix.lower() != default_prefix.lower(): - cls.log.warning("warning: prefix differs from " - "recommended {}".format( - default_prefix)) - - if padding != cls.DEFAULT_PADDING: - invalid = True - cls.log.error("Expecting padding of {} ( {} )".format( - cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING)) - - # load validation definitions from settings - settings_lights_flag = instance.context.data["project_settings"].get( - "maya", {}).get( - "render_settings", {}).get( - "enable_all_lights", False) - - instance_lights_flag = instance.data.get("renderSetupIncludeLights") - if settings_lights_flag != instance_lights_flag: - cls.log.warning( - "Instance flag for \"Render Setup Include Lights\" is set to " - "{} and Settings flag is set to {}".format( - instance_lights_flag, settings_lights_flag - ) - ) - - # go through definitions and test if such node.attribute exists. - # if so, compare its value from the one required. - for data in cls.get_nodes(instance, renderer): - for node in data["nodes"]: - # Why is captured 'PublishValidationError'? How it can be - # raised by 'cmds.getAttr(...)'? - try: - render_value = cmds.getAttr( - "{}.{}".format(node, data["attribute"]) - ) - except PublishValidationError: - invalid = True - cls.log.error( - "Cannot get value of {}.{}".format( - node, data["attribute"] - ) - ) - else: - if render_value not in data["values"]: - invalid = True - cls.log.error( - "Invalid value {} set on {}.{}. Expecting " - "{}".format( - render_value, - node, - data["attribute"], - data["values"] - ) - ) - - return invalid - - @classmethod - def get_nodes(cls, instance, renderer): - maya_settings = instance.context.data["project_settings"]["maya"] - renderer_key = "{}_render_attributes".format(renderer) - validation_settings = ( - maya_settings["publish"]["ValidateRenderSettings"].get( - renderer_key - ) - ) or [] - validation_settings = [ - (item["type"], item["value"]) - for item in validation_settings - ] - result = [] - for attr, values in OrderedDict(validation_settings).items(): - values = [convert_to_int_or_float(v) for v in values if v] - - # Validate the settings has values. 
- if not values: - cls.log.error( - "Settings for {} is missing values.".format(attr) - ) - continue - - cls.log.debug("{}: {}".format(attr, values)) - if "." not in attr: - cls.log.warning( - "Skipping invalid attribute defined in validation " - "settings: \"{}\"".format(attr) - ) - continue - - node_type, attribute_name = attr.split(".", 1) - - # first get node of that type - nodes = cmds.ls(type=node_type) - - if not nodes: - cls.log.warning( - "No nodes of type \"{}\" found.".format(node_type) - ) - continue - - result.append( - { - "attribute": attribute_name, - "nodes": nodes, - "values": values - } - ) - - return result - - @classmethod - def repair(cls, instance): - renderer = instance.data['renderer'] - layer_node = instance.data['setMembers'] - redshift_AOV_prefix = cls.redshift_AOV_prefix.replace( - "{aov_separator}", instance.data.get("aovSeparator", "_") - ) - default_prefix = cls.ImagePrefixTokens[renderer].replace( - "{aov_separator}", instance.data.get("aovSeparator", "_") - ) - - for data in cls.get_nodes(instance, renderer): - if not data["values"]: - continue - for node in data["nodes"]: - lib.set_attribute(data["attribute"], data["values"][0], node) - with lib.renderlayer(layer_node): - - # Repair animation must be enabled - cmds.setAttr("defaultRenderGlobals.animation", True) - - # Repair prefix - if renderer == "arnold": - multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs") - if multipart: - separator_variations = [ - "_", - "_", - "", - ] - for variant in separator_variations: - default_prefix = default_prefix.replace(variant, "") - - if renderer != "renderman": - prefix_attr = RenderSettings.get_image_prefix_attr(renderer) - fname_prefix = default_prefix - cmds.setAttr(prefix_attr, fname_prefix, type="string") - - # Repair padding - padding_attr = RenderSettings.get_padding_attr(renderer) - cmds.setAttr(padding_attr, cls.DEFAULT_PADDING) - else: - # renderman handles stuff differently - cmds.setAttr("rmanGlobals.imageFileFormat", - default_prefix, - type="string") - cmds.setAttr("rmanGlobals.imageOutputDir", - cls.renderman_dir_prefix, - type="string") - - if renderer == "vray": - vray_settings = cmds.ls(type="VRaySettingsNode") - if not vray_settings: - node = cmds.createNode("VRaySettingsNode") - else: - node = vray_settings[0] - - cmds.optionMenuGrp("vrayRenderElementSeparator", - v=instance.data.get("aovSeparator", "_")) - cmds.setAttr( - "{}.fileNameRenderElementSeparator".format(node), - instance.data.get("aovSeparator", "_"), - type="string" - ) - - if renderer == "redshift": - # get redshift AOVs - rs_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=False) - for aov in rs_aovs: - # fix AOV prefixes - cmds.setAttr( - "{}.filePrefix".format(aov), - redshift_AOV_prefix, type="string") - # fix AOV file format - default_ext = cmds.getAttr( - "redshiftOptions.imageFormat", asString=True) - cmds.setAttr( - "{}.fileFormat".format(aov), default_ext) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_resolution.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_resolution.py deleted file mode 100644 index 58d2ad3030..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_resolution.py +++ /dev/null @@ -1,107 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_core.pipeline.publish import RepairAction -from ayon_maya.api import lib -from ayon_maya.api.lib import reset_scene_resolution -from ayon_maya.api import plugin -from 
maya import cmds - - -class ValidateResolution(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate the render resolution setting aligned with DB""" - - order = pyblish.api.ValidatorOrder - families = ["renderlayer"] - label = "Validate Resolution" - actions = [RepairAction] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid_resolution(instance) - if invalid: - raise PublishValidationError( - "Render resolution is invalid. See log for details.", - description=( - "Wrong render resolution setting. " - "Please use repair button to fix it.\n\n" - "If current renderer is V-Ray, " - "make sure vraySettings node has been created." - ) - ) - - @classmethod - def get_invalid_resolution(cls, instance): - width, height, pixelAspect = cls.get_folder_resolution(instance) - current_renderer = instance.data["renderer"] - layer = instance.data["renderlayer"] - invalid = False - if current_renderer == "vray": - vray_node = "vraySettings" - if cmds.objExists(vray_node): - current_width = lib.get_attr_in_layer( - "{}.width".format(vray_node), layer=layer) - current_height = lib.get_attr_in_layer( - "{}.height".format(vray_node), layer=layer) - current_pixelAspect = lib.get_attr_in_layer( - "{}.pixelAspect".format(vray_node), layer=layer - ) - else: - cls.log.error( - "Can't detect VRay resolution because there is no node " - "named: `{}`".format(vray_node) - ) - return True - else: - current_width = lib.get_attr_in_layer( - "defaultResolution.width", layer=layer) - current_height = lib.get_attr_in_layer( - "defaultResolution.height", layer=layer) - current_pixelAspect = lib.get_attr_in_layer( - "defaultResolution.pixelAspect", layer=layer - ) - if current_width != width or current_height != height: - cls.log.error( - "Render resolution {}x{} does not match " - "folder resolution {}x{}".format( - current_width, current_height, - width, height - )) - invalid = True - if current_pixelAspect != pixelAspect: - cls.log.error( - "Render pixel aspect {} does not match " - "folder pixel aspect {}".format( - current_pixelAspect, pixelAspect - )) - invalid = True - return invalid - - @classmethod - def get_folder_resolution(cls, instance): - task_attributes = instance.data["taskEntity"]["attrib"] - width = task_attributes["resolutionWidth"] - height = task_attributes["resolutionHeight"] - pixel_aspect = task_attributes["pixelAspect"] - return int(width), int(height), float(pixel_aspect) - - @classmethod - def repair(cls, instance): - # Usually without renderlayer overrides the renderlayers - # all share the same resolution value - so fixing the first - # will have fixed all the others too. 
It's much faster to - # check whether it's invalid first instead of switching - # into all layers individually - if not cls.get_invalid_resolution(instance): - cls.log.debug( - "Nothing to repair on instance: {}".format(instance) - ) - return - layer_node = instance.data['setMembers'] - with lib.renderlayer(layer_node): - reset_scene_resolution() diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_resources.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_resources.py deleted file mode 100644 index 0ec51d909c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_resources.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -from collections import defaultdict - -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -class ValidateResources(plugin.MayaInstancePlugin): - """Validates mapped resources. - - These are external files to the current application, for example - these could be textures, image planes, cache files or other linked - media. - - This validates: - - The resources have unique filenames (without extension) - - """ - - order = ValidateContentsOrder - label = "Resources Unique" - - def process(self, instance): - - resources = instance.data.get("resources", []) - if not resources: - self.log.debug("No resources to validate..") - return - - basenames = defaultdict(set) - - for resource in resources: - files = resource.get("files", []) - for filename in files: - - # Use normalized paths in comparison and ignore case - # sensitivity - filename = os.path.normpath(filename).lower() - - basename = os.path.splitext(os.path.basename(filename))[0] - basenames[basename].add(filename) - - invalid_resources = list() - for basename, sources in basenames.items(): - if len(sources) > 1: - invalid_resources.extend(sources) - - self.log.error( - "Non-unique resource name: {0}" - "{0} (sources: {1})".format( - basename, - list(sources) - ) - ) - - if invalid_resources: - raise PublishValidationError("Invalid resources in instance.") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_review.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_review.py deleted file mode 100644 index 76f61dc3e5..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_review.py +++ /dev/null @@ -1,29 +0,0 @@ -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, PublishValidationError -) -from ayon_maya.api import plugin - - -class ValidateReview(plugin.MayaInstancePlugin): - """Validate review.""" - - order = ValidateContentsOrder - label = "Validate Review" - families = ["review"] - - def process(self, instance): - cameras = instance.data["cameras"] - - # validate required settings - if len(cameras) == 0: - raise PublishValidationError( - "No camera found in review instance: {}".format(instance) - ) - elif len(cameras) > 2: - raise PublishValidationError( - "Only a single camera is allowed for a review instance but " - "more than one camera found in review instance: {}. 
" - "Cameras found: {}".format(instance, ", ".join(cameras)) - ) - - self.log.debug('camera: {}'.format(instance.data["review_camera"])) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_contents.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_contents.py deleted file mode 100644 index ada2ce045b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_contents.py +++ /dev/null @@ -1,259 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateRigContents(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure rig contains pipeline-critical content - - Every rig must contain at least two object sets: - "controls_SET" - Set of all animatable controls - "out_SET" - Set of all cacheable meshes - - """ - - order = ValidateContentsOrder - label = "Rig Contents" - families = ["rig"] - action = [ayon_maya.api.action.SelectInvalidAction] - optional = True - - accepted_output = ["mesh", "transform"] - accepted_controllers = ["transform"] - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Invalid rig content. See log for details.") - - @classmethod - def get_invalid(cls, instance): - - # Find required sets by suffix - required, rig_sets = cls.get_nodes(instance) - - cls.validate_missing_objectsets(instance, required, rig_sets) - - controls_set = rig_sets["controls_SET"] - out_set = rig_sets["out_SET"] - - # Ensure contents in sets and retrieve long path for all objects - output_content = cmds.sets(out_set, query=True) or [] - if not output_content: - raise PublishValidationError("Must have members in rig out_SET") - output_content = cmds.ls(output_content, long=True) - - controls_content = cmds.sets(controls_set, query=True) or [] - if not controls_content: - raise PublishValidationError( - "Must have members in rig controls_SET" - ) - controls_content = cmds.ls(controls_content, long=True) - - rig_content = output_content + controls_content - invalid_hierarchy = cls.invalid_hierarchy(instance, rig_content) - - # Additional validations - invalid_geometry = cls.validate_geometry(output_content) - invalid_controls = cls.validate_controls(controls_content) - - error = False - if invalid_hierarchy: - cls.log.error("Found nodes which reside outside of root group " - "while they are set up for publishing." - "\n%s" % invalid_hierarchy) - error = True - - if invalid_controls: - cls.log.error("Only transforms can be part of the controls_SET." 
- "\n%s" % invalid_controls) - error = True - - if invalid_geometry: - cls.log.error("Only meshes can be part of the out_SET\n%s" - % invalid_geometry) - error = True - if error: - return invalid_hierarchy + invalid_controls + invalid_geometry - - @classmethod - def validate_missing_objectsets(cls, instance, - required_objsets, rig_sets): - """Validate missing objectsets in rig sets - - Args: - instance (pyblish.api.Instance): instance - required_objsets (list[str]): list of objectset names - rig_sets (list[str]): list of rig sets - - Raises: - PublishValidationError: When the error is raised, it will show - which instance has the missing object sets - """ - missing = [ - key for key in required_objsets if key not in rig_sets - ] - if missing: - raise PublishValidationError( - "%s is missing sets: %s" % (instance, ", ".join(missing)) - ) - - @classmethod - def invalid_hierarchy(cls, instance, content): - """ - Check if all rig set members are within the hierarchy of the rig root - - Args: - instance (pyblish.api.Instance): instance - content (list[str]): list of content from rig sets - - Raises: - PublishValidationError: It means no dag nodes in - the rig instance - - Returns: - List[str]: invalid hierarchy - """ - # Ensure there are at least some transforms or dag nodes - # in the rig instance - set_members = instance.data['setMembers'] - if not cmds.ls(set_members, type="dagNode", long=True): - raise PublishValidationError( - "No dag nodes in the rig instance. " - "(Empty instance?)" - ) - # Validate members are inside the hierarchy from root node - root_nodes = cmds.ls(set_members, assemblies=True, long=True) - hierarchy = cmds.listRelatives(root_nodes, allDescendents=True, - fullPath=True) + root_nodes - hierarchy = set(hierarchy) - invalid_hierarchy = [] - for node in content: - if node not in hierarchy: - invalid_hierarchy.append(node) - return invalid_hierarchy - - @classmethod - def validate_geometry(cls, set_members): - """Checks if the node types of the set members valid - - Args: - set_members (list[str]): nodes of the out_set - - Returns: - list[str]: Nodes of invalid types. - """ - - # Validate all shape types - invalid = [] - shapes = cmds.listRelatives(set_members, - allDescendents=True, - shapes=True, - fullPath=True) or [] - all_shapes = cmds.ls(set_members + shapes, long=True, shapes=True) - for shape in all_shapes: - if cmds.nodeType(shape) not in cls.accepted_output: - invalid.append(shape) - - return invalid - - @classmethod - def validate_controls(cls, set_members): - """Checks if the node types of the set members are valid for controls. - - Args: - set_members (list[str]): list of nodes of the controls_set - - Returns: - list: Controls of disallowed node types. 
- """ - - # Validate control types - invalid = [] - for node in set_members: - if cmds.nodeType(node) not in cls.accepted_controllers: - invalid.append(node) - - return invalid - - @classmethod - def get_nodes(cls, instance): - """Get the target objectsets and rig sets nodes - - Args: - instance (pyblish.api.Instance): instance - - Returns: - tuple: 2-tuple of list of objectsets, - list of rig sets nodes - """ - objectsets = ["controls_SET", "out_SET"] - rig_sets_nodes = instance.data.get("rig_sets", []) - return objectsets, rig_sets_nodes - - -class ValidateSkeletonRigContents(ValidateRigContents): - """Ensure skeleton rigs contains pipeline-critical content - - The rigs optionally contain at least two object sets: - "skeletonMesh_SET" - Set of the skinned meshes - with bone hierarchies - - """ - - order = ValidateContentsOrder - label = "Skeleton Rig Contents" - hosts = ["maya"] - families = ["rig.fbx"] - optional = True - - @classmethod - def get_invalid(cls, instance): - objectsets, skeleton_mesh_nodes = cls.get_nodes(instance) - cls.validate_missing_objectsets( - instance, objectsets, instance.data["rig_sets"]) - - # Ensure contents in sets and retrieve long path for all objects - output_content = instance.data.get("skeleton_mesh", []) - output_content = cmds.ls(skeleton_mesh_nodes, long=True) - - invalid_hierarchy = cls.invalid_hierarchy( - instance, output_content) - invalid_geometry = cls.validate_geometry(output_content) - - error = False - if invalid_hierarchy: - cls.log.error("Found nodes which reside outside of root group " - "while they are set up for publishing." - "\n%s" % invalid_hierarchy) - error = True - if invalid_geometry: - cls.log.error("Found nodes which reside outside of root group " - "while they are set up for publishing." - "\n%s" % invalid_hierarchy) - error = True - if error: - return invalid_hierarchy + invalid_geometry - - @classmethod - def get_nodes(cls, instance): - """Get the target objectsets and rig sets nodes - - Args: - instance (pyblish.api.Instance): instance - - Returns: - tuple: 2-tuple of list of objectsets, list of rig sets nodes - """ - objectsets = ["skeletonMesh_SET"] - skeleton_mesh_nodes = instance.data.get("skeleton_mesh", []) - return objectsets, skeleton_mesh_nodes diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers.py deleted file mode 100644 index fed78df2da..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers.py +++ /dev/null @@ -1,294 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api.lib import undo_chunk -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateRigControllers(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate rig controllers. - - Controls must have the transformation attributes on their default - values of translate zero, rotate zero and scale one when they are - unlocked attributes. - - Unlocked keyable attributes may not have any incoming connections. If - these connections are required for the rig then lock the attributes. - - The visibility attribute must be locked. 
- - Note that `repair` will: - - Lock all visibility attributes - - Reset all default values for translate, rotate, scale - - Break all incoming connections to keyable attributes - - """ - order = ValidateContentsOrder + 0.05 - label = "Rig Controllers" - families = ["rig"] - optional = True - actions = [RepairAction, - ayon_maya.api.action.SelectInvalidAction] - - # Default controller values - CONTROLLER_DEFAULTS = { - "translateX": 0, - "translateY": 0, - "translateZ": 0, - "rotateX": 0, - "rotateY": 0, - "rotateZ": 0, - "scaleX": 1, - "scaleY": 1, - "scaleZ": 1 - } - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - '{} failed, see log information'.format(self.label) - ) - - @classmethod - def get_invalid(cls, instance): - - controls_set = cls.get_node(instance) - if not controls_set: - cls.log.error( - "Must have 'controls_SET' in rig instance" - ) - return [instance.data["instance_node"]] - - controls = cmds.sets(controls_set, query=True) - - # Ensure all controls are within the top group - lookup = set(instance[:]) - if not all(control in lookup for control in cmds.ls(controls, - long=True)): - cls.log.error( - "All controls must be inside the rig's group." - ) - return [controls_set] - - # Validate all controls - has_connections = list() - has_unlocked_visibility = list() - has_non_default_values = list() - for control in controls: - if cls.get_connected_attributes(control): - has_connections.append(control) - - # check if visibility is locked - attribute = "{}.visibility".format(control) - locked = cmds.getAttr(attribute, lock=True) - if not locked: - has_unlocked_visibility.append(control) - - if cls.get_non_default_attributes(control): - has_non_default_values.append(control) - - if has_connections: - cls.log.error("Controls have input connections: " - "%s" % has_connections) - - if has_non_default_values: - cls.log.error("Controls have non-default values: " - "%s" % has_non_default_values) - - if has_unlocked_visibility: - cls.log.error("Controls have unlocked visibility " - "attribute: %s" % has_unlocked_visibility) - - invalid = [] - if (has_connections or - has_unlocked_visibility or - has_non_default_values): - invalid = set() - invalid.update(has_connections) - invalid.update(has_non_default_values) - invalid.update(has_unlocked_visibility) - invalid = list(invalid) - cls.log.error("Invalid rig controllers. See log for details.") - - return invalid - - @classmethod - def get_non_default_attributes(cls, control): - """Return attribute plugs with non-default values - - Args: - control (str): Name of control node. - - Returns: - list: The invalid plugs - - """ - - invalid = [] - for attr, default in cls.CONTROLLER_DEFAULTS.items(): - if cmds.attributeQuery(attr, node=control, exists=True): - plug = "{}.{}".format(control, attr) - - # Ignore locked attributes - locked = cmds.getAttr(plug, lock=True) - if locked: - continue - - value = cmds.getAttr(plug) - if value != default: - cls.log.warning("Control non-default value: " - "%s = %s" % (plug, value)) - invalid.append(plug) - - return invalid - - @staticmethod - def get_connected_attributes(control): - """Return attribute plugs with incoming connections. - - This will also ensure no (driven) keys on unlocked keyable attributes. - - Args: - control (str): Name of control node. 
- - Returns: - list: The invalid plugs - - """ - import maya.cmds as mc - - # Support controls without any attributes returning None - attributes = mc.listAttr(control, keyable=True, scalar=True) or [] - invalid = [] - for attr in attributes: - plug = "{}.{}".format(control, attr) - - # Ignore locked attributes - locked = cmds.getAttr(plug, lock=True) - if locked: - continue - - # Ignore proxy connections. - if (cmds.addAttr(plug, query=True, exists=True) and - cmds.addAttr(plug, query=True, usedAsProxy=True)): - continue - - # Check for incoming connections - if cmds.listConnections(plug, source=True, destination=False): - invalid.append(plug) - - return invalid - - @classmethod - def repair(cls, instance): - - controls_set = cls.get_node(instance) - if not controls_set: - cls.log.error( - "Unable to repair because no 'controls_SET' found in rig " - "instance: {}".format(instance) - ) - return - - # Use a single undo chunk - with undo_chunk(): - controls = cmds.sets(controls_set, query=True) - for control in controls: - - # Lock visibility - attr = "{}.visibility".format(control) - locked = cmds.getAttr(attr, lock=True) - if not locked: - cls.log.info("Locking visibility for %s" % control) - cmds.setAttr(attr, lock=True) - - # Remove incoming connections - invalid_plugs = cls.get_connected_attributes(control) - if invalid_plugs: - for plug in invalid_plugs: - cls.log.info("Breaking input connection to %s" % plug) - source = cmds.listConnections(plug, - source=True, - destination=False, - plugs=True)[0] - cmds.disconnectAttr(source, plug) - - # Reset non-default values - invalid_plugs = cls.get_non_default_attributes(control) - if invalid_plugs: - for plug in invalid_plugs: - attr = plug.split(".")[-1] - default = cls.CONTROLLER_DEFAULTS[attr] - cls.log.info("Setting %s to %s" % (plug, default)) - cmds.setAttr(plug, default) - - @classmethod - def get_node(cls, instance): - """Get target object nodes from controls_SET - - Args: - instance (str): instance - - Returns: - list: list of object nodes from controls_SET - """ - return instance.data["rig_sets"].get("controls_SET") - - -class ValidateSkeletonRigControllers(ValidateRigControllers): - """Validate rig controller for skeletonAnim_SET - - Controls must have the transformation attributes on their default - values of translate zero, rotate zero and scale one when they are - unlocked attributes. - - Unlocked keyable attributes may not have any incoming connections. If - these connections are required for the rig then lock the attributes. - - The visibility attribute must be locked. 
- - Note that `repair` will: - - Lock all visibility attributes - - Reset all default values for translate, rotate, scale - - Break all incoming connections to keyable attributes - - """ - order = ValidateContentsOrder + 0.05 - label = "Skeleton Rig Controllers" - hosts = ["maya"] - families = ["rig.fbx"] - - # Default controller values - CONTROLLER_DEFAULTS = { - "translateX": 0, - "translateY": 0, - "translateZ": 0, - "rotateX": 0, - "rotateY": 0, - "rotateZ": 0, - "scaleX": 1, - "scaleY": 1, - "scaleZ": 1 - } - - @classmethod - def get_node(cls, instance): - """Get target object nodes from skeletonMesh_SET - - Args: - instance (str): instance - - Returns: - list: list of object nodes from skeletonMesh_SET - """ - return instance.data["rig_sets"].get("skeletonMesh_SET") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers_arnold_attributes.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers_arnold_attributes.py deleted file mode 100644 index 06691f4bd3..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_controllers_arnold_attributes.py +++ /dev/null @@ -1,98 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateRigControllersArnoldAttributes(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate rig control curves have no keyable arnold attributes. - - The Arnold plug-in will create curve attributes like: - - aiRenderCurve - - aiCurveWidth - - aiSampleRate - - aiCurveShaderR - - aiCurveShaderG - - aiCurveShaderB - - Unfortunately these attributes visible in the channelBox are *keyable* - by default and visible in the channelBox. As such pressing a regular "S" - set key shortcut will set keys on these attributes too, thus cluttering - the animator's scene. - - This validator will ensure they are hidden or unkeyable attributes. 
- - """ - order = ValidateContentsOrder + 0.05 - label = "Rig Controllers (Arnold Attributes)" - families = ["rig"] - optional = False - actions = [RepairAction, - ayon_maya.api.action.SelectInvalidAction] - - attributes = [ - "rcurve", - "cwdth", - "srate", - "ai_curve_shaderr", - "ai_curve_shaderg", - "ai_curve_shaderb" - ] - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError('{} failed, see log ' - 'information'.format(self.label)) - - @classmethod - def get_invalid(cls, instance): - - controls_set = instance.data["rig_sets"].get("controls_SET") - if not controls_set: - return [] - - controls = cmds.sets(controls_set, query=True) or [] - if not controls: - return [] - - shapes = cmds.ls(controls, - dag=True, - leaf=True, - long=True, - shapes=True, - noIntermediate=True) - curves = cmds.ls(shapes, type="nurbsCurve", long=True) - - invalid = list() - for node in curves: - - for attribute in cls.attributes: - if cmds.attributeQuery(attribute, node=node, exists=True): - plug = "{}.{}".format(node, attribute) - if cmds.getAttr(plug, keyable=True): - invalid.append(node) - break - - return invalid - - @classmethod - def repair(cls, instance): - - invalid = cls.get_invalid(instance) - with lib.undo_chunk(): - for node in invalid: - for attribute in cls.attributes: - if cmds.attributeQuery(attribute, node=node, exists=True): - plug = "{}.{}".format(node, attribute) - cmds.setAttr(plug, channelBox=False, keyable=False) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_joints_hidden.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_joints_hidden.py deleted file mode 100644 index f0593ed7bc..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_joints_hidden.py +++ /dev/null @@ -1,50 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateRigJointsHidden(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate all joints are hidden visually. - - This includes being hidden: - - visibility off, - - in a display layer that has visibility off, - - having hidden parents or - - being an intermediate object. 
- - """ - - order = ValidateContentsOrder - families = ['rig'] - label = "Joints Hidden" - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - optional = True - - @staticmethod - def get_invalid(instance): - joints = cmds.ls(instance, type='joint', long=True) - return [j for j in joints if lib.is_visible(j, displayLayer=True)] - - def process(self, instance): - """Process all the nodes in the instance 'objectSet'""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - "Visible joints found: {0}".format(invalid)) - - @classmethod - def repair(cls, instance): - import maya.mel as mel - mel.eval("HideJoints") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_out_set_node_ids.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_out_set_node_ids.py deleted file mode 100644 index e5f7a5b204..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_out_set_node_ids.py +++ /dev/null @@ -1,159 +0,0 @@ -import ayon_maya.api.action -import maya.cmds as cmds -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishXmlValidationError, - RepairAction, - ValidateContentsOrder, - apply_plugin_settings_automatically, - get_plugin_settings, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin - - -class ValidateRigOutSetNodeIds(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate if deformed shapes have related IDs to the original shapes. - - When a deformer is applied in the scene on a referenced mesh that already - had deformers then Maya will create a new shape node for the mesh that - does not have the original id. This validator checks whether the ids are - valid on all the shape nodes in the instance. 
- - """ - - order = ValidateContentsOrder - families = ["rig"] - label = 'Rig Out Set Node Ids' - actions = [ - ayon_maya.api.action.SelectInvalidAction, - RepairAction - ] - allow_history_only = False - optional = False - - @classmethod - def apply_settings(cls, project_settings): - # Preserve automatic settings applying logic - settings = get_plugin_settings(plugin=cls, - project_settings=project_settings, - log=cls.log, - category="maya") - apply_plugin_settings_automatically(cls, settings, logger=cls.log) - - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - """Process all meshes""" - if not self.is_active(instance.data): - return - # Ensure all nodes have a cbId and a related ID to the original shapes - # if a deformer has been created on the shape - invalid = self.get_invalid(instance) - if invalid: - - # Use the short names - invalid = cmds.ls(invalid) - invalid.sort() - - # Construct a human-readable list - invalid = "\n".join("- {}".format(node) for node in invalid) - - raise PublishXmlValidationError( - plugin=ValidateRigOutSetNodeIds, - message=( - "Rig nodes have different IDs than their input " - "history: \n{0}".format(invalid) - ) - ) - - @classmethod - def get_invalid(cls, instance): - """Get all nodes which do not match the criteria""" - - out_set = cls.get_node(instance) - if not out_set: - return [] - - invalid = [] - members = cmds.sets(out_set, query=True) - shapes = cmds.ls(members, - dag=True, - leaf=True, - shapes=True, - long=True, - noIntermediate=True) - - for shape in shapes: - sibling_id = lib.get_id_from_sibling( - shape, - history_only=cls.allow_history_only - ) - if sibling_id: - current_id = lib.get_id(shape) - if current_id != sibling_id: - invalid.append(shape) - - return invalid - - @classmethod - def repair(cls, instance): - - for node in cls.get_invalid(instance): - # Get the original id from sibling - sibling_id = lib.get_id_from_sibling( - node, - history_only=cls.allow_history_only - ) - if not sibling_id: - cls.log.error("Could not find ID in siblings for '%s'", node) - continue - - lib.set_id(node, sibling_id, overwrite=True) - - @classmethod - def get_node(cls, instance): - """Get target object nodes from out_SET - - Args: - instance (str): instance - - Returns: - list: list of object nodes from out_SET - """ - return instance.data["rig_sets"].get("out_SET") - - -class ValidateSkeletonRigOutSetNodeIds(ValidateRigOutSetNodeIds): - """Validate if deformed shapes have related IDs to the original shapes - from skeleton set. - - When a deformer is applied in the scene on a referenced mesh that already - had deformers then Maya will create a new shape node for the mesh that - does not have the original id. This validator checks whether the ids are - valid on all the shape nodes in the instance. 
- - """ - - order = ValidateContentsOrder - families = ["rig.fbx"] - hosts = ['maya'] - label = 'Skeleton Rig Out Set Node Ids' - optional = False - - @classmethod - def get_node(cls, instance): - """Get target object nodes from skeletonMesh_SET - - Args: - instance (str): instance - - Returns: - list: list of object nodes from skeletonMesh_SET - """ - return instance.data["rig_sets"].get( - "skeletonMesh_SET") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_output_ids.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_output_ids.py deleted file mode 100644 index 064c83c0b9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_rig_output_ids.py +++ /dev/null @@ -1,158 +0,0 @@ -from collections import defaultdict - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api.lib import get_id, set_id -from ayon_maya.api import plugin -from maya import cmds - - -def get_basename(node): - """Return node short name without namespace""" - return node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - - -class ValidateRigOutputIds(plugin.MayaInstancePlugin): - """Validate rig output ids. - - Ids must share the same id as similarly named nodes in the scene. This is - to ensure the id from the model is preserved through animation. - - """ - order = ValidateContentsOrder + 0.05 - label = "Rig Output Ids" - families = ["rig"] - actions = [RepairAction, - ayon_maya.api.action.SelectInvalidAction] - - @classmethod - def apply_settings(cls, project_settings): - # Disable plug-in if cbId workflow is disabled - if not project_settings["maya"].get("use_cbid_workflow", True): - cls.enabled = False - return - - def process(self, instance): - invalid = self.get_invalid(instance, compute=True) - if invalid: - raise PublishValidationError("Found nodes with mismatched IDs.") - - @classmethod - def get_invalid(cls, instance, compute=False): - invalid_matches = cls.get_invalid_matches(instance, compute=compute) - return list(invalid_matches.keys()) - - @classmethod - def get_invalid_matches(cls, instance, compute=False): - invalid = {} - - if compute: - out_set = cls.get_node(instance) - if not out_set: - instance.data["mismatched_output_ids"] = invalid - return invalid - - instance_nodes = cmds.sets(out_set, query=True, nodesOnly=True) - instance_nodes = cmds.ls(instance_nodes, long=True) - for node in instance_nodes: - shapes = cmds.listRelatives(node, shapes=True, fullPath=True) - if shapes: - instance_nodes.extend(shapes) - - scene_nodes = cmds.ls(type="transform", long=True) - scene_nodes += cmds.ls(type="mesh", long=True) - scene_nodes = set(scene_nodes) - set(instance_nodes) - - scene_nodes_by_basename = defaultdict(list) - for node in scene_nodes: - basename = get_basename(node) - scene_nodes_by_basename[basename].append(node) - - for instance_node in instance_nodes: - basename = get_basename(instance_node) - if basename not in scene_nodes_by_basename: - continue - - matches = scene_nodes_by_basename[basename] - - ids = set(get_id(node) for node in matches) - ids.add(get_id(instance_node)) - - if len(ids) > 1: - cls.log.error( - "\"{}\" id mismatch to: {}".format( - instance_node, matches - ) - ) - invalid[instance_node] = matches - - instance.data["mismatched_output_ids"] = invalid - else: - invalid = instance.data["mismatched_output_ids"] - - return invalid - - @classmethod - def repair(cls, instance): - invalid_matches = 
cls.get_invalid_matches(instance) - - multiple_ids_match = [] - for instance_node, matches in invalid_matches.items(): - ids = set(get_id(node) for node in matches) - - # If there are multiple scene ids matched, an error needs to be - # raised for manual correction. - if len(ids) > 1: - multiple_ids_match.append({"node": instance_node, - "matches": matches}) - continue - - id_to_set = next(iter(ids)) - set_id(instance_node, id_to_set, overwrite=True) - - if multiple_ids_match: - raise PublishValidationError( - "Multiple matched ids found. Please repair manually: " - "{}".format(multiple_ids_match) - ) - - @classmethod - def get_node(cls, instance): - """Get target object nodes from out_SET - - Args: - instance (str): instance - - Returns: - list: list of object nodes from out_SET - """ - return instance.data["rig_sets"].get("out_SET") - - -class ValidateSkeletonRigOutputIds(ValidateRigOutputIds): - """Validate rig output ids from the skeleton sets. - - Ids must share the same id as similarly named nodes in the scene. This is - to ensure the id from the model is preserved through animation. - - """ - order = ValidateContentsOrder + 0.05 - label = "Skeleton Rig Output Ids" - hosts = ["maya"] - families = ["rig.fbx"] - - @classmethod - def get_node(cls, instance): - """Get target object nodes from skeletonMesh_SET - - Args: - instance (str): instance - - Returns: - list: list of object nodes from skeletonMesh_SET - """ - return instance.data["rig_sets"].get("skeletonMesh_SET") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_scene_set_workspace.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_scene_set_workspace.py deleted file mode 100644 index cd36f98dfb..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_scene_set_workspace.py +++ /dev/null @@ -1,51 +0,0 @@ -import os - -import maya.cmds as cmds -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidatePipelineOrder, -) -from ayon_maya.api import plugin - - -def is_subdir(path, root_dir): - """ Returns whether path is a subdirectory (or file) within root_dir """ - path = os.path.realpath(path) - root_dir = os.path.realpath(root_dir) - - # If not on same drive - if os.path.splitdrive(path)[0].lower() != os.path.splitdrive(root_dir)[0].lower(): # noqa: E501 - return False - - # Get 'relative path' (can contain ../ which means going up) - relative = os.path.relpath(path, root_dir) - - # Check if the path starts by going up, if so it's not a subdirectory. :) - if relative.startswith(os.pardir) or relative == os.curdir: - return False - else: - return True - - -class ValidateSceneSetWorkspace(plugin.MayaContextPlugin): - """Validate the scene is inside the currently set Maya workspace""" - - order = ValidatePipelineOrder - label = 'Maya Workspace Set' - - def process(self, context): - - scene_name = cmds.file(query=True, sceneName=True) - if not scene_name: - raise PublishValidationError( - "Scene hasn't been saved. Workspace can't be validated.") - - root_dir = cmds.workspace(query=True, rootDirectory=True) - - if not is_subdir(scene_name, root_dir): - raise PublishValidationError( - "Maya workspace is not set correctly.\n\n" - f"Current workfile `{scene_name}` is not inside the " - f"current Maya project root directory `{root_dir}`.\n\n" - "Please use Workfile app to re-save."
- ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_setdress_root.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_setdress_root.py deleted file mode 100644 index 26245435eb..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_setdress_root.py +++ /dev/null @@ -1,27 +0,0 @@ -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -class ValidateSetdressRoot(plugin.MayaInstancePlugin): - """Validate if set dress top root node is published.""" - - order = ValidateContentsOrder - label = "SetDress Root" - families = ["setdress"] - - def process(self, instance): - from maya import cmds - - if instance.data.get("exactSetMembersOnly"): - return - - set_member = instance.data["setMembers"] - root = cmds.ls(set_member, assemblies=True, long=True) - - if not root or root[0] not in set_member: - raise PublishValidationError( - "Setdress top root node is not being published." - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shader_name.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_shader_name.py deleted file mode 100644 index 45d7ee1c8b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shader_name.py +++ /dev/null @@ -1,83 +0,0 @@ -import re - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateShaderName(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate shader name assigned. - - It should be _<*>_SHD - - """ - optional = True - order = ValidateContentsOrder - families = ["look"] - label = 'Validate Shaders Name' - actions = [ayon_maya.api.action.SelectInvalidAction] - regex = r'(?P.*)_(.*)_SHD' - - # The default connections to check - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Found shapes with invalid shader names " - "assigned:\n{}").format(invalid)) - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - # Get all shapes from the instance - content_instance = instance.data.get("setMembers", None) - if not content_instance: - cls.log.error("Instance has no nodes!") - return True - pass - descendants = cmds.listRelatives(content_instance, - allDescendents=True, - fullPath=True) or [] - - descendants = cmds.ls(descendants, noIntermediate=True, long=True) - shapes = cmds.ls(descendants, type=["nurbsSurface", "mesh"], long=True) - folder_path = instance.data.get("folderPath") - - # Check the number of connected shadingEngines per shape - regex_compile = re.compile(cls.regex) - error_message = "object {0} has invalid shader name {1}" - for shape in shapes: - shading_engines = cmds.listConnections(shape, - destination=True, - type="shadingEngine") or [] - shaders = cmds.ls( - cmds.listConnections(shading_engines), materials=1 - ) - - for shader in shaders: - m = regex_compile.match(shader) - if m is None: - invalid.append(shape) - cls.log.error(error_message.format(shape, shader)) - else: - if 'asset' in regex_compile.groupindex: - if m.group('asset') != folder_path: - invalid.append(shape) - message = error_message - message += " with missing folder path \"{2}\"" - cls.log.error( - message.format(shape, shader, folder_path) - ) - - return invalid diff 
--git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_default_names.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_default_names.py deleted file mode 100644 index 4e4f98d755..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_default_names.py +++ /dev/null @@ -1,94 +0,0 @@ -import re - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -def short_name(node): - return node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - - -class ValidateShapeDefaultNames(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates that Shape names are using Maya's default format. - - When you create a new polygon cube Maya will name the transform - and shape respectively: - - ['pCube1', 'pCubeShape1'] - If you rename it to `bar1` it will become: - - ['bar1', 'barShape1'] - Then if you rename it to `bar` it will become: - - ['bar', 'barShape'] - Rename it again to `bar1` it will differ as opposed to before: - - ['bar1', 'bar1Shape'] - Note that bar1Shape != barShape1 - Thus the suffix number can be either in front of Shape or behind it. - Then it becomes harder to define where what number should be when a - node contains multiple shapes, for example with many controls in - rigs existing of multiple curves. - - """ - - order = ValidateContentsOrder - hosts = ['maya'] - families = ['model'] - optional = True - label = "Shape Default Naming" - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - - @staticmethod - def _define_default_name(shape): - parent = cmds.listRelatives(shape, parent=True, fullPath=True)[0] - transform = short_name(parent) - return '{0}Shape'.format(transform) - - @staticmethod - def _is_valid(shape): - """ Return whether the shape's name is similar to Maya's default. """ - transform = cmds.listRelatives(shape, parent=True, fullPath=True)[0] - - transform_name = short_name(transform) - shape_name = short_name(shape) - - # A Shape's name can be either {transform}{numSuffix} - # Shape or {transform}Shape{numSuffix} - # Upon renaming nodes in Maya that is - # the pattern Maya will act towards. 
- transform_no_num = transform_name.rstrip("0123456789") - pattern = '^{transform}[0-9]*Shape[0-9]*$'.format( - transform=transform_no_num) - - if re.match(pattern, shape_name): - return True - else: - return False - - @classmethod - def get_invalid(cls, instance): - shapes = cmds.ls(instance, shapes=True, long=True) - return [shape for shape in shapes if not cls._is_valid(shape)] - - def process(self, instance): - """Process all the shape nodes in the instance""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Incorrectly named shapes found: {0}".format(invalid)) - - @classmethod - def repair(cls, instance): - """Process all the shape nodes in the instance""" - for shape in cls.get_invalid(instance): - correct_shape_name = cls._define_default_name(shape) - cmds.rename(shape, correct_shape_name) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_render_stats.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_render_stats.py deleted file mode 100644 index a9c3e861f0..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_render_stats.py +++ /dev/null @@ -1,85 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateShapeRenderStats(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Ensure all render stats are set to the default values.""" - - order = ValidateMeshOrder - families = ['model'] - label = 'Shape Default Render Stats' - actions = [ayon_maya.api.action.SelectInvalidAction, - RepairAction] - - defaults = {'castsShadows': 1, - 'receiveShadows': 1, - 'motionBlur': 1, - 'primaryVisibility': 1, - 'smoothShading': 1, - 'visibleInReflections': 1, - 'visibleInRefractions': 1, - 'doubleSided': 1, - 'opposite': 0} - - @classmethod - def get_invalid(cls, instance): - # It seems the "surfaceShape" and those derived from it have - # `renderStat` attributes. - shapes = cmds.ls(instance, long=True, type='surfaceShape') - invalid = set() - for shape in shapes: - for attr, default_value in cls.defaults.items(): - if cmds.attributeQuery(attr, node=shape, exists=True): - value = cmds.getAttr('{}.{}'.format(shape, attr)) - if value != default_value: - invalid.add(shape) - - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if not invalid: - return - - defaults_str = "\n".join( - "- {}: {}\n".format(key, value) - for key, value in self.defaults.items() - ) - description = ( - "## Shape Default Render Stats\n" - "Shapes are detected with non-default render stats.\n\n" - "To ensure a model's shapes behave like a shape would by default " - "we require the render stats to have not been altered in " - "the published models.\n\n" - "### How to repair?\n" - "You can reset the default values on the shapes by using the " - "repair action." 
- ) - - raise PublishValidationError( - "Shapes with non-default renderStats " - "found: {0}".format(", ".join(sorted(invalid))), - description=description, - detail="The expected default values " - "are:\n\n{}".format(defaults_str) - ) - - @classmethod - def repair(cls, instance): - for shape in cls.get_invalid(instance): - for attr, default_value in cls.defaults.items(): - if cmds.attributeQuery(attr, node=shape, exists=True): - plug = '{0}.{1}'.format(shape, attr) - value = cmds.getAttr(plug) - if value != default_value: - cmds.setAttr(plug, default_value) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_zero.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_zero.py deleted file mode 100644 index 8c11009374..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_shape_zero.py +++ /dev/null @@ -1,99 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateShapeZero(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Shape components may not have any "tweak" values - - To solve this issue, try freezing the shapes. - - """ - - order = ValidateContentsOrder - families = ["model"] - label = "Shape Zero (Freeze)" - actions = [ - ayon_maya.api.action.SelectInvalidAction, - RepairAction - ] - optional = True - - @staticmethod - def get_invalid(instance): - """Returns the invalid shapes in the instance. - - This is the same as checking: - - all(pnt == [0,0,0] for pnt in shape.pnts[:]) - - Returns: - list: Shape with non freezed vertex - - """ - - shapes = cmds.ls(instance, type="shape") - - invalid = [] - for shape in shapes: - if cmds.polyCollapseTweaks(shape, q=True, hasVertexTweaks=True): - invalid.append(shape) - - return invalid - - @classmethod - def repair(cls, instance): - invalid_shapes = cls.get_invalid(instance) - if not invalid_shapes: - return - - with lib.maintained_selection(): - with lib.tool("selectSuperContext"): - for shape in invalid_shapes: - cmds.polyCollapseTweaks(shape) - # cmds.polyCollapseTweaks keeps selecting the geometry - # after each command. When running on many meshes - # after one another this tends to get really heavy - cmds.select(clear=True) - - def process(self, instance): - """Process all the nodes in the instance "objectSet""" - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - title="Shape Component Tweaks", - message="Shapes found with non-zero component tweaks: '{}'" - "".format(", ".join(invalid)), - description=( - "## Shapes found with component tweaks\n" - "Shapes were detected that have component tweaks on their " - "components. Please remove the component tweaks to " - "continue.\n\n" - "### Repair\n" - "The repair action will try to *freeze* the component " - "tweaks into the shapes, which is usually the correct fix " - "if the mesh has no construction history (= has its " - "history deleted)."), - detail=( - "Maya allows to store component tweaks within shape nodes " - "which are applied between its `inMesh` and `outMesh` " - "connections resulting in the output of a shape node " - "differing from the input. 
We usually want to avoid this " - "for published meshes (in particular for Maya scenes) as " - "it can have unintended results when using these meshes " - "as intermediate meshes since it applies positional " - "differences without being visible edits in the node " - "graph.\n\n" - "These tweaks are traditionally stored in the `.pnts` " - "attribute of shapes.") - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_single_assembly.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_single_assembly.py deleted file mode 100644 index 0a760e2acc..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_single_assembly.py +++ /dev/null @@ -1,42 +0,0 @@ -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -class ValidateSingleAssembly(plugin.MayaInstancePlugin): - """Ensure the content of the instance is grouped in a single hierarchy - - The instance must have a single root node containing all the content. - This root node *must* be a top group in the outliner. - - Example outliner: - root_GRP - -- geometry_GRP - -- mesh_GEO - -- controls_GRP - -- control_CTL - - """ - - order = ValidateContentsOrder - families = ['rig'] - label = 'Single Assembly' - - def process(self, instance): - from maya import cmds - - assemblies = cmds.ls(instance, assemblies=True) - - # ensure unique (somehow `maya.cmds.ls` doesn't manage that) - assemblies = set(assemblies) - - if len(assemblies) == 0: - raise PublishValidationError( - "One assembly required for: %s (currently empty?)" % instance - ) - elif len(assemblies) > 1: - raise PublishValidationError( - 'Multiple assemblies found: %s' % assemblies - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_hierarchy.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_hierarchy.py deleted file mode 100644 index 9904fdce22..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_hierarchy.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishXmlValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateSkeletalMeshHierarchy(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates that nodes has common root.""" - - order = ValidateContentsOrder - families = ["skeletalMesh"] - label = "Skeletal Mesh Top Node" - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - geo = instance.data.get("geometry") - joints = instance.data.get("joints") - - joints_parents = cmds.ls(joints, long=True) - geo_parents = cmds.ls(geo, long=True) - - parents_set = { - parent.split("|")[1] for parent in (joints_parents + geo_parents) - } - - self.log.debug(parents_set) - - if len(set(parents_set)) > 2: - raise PublishXmlValidationError( - self, - "Multiple roots on geometry or joints." 
- ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_triangulated.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_triangulated.py deleted file mode 100644 index 7b2b005e9c..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeletalmesh_triangulated.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- - -from ayon_core.pipeline.publish import ( - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api.action import SelectInvalidAction -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateSkeletalMeshTriangulated(plugin.MayaInstancePlugin): - """Validates that the geometry has been triangulated.""" - - order = ValidateContentsOrder - families = ["skeletalMesh"] - label = "Skeletal Mesh Triangulated" - optional = True - actions = [ - SelectInvalidAction, - RepairAction - ] - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "The following objects needs to be triangulated: " - "{}".format(invalid)) - - @classmethod - def get_invalid(cls, instance): - geo = instance.data.get("geometry") - - invalid = [] - - for obj in cmds.listRelatives( - cmds.ls(geo), allDescendents=True, fullPath=True): - n_triangles = cmds.polyEvaluate(obj, triangle=True) - n_faces = cmds.polyEvaluate(obj, face=True) - - if not (isinstance(n_triangles, int) and isinstance(n_faces, int)): - continue - - # We check if the number of triangles is equal to the number of - # faces for each transform node. - # If it is, the object is triangulated. - if cmds.objectType(obj, i="transform") and n_triangles != n_faces: - invalid.append(obj) - - return invalid - - @classmethod - def repair(cls, instance): - for node in cls.get_invalid(instance): - cmds.polyTriangulate(node) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeleton_top_group_hierarchy.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeleton_top_group_hierarchy.py deleted file mode 100644 index 4e0dc38a36..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skeleton_top_group_hierarchy.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -"""Plugin for validating naming conventions.""" -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateSkeletonTopGroupHierarchy(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates top group hierarchy in the SETs - Make sure the object inside the SETs are always top - group of the hierarchy - - """ - order = ValidateContentsOrder + 0.05 - label = "Skeleton Rig Top Group Hierarchy" - families = ["rig.fbx"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = [] - skeleton_mesh_data = instance.data("skeleton_mesh", []) - if skeleton_mesh_data: - invalid = self.get_top_hierarchy(skeleton_mesh_data) - if invalid: - raise PublishValidationError( - "The skeletonMesh_SET includes the object which " - "is not at the top hierarchy: {}".format(invalid)) - - def get_top_hierarchy(self, targets): - targets = cmds.ls(targets, long=True) # ensure long names - non_top_hierarchy_list = [ - target for target in targets if target.count("|") > 2 - ] - return non_top_hierarchy_list diff --git 
a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skinCluster_deformer_set.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_skinCluster_deformer_set.py deleted file mode 100644 index 32015a57bc..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_skinCluster_deformer_set.py +++ /dev/null @@ -1,81 +0,0 @@ -from maya import cmds - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) -from ayon_maya.api import plugin - - -class ValidateSkinclusterDeformerSet(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate skinClusters on meshes have valid member relationships. - - In rare cases it can happen that a mesh has a skinCluster in its history - but it is *not* included in the deformer relationship history. If this is - the case then FBX will not export the skinning. - - """ - - order = ValidateContentsOrder - families = ['fbx'] - label = "Skincluster Deformer Relationships" - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - """Process all the transform nodes in the instance""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - "Invalid skinCluster relationships found on meshes: {0}" - .format(invalid) - ) - - @classmethod - def get_invalid(cls, instance): - - meshes = cmds.ls(instance, type="mesh", noIntermediate=True, long=True) - invalid = list() - - for mesh in meshes: - history = cmds.listHistory(mesh) or [] - skins = cmds.ls(history, type="skinCluster") - - # Ensure at most one skinCluster - assert len(skins) <= 1, "Cannot have more than one skinCluster" - - if skins: - skin = skins[0] - - # Ensure the mesh is also in the skinCluster set - # otherwise the skin will not be exported correctly - # by the FBX Exporter. - deformer_sets = cmds.listSets(object=mesh, type=2) - for deformer_set in deformer_sets: - used_by = cmds.listConnections(deformer_set + ".usedBy", - source=True, - destination=False) - - # Ignore those that don't seem to have a usedBy connection - if not used_by: - continue - - # We have a matching deformer set relationship - if skin in set(used_by): - break - - else: - invalid.append(mesh) - cls.log.warning( - "Mesh has skinCluster in history but is not included " - "in its deformer relationship set: " - "{0} (skinCluster: {1})".format(mesh, skin) - ) - - return invalid diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_step_size.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_step_size.py deleted file mode 100644 index 303885d907..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_step_size.py +++ /dev/null @@ -1,49 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin - - -class ValidateStepSize(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates the step size for the instance is in a valid range. - - For example the `step` size should never be lower or equal to zero. 
- - """ - - order = ValidateContentsOrder - label = 'Step size' - families = ['camera', - 'pointcache', - 'animation'] - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - MIN = 0.01 - MAX = 1.0 - - @classmethod - def get_invalid(cls, instance): - - objset = instance.data['instance_node'] - step = instance.data.get("step", 1.0) - - if step < cls.MIN or step > cls.MAX: - cls.log.warning("Step size is outside of valid range: {0} " - "(valid: {1} to {2})".format(step, - cls.MIN, - cls.MAX)) - return objset - - return [] - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Instance found with invalid step size: {0}".format(invalid)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_naming_suffix.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_naming_suffix.py deleted file mode 100644 index 3a60bc7aef..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_naming_suffix.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- coding: utf-8 -*- -"""Plugin for validating naming conventions.""" -import json - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateTransformNamingSuffix(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates transform suffix based on the type of its children shapes. - - Suffices must be: - - mesh: - _GEO (regular geometry) - _GES (geometry to be smoothed at render) - _GEP (proxy geometry; usually not to be rendered) - _OSD (open subdiv smooth at rendertime) - - nurbsCurve: _CRV - - nurbsSurface: _NRB - - locator: _LOC - - null/group: _GRP - Suffices can also be overridden by project settings. - - .. warning:: - This grabs the first child shape as a reference and doesn't use the - others in the check. - - """ - - order = ValidateContentsOrder - families = ["model"] - optional = True - label = "Suffix Naming Conventions" - actions = [ayon_maya.api.action.SelectInvalidAction] - SUFFIX_NAMING_TABLE = json.dumps({ - "mesh": ["_GEO", "_GES", "_GEP", "_OSD"], - "nurbsCurve": ["_CRV"], - "nurbsSurface": ["_NRB"], - "locator": ["_LOC"], - "group": ["_GRP"] - }) - - ALLOW_IF_NOT_IN_SUFFIX_TABLE = True - - @classmethod - def get_table_for_invalid(cls): - suffix_naming_table = json.loads(cls.SUFFIX_NAMING_TABLE) - ss = [ - " - {}: {}".format(k, ", ".join(v)) - for k, v in suffix_naming_table.items() - ] - return "
".join(ss) - - @staticmethod - def is_valid_name( - node_name, - shape_type, - suffix_naming_table, - allow_if_not_in_suffix_table - ): - """Return whether node's name is correct. - - The correctness for a transform's suffix is dependent on what - `shape_type` it holds. E.g. a transform with a mesh might need and - `_GEO` suffix. - - When `shape_type` is None the transform doesn't have any direct - children shapes. - - Args: - node_name (str): Node name. - shape_type (str): Type of node. - suffix_naming_table (dict): Mapping dict for suffixes. - allow_if_not_in_suffix_table (bool): Default output. - - """ - if shape_type not in suffix_naming_table: - return allow_if_not_in_suffix_table - - suffices = suffix_naming_table[shape_type] - for suffix in suffices: - if node_name.endswith(suffix): - return True - return False - - @classmethod - def get_invalid(cls, instance): - """Get invalid nodes in instance. - - Args: - instance (:class:`pyblish.api.Instance`): published instance. - - """ - transforms = cmds.ls(instance, type="transform", long=True) - - invalid = [] - suffix_naming_table = json.loads(cls.SUFFIX_NAMING_TABLE) - for transform in transforms: - shapes = cmds.listRelatives(transform, - shapes=True, - fullPath=True, - noIntermediate=True) - - shape_type = cmds.nodeType(shapes[0]) if shapes else "group" - if not cls.is_valid_name( - transform, - shape_type, - suffix_naming_table, - cls.ALLOW_IF_NOT_IN_SUFFIX_TABLE - ): - invalid.append(transform) - - return invalid - - def process(self, instance): - """Process all the nodes in the instance. - - Args: - instance (:class:`pyblish.api.Instance`): published instance. - - """ - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - valid = self.get_table_for_invalid() - - names = "
".join( - " - {}".format(node) for node in invalid - ) - valid = valid.replace("\n", "
") - - raise PublishValidationError( - title="Invalid naming suffix", - message="Valid suffixes are:
{0}

" - "Incorrectly named geometry transforms:
{1}" - "".format(valid, names)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_zero.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_zero.py deleted file mode 100644 index e251572c0d..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_transform_zero.py +++ /dev/null @@ -1,91 +0,0 @@ -import inspect - -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateTransformZero(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Transforms can't have any values - - To solve this issue, try freezing the transforms. So long - as the transforms, rotation and scale values are zero, - you're all good. - - """ - - order = ValidateContentsOrder - families = ["model"] - label = "Transform Zero (Freeze)" - actions = [ayon_maya.api.action.SelectInvalidAction] - - _identity = [1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0] - _tolerance = 1e-30 - optional = True - - @classmethod - def get_invalid(cls, instance): - """Returns the invalid transforms in the instance. - - This is the same as checking: - - translate == [0, 0, 0] and rotate == [0, 0, 0] and - scale == [1, 1, 1] and shear == [0, 0, 0] - - .. note:: - This will also catch camera transforms if those - are in the instances. - - Returns: - list: Transforms that are not identity matrix - - """ - - transforms = cmds.ls(instance, type="transform") - - invalid = [] - for transform in transforms: - if ('_LOC' in transform) or ('_loc' in transform): - continue - mat = cmds.xform(transform, q=1, matrix=True, objectSpace=True) - if not all(abs(x - y) < cls._tolerance - for x, y in zip(cls._identity, mat)): - invalid.append(transform) - - return invalid - - def process(self, instance): - """Process all the nodes in the instance "objectSet""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - names = "
".join( - " - {}".format(node) for node in invalid - ) - - raise PublishValidationError( - title="Transform Zero", - description=self.get_description(), - message="The model publish allows no transformations. You must" - " freeze transformations to continue.

" - "Nodes found with transform values:
" - "{0}".format(names)) - - @staticmethod - def get_description(): - return inspect.cleandoc("""### Transform can't have any values - - The model publish allows no transformations. - - You must **freeze transformations** to continue. - - """) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unique_names.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_unique_names.py deleted file mode 100644 index cbe5d2f647..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unique_names.py +++ /dev/null @@ -1,44 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateUniqueNames(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """transform names should be unique - - ie: using cmds.ls(someNodeName) should always return shortname - - """ - - order = ValidateContentsOrder - families = ["model"] - label = "Unique transform name" - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = True - - @staticmethod - def get_invalid(instance): - """Returns the invalid transforms in the instance. - - Returns: - list: Non-unique name transforms. - - """ - - return [tr for tr in cmds.ls(instance, type="transform") - if '|' in tr] - - def process(self, instance): - """Process all the nodes in the instance "objectSet""" - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Nodes found with non-unique names:\n{0}".format(invalid)) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_mesh_triangulated.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_mesh_triangulated.py deleted file mode 100644 index f283150892..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_mesh_triangulated.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateMeshOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateUnrealMeshTriangulated(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate if mesh is made of triangles for Unreal Engine""" - - order = ValidateMeshOrder - families = ["staticMesh"] - label = "Mesh is Triangulated" - actions = [ayon_maya.api.action.SelectInvalidAction] - active = False - - @classmethod - def get_invalid(cls, instance): - invalid = [] - meshes = cmds.ls(instance, type="mesh", long=True) - for mesh in meshes: - faces = cmds.polyEvaluate(mesh, face=True) - tris = cmds.polyEvaluate(mesh, triangle=True) - if faces != tris: - invalid.append(mesh) - - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Found meshes without triangles") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_staticmesh_naming.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_staticmesh_naming.py deleted file mode 100644 index dee6563989..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for correct naming of Static Meshes.""" 
-import re
-
-import ayon_maya.api.action
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishValidationError,
-    ValidateContentsOrder,
-)
-from ayon_maya.api import plugin
-
-
-class ValidateUnrealStaticMeshName(plugin.MayaInstancePlugin,
-                                   OptionalPyblishPluginMixin):
-    """Validate name of Unreal Static Mesh
-
-    Unreal's naming convention states that a staticMesh should start with
-    the `SM` prefix - SM_[Name]_## (e.g. SM_cube_01). These prefixes can be
-    configured in Settings UI. This plugin also validates other types of
-    meshes - collision meshes:
-
-    UBX_[RenderMeshName]*:
-        Boxes are created with the Box objects type in Max or with the
-        Cube polygonal primitive in Maya. You cannot move the vertices
-        around or deform it in any way to make it something other than a
-        rectangular prism, or else it will not work.
-
-    UCP_[RenderMeshName]*:
-        Capsules are created with the Capsule object type. The capsule
-        does not need to have many segments (8 is a good number) at all
-        because it is converted into a true capsule for collision. Like
-        boxes, you should not move the individual vertices around.
-
-    USP_[RenderMeshName]*:
-        Spheres are created with the Sphere object type. The sphere does
-        not need to have many segments (8 is a good number) at all
-        because it is converted into a true sphere for collision. Like
-        boxes, you should not move the individual vertices around.
-
-    UCX_[RenderMeshName]*:
-        Convex objects can be any completely closed convex 3D shape. For
-        example, a box can also be a convex object.
-
-    This validator also checks if collision mesh [RenderMeshName] matches one
-    of SM_[RenderMeshName].
-
-    """
-    optional = True
-    order = ValidateContentsOrder
-    families = ["staticMesh"]
-    label = "Unreal Static Mesh Name"
-    actions = [ayon_maya.api.action.SelectInvalidAction]
-    regex_mesh = r"(?P<renderName>.*)"
-    regex_collision = r"(?P<renderName>.*)"
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        invalid = []
-
-        collision_prefixes = (
-            instance.context.data["project_settings"]
-            ["maya"]
-            ["create"]
-            ["CreateUnrealStaticMesh"]
-            ["collision_prefixes"]
-        )
-
-        if cls.validate_mesh:
-            # compile regex for testing names
-            regex_mesh = "{}{}".format(
-                ("_" + cls.static_mesh_prefix) or "", cls.regex_mesh
-            )
-            sm_r = re.compile(regex_mesh)
-            if not sm_r.match(instance.data.get("productName")):
-                cls.log.error("Mesh doesn't comply with name validation.")
-                return True
-
-        if cls.validate_collision:
-            collision_set = instance.data.get("collisionMembers", None)
-            # soft-fail if there are no collision objects
-            if not collision_set:
-                cls.log.warning("No collision objects to validate.")
-                return False
-
-            regex_collision = "{}{}_(\\d+)".format(
-                "(?P<prefix>({}))_".format(
-                    "|".join("{0}".format(p) for p in collision_prefixes)
-                ) or "", cls.regex_collision
-            )
-
-            cl_r = re.compile(regex_collision)
-
-            folder_name = instance.data["folderEntity"]["name"]
-            mesh_name = "{}{}".format(folder_name,
-                                      instance.data.get("variant", []))
-
-            for obj in collision_set:
-                cl_m = cl_r.match(obj)
-                if not cl_m:
-                    cls.log.error("{} is invalid".format(obj))
-                    invalid.append(obj)
-                else:
-                    expected_collision = "{}_{}".format(
-                        cl_m.group("prefix"),
-                        mesh_name
-                    )
-
-                    if not obj.startswith(expected_collision):
-
-                        cls.log.error(
-                            "Collision object name doesn't match "
-                            "static mesh name"
-                        )
-                        cls.log.error("{}_{} != {}_{}*".format(
-                            cl_m.group("prefix"),
-                            cl_m.group("renderName"),
-                            cl_m.group("prefix"),
-                            mesh_name,
-                        ))
-                        invalid.append(obj)
-
-        return invalid
-
-    def
process(self, instance): - if not self.is_active(instance.data): - return - - if not self.validate_mesh and not self.validate_collision: - self.log.debug("Validation of both mesh and collision names" - "is disabled.") - return - - if not instance.data.get("collisionMembers", None): - self.log.debug("There are no collision objects to validate") - return - - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError("Model naming is invalid. See log.") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_up_axis.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_up_axis.py deleted file mode 100644 index d970eb8020..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_unreal_up_axis.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateUnrealUpAxis(plugin.MayaContextPlugin, - OptionalPyblishPluginMixin): - """Validate if Z is set as up axis in Maya""" - - optional = True - active = False - order = ValidateContentsOrder - families = ["staticMesh"] - label = "Unreal Up-Axis check" - actions = [RepairAction] - - def process(self, context): - if not self.is_active(context.data): - return - - if cmds.upAxis(q=True, axis=True) != "z": - raise PublishValidationError( - "Invalid axis set as up axis" - ) - - @classmethod - def repair(cls, instance): - cmds.upAxis(axis="z", rotateView=True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_visible_only.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_visible_only.py deleted file mode 100644 index b694b890fc..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_visible_only.py +++ /dev/null @@ -1,57 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api.lib import iter_visible_nodes_in_range -from ayon_maya.api import plugin - - -class ValidateAlembicVisibleOnly(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validates at least a single node is visible in frame range. - - This validation only validates if the `visibleOnly` flag is enabled - on the instance - otherwise the validation is skipped. - - """ - order = ValidateContentsOrder + 0.05 - label = "Alembic Visible Only" - families = ["pointcache", "animation"] - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - if not instance.data.get("visibleOnly", False): - self.log.debug("Visible only is disabled. Validation skipped..") - return - - invalid = self.get_invalid(instance) - if invalid: - start, end = self.get_frame_range(instance) - raise PublishValidationError( - f"No visible nodes found in frame range {start}-{end}." 
- ) - - @classmethod - def get_invalid(cls, instance): - - if instance.data["productType"] == "animation": - # Special behavior to use the nodes in out_SET - nodes = instance.data["out_hierarchy"] - else: - nodes = instance[:] - - start, end = cls.get_frame_range(instance) - if not any(iter_visible_nodes_in_range(nodes, start, end)): - # Return the nodes we have considered so the user can identify - # them with the select invalid action - return nodes - - @staticmethod - def get_frame_range(instance): - data = instance.data - return data["frameStartHandle"], data["frameEndHandle"] diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray.py deleted file mode 100644 index 7cf064f993..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray.py +++ /dev/null @@ -1,17 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import PublishValidationError -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateVray(plugin.MayaInstancePlugin): - """Validate general Vray setup.""" - - order = pyblish.api.ValidatorOrder - label = 'VRay' - families = ["vrayproxy"] - - def process(self, instance): - # Validate vray plugin is loaded. - if not cmds.pluginInfo("vrayformaya", query=True, loaded=True): - raise PublishValidationError("Vray plugin is not loaded.") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_distributed_rendering.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_distributed_rendering.py deleted file mode 100644 index 0338798e3f..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_distributed_rendering.py +++ /dev/null @@ -1,67 +0,0 @@ -from ayon_core.pipeline.publish import ( - KnownPublishError, - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, - ValidateContentsOrder, -) -from ayon_maya.api import lib -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateVRayDistributedRendering(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate V-Ray Distributed Rendering is ignored in batch mode. - - Whenever Distributed Rendering is enabled for V-Ray in the render settings - ensure that the "Ignore in batch mode" is enabled so the submitted job - won't try to render each frame with all machines resulting in faulty - errors. - - """ - - order = ValidateContentsOrder - label = "VRay Distributed Rendering" - families = ["renderlayer"] - actions = [RepairAction] - optional = False - - # V-Ray attribute names - enabled_attr = "vraySettings.sys_distributed_rendering_on" - ignored_attr = "vraySettings.sys_distributed_rendering_ignore_batch" - - def process(self, instance): - if not self.is_active(instance.data): - return - if instance.data.get("renderer") != "vray": - # If not V-Ray, ignore - return - - vray_settings = cmds.ls("vraySettings", type="VRaySettingsNode") - if not vray_settings: - raise KnownPublishError( - "Please ensure a VRay Settings Node is present" - ) - - renderlayer = instance.data['renderlayer'] - - if not lib.get_attr_in_layer(self.enabled_attr, layer=renderlayer): - # If not distributed rendering enabled, ignore.. 
- return - - # If distributed rendering is enabled but it is *not* set to ignore - # during batch mode we invalidate the instance - if not lib.get_attr_in_layer(self.ignored_attr, layer=renderlayer): - raise PublishValidationError( - "Renderlayer has distributed rendering enabled " - "but is not set to ignore in batch mode.") - - @classmethod - def repair(cls, instance): - - renderlayer = instance.data.get("renderlayer") - with lib.renderlayer(renderlayer): - cls.log.debug("Enabling Distributed Rendering " - "ignore in batch mode..") - cmds.setAttr(cls.ignored_attr, True) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_referenced_aovs.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_referenced_aovs.py deleted file mode 100644 index a330866e9b..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_referenced_aovs.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validate if there are AOVs pulled from references.""" -import types - -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairContextAction, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateVrayReferencedAOVs(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate whether the V-Ray Render Elements (AOVs) include references. - - This will check if there are AOVs pulled from references. If - `Vray Use Referenced Aovs` is checked on render instance, u must add those - manually to Render Elements as Pype will expect them to be rendered. - - """ - - order = pyblish.api.ValidatorOrder - label = 'VRay Referenced AOVs' - families = ['renderlayer'] - actions = [RepairContextAction] - optional = False - - def process(self, instance): - """Plugin main entry point.""" - if not self.is_active(instance.data): - return - if instance.data.get("renderer") != "vray": - # If not V-Ray ignore.. - return - - ref_aovs = cmds.ls( - type=["VRayRenderElement", "VRayRenderElementSet"], - referencedNodes=True) - ref_aovs_enabled = ValidateVrayReferencedAOVs.maya_is_true( - cmds.getAttr("vraySettings.relements_usereferenced")) - - if not instance.data.get("vrayUseReferencedAovs"): - if ref_aovs_enabled and ref_aovs: - self.log.warning(( - "Referenced AOVs are enabled in Vray " - "Render Settings and are detected in scene, but " - "AYON render instance option for referenced AOVs is " - "disabled. Those AOVs will be rendered but not published " - "by Pype." - )) - self.log.warning(", ".join(ref_aovs)) - else: - if not ref_aovs: - self.log.warning(( - "Use of referenced AOVs enabled but there are none " - "in the scene." - )) - if not ref_aovs_enabled: - self.log.error(( - "'Use referenced' not enabled in Vray Render Settings." - )) - raise PublishValidationError("Invalid render settings") - - @classmethod - def repair(cls, context): - """Repair action.""" - vray_settings = cmds.ls(type="VRaySettingsNode") - if not vray_settings: - node = cmds.createNode("VRaySettingsNode") - else: - node = vray_settings[0] - - cmds.setAttr("{}.relements_usereferenced".format(node), True) - - @staticmethod - def maya_is_true(attr_val): - """Whether a Maya attr evaluates to True. - - When querying an attribute value from an ambiguous object the - Maya API will return a list of values, which need to be properly - handled to evaluate properly. - - Args: - attr_val (mixed): Maya attribute to be evaluated as bool. - - Returns: - bool: cast Maya attribute to Pythons boolean value. 
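
        Example (behavior sketch matching the implementation below;
        values are illustrative):
            >>> ValidateVrayReferencedAOVs.maya_is_true(True)
            True
            >>> ValidateVrayReferencedAOVs.maya_is_true([False, False, True])
            True
            >>> ValidateVrayReferencedAOVs.maya_is_true(0)
            False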
- - """ - if isinstance(attr_val, bool): - return attr_val - elif isinstance(attr_val, (list, types.GeneratorType)): - return any(attr_val) - else: - return bool(attr_val) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_translator_settings.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_translator_settings.py deleted file mode 100644 index a4b34db2a1..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vray_translator_settings.py +++ /dev/null @@ -1,104 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validate VRay Translator settings.""" -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairContextAction, - ValidateContentsOrder, - context_plugin_should_run, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateVRayTranslatorEnabled(plugin.MayaContextPlugin, - OptionalPyblishPluginMixin): - """Validate VRay Translator settings for extracting vrscenes.""" - - order = ValidateContentsOrder - label = "VRay Translator Settings" - families = ["vrayscene_layer"] - actions = [RepairContextAction] - optional = False - - def process(self, context): - """Plugin entry point.""" - if not self.is_active(context.data): - return - # Workaround bug pyblish-base#250 - if not context_plugin_should_run(self, context): - return - - invalid = self.get_invalid(context) - if invalid: - raise PublishValidationError( - message="Found invalid VRay Translator settings", - title=self.label - ) - - @classmethod - def get_invalid(cls, context): - """Get invalid instances.""" - invalid = False - - # Get vraySettings node - vray_settings = cmds.ls(type="VRaySettingsNode") - if not vray_settings: - raise PublishValidationError( - "Please ensure a VRay Settings Node is present", - title=cls.label - ) - - node = vray_settings[0] - - if cmds.setAttr("{}.vrscene_render_on".format(node)): - cls.log.error( - "Render is enabled, for export it should be disabled") - invalid = True - - if not cmds.getAttr("{}.vrscene_on".format(node)): - cls.log.error("Export vrscene not enabled") - invalid = True - - for instance in context: - if "vrayscene_layer" not in instance.data.get("families"): - continue - - if instance.data.get("vraySceneMultipleFiles"): - if not cmds.getAttr("{}.misc_eachFrameInFile".format(node)): - cls.log.error("Each Frame in File not enabled") - invalid = True - else: - if cmds.getAttr("{}.misc_eachFrameInFile".format(node)): - cls.log.error("Each Frame in File is enabled") - invalid = True - - vrscene_filename = cmds.getAttr("{}.vrscene_filename".format(node)) - if vrscene_filename != "vrayscene///": - cls.log.error("Template for file name is wrong") - invalid = True - - return invalid - - @classmethod - def repair(cls, context): - """Repair invalid settings.""" - vray_settings = cmds.ls(type="VRaySettingsNode") - if not vray_settings: - node = cmds.createNode("VRaySettingsNode") - else: - node = vray_settings[0] - - cmds.setAttr("{}.vrscene_render_on".format(node), False) - cmds.setAttr("{}.vrscene_on".format(node), True) - for instance in context: - if "vrayscene" not in instance.data.get("families"): - continue - - if instance.data.get("vraySceneMultipleFiles"): - cmds.setAttr("{}.misc_eachFrameInFile".format(node), True) - else: - cmds.setAttr("{}.misc_eachFrameInFile".format(node), False) - cmds.setAttr("{}.vrscene_filename".format(node), - "vrayscene///", - type="string") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy.py 
b/server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy.py deleted file mode 100644 index 67d656b183..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy.py +++ /dev/null @@ -1,36 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_maya.api import plugin - - -class ValidateVrayProxy(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - - order = pyblish.api.ValidatorOrder - label = "VRay Proxy Settings" - families = ["vrayproxy"] - optional = False - - def process(self, instance): - data = instance.data - if not self.is_active(data): - return - if not data["setMembers"]: - raise PublishValidationError( - f"Instance '{instance.name}' is empty." - ) - - if data["animation"]: - if data["frameEnd"] < data["frameStart"]: - raise PublishValidationError( - "End frame is smaller than start frame" - ) - - if not data["vrmesh"] and not data["alembic"]: - raise PublishValidationError( - "Both vrmesh and alembic are off. Needs at least one to" - " publish." - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy_members.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy_members.py deleted file mode 100644 index cfb17cda55..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_vrayproxy_members.py +++ /dev/null @@ -1,42 +0,0 @@ -import ayon_maya.api.action -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateVrayProxyMembers(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate whether the V-Ray Proxy instance has shape members""" - - order = pyblish.api.ValidatorOrder - label = 'VRay Proxy Members' - families = ['vrayproxy'] - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError("'%s' is invalid VRay Proxy for " - "export!" % instance.name) - - @classmethod - def get_invalid(cls, instance): - - shapes = cmds.ls(instance, - shapes=True, - noIntermediate=True, - long=True) - - if not shapes: - cls.log.error("'%s' contains no shapes." % instance.name) - - # Return the instance itself - return [instance.name] diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_xgen.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_xgen.py deleted file mode 100644 index 050165db01..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_xgen.py +++ /dev/null @@ -1,69 +0,0 @@ -import json - -import maya.cmds as cmds -import pyblish.api -import xgenm -from ayon_core.pipeline.publish import PublishValidationError -from ayon_maya.api import plugin - - -class ValidateXgen(plugin.MayaInstancePlugin): - """Validate Xgen data.""" - - label = "Validate Xgen" - order = pyblish.api.ValidatorOrder - families = ["xgen"] - - def process(self, instance): - set_members = instance.data.get("setMembers") - - # Only 1 collection/node per instance. - if len(set_members) != 1: - raise PublishValidationError( - "Only one collection per instance is allowed." - " Found:\n{}".format(set_members) - ) - - # Only xgen palette node is allowed. 
- node_type = cmds.nodeType(set_members[0]) - if node_type != "xgmPalette": - raise PublishValidationError( - "Only node of type \"xgmPalette\" are allowed. Referred to as" - " \"collection\" in the Maya UI." - " Node type found: {}".format(node_type) - ) - - # Can't have inactive modifiers in collection cause Xgen will try and - # look for them when loading. - palette = instance.data["xgmPalette"].replace("|", "") - inactive_modifiers = {} - for description in instance.data["xgmDescriptions"]: - description = description.split("|")[-2] - modifier_names = xgenm.fxModules(palette, description) - for name in modifier_names: - attr = xgenm.getAttr("active", palette, description, name) - # Attribute value are lowercase strings of false/true. - if attr == "false": - try: - inactive_modifiers[description].append(name) - except KeyError: - inactive_modifiers[description] = [name] - - if inactive_modifiers: - raise PublishValidationError( - "There are inactive modifiers on the collection. " - "Please delete these:\n{}".format( - json.dumps(inactive_modifiers, indent=4, sort_keys=True) - ) - ) - - # We need a namespace else there will be a naming conflict when - # extracting because of stripping namespaces and parenting to world. - node_names = [instance.data["xgmPalette"]] - node_names.extend(instance.data["xgenConnections"]) - non_namespaced_nodes = [n for n in node_names if ":" not in n] - if non_namespaced_nodes: - raise PublishValidationError( - "Could not find namespace on {}. Namespace is required for" - " xgen publishing.".format(non_namespaced_nodes) - ) diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_renderscript_callbacks.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_renderscript_callbacks.py deleted file mode 100644 index ce6410deaa..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_renderscript_callbacks.py +++ /dev/null @@ -1,122 +0,0 @@ -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateYetiRenderScriptCallbacks(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Check if the render script callbacks will be used during the rendering - - In order to ensure the render tasks are executed properly we need to check - if the pre and post render callbacks are actually used. - - For example: - Yeti is not loaded but its callback scripts are still set in the - render settings. This will cause an error because Maya tries to find - and execute the callbacks. 
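
    A quick way to inspect what is currently set (illustrative snippet;
    these are the same attributes the checks below read):

        from maya import cmds
        print(cmds.getAttr("defaultRenderGlobals.preMel"))
        print(cmds.getAttr("defaultRenderGlobals.postMel"))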
- - Developer note: - The pre and post render callbacks cannot be overridden - - """ - - order = ValidateContentsOrder - label = "Yeti Render Script Callbacks" - families = ["renderlayer"] - optional = False - - # Settings per renderer - callbacks = { - "vray": { - "pre": "catch(`pgYetiVRayPreRender`)", - "post": "catch(`pgYetiVRayPostRender`)" - }, - "arnold": { - "pre": "pgYetiPreRender" - } - } - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - f"Invalid render callbacks found for '{instance.name}'.") - - @classmethod - def get_invalid(cls, instance): - - yeti_loaded = cmds.pluginInfo("pgYetiMaya", query=True, loaded=True) - - if not yeti_loaded and not cmds.ls(type="pgYetiMaya"): - # The yeti plug-in is available and loaded so at - # this point we don't really care whether the scene - # has any yeti callback set or not since if the callback - # is there it wouldn't error and if it weren't then - # nothing happens because there are no yeti nodes. - cls.log.debug( - "Yeti is loaded but no yeti nodes were found. " - "Callback validation skipped.." - ) - return False - - renderer = instance.data["renderer"] - if renderer == "redshift": - cls.log.debug("Redshift ignores any pre and post render callbacks") - return False - - callback_lookup = cls.callbacks.get(renderer, {}) - if not callback_lookup: - cls.log.warning("Renderer '%s' is not supported in this plugin" - % renderer) - return False - - pre_mel = cmds.getAttr("defaultRenderGlobals.preMel") or "" - post_mel = cmds.getAttr("defaultRenderGlobals.postMel") or "" - - if pre_mel.strip(): - cls.log.debug("Found pre mel: `%s`" % pre_mel) - - if post_mel.strip(): - cls.log.debug("Found post mel: `%s`" % post_mel) - - # Strip callbacks and turn into a set for quick lookup - pre_callbacks = {cmd.strip() for cmd in pre_mel.split(";")} - post_callbacks = {cmd.strip() for cmd in post_mel.split(";")} - - pre_script = callback_lookup.get("pre", "") - post_script = callback_lookup.get("post", "") - - # If Yeti is not loaded - invalid = False - if not yeti_loaded: - if pre_script and pre_script in pre_callbacks: - cls.log.error("Found pre render callback '%s' which is not " - "uses!" % pre_script) - invalid = True - - if post_script and post_script in post_callbacks: - cls.log.error("Found post render callback '%s which is " - "not used!" 
% post_script) - invalid = True - - # If Yeti is loaded - else: - if pre_script and pre_script not in pre_callbacks: - cls.log.error( - "Could not find required pre render callback " - "`%s`" % pre_script) - invalid = True - - if post_script and post_script not in post_callbacks: - cls.log.error( - "Could not find required post render callback" - " `%s`" % post_script) - invalid = True - - return invalid diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_cache_state.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_cache_state.py deleted file mode 100644 index a7f272c1ec..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_cache_state.py +++ /dev/null @@ -1,72 +0,0 @@ -import inspect - -import ayon_maya.api.action -import maya.cmds as cmds -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction, -) -from ayon_maya.api import plugin - - -class ValidateYetiRigCacheState(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate the I/O attributes of the node - - Every pgYetiMaya cache node per instance should have: - 1. Input Mode is set to `None` - 2. Input Cache File Name is empty - - """ - - order = pyblish.api.ValidatorOrder - label = "Yeti Rig Cache State" - families = ["yetiRig"] - actions = [RepairAction, - ayon_maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Nodes have incorrect I/O settings", - description=inspect.getdoc(self) - ) - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - yeti_nodes = cmds.ls(instance, type="pgYetiMaya") - for node in yeti_nodes: - # Check reading state - state = cmds.getAttr("%s.fileMode" % node) - if state == 1: - cls.log.error("Node `%s` is set to mode `cache`" % node) - invalid.append(node) - continue - - # Check reading state - has_cache = cmds.getAttr("%s.cacheFileName" % node) - if has_cache: - cls.log.error("Node `%s` has a cache file set" % node) - invalid.append(node) - continue - - return invalid - - @classmethod - def repair(cls, instance): - """Repair all errors""" - - # Create set to ensure all nodes only pass once - invalid = cls.get_invalid(instance) - for node in invalid: - cmds.setAttr("%s.fileMode" % node, 0) - cmds.setAttr("%s.cacheFileName" % node, "", type="string") diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_input_in_instance.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_input_in_instance.py deleted file mode 100644 index 8c258b4455..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_input_in_instance.py +++ /dev/null @@ -1,49 +0,0 @@ -import ayon_maya.api.action -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - ValidateContentsOrder, -) -from ayon_maya.api import plugin -from maya import cmds - - -class ValidateYetiRigInputShapesInInstance(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate if all input nodes are part of the instance's hierarchy""" - - order = ValidateContentsOrder - families = ["yetiRig"] - label = "Yeti Rig Input Shapes In Instance" - actions = [ayon_maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): 
- return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Yeti Rig has invalid input meshes") - - @classmethod - def get_invalid(cls, instance): - - input_set = next((i for i in instance if i == "input_SET"), None) - assert input_set, "Current %s instance has no `input_SET`" % instance - - # Get all children, we do not care about intermediates - input_nodes = cmds.ls(cmds.sets(input_set, query=True), long=True) - dag = cmds.ls(input_nodes, dag=True, long=True) - shapes = cmds.ls(dag, long=True, shapes=True, noIntermediate=True) - - # Allow publish without input meshes. - if not shapes: - cls.log.debug("Found no input meshes for %s, skipping ..." - % instance) - return [] - - # check if input node is part of groomRig instance - instance_lookup = set(instance[:]) - invalid = [s for s in shapes if s not in instance_lookup] - - return invalid diff --git a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_settings.py b/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_settings.py deleted file mode 100644 index fa0836e0ef..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/publish/validate_yeti_rig_settings.py +++ /dev/null @@ -1,61 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, -) -from ayon_maya.api import plugin - - -class ValidateYetiRigSettings(plugin.MayaInstancePlugin, - OptionalPyblishPluginMixin): - """Validate Yeti Rig Settings have collected input connections. - - The input connections are collected for the nodes in the `input_SET`. - When no input connections are found a warning is logged but it is allowed - to pass validation. - - """ - - order = pyblish.api.ValidatorOrder - label = "Yeti Rig Settings" - families = ["yetiRig"] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Detected invalid Yeti Rig data. (See log) " - "Tip: Save the scene")) - - @classmethod - def get_invalid(cls, instance): - - rigsettings = instance.data.get("rigsettings", None) - if rigsettings is None: - cls.log.error("MAJOR ERROR: No rig settings found!") - return True - - # Get inputs - inputs = rigsettings.get("inputs", []) - if not inputs: - # Empty rig settings dictionary - cls.log.warning("No rig inputs found. 
This can happen when " - "the rig has no inputs from outside the rig.") - return False - - for input in inputs: - source_id = input["sourceID"] - if source_id is None: - cls.log.error("Discovered source with 'None' as ID, please " - "check if the input shape has a cbId") - return True - - destination_id = input["destinationID"] - if destination_id is None: - cls.log.error("Discovered None as destination ID value") - return True - - return False diff --git a/server_addon/maya/client/ayon_maya/plugins/workfile_build/assign_look_placeholder.py b/server_addon/maya/client/ayon_maya/plugins/workfile_build/assign_look_placeholder.py deleted file mode 100644 index aaecdd78b9..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/workfile_build/assign_look_placeholder.py +++ /dev/null @@ -1,128 +0,0 @@ -from maya import cmds - -from ayon_core.lib import ( - UISeparatorDef, - UILabelDef, - TextDef, - BoolDef -) -from ayon_core.lib.events import weakref_partial -from ayon_maya.api.workfile_template_builder import MayaPlaceholderPlugin -from ayon_maya.api.lib import ( - get_all_children, - assign_look, -) - - -class AssignLookPlaceholderPlugin(MayaPlaceholderPlugin): - """Assign a look product to members of the placeholder set. - - Creates an objectSet. Any members will get the look assigned with the given - product name if it exists. - - Any containers loaded from other template placeholders will get the look - assigned to their loaded containers. - - """ - - identifier = "maya.assignlook" - label = "Assign Look" - - def get_placeholder_options(self, options=None): - options = options or {} - return [ - UISeparatorDef(), - UILabelDef(label="Description"), - UISeparatorDef(), - UILabelDef( - label=( - "Creates an objectSet. Any members will get the look\n" - "assigned with the given product name if it exists.\n\n" - "Any containers loaded from other template placeholders\n" - "will get the look assigned to their loaded containers." 
- "" - ) - ), - UISeparatorDef(), - UILabelDef(label="Settings"), - UISeparatorDef(), - TextDef( - "product_name", - label="Product Name", - tooltip="Look product to assign to containers loaded by " - "contained placeholders", - multiline=False, - default=options.get("product_name", "lookMain") - ), - BoolDef( - "recurse", - label="Recursive", - tooltip="Assign look also to potential sub containers / " - "placeholders loaded from the load placeholder.\n" - "This will make sure that any placeholder contained " - "that itself loaded new geometry will recursively " - "also get the look assignment triggered.", - default=options.get("recurse", False) - ), - ] - - def create_placeholder(self, placeholder_data): - placeholder_data["plugin_identifier"] = self.identifier - - # Create maya objectSet on selection - selection = cmds.ls(selection=True, long=True) - product_name = placeholder_data["product_name"] - name = "AssignLook_{}".format(product_name) - node = cmds.sets(selection, name=name) - - self.imprint(node, placeholder_data) - - def populate_placeholder(self, placeholder): - callback = weakref_partial(self.assign_look, placeholder) - self.builder.add_on_depth_processed_callback( - callback, order=placeholder.order) - - # If placeholder should be deleted, delete it after finish - if not placeholder.data.get("keep_placeholder", True): - delete_callback = weakref_partial(self.delete_placeholder, - placeholder) - self.builder.add_on_finished_callback( - delete_callback, order=placeholder.order) - - def assign_look(self, placeholder): - if placeholder.data.get("finished", False): - # If not recursive we mark it finished after the first depth - # iteration - otherwise run it again to find any new members - return - - product_name = placeholder.data["product_name"] - assert product_name, "Must have defined look product name to assign" - - members = cmds.ls( - cmds.sets(placeholder.scene_identifier, query=True), long=True - ) - if not members: - return - - # Allow any children of members in the set to get assignments, - # e.g. when a group is included there. Whenever a load placeholder - # finishes it also adds loaded content into the object set the - # placeholder was in, so this will also assign to loaded content - # during this build. 
- assign_nodes = set(members) - assign_nodes.update(get_all_children(members)) - - processed = placeholder.data.setdefault("processed", set()) - assign_nodes.difference_update(processed) - processed.update(assign_nodes) - - if assign_nodes: - self.log.info( - "Assigning look {} for placeholder: {}".format(product_name, - placeholder) - ) - assign_nodes = list(assign_nodes) - assign_look(assign_nodes, product_name=product_name) - - if not placeholder.data.get("recurse", False): - placeholder.data["finished"] = True diff --git a/server_addon/maya/client/ayon_maya/plugins/workfile_build/load_placeholder.py b/server_addon/maya/client/ayon_maya/plugins/workfile_build/load_placeholder.py deleted file mode 100644 index 6cf38e591a..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/workfile_build/load_placeholder.py +++ /dev/null @@ -1,132 +0,0 @@ -from maya import cmds - -from ayon_core.pipeline.workfile.workfile_template_builder import ( - PlaceholderLoadMixin, - LoadPlaceholderItem -) -from ayon_maya.api.lib import ( - get_container_transforms, - get_node_parent, - get_node_index_under_parent -) -from ayon_maya.api.workfile_template_builder import ( - MayaPlaceholderPlugin, -) - - -class MayaPlaceholderLoadPlugin(MayaPlaceholderPlugin, PlaceholderLoadMixin): - identifier = "maya.load" - label = "Maya load" - - item_class = LoadPlaceholderItem - - def _create_placeholder_name(self, placeholder_data): - - # Split builder type: context_assets, linked_assets, all_assets - prefix, suffix = placeholder_data["builder_type"].split("_", 1) - parts = [prefix] - - # add family if any - placeholder_product_type = placeholder_data.get("product_type") - if placeholder_product_type is None: - placeholder_product_type = placeholder_data.get("family") - - if placeholder_product_type: - parts.append(placeholder_product_type) - - # add loader arguments if any - loader_args = placeholder_data["loader_args"] - if loader_args: - loader_args = eval(loader_args) - for value in loader_args.values(): - parts.append(str(value)) - - parts.append(suffix) - placeholder_name = "_".join(parts) - - return placeholder_name.capitalize() - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" - ) - if loaded_representation_ids is None: - try: - containers = cmds.sets("AVALON_CONTAINERS", q=True) - except ValueError: - containers = [] - - loaded_representation_ids = { - cmds.getAttr(container + ".representation") - for container in containers - } - self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids - ) - return loaded_representation_ids - - def populate_placeholder(self, placeholder): - self.populate_load_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - repre_ids = self._get_loaded_repre_ids() - self.populate_load_placeholder(placeholder, repre_ids) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def load_succeed(self, placeholder, container): - self._parent_in_hierarchy(placeholder, container) - - def _parent_in_hierarchy(self, placeholder, container): - """Parent loaded container to placeholder's parent. 
- - ie : Set loaded content as placeholder's sibling - - Args: - container (str): Placeholder loaded containers - """ - - if not container: - return - - # TODO: This currently returns only a single root but a loaded scene - # could technically load more than a single root - container_root = get_container_transforms(container, root=True) - - # Bugfix: The get_container_transforms does not recognize the load - # reference group currently - # TODO: Remove this when it does - parent = get_node_parent(container_root) - if parent: - container_root = parent - roots = [container_root] - - # Add the loaded roots to the holding sets if they exist - holding_sets = cmds.listSets(object=placeholder.scene_identifier) or [] - for holding_set in holding_sets: - cmds.sets(roots, forceElement=holding_set) - - # Parent the roots to the place of the placeholder locator and match - # its matrix - placeholder_form = cmds.xform( - placeholder.scene_identifier, - query=True, - matrix=True, - worldSpace=True - ) - scene_parent = get_node_parent(placeholder.scene_identifier) - for node in set(roots): - cmds.xform(node, matrix=placeholder_form, worldSpace=True) - - if scene_parent != get_node_parent(node): - if scene_parent: - node = cmds.parent(node, scene_parent)[0] - else: - node = cmds.parent(node, world=True)[0] - - # Move loaded nodes in index order next to their placeholder node - cmds.reorder(node, back=True) - index = get_node_index_under_parent(placeholder.scene_identifier) - cmds.reorder(node, front=True) - cmds.reorder(node, relative=index + 1) diff --git a/server_addon/maya/client/ayon_maya/plugins/workfile_build/script_placeholder.py b/server_addon/maya/client/ayon_maya/plugins/workfile_build/script_placeholder.py deleted file mode 100644 index ff54306220..0000000000 --- a/server_addon/maya/client/ayon_maya/plugins/workfile_build/script_placeholder.py +++ /dev/null @@ -1,201 +0,0 @@ -from maya import cmds - -from ayon_maya.api.workfile_template_builder import ( - MayaPlaceholderPlugin -) -from ayon_core.lib import NumberDef, TextDef, EnumDef -from ayon_core.lib.events import weakref_partial - - -EXAMPLE_SCRIPT = """ -# Access maya commands -from maya import cmds - -# Access the placeholder node -placeholder_node = placeholder.scene_identifier - -# Access the event callback -if event is None: - print(f"Populating {placeholder}") -else: - if event.topic == "template.depth_processed": - print(f"Processed depth: {event.get('depth')}") - elif event.topic == "template.finished": - print("Build finished.") -""".strip() - - -class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin): - """Execute a script at the given `order` during workfile build. - - This is a very low-level placeholder to run Python scripts at a given - point in time during the workfile template build. - - It can create either a locator or an objectSet as placeholder node. - It defaults to an objectSet, since allowing to run on e.g. other - placeholder node members can be useful, e.g. using: - - >>> members = cmds.sets(placeholder.scene_identifier, query=True) - - """ - - identifier = "maya.runscript" - label = "Run Python Script" - - use_selection_as_parent = False - - def get_placeholder_options(self, options=None): - options = options or {} - return [ - NumberDef( - "order", - label="Order", - default=options.get("order") or 0, - decimals=0, - minimum=0, - maximum=999, - tooltip=( - "Order" - "\nOrder defines asset loading priority (0 to 999)" - "\nPriority rule is : \"lowest is first to load\"." 
- ) - ), - TextDef( - "prepare_script", - label="Run at\nprepare", - tooltip="Run before populate at prepare order", - multiline=True, - default=options.get("prepare_script", "") - ), - TextDef( - "populate_script", - label="Run at\npopulate", - tooltip="Run script at populate node order
" - "This is the default behavior", - multiline=True, - default=options.get("populate_script", EXAMPLE_SCRIPT) - ), - TextDef( - "depth_processed_script", - label="Run after\ndepth\niteration", - tooltip="Run script after every build depth iteration", - multiline=True, - default=options.get("depth_processed_script", "") - ), - TextDef( - "finished_script", - label="Run after\nbuild", - tooltip=( - "Run script at build finished.
" - "Note: this even runs if other placeholders had " - "errors during the build" - ), - multiline=True, - default=options.get("finished_script", "") - ), - EnumDef( - "create_nodetype", - label="Nodetype", - items={ - "spaceLocator": "Locator", - "objectSet": "ObjectSet" - }, - tooltip=( - "The placeholder's node type to be created.
" - "Note this only works on create, not on update" - ), - default=options.get("create_nodetype", "objectSet") - ), - ] - - def create_placeholder(self, placeholder_data): - nodetype = placeholder_data.get("create_nodetype", "objectSet") - - if nodetype == "spaceLocator": - super(MayaPlaceholderScriptPlugin, self).create_placeholder( - placeholder_data - ) - elif nodetype == "objectSet": - placeholder_data["plugin_identifier"] = self.identifier - - # Create maya objectSet on selection - selection = cmds.ls(selection=True, long=True) - name = self._create_placeholder_name(placeholder_data) - node = cmds.sets(selection, name=name) - - self.imprint(node, placeholder_data) - - def prepare_placeholders(self, placeholders): - super(MayaPlaceholderScriptPlugin, self).prepare_placeholders( - placeholders - ) - for placeholder in placeholders: - prepare_script = placeholder.data.get("prepare_script") - if not prepare_script: - continue - - self.run_script(placeholder, prepare_script) - - def populate_placeholder(self, placeholder): - - populate_script = placeholder.data.get("populate_script") - depth_script = placeholder.data.get("depth_processed_script") - finished_script = placeholder.data.get("finished_script") - - # Run now - if populate_script: - self.run_script(placeholder, populate_script) - - if not any([depth_script, finished_script]): - # No callback scripts to run - if not placeholder.data.get("keep_placeholder", True): - self.delete_placeholder(placeholder) - return - - # Run at each depth processed - if depth_script: - callback = weakref_partial( - self.run_script, placeholder, depth_script) - self.builder.add_on_depth_processed_callback( - callback, order=placeholder.order) - - # Run at build finish - if finished_script: - callback = weakref_partial( - self.run_script, placeholder, finished_script) - self.builder.add_on_finished_callback( - callback, order=placeholder.order) - - # If placeholder should be deleted, delete it after finish so - # the scripts have access to it up to the last run - if not placeholder.data.get("keep_placeholder", True): - delete_callback = weakref_partial( - self.delete_placeholder, placeholder) - self.builder.add_on_finished_callback( - delete_callback, order=placeholder.order + 1) - - def run_script(self, placeholder, script, event=None): - """Run script - - Even though `placeholder` is an unused arguments by exposing it as - an input argument it means it makes it available through - globals()/locals() in the `exec` call, giving the script access - to the placeholder. - - For example: - >>> node = placeholder.scene_identifier - - In the case the script is running at a callback level (not during - populate) then it has access to the `event` as well, otherwise the - value is None if it runs during `populate_placeholder` directly. 
- - For example adding this as the callback script: - >>> if event is not None: - >>> if event.topic == "on_depth_processed": - >>> print(f"Processed depth: {event.get('depth')}") - >>> elif event.topic == "on_finished": - >>> print("Build finished.") - - """ - self.log.debug(f"Running script at event: {event}") - exec(script, locals()) diff --git a/server_addon/maya/client/ayon_maya/startup/userSetup.py b/server_addon/maya/client/ayon_maya/startup/userSetup.py deleted file mode 100644 index 600864fd2b..0000000000 --- a/server_addon/maya/client/ayon_maya/startup/userSetup.py +++ /dev/null @@ -1,50 +0,0 @@ -import os - -from ayon_core.settings import get_project_settings -from ayon_core.pipeline import install_host, get_current_project_name -from ayon_maya.api import MayaHost - -from maya import cmds - - -host = MayaHost() -install_host(host) - -print("Starting AYON usersetup...") - -project_name = get_current_project_name() -settings = get_project_settings(project_name) - -# Loading plugins explicitly. -explicit_plugins_loading = settings["maya"]["explicit_plugins_loading"] -if explicit_plugins_loading["enabled"]: - def _explicit_load_plugins(): - for plugin in explicit_plugins_loading["plugins_to_load"]: - if plugin["enabled"]: - print("Loading plug-in: " + plugin["name"]) - try: - cmds.loadPlugin(plugin["name"], quiet=True) - except RuntimeError as e: - print(e) - - # We need to load plugins deferred as loading them directly does not work - # correctly due to Maya's initialization. - cmds.evalDeferred( - _explicit_load_plugins, - lowestPriority=True - ) - -# Open Workfile Post Initialization. -key = "AYON_OPEN_WORKFILE_POST_INITIALIZATION" -if bool(int(os.environ.get(key, "0"))): - def _log_and_open(): - path = os.environ["AYON_LAST_WORKFILE"] - print("Opening \"{}\"".format(path)) - cmds.file(path, open=True, force=True) - cmds.evalDeferred( - _log_and_open, - lowestPriority=True - ) - - -print("Finished AYON usersetup.") diff --git a/server_addon/maya/client/ayon_maya/tools/__init__.py b/server_addon/maya/client/ayon_maya/tools/__init__.py deleted file mode 100644 index 0dd6de2342..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -from ayon_core.tools.utils.host_tools import qt_app_context - - -class MayaToolsSingleton: - _look_assigner = None - - -def get_look_assigner_tool(parent): - """Create, cache and return look assigner tool window.""" - if MayaToolsSingleton._look_assigner is None: - from .mayalookassigner import MayaLookAssignerWindow - mayalookassigner_window = MayaLookAssignerWindow(parent) - MayaToolsSingleton._look_assigner = mayalookassigner_window - return MayaToolsSingleton._look_assigner - - -def show_look_assigner(parent=None): - """Look manager is Maya specific tool for look management.""" - - with qt_app_context(): - look_assigner_tool = get_look_assigner_tool(parent) - look_assigner_tool.show() - - # Pull window to the front. 
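        # (Qt behavior, for clarity: raise_() lifts the window in the
        # stacking order, activateWindow() gives it keyboard focus and
        # showNormal() restores it if it was minimized.)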
- look_assigner_tool.raise_() - look_assigner_tool.activateWindow() - look_assigner_tool.showNormal() diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/LICENSE b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/LICENSE deleted file mode 100644 index 852751dbe4..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Colorbleed - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/__init__.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/__init__.py deleted file mode 100644 index 5e40777741..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .app import ( - MayaLookAssignerWindow, - show -) - - -__all__ = [ - "MayaLookAssignerWindow", - "show"] diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/alembic.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/alembic.py deleted file mode 100644 index 6885e923d3..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/alembic.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -"""Tools for loading looks to vray proxies.""" -import os -from collections import defaultdict -import logging - -import six - -import alembic.Abc - - -log = logging.getLogger(__name__) - - -def get_alembic_paths_by_property(filename, attr, verbose=False): - # type: (str, str, bool) -> dict - """Return attribute value per objects in the Alembic file. - - Reads an Alembic archive hierarchy and retrieves the - value from the `attr` properties on the objects. - - Args: - filename (str): Full path to Alembic archive to read. - attr (str): Id attribute. - verbose (bool): Whether to verbosely log missing attributes. 
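
        Example (hypothetical file path; the output only shows the
        expected mapping shape):
            >>> get_alembic_paths_by_property("/tmp/hero.abc", "cbId")
            {'/hero_GRP/body_GEO': 'some-object-id'}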
-
-    Returns:
-        dict: Mapping of node full path to its id
-
-    """
-    # Normalize alembic path
-    filename = os.path.normpath(filename)
-    filename = filename.replace("\\", "/")
-    filename = str(filename)  # path must be string
-
-    try:
-        archive = alembic.Abc.IArchive(filename)
-    except RuntimeError:
-        # invalid alembic file - probably vrmesh
-        log.warning("{} is not an alembic file".format(filename))
-        return {}
-    root = archive.getTop()
-
-    iterator = list(root.children)
-    obj_ids = {}
-
-    for obj in iterator:
-        name = obj.getFullName()
-
-        # include children for coming iterations
-        iterator.extend(obj.children)
-
-        props = obj.getProperties()
-        if props.getNumProperties() == 0:
-            # Skip those without properties, e.g. '/materials' in a gpuCache
-            continue
-
-        # The custom attribute is under the properties' first container under
-        # the ".arbGeomParams"
-        prop = props.getProperty(0)  # get base property
-
-        _property = None
-        try:
-            geo_params = prop.getProperty('.arbGeomParams')
-            _property = geo_params.getProperty(attr)
-        except KeyError:
-            if verbose:
-                log.debug("Missing attr on: {0}".format(name))
-            continue
-
-        if not _property.isConstant():
-            log.warning("Id not constant on: {0}".format(name))
-
-        # Get first value sample
-        value = _property.getValue()[0]
-
-        obj_ids[name] = value
-
-    return obj_ids
-
-
-def get_alembic_ids_cache(path):
-    # type: (str) -> dict
-    """Build an id to node mapping from an Alembic file.
-
-    Nodes without IDs are ignored.
-
-    Returns:
-        dict: Mapping of id to nodes in the Alembic.
-
-    """
-    node_ids = get_alembic_paths_by_property(path, attr="cbId")
-    id_nodes = defaultdict(list)
-    for node, _id in six.iteritems(node_ids):
-        id_nodes[_id].append(node)
-
-    return dict(six.iteritems(id_nodes))
diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/app.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/app.py
deleted file mode 100644
index 2937b72934..0000000000
--- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/app.py
+++ /dev/null
@@ -1,317 +0,0 @@
-import sys
-import time
-import logging
-
-import ayon_api
-from qtpy import QtWidgets, QtCore
-
-from ayon_core import style
-from ayon_core.pipeline import get_current_project_name
-from ayon_core.tools.utils.lib import qt_app_context
-from ayon_maya.api.lib import (
-    assign_look_by_version,
-    get_main_window
-)
-
-from maya import cmds
-# old api for MFileIO
-import maya.OpenMaya
-import maya.api.OpenMaya as om
-
-from .widgets import (
-    AssetOutliner,
-    LookOutliner
-)
-from .commands import (
-    get_workfile,
-    remove_unused_looks
-)
-from .vray_proxies import vrayproxy_assign_look
-from . 
import arnold_standin - -module = sys.modules[__name__] -module.window = None - - -class MayaLookAssignerWindow(QtWidgets.QWidget): - - def __init__(self, parent=None): - super(MayaLookAssignerWindow, self).__init__(parent=parent) - - self.log = logging.getLogger(__name__) - - # Store callback references - self._callbacks = [] - self._connections_set_up = False - - filename = get_workfile() - - self.setObjectName("lookManager") - self.setWindowTitle("Look Manager 1.4.0 - [{}]".format(filename)) - self.setWindowFlags(QtCore.Qt.Window) - self.setParent(parent) - - self.resize(750, 500) - - self.setup_ui() - - # Force refresh check on initialization - self._on_renderlayer_switch() - - def setup_ui(self): - """Build the UI""" - - main_splitter = QtWidgets.QSplitter(self) - - # Assets (left) - asset_outliner = AssetOutliner(main_splitter) - - # Looks (right) - looks_widget = QtWidgets.QWidget(main_splitter) - - look_outliner = LookOutliner(looks_widget) # Database look overview - - assign_selected = QtWidgets.QCheckBox( - "Assign to selected only", looks_widget - ) - assign_selected.setToolTip("Whether to assign only to selected nodes " - "or to the full asset") - remove_unused_btn = QtWidgets.QPushButton( - "Remove Unused Looks", looks_widget - ) - - looks_layout = QtWidgets.QVBoxLayout(looks_widget) - looks_layout.addWidget(look_outliner) - looks_layout.addWidget(assign_selected) - looks_layout.addWidget(remove_unused_btn) - - main_splitter.addWidget(asset_outliner) - main_splitter.addWidget(looks_widget) - main_splitter.setSizes([350, 200]) - - # Footer - status = QtWidgets.QStatusBar(self) - status.setSizeGripEnabled(False) - status.setFixedHeight(25) - warn_layer = QtWidgets.QLabel( - "Current Layer is not defaultRenderLayer", self - ) - warn_layer.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - warn_layer.setStyleSheet("color: #DD5555; font-weight: bold;") - warn_layer.setFixedHeight(25) - - footer = QtWidgets.QHBoxLayout() - footer.setContentsMargins(0, 0, 0, 0) - footer.addWidget(status) - footer.addWidget(warn_layer) - - # Build up widgets - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setSpacing(0) - main_layout.addWidget(main_splitter) - main_layout.addLayout(footer) - - # Set column width - asset_outliner.view.setColumnWidth(0, 200) - look_outliner.view.setColumnWidth(0, 150) - - asset_outliner.selection_changed.connect( - self.on_asset_selection_changed) - - asset_outliner.refreshed.connect( - lambda: self.echo("Loaded assets..") - ) - - look_outliner.menu_apply_action.connect(self.on_process_selected) - remove_unused_btn.clicked.connect(remove_unused_looks) - - # Open widgets - self.asset_outliner = asset_outliner - self.look_outliner = look_outliner - self.status = status - self.warn_layer = warn_layer - - # Buttons - self.remove_unused = remove_unused_btn - self.assign_selected = assign_selected - - self._first_show = True - - def setup_connections(self): - """Connect interactive widgets with actions""" - if self._connections_set_up: - return - - # Maya renderlayer switch callback - callback = om.MEventMessage.addEventCallback( - "renderLayerManagerChange", - self._on_renderlayer_switch - ) - self._callbacks.append(callback) - self._connections_set_up = True - - def remove_connection(self): - # Delete callbacks - for callback in self._callbacks: - om.MMessage.removeCallback(callback) - - self._callbacks = [] - self._connections_set_up = False - - def showEvent(self, event): - self.setup_connections() - super(MayaLookAssignerWindow, self).showEvent(event) 
- if self._first_show: - self._first_show = False - self.setStyleSheet(style.load_stylesheet()) - - def closeEvent(self, event): - self.remove_connection() - super(MayaLookAssignerWindow, self).closeEvent(event) - - def _on_renderlayer_switch(self, *args): - """Callback that updates on Maya renderlayer switch""" - - if maya.OpenMaya.MFileIO.isNewingFile(): - # Don't perform a check during file open or file new as - # the renderlayers will not be in a valid state yet. - return - - layer = cmds.editRenderLayerGlobals(query=True, - currentRenderLayer=True) - if layer != "defaultRenderLayer": - self.warn_layer.show() - else: - self.warn_layer.hide() - - def echo(self, message): - self.status.showMessage(message, 1500) - - def refresh(self): - """Refresh the content""" - - # Get all containers and information - self.asset_outliner.clear() - found_items = self.asset_outliner.get_all_assets() - if not found_items: - self.look_outliner.clear() - - def on_asset_selection_changed(self): - """Get selected items from asset loader and fill look outliner""" - - items = self.asset_outliner.get_selected_items() - self.look_outliner.clear() - self.look_outliner.add_items(items) - - def on_process_selected(self): - """Process all selected looks for the selected assets""" - - assets = self.asset_outliner.get_selected_items() - assert assets, "No asset selected" - - # Collect the looks we want to apply (by name) - look_items = self.look_outliner.get_selected_items() - looks = {look["product"] for look in look_items} - - selection = self.assign_selected.isChecked() - asset_nodes = self.asset_outliner.get_nodes(selection=selection) - - project_name = get_current_project_name() - start = time.time() - for i, (asset, item) in enumerate(asset_nodes.items()): - - # Label prefix - prefix = "({}/{})".format(i + 1, len(asset_nodes)) - - # Assign the first matching look relevant for this asset - # (since assigning multiple to the same nodes makes no sense) - assign_look = next( - ( - product_entity - for product_entity in item["looks"] - if product_entity["name"] in looks - ), - None - ) - if not assign_look: - self.echo( - "{} No matching selected look for {}".format(prefix, asset) - ) - continue - - # Get the latest version of this asset's look product - version_entity = ayon_api.get_last_version_by_product_id( - project_name, assign_look["id"], fields={"id"} - ) - - product_name = assign_look["name"] - self.echo("{} Assigning {} to {}\t".format( - prefix, product_name, asset - )) - nodes = item["nodes"] - - # Assign Vray Proxy look. - if cmds.pluginInfo('vrayformaya', query=True, loaded=True): - self.echo("Getting vray proxy nodes ...") - vray_proxies = set(cmds.ls(type="VRayProxy", long=True)) - - for vp in vray_proxies: - if vp in nodes: - vrayproxy_assign_look(vp, product_name) - - nodes = list(set(nodes).difference(vray_proxies)) - else: - self.echo( - "Could not assign to VRayProxy because vrayformaya plugin " - "is not loaded." - ) - - # Assign Arnold Standin look. - if cmds.pluginInfo("mtoa", query=True, loaded=True): - arnold_standins = set(cmds.ls(type="aiStandIn", long=True)) - - for standin in arnold_standins: - if standin in nodes: - arnold_standin.assign_look(standin, product_name) - - nodes = list(set(nodes).difference(arnold_standins)) - else: - self.echo( - "Could not assign to aiStandIn because mtoa plugin is not " - "loaded." - ) - - # Assign look - if nodes: - assign_look_by_version( - nodes, version_id=version_entity["id"] - ) - - end = time.time() - - self.echo("Finished assigning.. 
({0:.3f}s)".format(end - start)) - - -def show(): - """Display Loader GUI - - Arguments: - debug (bool, optional): Run loader in debug-mode, - defaults to False - - """ - - try: - module.window.close() - del module.window - except (RuntimeError, AttributeError): - pass - - # Get Maya main window - mainwindow = get_main_window() - - with qt_app_context(): - window = MayaLookAssignerWindow(parent=mainwindow) - window.show() - - module.window = window diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/arnold_standin.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/arnold_standin.py deleted file mode 100644 index c285b857c7..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/arnold_standin.py +++ /dev/null @@ -1,263 +0,0 @@ -import os -import json -from collections import defaultdict -import logging - -from maya import cmds -import ayon_api - -from ayon_core.pipeline import get_current_project_name -from ayon_maya import api - -from . import lib -from .alembic import get_alembic_ids_cache -from .usd import is_usd_lib_supported, get_usd_ids_cache - - -log = logging.getLogger(__name__) - - -ATTRIBUTE_MAPPING = { - "primaryVisibility": "visibility", # Camera - "castsShadows": "visibility", # Shadow - "receiveShadows": "receive_shadows", - "aiSelfShadows": "self_shadows", - "aiOpaque": "opaque", - "aiMatte": "matte", - "aiVisibleInDiffuseTransmission": "visibility", - "aiVisibleInSpecularTransmission": "visibility", - "aiVisibleInVolume": "visibility", - "aiVisibleInDiffuseReflection": "visibility", - "aiVisibleInSpecularReflection": "visibility", - "aiSubdivUvSmoothing": "subdiv_uv_smoothing", - "aiDispHeight": "disp_height", - "aiDispPadding": "disp_padding", - "aiDispZeroValue": "disp_zero_value", - "aiStepSize": "step_size", - "aiVolumePadding": "volume_padding", - "aiSubdivType": "subdiv_type", - "aiSubdivIterations": "subdiv_iterations" -} - - -def calculate_visibility_mask(attributes): - # https://arnoldsupport.com/2018/11/21/backdoor-setting-visibility/ - mapping = { - "primaryVisibility": 1, # Camera - "castsShadows": 2, # Shadow - "aiVisibleInDiffuseTransmission": 4, - "aiVisibleInSpecularTransmission": 8, - "aiVisibleInVolume": 16, - "aiVisibleInDiffuseReflection": 32, - "aiVisibleInSpecularReflection": 64 - } - mask = 255 - for attr, value in mapping.items(): - if attributes.get(attr, True): - continue - - mask -= value - - return mask - - -def get_nodes_by_id(standin): - """Get node id from aiStandIn via json sidecar. - - Args: - standin (string): aiStandIn node. - - Returns: - (dict): Dictionary with node full name/path and id. - """ - path = cmds.getAttr(standin + ".dso") - - if path.endswith(".abc"): - # Support alembic files directly - return get_alembic_ids_cache(path) - - elif ( - is_usd_lib_supported and - any(path.endswith(ext) for ext in [".usd", ".usda", ".usdc"]) - ): - # Support usd files directly - return get_usd_ids_cache(path) - - json_path = None - for f in os.listdir(os.path.dirname(path)): - if f.endswith(".json"): - json_path = os.path.join(os.path.dirname(path), f) - break - - if not json_path: - log.warning("Could not find json file for {}.".format(standin)) - return {} - - with open(json_path, "r") as f: - return json.load(f) - - -def shading_engine_assignments(shading_engine, attribute, nodes, assignments): - """Full assignments with shader or disp_map. - - Args: - shading_engine (string): Shading engine for material. 
- attribute (string): "surfaceShader" or "displacementShader" - nodes: (list): Nodes paths relative to aiStandIn. - assignments (dict): Assignments by nodes. - """ - shader_inputs = cmds.listConnections( - shading_engine + "." + attribute, source=True - ) - if not shader_inputs: - log.info( - "Shading engine \"{}\" missing input \"{}\"".format( - shading_engine, attribute - ) - ) - return - - # Strip off component assignments - for i, node in enumerate(nodes): - if "." in node: - log.warning( - "Converting face assignment to full object assignment. This " - "conversion can be lossy: {}".format(node) - ) - nodes[i] = node.split(".")[0] - - shader_type = "shader" if attribute == "surfaceShader" else "disp_map" - assignment = "{}='{}'".format(shader_type, shader_inputs[0]) - for node in nodes: - assignments[node].append(assignment) - - -def assign_look(standin, product_name): - log.info("Assigning {} to {}.".format(product_name, standin)) - - nodes_by_id = get_nodes_by_id(standin) - - # Group by folder id so we run over the look per folder - node_ids_by_folder_id = defaultdict(set) - for node_id in nodes_by_id: - folder_id = node_id.split(":", 1)[0] - node_ids_by_folder_id[folder_id].add(node_id) - - project_name = get_current_project_name() - for folder_id, node_ids in node_ids_by_folder_id.items(): - - # Get latest look version - version_entity = ayon_api.get_last_version_by_product_name( - project_name, - product_name, - folder_id, - fields={"id"} - ) - if not version_entity: - log.info("Didn't find last version for product name {}".format( - product_name - )) - continue - version_id = version_entity["id"] - - relationships = lib.get_look_relationships(version_id) - shader_nodes, container_node = lib.load_look(version_id) - namespace = shader_nodes[0].split(":")[0] - - # Get only the node ids and paths related to this folder - # And get the shader edits the look supplies - asset_nodes_by_id = { - node_id: nodes_by_id[node_id] for node_id in node_ids - } - edits = list( - api.lib.iter_shader_edits( - relationships, shader_nodes, asset_nodes_by_id - ) - ) - - # Create assignments - node_assignments = {} - for edit in edits: - for node in edit["nodes"]: - if node not in node_assignments: - node_assignments[node] = [] - - if edit["action"] == "assign": - if not cmds.ls(edit["shader"], type="shadingEngine"): - log.info("Skipping non-shader: %s" % edit["shader"]) - continue - - shading_engine_assignments( - shading_engine=edit["shader"], - attribute="surfaceShader", - nodes=edit["nodes"], - assignments=node_assignments - ) - shading_engine_assignments( - shading_engine=edit["shader"], - attribute="displacementShader", - nodes=edit["nodes"], - assignments=node_assignments - ) - - if edit["action"] == "setattr": - visibility = False - for attr, value in edit["attributes"].items(): - if attr not in ATTRIBUTE_MAPPING: - log.warning( - "Skipping setting attribute {} on {} because it is" - " not recognized.".format(attr, edit["nodes"]) - ) - continue - - if isinstance(value, str): - value = "'{}'".format(value) - - if ATTRIBUTE_MAPPING[attr] == "visibility": - visibility = True - continue - - assignment = "{}={}".format(ATTRIBUTE_MAPPING[attr], value) - - for node in edit["nodes"]: - node_assignments[node].append(assignment) - - if visibility: - mask = calculate_visibility_mask(edit["attributes"]) - assignment = "visibility={}".format(mask) - - for node in edit["nodes"]: - node_assignments[node].append(assignment) - - # Assign shader - # Clear all current shader assignments - plug = standin + 
".operators" - num = cmds.getAttr(plug, size=True) - for i in reversed(range(num)): - cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True) - - # Create new assignment overrides - index = 0 - for node, assignments in node_assignments.items(): - if not assignments: - continue - - with api.lib.maintained_selection(): - operator = cmds.createNode("aiSetParameter") - operator = cmds.rename(operator, namespace + ":" + operator) - - cmds.setAttr(operator + ".selection", node, type="string") - for i, assignment in enumerate(assignments): - cmds.setAttr( - "{}.assignment[{}]".format(operator, i), - assignment, - type="string" - ) - - cmds.connectAttr( - operator + ".out", "{}[{}]".format(plug, index) - ) - - index += 1 - - cmds.sets(operator, edit=True, addElement=container_node) diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/commands.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/commands.py deleted file mode 100644 index 54b1cff740..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/commands.py +++ /dev/null @@ -1,199 +0,0 @@ -import os -import logging -from collections import defaultdict - -import ayon_api -import maya.cmds as cmds - -from ayon_core.pipeline import ( - remove_container, - registered_host, - get_current_project_name, -) -from ayon_maya.api import lib - -from .vray_proxies import get_alembic_ids_cache -from . import arnold_standin - -log = logging.getLogger(__name__) - - -def get_workfile(): - path = cmds.file(query=True, sceneName=True) or "untitled" - return os.path.basename(path) - - -def get_workfolder(): - return os.path.dirname(cmds.file(query=True, sceneName=True)) - - -def select(nodes): - cmds.select(nodes) - - -def get_namespace_from_node(node): - """Get the namespace from the given node - - Args: - node (str): name of the node - - Returns: - namespace (str) - - """ - parts = node.rsplit("|", 1)[-1].rsplit(":", 1) - return parts[0] if len(parts) > 1 else u":" - - -def get_selected_nodes(): - """Get information from current selection""" - - selection = cmds.ls(selection=True, long=True) - hierarchy = lib.get_all_children(selection, - ignore_intermediate_objects=True) - return list(hierarchy.union(selection)) - - -def get_all_asset_nodes(): - """Get all assets from the scene, container based - - Returns: - list: list of dictionaries - """ - return cmds.ls(dag=True, noIntermediate=True, long=True) - - -def create_folder_id_hash(nodes): - """Create a hash based on cbId attribute value - Args: - nodes (list): a list of nodes - - Returns: - dict - """ - node_id_hash = defaultdict(list) - for node in nodes: - # iterate over content of reference node - if cmds.nodeType(node) == "reference": - ref_hashes = create_folder_id_hash( - list(set(cmds.referenceQuery(node, nodes=True, dp=True)))) - for folder_id, ref_nodes in ref_hashes.items(): - node_id_hash[folder_id] += ref_nodes - elif cmds.pluginInfo('vrayformaya', query=True, - loaded=True) and cmds.nodeType( - node) == "VRayProxy": - path = cmds.getAttr("{}.fileName".format(node)) - ids = get_alembic_ids_cache(path) - for k, _ in ids.items(): - id = k.split(":")[0] - node_id_hash[id].append(node) - elif cmds.nodeType(node) == "aiStandIn": - for id, _ in arnold_standin.get_nodes_by_id(node).items(): - id = id.split(":")[0] - node_id_hash[id].append(node) - else: - value = lib.get_id(node) - if value is None: - continue - - folder_id = value.split(":")[0] - node_id_hash[folder_id].append(node) - - return dict(node_id_hash) - - -def create_items_from_nodes(nodes): 
-    """Create an item for the view based on the container and its content
-
-    It fetches the look document based on the folder id found in the content.
-    The item will contain all important information for the tool to work.
-
-    If there is a folder id which is not registered in the project's collection
-    it will log a warning message.
-
-    Args:
-        nodes (list): list of maya nodes
-
-    Returns:
-        list of dicts
-
-    """
-
-    folder_view_items = []
-
-    id_hashes = create_folder_id_hash(nodes)
-
-    if not id_hashes:
-        log.warning("No id hashes")
-        return folder_view_items
-
-    project_name = get_current_project_name()
-    folder_ids = set(id_hashes.keys())
-
-    folder_entities = ayon_api.get_folders(
-        project_name, folder_ids, fields={"id", "path"}
-    )
-    folder_entities_by_id = {
-        folder_entity["id"]: folder_entity
-        for folder_entity in folder_entities
-    }
-
-    for folder_id, id_nodes in id_hashes.items():
-        folder_entity = folder_entities_by_id.get(folder_id)
-        # Skip if folder id is not found
-        if not folder_entity:
-            log.warning(
-                "Id found on {num} nodes for which no folder is found in the"
-                " database, skipping '{folder_id}'".format(
-                    num=len(nodes),
-                    folder_id=folder_id
-                )
-            )
-            continue
-
-        # Collect available look products for this folder
-        looks = lib.list_looks(project_name, folder_entity["id"])
-
-        # Collect namespaces the folder is found in
-        namespaces = set()
-        for node in id_nodes:
-            namespace = get_namespace_from_node(node)
-            namespaces.add(namespace)
-
-        folder_view_items.append({
-            "label": folder_entity["path"],
-            "folder_entity": folder_entity,
-            "looks": looks,
-            "namespaces": namespaces
-        })
-
-    return folder_view_items
-
-
-def remove_unused_looks():
-    """Removes all loaded looks for which none of the shaders are used.
-
-    This will cleanup all loaded "LookLoader" containers that are unused in
-    the current scene.
-
-    """
-
-    host = registered_host()
-
-    unused = []
-    for container in host.ls():
-        if container['loader'] == "LookLoader":
-            members = lib.get_container_members(container['objectName'])
-            look_sets = cmds.ls(members, type="objectSet")
-            for look_set in look_sets:
-                # If the set is used then we consider this look *in use*
-                if cmds.sets(look_set, query=True):
-                    break
-            else:
-                unused.append(container)
-
-    for container in unused:
-        log.info("Removing unused look container: %s", container['objectName'])
-        remove_container(container)
-
-    log.info("Finished removing unused looks. (see log for details)")
diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/lib.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/lib.py
deleted file mode 100644
index 5417db26bc..0000000000
--- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/lib.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import json
-import logging
-
-from ayon_api import get_representation_by_name
-
-from ayon_core.pipeline import (
-    get_current_project_name,
-    get_representation_path,
-    registered_host,
-    discover_loader_plugins,
-    loaders_from_representation,
-    load_container
-)
-from ayon_maya.api import lib
-
-
-log = logging.getLogger(__name__)
-
-
-def get_look_relationships(version_id):
-    # type: (str) -> dict
-    """Get relations for the look.
-
-    Args:
-        version_id (str): Parent version Id.
-
-    Returns:
-        dict: Dictionary of relations. 
- """ - - project_name = get_current_project_name() - json_representation = get_representation_by_name( - project_name, "json", version_id - ) - - # Load relationships - shader_relation = get_representation_path(json_representation) - with open(shader_relation, "r") as f: - relationships = json.load(f) - - return relationships - - -def load_look(version_id): - # type: (str) -> list - """Load look from version. - - Get look from version and invoke Loader for it. - - Args: - version_id (str): Version ID - - Returns: - list of shader nodes. - - """ - - project_name = get_current_project_name() - # Get representations of shader file and relationships - look_representation = get_representation_by_name( - project_name, "ma", version_id - ) - - # See if representation is already loaded, if so reuse it. - host = registered_host() - representation_id = look_representation["id"] - for container in host.ls(): - if (container['loader'] == "LookLoader" and - container['representation'] == representation_id): - log.info("Reusing loaded look ...") - container_node = container['objectName'] - break - else: - log.info("Using look for the first time ...") - - # Load file - all_loaders = discover_loader_plugins() - loaders = loaders_from_representation(all_loaders, representation_id) - loader = next( - (i for i in loaders if i.__name__ == "LookLoader"), None) - if loader is None: - raise RuntimeError("Could not find LookLoader, this is a bug") - - # Reference the look file - with lib.maintained_selection(): - container_node = load_container(loader, look_representation)[0] - - return lib.get_container_members(container_node), container_node diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/models.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/models.py deleted file mode 100644 index b0807be6a6..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/models.py +++ /dev/null @@ -1,134 +0,0 @@ -from collections import defaultdict - -from qtpy import QtCore -import qtawesome - -from ayon_core.tools.utils import models -from ayon_core.style import get_default_entity_icon_color - - -class AssetModel(models.TreeModel): - - Columns = ["label"] - - def __init__(self, *args, **kwargs): - super(AssetModel, self).__init__(*args, **kwargs) - - self._icon_color = get_default_entity_icon_color() - - def add_items(self, items): - """ - Add items to model with needed data - Args: - items(list): collection of item data - - Returns: - None - """ - - self.beginResetModel() - - # Add the items sorted by label - def sorter(x): - return x["label"] - - for item in sorted(items, key=sorter): - - asset_item = models.Item() - asset_item.update(item) - asset_item["icon"] = "folder" - - # Add namespace children - namespaces = item["namespaces"] - for namespace in sorted(namespaces): - child = models.Item() - child.update(item) - child.update({ - "label": (namespace if namespace != ":" - else "(no namespace)"), - "namespace": namespace, - "looks": item["looks"], - "icon": "folder-o" - }) - asset_item.add_child(child) - - self.add_child(asset_item) - - self.endResetModel() - - def data(self, index, role): - - if not index.isValid(): - return - - if role == models.TreeModel.ItemRole: - node = index.internalPointer() - return node - - # Add icon - if role == QtCore.Qt.DecorationRole: - if index.column() == 0: - node = index.internalPointer() - icon = node.get("icon") - if icon: - return qtawesome.icon( - "fa.{0}".format(icon), - color=self._icon_color - ) - - return super(AssetModel, 
self).data(index, role)
-
-
-class LookModel(models.TreeModel):
    """Model displaying a list of looks and matches for assets"""
-
-    Columns = ["label", "match"]
-
-    def add_items(self, items):
-        """Add items to model with needed data
-
-        An item consists of:
-            {
-                "product": 'name of product',
-                "asset": asset_document
-            }
-
-        Args:
-            items(list): collection of item data
-
-        Returns:
-            None
-        """
-
-        self.beginResetModel()
-
-        # Collect the assets per look name (from the items of the AssetModel)
-        look_products = defaultdict(list)
-        for asset_item in items:
-            folder_entity = asset_item["folder_entity"]
-            for look in asset_item["looks"]:
-                look_products[look["name"]].append(folder_entity)
-
-        for product_name in sorted(look_products.keys()):
-            folder_entities = look_products[product_name]
-
-            # Define nice label without "look" prefix for readability
-            label = (
-                product_name
-                if not product_name.startswith("look")
-                else product_name[4:]
-            )
-
-            item_node = models.Item()
-            item_node["label"] = label
-            item_node["product"] = product_name
-
-            # Number of matching assets for this look
-            item_node["match"] = len(folder_entities)
-
-            # Store the assets that have this product available
-            item_node["folder_entities"] = folder_entities
-
-            self.add_child(item_node)
-
-        self.endResetModel()
diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/usd.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/usd.py
deleted file mode 100644
index 6b5cb2f0f5..0000000000
--- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/usd.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from collections import defaultdict
-
-try:
-    from pxr import Usd
-    is_usd_lib_supported = True
-except ImportError:
-    is_usd_lib_supported = False
-
-
-def get_usd_ids_cache(path):
-    # type: (str) -> dict
-    """Build an id to node mapping from a USD file.
-
-    Nodes without IDs are ignored.
-
-    Returns:
-        dict: Mapping of id to nodes in the USD file.
-
-    """
-    if not is_usd_lib_supported:
-        raise RuntimeError("No pxr.Usd python library available.")
-
-    stage = Usd.Stage.Open(path)
-    ids = {}
-    for prim in stage.Traverse():
-        attr = prim.GetAttribute("userProperties:cbId")
-        if not attr.IsValid():
-            continue
-        value = attr.Get()
-        if not value:
-            continue
-        path = str(prim.GetPath())
-        ids[path] = value
-
-    cache = defaultdict(list)
-    for path, value in ids.items():
-        cache[value].append(path)
-    return dict(cache)
diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/views.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/views.py
deleted file mode 100644
index 489c194f60..0000000000
--- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/views.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from qtpy import QtWidgets, QtCore
-
-
-class View(QtWidgets.QTreeView):
-    data_changed = QtCore.Signal()
-
-    def __init__(self, parent=None):
-        super(View, self).__init__(parent=parent)
-
-        # view settings
-        self.setAlternatingRowColors(False)
-        self.setSortingEnabled(True)
-        self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
-        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
-
-    def get_indices(self):
-        """Get the selected rows"""
-        selection_model = self.selectionModel()
-        return selection_model.selectedRows()
-
-    def extend_to_children(self, indices):
-        """Extend the indices to the children indices.
-
-        Top-level indices are extended to their children indices. Sub-items
-        are kept as is.
-
-        :param indices: The indices to extend. 
- :type indices: list - - :return: The children indices - :rtype: list - """ - - subitems = set() - for i in indices: - valid_parent = i.parent().isValid() - if valid_parent and i not in subitems: - subitems.add(i) - else: - # is top level node - model = i.model() - rows = model.rowCount(parent=i) - for row in range(rows): - child = model.index(row, 0, parent=i) - subitems.add(child) - - return list(subitems) diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/vray_proxies.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/vray_proxies.py deleted file mode 100644 index 6b451abb9a..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/vray_proxies.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -"""Tools for loading looks to vray proxies.""" -from collections import defaultdict -import logging - -from maya import cmds -import ayon_api - -from ayon_core.pipeline import get_current_project_name -import ayon_maya.api.lib as maya_lib -from . import lib -from .alembic import get_alembic_ids_cache - - -log = logging.getLogger(__name__) - - -def assign_vrayproxy_shaders(vrayproxy, assignments): - # type: (str, dict) -> None - """Assign shaders to content of Vray Proxy. - - This will create shader overrides on Vray Proxy to assign shaders to its - content. - - Todo: - Allow to optimize and assign a single shader to multiple shapes at - once or maybe even set it to the highest available path? - - Args: - vrayproxy (str): Name of Vray Proxy - assignments (dict): Mapping of shader assignments. - - Returns: - None - - """ - # Clear all current shader assignments - plug = vrayproxy + ".shaders" - num = cmds.getAttr(plug, size=True) - for i in reversed(range(num)): - cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True) - - # Create new assignment overrides - index = 0 - for material, paths in assignments.items(): - for path in paths: - plug = "{}.shaders[{}]".format(vrayproxy, index) - cmds.setAttr(plug + ".shadersNames", path, type="string") - cmds.connectAttr(material + ".outColor", - plug + ".shadersConnections", force=True) - index += 1 - - -def vrayproxy_assign_look(vrayproxy, product_name="lookMain"): - # type: (str, str) -> None - """Assign look to vray proxy. - - Args: - vrayproxy (str): Name of vrayproxy to apply look to. - product_name (str): Name of look product. 
- - Returns: - None - - """ - path = cmds.getAttr(vrayproxy + ".fileName") - - nodes_by_id = get_alembic_ids_cache(path) - if not nodes_by_id: - log.warning("Alembic file has no cbId attributes: %s" % path) - return - - # Group by asset id so we run over the look per asset - node_ids_by_asset_id = defaultdict(set) - for node_id in nodes_by_id: - folder_id = node_id.split(":", 1)[0] - node_ids_by_asset_id[folder_id].add(node_id) - - project_name = get_current_project_name() - for folder_id, node_ids in node_ids_by_asset_id.items(): - - # Get latest look version - version_entity = ayon_api.get_last_version_by_product_name( - project_name, - product_name, - folder_id, - fields={"id"} - ) - if not version_entity: - print("Didn't find last version for product name {}".format( - product_name - )) - continue - version_id = version_entity["id"] - - relationships = lib.get_look_relationships(version_id) - shadernodes, _ = lib.load_look(version_id) - - # Get only the node ids and paths related to this asset - # And get the shader edits the look supplies - asset_nodes_by_id = { - node_id: nodes_by_id[node_id] for node_id in node_ids - } - edits = list( - maya_lib.iter_shader_edits( - relationships, shadernodes, asset_nodes_by_id - ) - ) - - # Create assignments - assignments = {} - for edit in edits: - if edit["action"] == "assign": - nodes = edit["nodes"] - shader = edit["shader"] - if not cmds.ls(shader, type="shadingEngine"): - print("Skipping non-shader: %s" % shader) - continue - - inputs = cmds.listConnections( - shader + ".surfaceShader", source=True) - if not inputs: - print("Shading engine missing material: %s" % shader) - - # Strip off component assignments - for i, node in enumerate(nodes): - if "." in node: - log.warning( - ("Converting face assignment to full object " - "assignment. This conversion can be lossy: " - "{}").format(node)) - nodes[i] = node.split(".")[0] - - material = inputs[0] - assignments[material] = nodes - - assign_vrayproxy_shaders(vrayproxy, assignments) diff --git a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/widgets.py b/server_addon/maya/client/ayon_maya/tools/mayalookassigner/widgets.py deleted file mode 100644 index f345b87e36..0000000000 --- a/server_addon/maya/client/ayon_maya/tools/mayalookassigner/widgets.py +++ /dev/null @@ -1,256 +0,0 @@ -import logging -from collections import defaultdict - -from qtpy import QtWidgets, QtCore - -from ayon_core.tools.utils.models import TreeModel -from ayon_core.tools.utils.lib import ( - preserve_expanded_rows, - preserve_selection, -) - -from .models import ( - AssetModel, - LookModel -) -from . 
import commands -from .views import View - -from maya import cmds - - -class AssetOutliner(QtWidgets.QWidget): - refreshed = QtCore.Signal() - selection_changed = QtCore.Signal() - - def __init__(self, parent=None): - super(AssetOutliner, self).__init__(parent) - - title = QtWidgets.QLabel("Assets", self) - title.setAlignment(QtCore.Qt.AlignCenter) - title.setStyleSheet("font-weight: bold; font-size: 12px") - - model = AssetModel() - view = View(self) - view.setModel(model) - view.customContextMenuRequested.connect(self.right_mouse_menu) - view.setSortingEnabled(False) - view.setHeaderHidden(True) - view.setIndentation(10) - - from_all_asset_btn = QtWidgets.QPushButton( - "Get All Assets", self - ) - from_selection_btn = QtWidgets.QPushButton( - "Get Assets From Selection", self - ) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(title) - layout.addWidget(from_all_asset_btn) - layout.addWidget(from_selection_btn) - layout.addWidget(view) - - # Build connections - from_selection_btn.clicked.connect(self.get_selected_assets) - from_all_asset_btn.clicked.connect(self.get_all_assets) - - selection_model = view.selectionModel() - selection_model.selectionChanged.connect(self.selection_changed) - - self.view = view - self.model = model - - self.log = logging.getLogger(__name__) - - def clear(self): - self.model.clear() - - # fix looks remaining visible when no items present after "refresh" - # todo: figure out why this workaround is needed. - self.selection_changed.emit() - - def add_items(self, items): - """Add new items to the outliner""" - - self.model.add_items(items) - self.refreshed.emit() - - def get_selected_items(self): - """Get current selected items from view - - Returns: - list: list of dictionaries - """ - - selection_model = self.view.selectionModel() - return [row.data(TreeModel.ItemRole) - for row in selection_model.selectedRows(0)] - - def get_all_assets(self): - """Add all items from the current scene""" - - with preserve_expanded_rows(self.view): - with preserve_selection(self.view): - self.clear() - nodes = commands.get_all_asset_nodes() - items = commands.create_items_from_nodes(nodes) - self.add_items(items) - return len(items) > 0 - - def get_selected_assets(self): - """Add all selected items from the current scene""" - - with preserve_expanded_rows(self.view): - with preserve_selection(self.view): - self.clear() - nodes = commands.get_selected_nodes() - items = commands.create_items_from_nodes(nodes) - self.add_items(items) - - def get_nodes(self, selection=False): - """Find the nodes in the current scene per folder.""" - - items = self.get_selected_items() - - # Collect all nodes by hash (optimization) - if not selection: - nodes = cmds.ls(dag=True, long=True) - else: - nodes = commands.get_selected_nodes() - id_nodes = commands.create_folder_id_hash(nodes) - - # Collect the asset item entries per folder - # and collect the namespaces we'd like to apply - folder_items = {} - namespaces_by_folder_path = defaultdict(set) - for item in items: - folder_entity = item["folder_entity"] - folder_id = folder_entity["id"] - folder_path = folder_entity["path"] - namespaces_by_folder_path[folder_path].add(item.get("namespace")) - - if folder_path in folder_items: - continue - - folder_items[folder_path] = item - folder_items[folder_path]["nodes"] = id_nodes.get(folder_id, []) - - # Filter nodes to namespace (if only namespaces were selected) - for folder_path in folder_items: - namespaces = namespaces_by_folder_path[folder_path] - - # When None is present there should be 
no filtering - if None in namespaces: - continue - - # Else only namespaces are selected and *not* the top entry so - # we should filter to only those namespaces. - nodes = folder_items[folder_path]["nodes"] - nodes = [node for node in nodes if - commands.get_namespace_from_node(node) in namespaces] - folder_items[folder_path]["nodes"] = nodes - - return folder_items - - def select_asset_from_items(self): - """Select nodes from listed asset""" - - items = self.get_nodes(selection=False) - nodes = [] - for item in items.values(): - nodes.extend(item["nodes"]) - - commands.select(nodes) - - def right_mouse_menu(self, pos): - """Build RMB menu for asset outliner""" - - active = self.view.currentIndex() # index under mouse - active = active.sibling(active.row(), 0) # get first column - globalpos = self.view.viewport().mapToGlobal(pos) - - menu = QtWidgets.QMenu(self.view) - - # Direct assignment - apply_action = QtWidgets.QAction(menu, text="Select nodes") - apply_action.triggered.connect(self.select_asset_from_items) - - if not active.isValid(): - apply_action.setEnabled(False) - - menu.addAction(apply_action) - - menu.exec_(globalpos) - - -class LookOutliner(QtWidgets.QWidget): - menu_apply_action = QtCore.Signal() - - def __init__(self, parent=None): - super(LookOutliner, self).__init__(parent) - - # Looks from database - title = QtWidgets.QLabel("Looks", self) - title.setAlignment(QtCore.Qt.AlignCenter) - title.setStyleSheet("font-weight: bold; font-size: 12px") - title.setAlignment(QtCore.Qt.AlignCenter) - - model = LookModel() - - # Proxy for dynamic sorting - proxy = QtCore.QSortFilterProxyModel() - proxy.setSourceModel(model) - - view = View(self) - view.setModel(proxy) - view.setMinimumHeight(180) - view.setToolTip("Use right mouse button menu for direct actions") - view.customContextMenuRequested.connect(self.right_mouse_menu) - view.sortByColumn(0, QtCore.Qt.AscendingOrder) - - # look manager layout - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(10) - layout.addWidget(title) - layout.addWidget(view) - - self.view = view - self.model = model - - def clear(self): - self.model.clear() - - def add_items(self, items): - self.model.add_items(items) - - def get_selected_items(self): - """Get current selected items from view - - Returns: - list: list of dictionaries - """ - - items = [i.data(TreeModel.ItemRole) for i in self.view.get_indices()] - return [item for item in items if item is not None] - - def right_mouse_menu(self, pos): - """Build RMB menu for look view""" - - active = self.view.currentIndex() # index under mouse - active = active.sibling(active.row(), 0) # get first column - globalpos = self.view.viewport().mapToGlobal(pos) - - if not active.isValid(): - return - - menu = QtWidgets.QMenu(self.view) - - # Direct assignment - apply_action = QtWidgets.QAction(menu, text="Assign looks..") - apply_action.triggered.connect(self.menu_apply_action) - - menu.addAction(apply_action) - - menu.exec_(globalpos) diff --git a/server_addon/maya/client/ayon_maya/vendor/python/capture.py b/server_addon/maya/client/ayon_maya/vendor/python/capture.py deleted file mode 100644 index 4ccfdb35f3..0000000000 --- a/server_addon/maya/client/ayon_maya/vendor/python/capture.py +++ /dev/null @@ -1,919 +0,0 @@ -"""Maya Capture - -Playblasting with independent viewport, camera and display options - -""" - -import re -import sys -import contextlib -import logging - -from maya import cmds -from maya import mel - -from qtpy import QtGui, QtWidgets - 
-version_info = (2, 3, 0)
-
-__version__ = "%s.%s.%s" % version_info
-__license__ = "MIT"
-logger = logging.getLogger("capture")
-
-
-def capture(camera=None,
-            width=None,
-            height=None,
-            filename=None,
-            start_frame=None,
-            end_frame=None,
-            frame=None,
-            format='qt',
-            compression='H.264',
-            quality=100,
-            off_screen=False,
-            viewer=True,
-            show_ornaments=True,
-            sound=None,
-            isolate=None,
-            maintain_aspect_ratio=True,
-            overwrite=False,
-            frame_padding=4,
-            raw_frame_numbers=False,
-            camera_options=None,
-            display_options=None,
-            viewport_options=None,
-            viewport2_options=None,
-            complete_filename=None,
-            log=None):
-    """Playblast in an independent panel
-
-    Arguments:
-        camera (str, optional): Name of camera, defaults to "persp"
-        width (int, optional): Width of output in pixels
-        height (int, optional): Height of output in pixels
-        filename (str, optional): Name of output file. If
-            none is specified, no files are saved.
-        start_frame (float, optional): Defaults to current start frame.
-        end_frame (float, optional): Defaults to current end frame.
-        frame (float or tuple, optional): A single frame or list of frames.
-            Use this to capture a single frame or an arbitrary sequence of
-            frames.
-        format (str, optional): Name of format, defaults to "qt".
-        compression (str, optional): Name of compression, defaults to "H.264"
-        quality (int, optional): The quality of the output, defaults to 100
-        off_screen (bool, optional): Whether or not to playblast off screen
-        viewer (bool, optional): Display results in native player
-        show_ornaments (bool, optional): Whether or not model view ornaments
-            (e.g. axis icon, grid and HUD) should be displayed.
-        sound (str, optional): Specify the sound node to be used during
-            playblast. When None (default) no sound will be used.
-        isolate (list): List of nodes to isolate upon capturing
-        maintain_aspect_ratio (bool, optional): Modify height in order to
-            maintain aspect ratio.
-        overwrite (bool, optional): Whether or not to overwrite if file
-            already exists. If disabled and file exists an error will be
-            raised.
-        frame_padding (int, optional): Number of zeros used to pad file name
-            for image sequences.
-        raw_frame_numbers (bool, optional): Whether or not to use the exact
-            frame numbers from the scene or capture to a sequence starting at
-            zero. Defaults to False. When set to True `viewer` can't be used
-            and will be forced to False.
-        camera_options (dict, optional): Supplied camera options,
-            using `CameraOptions`
-        display_options (dict, optional): Supplied display
-            options, using `DisplayOptions`
-        viewport_options (dict, optional): Supplied viewport
-            options, using `ViewportOptions`
-        viewport2_options (dict, optional): Supplied display
-            options, using `Viewport2Options`
-        complete_filename (str, optional): Exact name of output file. Use this
-            to override the output of `filename` so it excludes frame padding.
-        log (logger, optional): pass logger for logging messages.
-
-    Example:
-        >>> # Launch default capture
-        >>> capture()
-        >>> # Launch capture with custom viewport settings
-        >>> capture('persp', 800, 600,
-        ...         viewport_options={
-        ...             "displayAppearance": "wireframe",
-        ...             "grid": False,
-        ...             "polymeshes": True,
-        ...         },
-        ...         camera_options={
-        ...             "displayResolution": True
-        ...         }
-        ... 
) - - - """ - global logger - if log: - logger = log - camera = camera or "persp" - - # Ensure camera exists - if not cmds.objExists(camera): - raise RuntimeError("Camera does not exist: {0}".format(camera)) - - if width and height : - maintain_aspect_ratio = False - width = width or cmds.getAttr("defaultResolution.width") - height = height or cmds.getAttr("defaultResolution.height") - if maintain_aspect_ratio: - ratio = cmds.getAttr("defaultResolution.deviceAspectRatio") - height = round(width / ratio) - - if start_frame is None: - start_frame = cmds.playbackOptions(minTime=True, query=True) - if end_frame is None: - end_frame = cmds.playbackOptions(maxTime=True, query=True) - - # (#74) Bugfix: `maya.cmds.playblast` will raise an error when playblasting - # with `rawFrameNumbers` set to True but no explicit `frames` provided. - # Since we always know what frames will be included we can provide it - # explicitly - if raw_frame_numbers and frame is None: - frame = range(int(start_frame), int(end_frame) + 1) - - # We need to wrap `completeFilename`, otherwise even when None is provided - # it will use filename as the exact name. Only when lacking as argument - # does it function correctly. - playblast_kwargs = dict() - if complete_filename: - playblast_kwargs['completeFilename'] = complete_filename - if frame is not None: - playblast_kwargs['frame'] = frame - if sound is not None: - playblast_kwargs['sound'] = sound - - # We need to raise an error when the user gives a custom frame range with - # negative frames in combination with raw frame numbers. This will result - # in a minimal integer frame number : filename.-2147483648.png for any - # negative rendered frame - if frame and raw_frame_numbers: - check = frame if isinstance(frame, (list, tuple)) else [frame] - if any(f < 0 for f in check): - raise RuntimeError("Negative frames are not supported with " - "raw frame numbers and explicit frame numbers") - - # (#21) Bugfix: `maya.cmds.playblast` suffers from undo bug where it - # always sets the currentTime to frame 1. By setting currentTime before - # the playblast call it'll undo correctly. 
- cmds.currentTime(cmds.currentTime(query=True)) - - padding = 10 # Extend panel to accommodate for OS window manager - - with _independent_panel(width=width + padding, - height=height + padding, - off_screen=off_screen) as panel: - cmds.setFocus(panel) - - all_playblast_kwargs = { - "compression": compression, - "format": format, - "percent": 100, - "quality": quality, - "viewer": viewer, - "startTime": start_frame, - "endTime": end_frame, - "offScreen": off_screen, - "showOrnaments": show_ornaments, - "forceOverwrite": overwrite, - "filename": filename, - "widthHeight": [width, height], - "rawFrameNumbers": raw_frame_numbers, - "framePadding": frame_padding - } - all_playblast_kwargs.update(playblast_kwargs) - - if getattr(contextlib, "nested", None): - with contextlib.nested( - _disabled_inview_messages(), - _maintain_camera(panel, camera), - _applied_viewport_options(viewport_options, panel), - _applied_camera_options(camera_options, panel), - _applied_display_options(display_options), - _applied_viewport2_options(viewport2_options), - _isolated_nodes(isolate, panel), - _maintained_time() - ): - output = cmds.playblast(**all_playblast_kwargs) - else: - with contextlib.ExitStack() as stack: - stack.enter_context(_disabled_inview_messages()) - stack.enter_context(_maintain_camera(panel, camera)) - stack.enter_context( - _applied_viewport_options(viewport_options, panel) - ) - stack.enter_context( - _applied_camera_options(camera_options, panel) - ) - stack.enter_context( - _applied_display_options(display_options) - ) - stack.enter_context( - _applied_viewport2_options(viewport2_options) - ) - stack.enter_context(_isolated_nodes(isolate, panel)) - stack.enter_context(_maintained_time()) - - output = cmds.playblast(**all_playblast_kwargs) - - return output - - -def snap(*args, **kwargs): - """Single frame playblast in an independent panel. - - The arguments of `capture` are all valid here as well, except for - `start_frame` and `end_frame`. - - Arguments: - frame (float, optional): The frame to snap. If not provided current - frame is used. - clipboard (bool, optional): Whether to add the output image to the - global clipboard. This allows to easily paste the snapped image - into another application, eg. into Photoshop. - - Keywords: - See `capture`. - - """ - - # capture single frame - frame = kwargs.pop('frame', cmds.currentTime(q=1)) - kwargs['start_frame'] = frame - kwargs['end_frame'] = frame - kwargs['frame'] = frame - - if not isinstance(frame, (int, float)): - raise TypeError("frame must be a single frame (integer or float). 
" - "Use `capture()` for sequences.") - - # override capture defaults - format = kwargs.pop('format', "image") - compression = kwargs.pop('compression', "png") - viewer = kwargs.pop('viewer', False) - raw_frame_numbers = kwargs.pop('raw_frame_numbers', True) - kwargs['compression'] = compression - kwargs['format'] = format - kwargs['viewer'] = viewer - kwargs['raw_frame_numbers'] = raw_frame_numbers - - # pop snap only keyword arguments - clipboard = kwargs.pop('clipboard', False) - - # perform capture - output = capture(*args, **kwargs) - - def replace(m): - """Substitute # with frame number""" - return str(int(frame)).zfill(len(m.group())) - - output = re.sub("#+", replace, output) - - # add image to clipboard - if clipboard: - _image_to_clipboard(output) - - return output - - -CameraOptions = { - "displayGateMask": False, - "displayResolution": False, - "displayFilmGate": False, - "displayFieldChart": False, - "displaySafeAction": False, - "displaySafeTitle": False, - "displayFilmPivot": False, - "displayFilmOrigin": False, - "overscan": 1.0, - "depthOfField": False, -} - -DisplayOptions = { - "displayGradient": True, - "background": (0.631, 0.631, 0.631), - "backgroundTop": (0.535, 0.617, 0.702), - "backgroundBottom": (0.052, 0.052, 0.052), -} - -# These display options require a different command to be queried and set -_DisplayOptionsRGB = set(["background", "backgroundTop", "backgroundBottom"]) - -ViewportOptions = { - # renderer - "rendererName": "vp2Renderer", - "fogging": False, - "fogMode": "linear", - "fogDensity": 1, - "fogStart": 1, - "fogEnd": 1, - "fogColor": (0, 0, 0, 0), - "shadows": False, - "displayTextures": True, - "displayLights": "default", - "useDefaultMaterial": False, - "wireframeOnShaded": False, - "displayAppearance": 'smoothShaded', - "selectionHiliteDisplay": False, - "headsUpDisplay": True, - # object display - "imagePlane": True, - "nurbsCurves": False, - "nurbsSurfaces": False, - "polymeshes": True, - "subdivSurfaces": False, - "planes": True, - "cameras": False, - "controlVertices": True, - "lights": False, - "grid": False, - "hulls": True, - "joints": False, - "ikHandles": False, - "deformers": False, - "dynamics": False, - "fluids": False, - "hairSystems": False, - "follicles": False, - "nCloths": False, - "nParticles": False, - "nRigids": False, - "dynamicConstraints": False, - "locators": False, - "manipulators": False, - "dimensions": False, - "handles": False, - "pivots": False, - "textures": False, - "strokes": False -} - -Viewport2Options = { - "consolidateWorld": True, - "enableTextureMaxRes": False, - "bumpBakeResolution": 64, - "colorBakeResolution": 64, - "floatingPointRTEnable": True, - "floatingPointRTFormat": 1, - "gammaCorrectionEnable": False, - "gammaValue": 2.2, - "lineAAEnable": False, - "maxHardwareLights": 8, - "motionBlurEnable": False, - "motionBlurSampleCount": 8, - "motionBlurShutterOpenFraction": 0.2, - "motionBlurType": 0, - "multiSampleCount": 8, - "multiSampleEnable": False, - "singleSidedLighting": False, - "ssaoEnable": False, - "ssaoAmount": 1.0, - "ssaoFilterRadius": 16, - "ssaoRadius": 16, - "ssaoSamples": 16, - "textureMaxResolution": 4096, - "threadDGEvaluation": False, - "transparencyAlgorithm": 1, - "transparencyQuality": 0.33, - "useMaximumHardwareLights": True, - "vertexAnimationCache": 0, - "renderDepthOfField": 0 -} - - -def apply_view(panel, **options): - """Apply options to panel""" - - camera = cmds.modelPanel(panel, camera=True, query=True) - - # Display options - display_options = 
options.get("display_options", {}) - _iteritems = getattr(display_options, "iteritems", display_options.items) - for key, value in _iteritems(): - if key in _DisplayOptionsRGB: - cmds.displayRGBColor(key, *value) - else: - cmds.displayPref(**{key: value}) - - # Camera options - camera_options = options.get("camera_options", {}) - _iteritems = getattr(camera_options, "iteritems", camera_options.items) - for key, value in _iteritems: - _safe_setAttr("{0}.{1}".format(camera, key), value) - - # Viewport options - viewport_options = options.get("viewport_options", {}) - _iteritems = getattr(viewport_options, "iteritems", viewport_options.items) - for key, value in _iteritems(): - cmds.modelEditor(panel, edit=True, **{key: value}) - - viewport2_options = options.get("viewport2_options", {}) - _iteritems = getattr( - viewport2_options, "iteritems", viewport2_options.items - ) - for key, value in _iteritems(): - attr = "hardwareRenderingGlobals.{0}".format(key) - _safe_setAttr(attr, value) - - -def parse_active_panel(): - """Parse the active modelPanel. - - Raises - RuntimeError: When no active modelPanel an error is raised. - - Returns: - str: Name of modelPanel - - """ - - panel = cmds.getPanel(withFocus=True) - - # This happens when last focus was on panel - # that got deleted (e.g. `capture()` then `parse_active_view()`) - if not panel or "modelPanel" not in panel: - raise RuntimeError("No active model panel found") - - return panel - - -def parse_active_view(): - """Parse the current settings from the active view""" - panel = parse_active_panel() - return parse_view(panel) - - -def parse_view(panel): - """Parse the scene, panel and camera for their current settings - - Example: - >>> parse_view("modelPanel1") - - Arguments: - panel (str): Name of modelPanel - - """ - - camera = cmds.modelPanel(panel, query=True, camera=True) - - # Display options - display_options = {} - for key in DisplayOptions: - if key in _DisplayOptionsRGB: - display_options[key] = cmds.displayRGBColor(key, query=True) - else: - display_options[key] = cmds.displayPref(query=True, **{key: True}) - - # Camera options - camera_options = {} - for key in CameraOptions: - camera_options[key] = cmds.getAttr("{0}.{1}".format(camera, key)) - - # Viewport options - viewport_options = {} - - # capture plugin display filters first to ensure we never override - # built-in arguments if ever possible a plugin has similarly named - # plugin display filters (which it shouldn't!) - plugins = cmds.pluginDisplayFilter(query=True, listFilters=True) - for plugin in plugins: - plugin = str(plugin) # unicode->str for simplicity of the dict - state = cmds.modelEditor(panel, query=True, queryPluginObjects=plugin) - viewport_options[plugin] = state - - for key in ViewportOptions: - viewport_options[key] = cmds.modelEditor( - panel, query=True, **{key: True}) - - viewport2_options = {} - for key in Viewport2Options.keys(): - attr = "hardwareRenderingGlobals.{0}".format(key) - try: - viewport2_options[key] = cmds.getAttr(attr) - except ValueError: - continue - - return { - "camera": camera, - "display_options": display_options, - "camera_options": camera_options, - "viewport_options": viewport_options, - "viewport2_options": viewport2_options - } - - -def parse_active_scene(): - """Parse active scene for arguments for capture() - - *Resolution taken from render settings. 
-def parse_active_scene():
-    """Parse active scene for arguments for capture()
-
-    *Resolution taken from render settings.
-
-    """
-
-    time_control = mel.eval("$gPlayBackSlider = $gPlayBackSlider")
-
-    return {
-        "start_frame": cmds.playbackOptions(minTime=True, query=True),
-        "end_frame": cmds.playbackOptions(maxTime=True, query=True),
-        "width": cmds.getAttr("defaultResolution.width"),
-        "height": cmds.getAttr("defaultResolution.height"),
-        "compression": cmds.optionVar(query="playblastCompression"),
-        "filename": (cmds.optionVar(query="playblastFile")
-                     if cmds.optionVar(query="playblastSaveToFile") else None),
-        "format": cmds.optionVar(query="playblastFormat"),
-        "off_screen": (True if cmds.optionVar(query="playblastOffscreen")
-                       else False),
-        "show_ornaments": (True if cmds.optionVar(query="playblastShowOrnaments")
-                           else False),
-        "quality": cmds.optionVar(query="playblastQuality"),
-        "sound": cmds.timeControl(time_control, q=True, sound=True) or None
-    }
-
-
-def apply_scene(**options):
-    """Apply options to the scene
-
-    Example:
-        >>> apply_scene(start_frame=1009)
-
-    Arguments:
-        options (dict): Scene options, as returned by `parse_active_scene()`
-
-    """
-
-    if "start_frame" in options:
-        cmds.playbackOptions(minTime=options["start_frame"])
-
-    if "end_frame" in options:
-        cmds.playbackOptions(maxTime=options["end_frame"])
-
-    if "width" in options:
-        _safe_setAttr("defaultResolution.width", options["width"])
-
-    if "height" in options:
-        _safe_setAttr("defaultResolution.height", options["height"])
-
-    if "compression" in options:
-        cmds.optionVar(
-            stringValue=["playblastCompression", options["compression"]])
-
-    if "filename" in options:
-        cmds.optionVar(
-            stringValue=["playblastFile", options["filename"]])
-
-    if "format" in options:
-        cmds.optionVar(
-            stringValue=["playblastFormat", options["format"]])
-
-    if "off_screen" in options:
-        cmds.optionVar(
-            intValue=["playblastOffscreen", options["off_screen"]])
-
-    if "show_ornaments" in options:
-        cmds.optionVar(
-            intValue=["playblastShowOrnaments", options["show_ornaments"]])
-
-    if "quality" in options:
-        cmds.optionVar(
-            floatValue=["playblastQuality", options["quality"]])
-
-
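# Usage sketch (illustration only, not part of the removed file): the two
# functions above pair up as a save/restore mechanism for scene-level
# playblast state, e.g. around a temporary settings change:
#
#     state = parse_active_scene()
#     try:
#         apply_scene(width=1920, height=1080, off_screen=True)
#         ...  # run a capture with the temporary settings
#     finally:
#         # drop None values (e.g. an unset playblast filename) when restoring
#         apply_scene(**{k: v for k, v in state.items() if v is not None})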
-@contextlib.contextmanager
-def _applied_view(panel, **options):
-    """Apply options to panel"""
-
-    original = parse_view(panel)
-    apply_view(panel, **options)
-
-    try:
-        yield
-    finally:
-        apply_view(panel, **original)
-
-
-@contextlib.contextmanager
-def _independent_panel(width, height, off_screen=False):
-    """Create capture-window context without decorations
-
-    Arguments:
-        width (int): Width of panel
-        height (int): Height of panel
-
-    Example:
-        >>> with _independent_panel(800, 600):
-        ...     capture()
-
-    """
-
-    # center panel on screen
-    screen_width, screen_height = _get_screen_size()
-    topLeft = [int((screen_height-height)/2.0),
-               int((screen_width-width)/2.0)]
-
-    window = cmds.window(width=width,
-                         height=height,
-                         topLeftCorner=topLeft,
-                         menuBarVisible=False,
-                         titleBar=False,
-                         visible=not off_screen)
-    cmds.paneLayout()
-    panel = cmds.modelPanel(menuBarVisible=False,
-                            label='CapturePanel')
-
-    # Hide icons under panel menus
-    bar_layout = cmds.modelPanel(panel, q=True, barLayout=True)
-    cmds.frameLayout(bar_layout, edit=True, collapse=True)
-
-    if not off_screen:
-        cmds.showWindow(window)
-
-    # Set the modelEditor of the modelPanel as the active view so it takes
-    # the playback focus. This does seem redundant with the `refresh` below.
-    editor = cmds.modelPanel(panel, query=True, modelEditor=True)
-    cmds.modelEditor(editor, edit=True, activeView=True)
-
-    # Force a draw refresh of Maya so it keeps focus on the new panel.
-    # This focus is required to force preview playback in the independent panel.
-    cmds.refresh(force=True)
-
-    try:
-        yield panel
-    finally:
-        # Delete the panel to fix memory leak (about 5 mb per capture)
-        cmds.deleteUI(panel, panel=True)
-        cmds.deleteUI(window)
-
-
-@contextlib.contextmanager
-def _applied_camera_options(options, panel):
-    """Context manager for applying `options` to `camera`"""
-
-    camera = cmds.modelPanel(panel, query=True, camera=True)
-    options = dict(CameraOptions, **(options or {}))
-
-    old_options = dict()
-    for opt in options.copy():
-        try:
-            old_options[opt] = cmds.getAttr(camera + "." + opt)
-        except Exception:
-            sys.stderr.write("Could not get camera attribute "
-                             "for capture: %s" % opt)
-            options.pop(opt)
-
-    _iteritems = getattr(options, "iteritems", options.items)
-    for opt, value in _iteritems():
-        if cmds.getAttr(camera + "." + opt, lock=True):
-            continue
-        else:
-            _safe_setAttr(camera + "." + opt, value)
-
-    try:
-        yield
-    finally:
-        if old_options:
-            _iteritems = getattr(old_options, "iteritems", old_options.items)
-            for opt, value in _iteritems():
-                if cmds.getAttr(camera + "." + opt, lock=True):
-                    continue
-                else:
-                    _safe_setAttr(camera + "." + opt, value)
-
-
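# Usage sketch (illustration only, not part of the removed file): capture()
# composes these context managers so that every panel, camera and viewport
# tweak is reverted even if playblasting fails, roughly like this (the
# `camera_options`/`viewport_options` dicts are as produced by parse_view()):
#
#     with _independent_panel(1920, 1080, off_screen=True) as panel:
#         with _applied_camera_options(camera_options, panel), \
#              _applied_viewport_options(viewport_options, panel), \
#              _maintained_time():
#             cmds.playblast(...)  # render through the throwaway panel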
-@contextlib.contextmanager
-def _applied_display_options(options):
-    """Context manager for setting background color display options."""
-
-    options = dict(DisplayOptions, **(options or {}))
-
-    colors = ['background', 'backgroundTop', 'backgroundBottom']
-    preferences = ['displayGradient']
-
-    # Store current settings
-    original = {}
-    for color in colors:
-        original[color] = cmds.displayRGBColor(color, query=True) or []
-
-    for preference in preferences:
-        original[preference] = cmds.displayPref(
-            query=True, **{preference: True})
-
-    # Apply settings
-    for color in colors:
-        value = options[color]
-        cmds.displayRGBColor(color, *value)
-
-    for preference in preferences:
-        value = options[preference]
-        cmds.displayPref(**{preference: value})
-
-    try:
-        yield
-
-    finally:
-        # Restore original settings
-        for color in colors:
-            cmds.displayRGBColor(color, *original[color])
-        for preference in preferences:
-            cmds.displayPref(**{preference: original[preference]})
-
-
-@contextlib.contextmanager
-def _applied_viewport_options(options, panel):
-    """Context manager for applying `options` to `panel`"""
-
-    options = dict(ViewportOptions, **(options or {}))
-    plugin_options = options.pop("pluginObjects", {})
-
-    # BUGFIX Maya 2020: some keys in the viewport options dict may not be
-    # unicode. This is a local OpenPype edit to capture.py for issue #4730
-    # TODO: Remove when dropping Maya 2020 compatibility
-    if int(cmds.about(version=True)) <= 2020:
-        options = {
-            str(key): value for key, value in options.items()
-        }
-        plugin_options = {
-            str(key): value for key, value in plugin_options.items()
-        }
-
-    # Backwards compatibility for `pluginObjects` flattened into `options`:
-    # separate the plugin display filter options since they need to
-    # be set differently (see #55)
-    plugins = set(cmds.pluginDisplayFilter(query=True, listFilters=True))
-    for plugin in plugins:
-        if plugin in options:
-            plugin_options[plugin] = options.pop(plugin)
-
-    # default options
-    try:
-        cmds.modelEditor(panel, edit=True, **options)
-    except TypeError:
-        # Try to set as much of the state as possible by setting the options
-        # one by one. This way we can also report the failing key values
-        # explicitly.
-        for key, value in options.items():
-            try:
-                cmds.modelEditor(panel, edit=True, **{key: value})
-            except TypeError:
-                logger.error("Failed to apply option '{}': {}".format(key,
                                                                       value))
-
-    # plugin display filter options
-    for plugin, state in plugin_options.items():
-        cmds.modelEditor(panel, edit=True, pluginObjects=(plugin, state))
-
-    yield
-
-
-@contextlib.contextmanager
-def _applied_viewport2_options(options):
-    """Context manager for setting viewport 2.0 options.
-
-    These options are applied by setting attributes on the
-    "hardwareRenderingGlobals" node.
-
-    """
-
-    options = dict(Viewport2Options, **(options or {}))
-
-    # Store current settings
-    original = {}
-    for opt in options.copy():
-        try:
-            original[opt] = cmds.getAttr("hardwareRenderingGlobals." + opt)
-        except ValueError:
-            options.pop(opt)
-
-    # Apply settings
-    _iteritems = getattr(options, "iteritems", options.items)
-    for opt, value in _iteritems():
-        _safe_setAttr("hardwareRenderingGlobals." + opt, value)
-
-    try:
-        yield
-    finally:
-        # Restore previous settings
-        _iteritems = getattr(original, "iteritems", original.items)
-        for opt, value in _iteritems():
-            _safe_setAttr("hardwareRenderingGlobals." + opt, value)
-
-
-@contextlib.contextmanager
-def _isolated_nodes(nodes, panel):
-    """Context manager for isolating `nodes` in `panel`"""
-
-    if nodes is not None:
-        cmds.isolateSelect(panel, state=True)
-        for obj in nodes:
-            cmds.isolateSelect(panel, addDagObject=obj)
-    yield
-
-
-@contextlib.contextmanager
-def _maintained_time():
-    """Context manager for preserving (resetting) the time after the context"""
-
-    current_time = cmds.currentTime(query=1)
-    try:
-        yield
-    finally:
-        cmds.currentTime(current_time)
-
-
-@contextlib.contextmanager
-def _maintain_camera(panel, camera):
-    state = {}
-
-    if not _in_standalone():
-        cmds.lookThru(panel, camera)
-    else:
-        state = dict((camera, cmds.getAttr(camera + ".rnd"))
-                     for camera in cmds.ls(type="camera"))
-        _safe_setAttr(camera + ".rnd", True)
-
-    try:
-        yield
-    finally:
-        _iteritems = getattr(state, "iteritems", state.items)
-        for camera, renderable in _iteritems():
-            _safe_setAttr(camera + ".rnd", renderable)
-
-
-@contextlib.contextmanager
-def _disabled_inview_messages():
-    """Disable in-view help messages during the context"""
-    original = cmds.optionVar(q="inViewMessageEnable")
-    cmds.optionVar(iv=("inViewMessageEnable", 0))
-    try:
-        yield
-    finally:
-        cmds.optionVar(iv=("inViewMessageEnable", original))
-
-
-def _image_to_clipboard(path):
-    """Copies the image at path to the system's global clipboard."""
-    if _in_standalone():
-        raise Exception("Cannot copy to clipboard from Maya Standalone")
-
-    image = QtGui.QImage(path)
-    clipboard = QtWidgets.QApplication.clipboard()
-    clipboard.setImage(image, mode=QtGui.QClipboard.Clipboard)
-
-
-def _get_screen_size():
-    """Return available screen size without space occupied by taskbar"""
-    if _in_standalone():
-        return [0, 0]
-
-    try:
-        rect = QtWidgets.QDesktopWidget().screenGeometry(-1)
-    except AttributeError:
-        # in Qt6 it is a different call
-        rect = QtWidgets.QApplication.primaryScreen().availableGeometry()
-    return [rect.width(), rect.height()]
-
-
-def _in_standalone():
-    return not hasattr(cmds, "about") or cmds.about(batch=True)
-
-
-def _safe_setAttr(*args, **kwargs):
-    """Wrapper to handle failures when an attribute is locked.
-
-    Temporary hotfix until a better approach (store value, unlock, set new,
-    return old, lock again) is implemented.
- """ - try: - cmds.setAttr(*args, **kwargs) - except RuntimeError: - print("Cannot setAttr {}!".format(args)) - - -# -------------------------------- -# -# Apply version specific settings -# -# -------------------------------- - -version = mel.eval("getApplicationVersionAsFloat") -if version > 2015: - Viewport2Options.update({ - "hwFogAlpha": 1.0, - "hwFogFalloff": 0, - "hwFogDensity": 0.1, - "hwFogEnable": False, - "holdOutDetailMode": 1, - "hwFogEnd": 100.0, - "holdOutMode": True, - "hwFogColorR": 0.5, - "hwFogColorG": 0.5, - "hwFogColorB": 0.5, - "hwFogStart": 0.0, - }) - ViewportOptions.update({ - "motionTrails": False - }) diff --git a/server_addon/maya/client/ayon_maya/version.py b/server_addon/maya/client/ayon_maya/version.py deleted file mode 100644 index 80af287e97..0000000000 --- a/server_addon/maya/client/ayon_maya/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'maya' version.""" -__version__ = "0.2.8" diff --git a/server_addon/maya/package.py b/server_addon/maya/package.py deleted file mode 100644 index b2d0622493..0000000000 --- a/server_addon/maya/package.py +++ /dev/null @@ -1,8 +0,0 @@ -name = "maya" -title = "Maya" -version = "0.2.8" -client_dir = "ayon_maya" - -ayon_required_addons = { - "core": ">0.3.2", -} diff --git a/server_addon/maya/server/__init__.py b/server_addon/maya/server/__init__.py deleted file mode 100644 index 6dda2cdd77..0000000000 --- a/server_addon/maya/server/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Maya Addon Module""" -from ayon_server.addons import BaseServerAddon - -from .settings.main import MayaSettings, DEFAULT_MAYA_SETTING - - -class MayaAddon(BaseServerAddon): - settings_model = MayaSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_MAYA_SETTING) diff --git a/server_addon/maya/server/settings/__init__.py b/server_addon/maya/server/settings/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/maya/server/settings/creators.py b/server_addon/maya/server/settings/creators.py deleted file mode 100644 index ede33b6eec..0000000000 --- a/server_addon/maya/server/settings/creators.py +++ /dev/null @@ -1,434 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - task_types_enum, -) - - -class CreateLookModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - make_tx: bool = SettingsField(title="Make tx files") - rs_tex: bool = SettingsField(title="Make Redshift texture files") - default_variants: list[str] = SettingsField( - default_factory=list, title="Default Products" - ) - - -class BasicCreatorModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Products" - ) - - -class CreateUnrealStaticMeshModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Products" - ) - static_mesh_prefix: str = SettingsField("S", title="Static Mesh Prefix") - collision_prefixes: list[str] = SettingsField( - default_factory=list, - title="Collision Prefixes" - ) - - -class CreateUnrealSkeletalMeshModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - default_variants: list[str] = SettingsField( - default_factory=list, title="Default Products") - joint_hints: str = SettingsField("jnt_org", title="Joint root 
hint") - - -class CreateMultiverseLookModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - publish_mip_map: bool = SettingsField(title="publish_mip_map") - - -class BasicExportMeshModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - write_color_sets: bool = SettingsField(title="Write Color Sets") - write_face_sets: bool = SettingsField(title="Write Face Sets") - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Products" - ) - - -class CreateAnimationModel(BaseSettingsModel): - write_color_sets: bool = SettingsField(title="Write Color Sets") - write_face_sets: bool = SettingsField(title="Write Face Sets") - include_parent_hierarchy: bool = SettingsField( - title="Include Parent Hierarchy") - include_user_defined_attributes: bool = SettingsField( - title="Include User Defined Attributes") - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Products" - ) - - -class CreatePointCacheModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - write_color_sets: bool = SettingsField(title="Write Color Sets") - write_face_sets: bool = SettingsField(title="Write Face Sets") - include_user_defined_attributes: bool = SettingsField( - title="Include User Defined Attributes" - ) - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Products" - ) - - -class CreateProxyAlembicModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - write_color_sets: bool = SettingsField(title="Write Color Sets") - write_face_sets: bool = SettingsField(title="Write Face Sets") - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Products" - ) - - -class CreateAssModel(BasicCreatorModel): - expandProcedurals: bool = SettingsField(title="Expand Procedurals") - motionBlur: bool = SettingsField(title="Motion Blur") - motionBlurKeys: int = SettingsField(2, title="Motion Blur Keys") - motionBlurLength: float = SettingsField(0.5, title="Motion Blur Length") - maskOptions: bool = SettingsField(title="Mask Options") - maskCamera: bool = SettingsField(title="Mask Camera") - maskLight: bool = SettingsField(title="Mask Light") - maskShape: bool = SettingsField(title="Mask Shape") - maskShader: bool = SettingsField(title="Mask Shader") - maskOverride: bool = SettingsField(title="Mask Override") - maskDriver: bool = SettingsField(title="Mask Driver") - maskFilter: bool = SettingsField(title="Mask Filter") - maskColor_manager: bool = SettingsField(title="Mask Color Manager") - maskOperator: bool = SettingsField(title="Mask Operator") - - -class CreateReviewModel(BasicCreatorModel): - useMayaTimeline: bool = SettingsField( - title="Use Maya Timeline for Frame Range." 
- ) - - -class CreateVrayProxyModel(BaseSettingsModel): - enabled: bool = SettingsField(True) - vrmesh: bool = SettingsField(title="VrMesh") - alembic: bool = SettingsField(title="Alembic") - default_variants: list[str] = SettingsField( - default_factory=list, title="Default Products") - - -class CreateSetDressModel(BaseSettingsModel): - enabled: bool = SettingsField(True) - exactSetMembersOnly: bool = SettingsField(title="Exact Set Members Only") - shader: bool = SettingsField(title="Include shader") - default_variants: list[str] = SettingsField( - default_factory=list, title="Default Products") - - -class CreateMultishotLayout(BasicCreatorModel): - shotParent: str = SettingsField(title="Shot Parent Folder") - groupLoadedAssets: bool = SettingsField(title="Group Loaded Assets") - task_type: list[str] = SettingsField( - title="Task types", - enum_resolver=task_types_enum - ) - task_name: str = SettingsField(title="Task name (regex)") - - -class CreatorsModel(BaseSettingsModel): - CreateLook: CreateLookModel = SettingsField( - default_factory=CreateLookModel, - title="Create Look" - ) - CreateRender: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Render" - ) - # "-" is not compatible in the new model - CreateUnrealStaticMesh: CreateUnrealStaticMeshModel = SettingsField( - default_factory=CreateUnrealStaticMeshModel, - title="Create Unreal_Static Mesh" - ) - # "-" is not compatible in the new model - CreateUnrealSkeletalMesh: CreateUnrealSkeletalMeshModel = SettingsField( - default_factory=CreateUnrealSkeletalMeshModel, - title="Create Unreal_Skeletal Mesh" - ) - CreateMultiverseLook: CreateMultiverseLookModel = SettingsField( - default_factory=CreateMultiverseLookModel, - title="Create Multiverse Look" - ) - CreateAnimation: CreateAnimationModel = SettingsField( - default_factory=CreateAnimationModel, - title="Create Animation" - ) - CreateModel: BasicExportMeshModel = SettingsField( - default_factory=BasicExportMeshModel, - title="Create Model" - ) - CreatePointCache: CreatePointCacheModel = SettingsField( - default_factory=CreatePointCacheModel, - title="Create Point Cache" - ) - CreateProxyAlembic: CreateProxyAlembicModel = SettingsField( - default_factory=CreateProxyAlembicModel, - title="Create Proxy Alembic" - ) - CreateMultiverseUsd: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Multiverse USD" - ) - CreateMultiverseUsdComp: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Multiverse USD Composition" - ) - CreateMultiverseUsdOver: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Multiverse USD Override" - ) - CreateAss: CreateAssModel = SettingsField( - default_factory=CreateAssModel, - title="Create Ass" - ) - CreateAssembly: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Assembly" - ) - CreateCamera: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Camera" - ) - CreateLayout: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Layout" - ) - CreateMayaScene: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Maya Scene" - ) - CreateRenderSetup: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Render Setup" - ) - CreateReview: CreateReviewModel = SettingsField( - default_factory=CreateReviewModel, - title="Create Review" - ) - CreateRig: 
BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Rig" - ) - CreateSetDress: CreateSetDressModel = SettingsField( - default_factory=CreateSetDressModel, - title="Create Set Dress" - ) - CreateVrayProxy: CreateVrayProxyModel = SettingsField( - default_factory=CreateVrayProxyModel, - title="Create VRay Proxy" - ) - CreateVRayScene: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create VRay Scene" - ) - CreateYetiRig: BasicCreatorModel = SettingsField( - default_factory=BasicCreatorModel, - title="Create Yeti Rig" - ) - - -DEFAULT_CREATORS_SETTINGS = { - "CreateLook": { - "enabled": True, - "make_tx": True, - "rs_tex": False, - "default_variants": [ - "Main" - ] - }, - "CreateRender": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateUnrealStaticMesh": { - "enabled": True, - "default_variants": [ - "", - "_Main" - ], - "static_mesh_prefix": "S", - "collision_prefixes": [ - "UBX", - "UCP", - "USP", - "UCX" - ] - }, - "CreateUnrealSkeletalMesh": { - "enabled": True, - "default_variants": [ - "Main", - ], - "joint_hints": "jnt_org" - }, - "CreateMultiverseLook": { - "enabled": True, - "publish_mip_map": True - }, - "CreateAnimation": { - "write_color_sets": False, - "write_face_sets": False, - "include_parent_hierarchy": False, - "include_user_defined_attributes": False, - "default_variants": [ - "Main" - ] - }, - "CreateModel": { - "enabled": True, - "write_color_sets": False, - "write_face_sets": False, - "default_variants": [ - "Main", - "Proxy", - "Sculpt" - ] - }, - "CreatePointCache": { - "enabled": True, - "write_color_sets": False, - "write_face_sets": False, - "include_user_defined_attributes": False, - "default_variants": [ - "Main" - ] - }, - "CreateProxyAlembic": { - "enabled": True, - "write_color_sets": False, - "write_face_sets": False, - "default_variants": [ - "Main" - ] - }, - "CreateMultiverseUsd": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateMultiverseUsdComp": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateMultiverseUsdOver": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateAss": { - "enabled": True, - "default_variants": [ - "Main" - ], - "expandProcedurals": False, - "motionBlur": True, - "motionBlurKeys": 2, - "motionBlurLength": 0.5, - "maskOptions": False, - "maskCamera": False, - "maskLight": False, - "maskShape": False, - "maskShader": False, - "maskOverride": False, - "maskDriver": False, - "maskFilter": False, - "maskColor_manager": False, - "maskOperator": False - }, - "CreateAssembly": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateCamera": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateLayout": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateMayaScene": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateRenderSetup": { - "enabled": True, - "default_variants": [ - "Main" - ] - }, - "CreateReview": { - "enabled": True, - "default_variants": [ - "Main" - ], - "useMayaTimeline": True - }, - "CreateRig": { - "enabled": True, - "default_variants": [ - "Main", - "Sim", - "Cloth" - ] - }, - "CreateSetDress": { - "enabled": True, - "exactSetMembersOnly": True, - "shader": True, - "default_variants": [ - "Main", - "Anim" - ] - }, - "CreateVrayProxy": { - "enabled": True, - "vrmesh": True, - "alembic": True, - "default_variants": [ - "Main" - ] - }, - "CreateVRayScene": { - "enabled": True, - "default_variants": [ 
- "Main" - ] - }, - "CreateYetiRig": { - "enabled": True, - "default_variants": [ - "Main" - ] - } -} diff --git a/server_addon/maya/server/settings/explicit_plugins_loading.py b/server_addon/maya/server/settings/explicit_plugins_loading.py deleted file mode 100644 index cda5154b90..0000000000 --- a/server_addon/maya/server/settings/explicit_plugins_loading.py +++ /dev/null @@ -1,427 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class PluginsModel(BaseSettingsModel): - _layout = "expanded" - enabled: bool = SettingsField(title="Enabled") - name: str = SettingsField("", title="Name") - - -class ExplicitPluginsLoadingModel(BaseSettingsModel): - """Maya Explicit Plugins Loading.""" - _isGroup: bool = True - enabled: bool = SettingsField(title="enabled") - plugins_to_load: list[PluginsModel] = SettingsField( - default_factory=list, title="Plugins To Load" - ) - - -DEFAULT_EXPLITCIT_PLUGINS_LOADING_SETTINGS = { - "enabled": False, - "plugins_to_load": [ - { - "enabled": False, - "name": "AbcBullet" - }, - { - "enabled": True, - "name": "AbcExport" - }, - { - "enabled": True, - "name": "AbcImport" - }, - { - "enabled": False, - "name": "animImportExport" - }, - { - "enabled": False, - "name": "ArubaTessellator" - }, - { - "enabled": False, - "name": "ATFPlugin" - }, - { - "enabled": False, - "name": "atomImportExport" - }, - { - "enabled": False, - "name": "AutodeskPacketFile" - }, - { - "enabled": False, - "name": "autoLoader" - }, - { - "enabled": False, - "name": "bifmeshio" - }, - { - "enabled": False, - "name": "bifrostGraph" - }, - { - "enabled": False, - "name": "bifrostshellnode" - }, - { - "enabled": False, - "name": "bifrostvisplugin" - }, - { - "enabled": False, - "name": "blast2Cmd" - }, - { - "enabled": False, - "name": "bluePencil" - }, - { - "enabled": False, - "name": "Boss" - }, - { - "enabled": False, - "name": "bullet" - }, - { - "enabled": True, - "name": "cacheEvaluator" - }, - { - "enabled": False, - "name": "cgfxShader" - }, - { - "enabled": False, - "name": "cleanPerFaceAssignment" - }, - { - "enabled": False, - "name": "clearcoat" - }, - { - "enabled": False, - "name": "convertToComponentTags" - }, - { - "enabled": False, - "name": "curveWarp" - }, - { - "enabled": False, - "name": "ddsFloatReader" - }, - { - "enabled": True, - "name": "deformerEvaluator" - }, - { - "enabled": False, - "name": "dgProfiler" - }, - { - "enabled": False, - "name": "drawUfe" - }, - { - "enabled": False, - "name": "dx11Shader" - }, - { - "enabled": False, - "name": "fbxmaya" - }, - { - "enabled": False, - "name": "fltTranslator" - }, - { - "enabled": False, - "name": "freeze" - }, - { - "enabled": False, - "name": "Fur" - }, - { - "enabled": False, - "name": "gameFbxExporter" - }, - { - "enabled": False, - "name": "gameInputDevice" - }, - { - "enabled": False, - "name": "GamePipeline" - }, - { - "enabled": False, - "name": "gameVertexCount" - }, - { - "enabled": False, - "name": "geometryReport" - }, - { - "enabled": False, - "name": "geometryTools" - }, - { - "enabled": False, - "name": "glslShader" - }, - { - "enabled": True, - "name": "GPUBuiltInDeformer" - }, - { - "enabled": False, - "name": "gpuCache" - }, - { - "enabled": False, - "name": "hairPhysicalShader" - }, - { - "enabled": False, - "name": "ik2Bsolver" - }, - { - "enabled": False, - "name": "ikSpringSolver" - }, - { - "enabled": False, - "name": "invertShape" - }, - { - "enabled": False, - "name": "lges" - }, - { - "enabled": False, - "name": "lookdevKit" - }, - { - "enabled": False, - "name": 
"MASH" - }, - { - "enabled": False, - "name": "matrixNodes" - }, - { - "enabled": False, - "name": "mayaCharacterization" - }, - { - "enabled": False, - "name": "mayaHIK" - }, - { - "enabled": False, - "name": "MayaMuscle" - }, - { - "enabled": False, - "name": "mayaUsdPlugin" - }, - { - "enabled": False, - "name": "mayaVnnPlugin" - }, - { - "enabled": False, - "name": "melProfiler" - }, - { - "enabled": False, - "name": "meshReorder" - }, - { - "enabled": True, - "name": "modelingToolkit" - }, - { - "enabled": False, - "name": "mtoa" - }, - { - "enabled": False, - "name": "mtoh" - }, - { - "enabled": False, - "name": "nearestPointOnMesh" - }, - { - "enabled": True, - "name": "objExport" - }, - { - "enabled": False, - "name": "OneClick" - }, - { - "enabled": False, - "name": "OpenEXRLoader" - }, - { - "enabled": False, - "name": "pgYetiMaya" - }, - { - "enabled": False, - "name": "pgyetiVrayMaya" - }, - { - "enabled": False, - "name": "polyBoolean" - }, - { - "enabled": False, - "name": "poseInterpolator" - }, - { - "enabled": False, - "name": "quatNodes" - }, - { - "enabled": False, - "name": "randomizerDevice" - }, - { - "enabled": False, - "name": "redshift4maya" - }, - { - "enabled": True, - "name": "renderSetup" - }, - { - "enabled": False, - "name": "retargeterNodes" - }, - { - "enabled": False, - "name": "RokokoMotionLibrary" - }, - { - "enabled": False, - "name": "rotateHelper" - }, - { - "enabled": False, - "name": "sceneAssembly" - }, - { - "enabled": False, - "name": "shaderFXPlugin" - }, - { - "enabled": False, - "name": "shotCamera" - }, - { - "enabled": False, - "name": "snapTransform" - }, - { - "enabled": False, - "name": "stage" - }, - { - "enabled": True, - "name": "stereoCamera" - }, - { - "enabled": False, - "name": "stlTranslator" - }, - { - "enabled": False, - "name": "studioImport" - }, - { - "enabled": False, - "name": "Substance" - }, - { - "enabled": False, - "name": "substancelink" - }, - { - "enabled": False, - "name": "substancemaya" - }, - { - "enabled": False, - "name": "substanceworkflow" - }, - { - "enabled": False, - "name": "svgFileTranslator" - }, - { - "enabled": False, - "name": "sweep" - }, - { - "enabled": False, - "name": "testify" - }, - { - "enabled": False, - "name": "tiffFloatReader" - }, - { - "enabled": False, - "name": "timeSliderBookmark" - }, - { - "enabled": False, - "name": "Turtle" - }, - { - "enabled": False, - "name": "Type" - }, - { - "enabled": False, - "name": "udpDevice" - }, - { - "enabled": False, - "name": "ufeSupport" - }, - { - "enabled": False, - "name": "Unfold3D" - }, - { - "enabled": False, - "name": "VectorRender" - }, - { - "enabled": False, - "name": "vrayformaya" - }, - { - "enabled": False, - "name": "vrayvolumegrid" - }, - { - "enabled": False, - "name": "xgenToolkit" - }, - { - "enabled": False, - "name": "xgenVray" - } - ] -} diff --git a/server_addon/maya/server/settings/imageio.py b/server_addon/maya/server/settings/imageio.py deleted file mode 100644 index 521f89aea7..0000000000 --- a/server_addon/maya/server/settings/imageio.py +++ /dev/null @@ -1,150 +0,0 @@ -"""Providing models and setting values for image IO in Maya. - -Note: Names were changed to get rid of the versions in class names. -""" -from pydantic import validator - -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - ensure_unique_names, -) - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. 
Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class ColorManagementPreferenceV2Model(BaseSettingsModel): - """Color Management Preference v2 (Maya 2022+). - - Please migrate all to 'imageio/workfile' and enable it. - """ - - enabled: bool = SettingsField( - True, title="Use Color Management Preference v2" - ) - - renderSpace: str = SettingsField(title="Rendering Space") - displayName: str = SettingsField(title="Display") - viewName: str = SettingsField(title="View") - - -class ColorManagementPreferenceModel(BaseSettingsModel): - """Color Management Preference (legacy).""" - - renderSpace: str = SettingsField(title="Rendering Space") - viewTransform: str = SettingsField(title="Viewer Transform ") - - -class WorkfileImageIOModel(BaseSettingsModel): - enabled: bool = SettingsField(True, title="Enabled") - renderSpace: str = SettingsField(title="Rendering Space") - displayName: str = SettingsField(title="Display") - viewName: str = SettingsField(title="View") - - -class ImageIOSettings(BaseSettingsModel): - """Maya color management project settings. - - Todo: What to do with color management preferences version? 
- """ - - _isGroup: bool = True - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) - workfile: WorkfileImageIOModel = SettingsField( - default_factory=WorkfileImageIOModel, - title="Workfile" - ) - # Deprecated - colorManagementPreference_v2: ColorManagementPreferenceV2Model = ( - SettingsField( - default_factory=ColorManagementPreferenceV2Model, - title="DEPRECATED: Color Management Preference v2 (Maya 2022+)" - ) - ) - colorManagementPreference: ColorManagementPreferenceModel = SettingsField( - default_factory=ColorManagementPreferenceModel, - title="DEPRECATED: Color Management Preference (legacy)" - ) - - -DEFAULT_IMAGEIO_SETTINGS = { - "activate_host_color_management": True, - "ocio_config": { - "override_global_config": False, - "filepath": [] - }, - "file_rules": { - "activate_host_rules": False, - "rules": [] - }, - "workfile": { - "enabled": False, - "renderSpace": "ACES - ACEScg", - "displayName": "ACES", - "viewName": "sRGB" - }, - "colorManagementPreference_v2": { - "enabled": True, - "renderSpace": "ACEScg", - "displayName": "sRGB", - "viewName": "ACES 1.0 SDR-video" - }, - "colorManagementPreference": { - "renderSpace": "scene-linear Rec 709/sRGB", - "viewTransform": "sRGB gamma" - } -} diff --git a/server_addon/maya/server/settings/include_handles.py b/server_addon/maya/server/settings/include_handles.py deleted file mode 100644 index 931222ad2d..0000000000 --- a/server_addon/maya/server/settings/include_handles.py +++ /dev/null @@ -1,32 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - task_types_enum, -) - - -class IncludeByTaskTypeModel(BaseSettingsModel): - task_type: list[str] = SettingsField( - default_factory=list, - title="Task types", - enum_resolver=task_types_enum - ) - include_handles: bool = SettingsField(True, title="Include handles") - - -class IncludeHandlesModel(BaseSettingsModel): - """Maya dirmap settings.""" - # _layout = "expanded" - include_handles_default: bool = SettingsField( - True, title="Include handles by default" - ) - per_task_type: list[IncludeByTaskTypeModel] = SettingsField( - default_factory=list, - title="Include/exclude handles by task type" - ) - - -DEFAULT_INCLUDE_HANDLES = { - "include_handles_default": False, - "per_task_type": [] -} diff --git a/server_addon/maya/server/settings/loaders.py b/server_addon/maya/server/settings/loaders.py deleted file mode 100644 index 2f104d2858..0000000000 --- a/server_addon/maya/server/settings/loaders.py +++ /dev/null @@ -1,284 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.types import ColorRGBA_uint8 - - -class LoaderEnabledModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - - -class ColorsSetting(BaseSettingsModel): - model: ColorRGBA_uint8 = SettingsField( - (209, 132, 30, 1.0), title="Model:") - rig: ColorRGBA_uint8 = SettingsField( - (59, 226, 235, 1.0), title="Rig:") - pointcache: ColorRGBA_uint8 = SettingsField( - (94, 209, 30, 1.0), title="Pointcache:") - animation: ColorRGBA_uint8 = SettingsField( - (94, 209, 30, 1.0), title="Animation:") - ass: ColorRGBA_uint8 = SettingsField( - (249, 135, 53, 1.0), title="Arnold StandIn:") - camera: ColorRGBA_uint8 = SettingsField( - (136, 114, 244, 1.0), title="Camera:") - 
fbx: ColorRGBA_uint8 = SettingsField( - (215, 166, 255, 1.0), title="FBX:") - mayaAscii: ColorRGBA_uint8 = SettingsField( - (67, 174, 255, 1.0), title="Maya Ascii:") - mayaScene: ColorRGBA_uint8 = SettingsField( - (67, 174, 255, 1.0), title="Maya Scene:") - setdress: ColorRGBA_uint8 = SettingsField( - (255, 250, 90, 1.0), title="Set Dress:") - layout: ColorRGBA_uint8 = SettingsField(( - 255, 250, 90, 1.0), title="Layout:") - vdbcache: ColorRGBA_uint8 = SettingsField( - (249, 54, 0, 1.0), title="VDB Cache:") - vrayproxy: ColorRGBA_uint8 = SettingsField( - (255, 150, 12, 1.0), title="VRay Proxy:") - vrayscene_layer: ColorRGBA_uint8 = SettingsField( - (255, 150, 12, 1.0), title="VRay Scene:") - yeticache: ColorRGBA_uint8 = SettingsField( - (99, 206, 220, 1.0), title="Yeti Cache:") - yetiRig: ColorRGBA_uint8 = SettingsField( - (0, 205, 125, 1.0), title="Yeti Rig:") - # model: ColorRGB_float = SettingsField( - # (0.82, 0.52, 0.12), title="Model:" - # ) - # rig: ColorRGB_float = SettingsField( - # (0.23, 0.89, 0.92), title="Rig:" - # ) - # pointcache: ColorRGB_float = SettingsField( - # (0.37, 0.82, 0.12), title="Pointcache:" - # ) - # animation: ColorRGB_float = SettingsField( - # (0.37, 0.82, 0.12), title="Animation:" - # ) - # ass: ColorRGB_float = SettingsField( - # (0.98, 0.53, 0.21), title="Arnold StandIn:" - # ) - # camera: ColorRGB_float = SettingsField( - # (0.53, 0.45, 0.96), title="Camera:" - # ) - # fbx: ColorRGB_float = SettingsField( - # (0.84, 0.65, 1.0), title="FBX:" - # ) - # mayaAscii: ColorRGB_float = SettingsField( - # (0.26, 0.68, 1.0), title="Maya Ascii:" - # ) - # mayaScene: ColorRGB_float = SettingsField( - # (0.26, 0.68, 1.0), title="Maya Scene:" - # ) - # setdress: ColorRGB_float = SettingsField( - # (1.0, 0.98, 0.35), title="Set Dress:" - # ) - # layout: ColorRGB_float = SettingsField( - # (1.0, 0.98, 0.35), title="Layout:" - # ) - # vdbcache: ColorRGB_float = SettingsField( - # (0.98, 0.21, 0.0), title="VDB Cache:" - # ) - # vrayproxy: ColorRGB_float = SettingsField( - # (1.0, 0.59, 0.05), title="VRay Proxy:" - # ) - # vrayscene_layer: ColorRGB_float = SettingsField( - # (1.0, 0.59, 0.05), title="VRay Scene:" - # ) - # yeticache: ColorRGB_float = SettingsField( - # (0.39, 0.81, 0.86), title="Yeti Cache:" - # ) - # yetiRig: ColorRGB_float = SettingsField( - # (0.0, 0.80, 0.49), title="Yeti Rig:" - # ) - - -class ReferenceLoaderModel(BaseSettingsModel): - namespace: str = SettingsField(title="Namespace") - group_name: str = SettingsField(title="Group name") - display_handle: bool = SettingsField( - title="Display Handle On Load References" - ) - - -class ImportLoaderModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - namespace: str = SettingsField(title="Namespace") - group_name: str = SettingsField(title="Group name") - - -class YetiRigLoaderModel(LoaderEnabledModel): - create_cache_instance_on_load: bool = SettingsField( - title="Create Yeti Cache instance on load", - description=( - "When enabled, upon loading a Yeti Rig product a new Yeti cache " - "instance is automatically created as preparation to publishing " - "the output directly." 
- ) - ) - - -class LoadersModel(BaseSettingsModel): - colors: ColorsSetting = SettingsField( - default_factory=ColorsSetting, - title="Loaded Products Outliner Colors") - - reference_loader: ReferenceLoaderModel = SettingsField( - default_factory=ReferenceLoaderModel, - title="Reference Loader" - ) - - import_loader: ImportLoaderModel = SettingsField( - default_factory=ImportLoaderModel, - title="Import Loader" - ) - - # Enable/disable loaders - ArnoldStandinLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Arnold Standin Loader" - ) - AssemblyLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Assembly Loader" - ) - AudioLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Audio Loader" - ) - GpuCacheLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="GPU Cache Loader" - ) - FileNodeLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="File Node (Image) Loader" - ) - ImagePlaneLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Image Plane Loader" - ) - LookLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Look Loader" - ) - MatchmoveLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Matchmove Loader" - ) - MultiverseUsdLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Multiverse USD Loader" - ) - MultiverseUsdOverLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Multiverse USD Override Loader" - ) - RedshiftProxyLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Redshift Proxy Loader" - ) - RenderSetupLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Render Setup Loader" - ) - LoadVDBtoArnold: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="VDB to Arnold Loader" - ) - LoadVDBtoRedShift: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="VDB to Redshift Loader" - ) - LoadVDBtoVRay: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="VDB to V-Ray Loader" - ) - VRayProxyLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Vray Proxy Loader" - ) - VRaySceneLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="VrayScene Loader" - ) - XgenLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Xgen Loader" - ) - YetiCacheLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, - title="Yeti Cache Loader" - ) - YetiRigLoader: YetiRigLoaderModel = SettingsField( - default_factory=YetiRigLoaderModel, - title="Yeti Rig Loader" - ) - - -DEFAULT_LOADERS_SETTING = { - "colors": { - "model": [209, 132, 30, 1.0], - "rig": [59, 226, 235, 1.0], - "pointcache": [94, 209, 30, 1.0], - "animation": [94, 209, 30, 1.0], - "ass": [249, 135, 53, 1.0], - "camera": [136, 114, 244, 1.0], - "fbx": [215, 166, 255, 1.0], - "mayaAscii": [67, 174, 255, 1.0], - "mayaScene": [67, 174, 255, 1.0], - "setdress": [255, 250, 90, 1.0], - "layout": [255, 250, 90, 1.0], - "vdbcache": [249, 54, 0, 1.0], - "vrayproxy": [255, 150, 12, 1.0], - "vrayscene_layer": [255, 150, 12, 1.0], - "yeticache": [99, 206, 220, 1.0], - "yetiRig": [0, 
205, 125, 1.0] - # "model": [0.82, 0.52, 0.12], - # "rig": [0.23, 0.89, 0.92], - # "pointcache": [0.37, 0.82, 0.12], - # "animation": [0.37, 0.82, 0.12], - # "ass": [0.98, 0.53, 0.21], - # "camera":[0.53, 0.45, 0.96], - # "fbx": [0.84, 0.65, 1.0], - # "mayaAscii": [0.26, 0.68, 1.0], - # "mayaScene": [0.26, 0.68, 1.0], - # "setdress": [1.0, 0.98, 0.35], - # "layout": [1.0, 0.98, 0.35], - # "vdbcache": [0.98, 0.21, 0.0], - # "vrayproxy": [1.0, 0.59, 0.05], - # "vrayscene_layer": [1.0, 0.59, 0.05], - # "yeticache": [0.39, 0.81, 0.86], - # "yetiRig": [0.0, 0.80, 0.49], - }, - "reference_loader": { - "namespace": "{folder[name]}_{product[name]}_##_", - "group_name": "_GRP", - "display_handle": True - }, - "import_loader": { - "enabled": True, - "namespace": "{folder[name]}_{product[name]}_##_", - "group_name": "_GRP", - "display_handle": True - }, - "ArnoldStandinLoader": {"enabled": True}, - "AssemblyLoader": {"enabled": True}, - "AudioLoader": {"enabled": True}, - "FileNodeLoader": {"enabled": True}, - "GpuCacheLoader": {"enabled": True}, - "ImagePlaneLoader": {"enabled": True}, - "LookLoader": {"enabled": True}, - "MatchmoveLoader": {"enabled": True}, - "MultiverseUsdLoader": {"enabled": True}, - "MultiverseUsdOverLoader": {"enabled": True}, - "RedshiftProxyLoader": {"enabled": True}, - "RenderSetupLoader": {"enabled": True}, - "LoadVDBtoArnold": {"enabled": True}, - "LoadVDBtoRedShift": {"enabled": True}, - "LoadVDBtoVRay": {"enabled": True}, - "VRayProxyLoader": {"enabled": True}, - "VRaySceneLoader": {"enabled": True}, - "XgenLoader": {"enabled": True}, - "YetiCacheLoader": {"enabled": True}, - "YetiRigLoader": { - "enabled": True, - "create_cache_instance_on_load": True - }, -} diff --git a/server_addon/maya/server/settings/main.py b/server_addon/maya/server/settings/main.py deleted file mode 100644 index a4562f54d7..0000000000 --- a/server_addon/maya/server/settings/main.py +++ /dev/null @@ -1,123 +0,0 @@ -from pydantic import validator -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - ensure_unique_names, -) -from .imageio import ImageIOSettings, DEFAULT_IMAGEIO_SETTINGS -from .maya_dirmap import MayaDirmapModel, DEFAULT_MAYA_DIRMAP_SETTINGS -from .include_handles import IncludeHandlesModel, DEFAULT_INCLUDE_HANDLES -from .explicit_plugins_loading import ( - ExplicitPluginsLoadingModel, DEFAULT_EXPLITCIT_PLUGINS_LOADING_SETTINGS -) -from .scriptsmenu import ScriptsmenuModel, DEFAULT_SCRIPTSMENU_SETTINGS -from .render_settings import RenderSettingsModel, DEFAULT_RENDER_SETTINGS -from .creators import CreatorsModel, DEFAULT_CREATORS_SETTINGS -from .publishers import PublishersModel, DEFAULT_PUBLISH_SETTINGS -from .loaders import LoadersModel, DEFAULT_LOADERS_SETTING -from .workfile_build_settings import ProfilesModel, DEFAULT_WORKFILE_SETTING -from .templated_workfile_settings import ( - TemplatedProfilesModel, DEFAULT_TEMPLATED_WORKFILE_SETTINGS -) - - -class ExtMappingItemModel(BaseSettingsModel): - _layout = "compact" - name: str = SettingsField(title="Product type") - value: str = SettingsField(title="Extension") - - -class MayaSettings(BaseSettingsModel): - """Maya Project Settings.""" - - use_cbid_workflow: bool = SettingsField( - True, title="Use cbId workflow", - description=( - "When enabled, a per node `cbId` identifier will be created and " - "validated for many product types. This is then used for look " - "publishing and many others. 
By disabling this, the `cbId` " - "attribute will still be created on scene save but it will not " - "be validated.")) - - open_workfile_post_initialization: bool = SettingsField( - True, title="Open Workfile Post Initialization") - explicit_plugins_loading: ExplicitPluginsLoadingModel = SettingsField( - default_factory=ExplicitPluginsLoadingModel, - title="Explicit Plugins Loading") - imageio: ImageIOSettings = SettingsField( - default_factory=ImageIOSettings, title="Color Management (imageio)") - mel_workspace: str = SettingsField( - title="Maya MEL Workspace", widget="textarea" - ) - ext_mapping: list[ExtMappingItemModel] = SettingsField( - default_factory=list, title="Extension Mapping") - maya_dirmap: MayaDirmapModel = SettingsField( - default_factory=MayaDirmapModel, title="Maya dirmap Settings") - include_handles: IncludeHandlesModel = SettingsField( - default_factory=IncludeHandlesModel, - title="Include/Exclude Handles in default playback & render range" - ) - scriptsmenu: ScriptsmenuModel = SettingsField( - default_factory=ScriptsmenuModel, - title="Scriptsmenu Settings" - ) - render_settings: RenderSettingsModel = SettingsField( - default_factory=RenderSettingsModel, title="Render Settings") - create: CreatorsModel = SettingsField( - default_factory=CreatorsModel, title="Creators") - publish: PublishersModel = SettingsField( - default_factory=PublishersModel, title="Publishers") - load: LoadersModel = SettingsField( - default_factory=LoadersModel, title="Loaders") - workfile_build: ProfilesModel = SettingsField( - default_factory=ProfilesModel, title="Workfile Build Settings") - templated_workfile_build: TemplatedProfilesModel = SettingsField( - default_factory=TemplatedProfilesModel, - title="Templated Workfile Build Settings") - - @validator("ext_mapping") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -DEFAULT_MEL_WORKSPACE_SETTINGS = "\n".join(( - 'workspace -fr "shaders" "renderData/shaders";', - 'workspace -fr "images" "renders/maya";', - 'workspace -fr "particles" "particles";', - 'workspace -fr "mayaAscii" "";', - 'workspace -fr "mayaBinary" "";', - 'workspace -fr "scene" "";', - 'workspace -fr "alembicCache" "cache/alembic";', - 'workspace -fr "renderData" "renderData";', - 'workspace -fr "sourceImages" "sourceimages";', - 'workspace -fr "fileCache" "cache/nCache";', - 'workspace -fr "autoSave" "autosave";', - '', -)) - -DEFAULT_MAYA_SETTING = { - "use_cbid_workflow": True, - "open_workfile_post_initialization": True, - "explicit_plugins_loading": DEFAULT_EXPLITCIT_PLUGINS_LOADING_SETTINGS, - "imageio": DEFAULT_IMAGEIO_SETTINGS, - "mel_workspace": DEFAULT_MEL_WORKSPACE_SETTINGS, - "ext_mapping": [ - {"name": "model", "value": "ma"}, - {"name": "mayaAscii", "value": "ma"}, - {"name": "camera", "value": "ma"}, - {"name": "rig", "value": "ma"}, - {"name": "workfile", "value": "ma"}, - {"name": "yetiRig", "value": "ma"} - ], - # `maya_dirmap` was originally with dash - `maya-dirmap` - "maya_dirmap": DEFAULT_MAYA_DIRMAP_SETTINGS, - "include_handles": DEFAULT_INCLUDE_HANDLES, - "scriptsmenu": DEFAULT_SCRIPTSMENU_SETTINGS, - "render_settings": DEFAULT_RENDER_SETTINGS, - "create": DEFAULT_CREATORS_SETTINGS, - "publish": DEFAULT_PUBLISH_SETTINGS, - "load": DEFAULT_LOADERS_SETTING, - "workfile_build": DEFAULT_WORKFILE_SETTING, - "templated_workfile_build": DEFAULT_TEMPLATED_WORKFILE_SETTINGS -} diff --git a/server_addon/maya/server/settings/maya_dirmap.py b/server_addon/maya/server/settings/maya_dirmap.py deleted file mode 100644 index 
f68028cd79..0000000000 --- a/server_addon/maya/server/settings/maya_dirmap.py +++ /dev/null @@ -1,38 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class MayaDirmapPathsSubmodel(BaseSettingsModel): - _layout = "compact" - source_path: list[str] = SettingsField( - default_factory=list, title="Source Paths" - ) - destination_path: list[str] = SettingsField( - default_factory=list, title="Destination Paths" - ) - - -class MayaDirmapModel(BaseSettingsModel): - """Maya dirmap settings.""" - # _layout = "expanded" - _isGroup: bool = True - - enabled: bool = SettingsField(title="enabled") - # Use ${} placeholder instead of absolute value of a root in - # referenced filepaths. - use_env_var_as_root: bool = SettingsField( - title="Use env var placeholder in referenced paths" - ) - paths: MayaDirmapPathsSubmodel = SettingsField( - default_factory=MayaDirmapPathsSubmodel, - title="Dirmap Paths" - ) - - -DEFAULT_MAYA_DIRMAP_SETTINGS = { - "use_env_var_as_root": False, - "enabled": False, - "paths": { - "source-path": [], - "destination-path": [] - } -} diff --git a/server_addon/maya/server/settings/publish_playblast.py b/server_addon/maya/server/settings/publish_playblast.py deleted file mode 100644 index d513a43e99..0000000000 --- a/server_addon/maya/server/settings/publish_playblast.py +++ /dev/null @@ -1,402 +0,0 @@ -from pydantic import validator - -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - ensure_unique_names, - task_types_enum, -) -from ayon_server.types import ColorRGBA_uint8 - - -def hardware_falloff_enum(): - return [ - {"label": "Linear", "value": "0"}, - {"label": "Exponential", "value": "1"}, - {"label": "Exponential Squared", "value": "2"} - ] - - -def renderer_enum(): - return [ - {"label": "Viewport 2.0", "value": "vp2Renderer"} - ] - - -def displayLights_enum(): - return [ - {"label": "Default Lighting", "value": "default"}, - {"label": "All Lights", "value": "all"}, - {"label": "Selected Lights", "value": "selected"}, - {"label": "Flat Lighting", "value": "flat"}, - {"label": "No Lights", "value": "nolights"} - ] - - -def plugin_objects_default(): - return [ - { - "name": "gpuCacheDisplayFilter", - "value": False - } - ] - - -class CodecSetting(BaseSettingsModel): - _layout = "expanded" - compression: str = SettingsField("png", title="Encoding") - format: str = SettingsField("image", title="Format") - quality: int = SettingsField(95, title="Quality", ge=0, le=100) - - -class DisplayOptionsSetting(BaseSettingsModel): - _layout = "expanded" - override_display: bool = SettingsField( - True, title="Override display options" - ) - background: ColorRGBA_uint8 = SettingsField( - (125, 125, 125, 1.0), title="Background Color" - ) - # background: ColorRGB_float = SettingsField( - # (0.5, 0.5, 0.5), title="Background Color" - # ) - displayGradient: bool = SettingsField( - True, title="Display background gradient" - ) - backgroundTop: ColorRGBA_uint8 = SettingsField( - (125, 125, 125, 1.0), title="Background Top" - ) - backgroundBottom: ColorRGBA_uint8 = SettingsField( - (125, 125, 125, 1.0), title="Background Bottom" - ) - # backgroundTop: ColorRGB_float = SettingsField( - # (0.5, 0.5, 0.5), title="Background Top" - # ) - # backgroundBottom: ColorRGB_float = SettingsField( - # (0.5, 0.5, 0.5), title="Background Bottom" - # ) - - -class GenericSetting(BaseSettingsModel): - _layout = "expanded" - isolate_view: bool = SettingsField(True, title="Isolate View") - off_screen: bool = SettingsField(True, title="Off Screen") - pan_zoom: 
bool = SettingsField(False, title="2D Pan/Zoom") - - -class RendererSetting(BaseSettingsModel): - _layout = "expanded" - rendererName: str = SettingsField( - "vp2Renderer", - enum_resolver=renderer_enum, - title="Renderer name" - ) - - -class ResolutionSetting(BaseSettingsModel): - _layout = "expanded" - width: int = SettingsField(0, title="Width") - height: int = SettingsField(0, title="Height") - - -class PluginObjectsModel(BaseSettingsModel): - name: str = SettingsField("", title="Name") - value: bool = SettingsField(True, title="Enabled") - - -class ViewportOptionsSetting(BaseSettingsModel): - override_viewport_options: bool = SettingsField( - True, title="Override viewport options" - ) - displayLights: str = SettingsField( - "default", enum_resolver=displayLights_enum, title="Display Lights" - ) - displayTextures: bool = SettingsField(True, title="Display Textures") - textureMaxResolution: int = SettingsField( - 1024, title="Texture Clamp Resolution" - ) - renderDepthOfField: bool = SettingsField( - True, title="Depth of Field", section="Depth of Field" - ) - shadows: bool = SettingsField(True, title="Display Shadows") - twoSidedLighting: bool = SettingsField(True, title="Two Sided Lighting") - lineAAEnable: bool = SettingsField( - True, title="Enable Anti-Aliasing", section="Anti-Aliasing" - ) - multiSample: int = SettingsField(8, title="Anti Aliasing Samples") - loadTextures: bool = SettingsField(False, title="Load Textures") - useDefaultMaterial: bool = SettingsField( - False, title="Use Default Material" - ) - wireframeOnShaded: bool = SettingsField(False, title="Wireframe On Shaded") - xray: bool = SettingsField(False, title="X-Ray") - jointXray: bool = SettingsField(False, title="X-Ray Joints") - backfaceCulling: bool = SettingsField(False, title="Backface Culling") - ssaoEnable: bool = SettingsField( - False, title="Screen Space Ambient Occlusion", section="SSAO" - ) - ssaoAmount: int = SettingsField(1, title="SSAO Amount") - ssaoRadius: int = SettingsField(16, title="SSAO Radius") - ssaoFilterRadius: int = SettingsField(16, title="SSAO Filter Radius") - ssaoSamples: int = SettingsField(16, title="SSAO Samples") - fogging: bool = SettingsField( - False, title="Enable Hardware Fog", section="Fog" - ) - hwFogFalloff: str = SettingsField( - "0", enum_resolver=hardware_falloff_enum, title="Hardware Falloff" - ) - hwFogDensity: float = SettingsField(0.0, title="Fog Density") - hwFogStart: int = SettingsField(0, title="Fog Start") - hwFogEnd: int = SettingsField(100, title="Fog End") - hwFogAlpha: int = SettingsField(0, title="Fog Alpha") - hwFogColorR: float = SettingsField(1.0, title="Fog Color R") - hwFogColorG: float = SettingsField(1.0, title="Fog Color G") - hwFogColorB: float = SettingsField(1.0, title="Fog Color B") - motionBlurEnable: bool = SettingsField( - False, title="Enable Motion Blur", section="Motion Blur" - ) - motionBlurSampleCount: int = SettingsField( - 8, title="Motion Blur Sample Count" - ) - motionBlurShutterOpenFraction: float = SettingsField( - 0.2, title="Shutter Open Fraction" - ) - cameras: bool = SettingsField(False, title="Cameras", section="Show") - clipGhosts: bool = SettingsField(False, title="Clip Ghosts") - deformers: bool = SettingsField(False, title="Deformers") - dimensions: bool = SettingsField(False, title="Dimensions") - dynamicConstraints: bool = SettingsField( - False, title="Dynamic Constraints" - ) - dynamics: bool = SettingsField(False, title="Dynamics") - fluids: bool = SettingsField(False, title="Fluids") - follicles: bool = 
SettingsField(False, title="Follicles") - greasePencils: bool = SettingsField(False, title="Grease Pencils") - grid: bool = SettingsField(False, title="Grid") - hairSystems: bool = SettingsField(True, title="Hair Systems") - handles: bool = SettingsField(False, title="Handles") - headsUpDisplay: bool = SettingsField(False, title="HUD") - ikHandles: bool = SettingsField(False, title="IK Handles") - imagePlane: bool = SettingsField(True, title="Image Plane") - joints: bool = SettingsField(False, title="Joints") - lights: bool = SettingsField(False, title="Lights") - locators: bool = SettingsField(False, title="Locators") - manipulators: bool = SettingsField(False, title="Manipulators") - motionTrails: bool = SettingsField(False, title="Motion Trails") - nCloths: bool = SettingsField(False, title="nCloths") - nParticles: bool = SettingsField(False, title="nParticles") - nRigids: bool = SettingsField(False, title="nRigids") - controlVertices: bool = SettingsField(False, title="NURBS CVs") - nurbsCurves: bool = SettingsField(False, title="NURBS Curves") - hulls: bool = SettingsField(False, title="NURBS Hulls") - nurbsSurfaces: bool = SettingsField(False, title="NURBS Surfaces") - particleInstancers: bool = SettingsField( - False, title="Particle Instancers" - ) - pivots: bool = SettingsField(False, title="Pivots") - planes: bool = SettingsField(False, title="Planes") - pluginShapes: bool = SettingsField(False, title="Plugin Shapes") - polymeshes: bool = SettingsField(True, title="Polygons") - strokes: bool = SettingsField(False, title="Strokes") - subdivSurfaces: bool = SettingsField(False, title="Subdiv Surfaces") - textures: bool = SettingsField(False, title="Texture Placements") - pluginObjects: list[PluginObjectsModel] = SettingsField( - default_factory=plugin_objects_default, - title="Plugin Objects" - ) - - @validator("pluginObjects") - def validate_unique_plugin_objects(cls, value): - ensure_unique_names(value) - return value - - -class CameraOptionsSetting(BaseSettingsModel): - displayGateMask: bool = SettingsField(False, title="Display Gate Mask") - displayResolution: bool = SettingsField(False, title="Display Resolution") - displayFilmGate: bool = SettingsField(False, title="Display Film Gate") - displayFieldChart: bool = SettingsField(False, title="Display Field Chart") - displaySafeAction: bool = SettingsField(False, title="Display Safe Action") - displaySafeTitle: bool = SettingsField(False, title="Display Safe Title") - displayFilmPivot: bool = SettingsField(False, title="Display Film Pivot") - displayFilmOrigin: bool = SettingsField(False, title="Display Film Origin") - overscan: int = SettingsField(1.0, title="Overscan") - - -class CapturePresetSetting(BaseSettingsModel): - Codec: CodecSetting = SettingsField( - default_factory=CodecSetting, - title="Codec", - section="Codec") - DisplayOptions: DisplayOptionsSetting = SettingsField( - default_factory=DisplayOptionsSetting, - title="Display Options", - section="Display Options") - Generic: GenericSetting = SettingsField( - default_factory=GenericSetting, - title="Generic", - section="Generic") - Renderer: RendererSetting = SettingsField( - default_factory=RendererSetting, - title="Renderer", - section="Renderer") - Resolution: ResolutionSetting = SettingsField( - default_factory=ResolutionSetting, - title="Resolution", - section="Resolution") - ViewportOptions: ViewportOptionsSetting = SettingsField( - default_factory=ViewportOptionsSetting, - title="Viewport Options") - CameraOptions: CameraOptionsSetting = SettingsField( - 
default_factory=CameraOptionsSetting, - title="Camera Options") - - -class ProfilesModel(BaseSettingsModel): - _layout = "expanded" - task_types: list[str] = SettingsField( - default_factory=list, - title="Task types", - enum_resolver=task_types_enum - ) - task_names: list[str] = SettingsField( - default_factory=list, title="Task names" - ) - product_names: list[str] = SettingsField( - default_factory=list, title="Products names" - ) - capture_preset: CapturePresetSetting = SettingsField( - default_factory=CapturePresetSetting, - title="Capture Preset" - ) - - -class ExtractPlayblastSetting(BaseSettingsModel): - capture_preset: CapturePresetSetting = SettingsField( - default_factory=CapturePresetSetting, - title="DEPRECATED! Please use \"Profiles\" below. Capture Preset" - ) - profiles: list[ProfilesModel] = SettingsField( - default_factory=list, - title="Profiles" - ) - - -DEFAULT_PLAYBLAST_SETTING = { - "capture_preset": { - "Codec": { - "compression": "png", - "format": "image", - "quality": 95 - }, - "DisplayOptions": { - "override_display": True, - "background": [125, 125, 125, 1.0], - "backgroundBottom": [125, 125, 125, 1.0], - "backgroundTop": [125, 125, 125, 1.0], - # "background": [0.5, 0.5, 0.5], - # "backgroundBottom": [0.5, 0.5, 0.5], - # "backgroundTop": [0.5, 0.5, 0.5], - "displayGradient": True - }, - "Generic": { - "isolate_view": True, - "off_screen": True, - "pan_zoom": False - }, - "Renderer": { - "rendererName": "vp2Renderer" - }, - "Resolution": { - "width": 1920, - "height": 1080 - }, - "ViewportOptions": { - "override_viewport_options": True, - "displayLights": "default", - "displayTextures": True, - "textureMaxResolution": 1024, - "renderDepthOfField": True, - "shadows": True, - "twoSidedLighting": True, - "lineAAEnable": True, - "multiSample": 8, - "loadTextures": False, - "useDefaultMaterial": False, - "wireframeOnShaded": False, - "xray": False, - "jointXray": False, - "backfaceCulling": False, - "ssaoEnable": False, - "ssaoAmount": 1, - "ssaoRadius": 16, - "ssaoFilterRadius": 16, - "ssaoSamples": 16, - "fogging": False, - "hwFogFalloff": "0", - "hwFogDensity": 0.0, - "hwFogStart": 0, - "hwFogEnd": 100, - "hwFogAlpha": 0, - "hwFogColorR": 1.0, - "hwFogColorG": 1.0, - "hwFogColorB": 1.0, - "motionBlurEnable": False, - "motionBlurSampleCount": 8, - "motionBlurShutterOpenFraction": 0.2, - "cameras": False, - "clipGhosts": False, - "deformers": False, - "dimensions": False, - "dynamicConstraints": False, - "dynamics": False, - "fluids": False, - "follicles": False, - "greasePencils": False, - "grid": False, - "hairSystems": True, - "handles": False, - "headsUpDisplay": False, - "ikHandles": False, - "imagePlane": True, - "joints": False, - "lights": False, - "locators": False, - "manipulators": False, - "motionTrails": False, - "nCloths": False, - "nParticles": False, - "nRigids": False, - "controlVertices": False, - "nurbsCurves": False, - "hulls": False, - "nurbsSurfaces": False, - "particleInstancers": False, - "pivots": False, - "planes": False, - "pluginShapes": False, - "polymeshes": True, - "strokes": False, - "subdivSurfaces": False, - "textures": False, - "pluginObjects": [ - { - "name": "gpuCacheDisplayFilter", - "value": False - } - ] - }, - "CameraOptions": { - "displayGateMask": False, - "displayResolution": False, - "displayFilmGate": False, - "displayFieldChart": False, - "displaySafeAction": False, - "displaySafeTitle": False, - "displayFilmPivot": False, - "displayFilmOrigin": False, - "overscan": 1.0 - } - }, - "profiles": [] -} diff --git 
a/server_addon/maya/server/settings/publishers.py b/server_addon/maya/server/settings/publishers.py deleted file mode 100644 index 6a127cc998..0000000000 --- a/server_addon/maya/server/settings/publishers.py +++ /dev/null @@ -1,1640 +0,0 @@ -import json -from pydantic import validator -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - MultiplatformPathModel, - ensure_unique_names, -) -from ayon_server.exceptions import BadRequestException -from .publish_playblast import ( - ExtractPlayblastSetting, - DEFAULT_PLAYBLAST_SETTING, -) - - -def linear_unit_enum(): - """Get linear units enumerator.""" - return [ - {"label": "mm", "value": "millimeter"}, - {"label": "cm", "value": "centimeter"}, - {"label": "m", "value": "meter"}, - {"label": "km", "value": "kilometer"}, - {"label": "in", "value": "inch"}, - {"label": "ft", "value": "foot"}, - {"label": "yd", "value": "yard"}, - {"label": "mi", "value": "mile"} - ] - - -def angular_unit_enum(): - """Get angular units enumerator.""" - return [ - {"label": "deg", "value": "degree"}, - {"label": "rad", "value": "radian"}, - ] - - -def extract_alembic_data_format_enum(): - return [ - {"label": "ogawa", "value": "ogawa"}, - {"label": "HDF", "value": "HDF"} - ] - - -def extract_alembic_overrides_enum(): - return [ - {"label": "Custom Attributes", "value": "attr"}, - {"label": "Custom Attributes Prefix", "value": "attrPrefix"}, - {"label": "Data Format", "value": "dataFormat"}, - {"label": "Euler Filter", "value": "eulerFilter"}, - {"label": "Mel Per Frame Callback", "value": "melPerFrameCallback"}, - {"label": "Mel Post Job Callback", "value": "melPostJobCallback"}, - {"label": "Pre Roll", "value": "preRoll"}, - {"label": "Pre Roll Start Frame", "value": "preRollStartFrame"}, - { - "label": "Python Per Frame Callback", - "value": "pythonPerFrameCallback" - }, - { - "label": "Python Post Job Callback", - "value": "pythonPostJobCallback" - }, - {"label": "Renderable Only", "value": "renderableOnly"}, - {"label": "Strip Namespaces", "value": "stripNamespaces"}, - {"label": "User Attr", "value": "userAttr"}, - {"label": "User Attr Prefix", "value": "userAttrPrefix"}, - {"label": "UV Write", "value": "uvWrite"}, - {"label": "UVs Only", "value": "uvsOnly"}, - {"label": "Verbose", "value": "verbose"}, - {"label": "Visible Only", "value": "visibleOnly"}, - {"label": "Whole Frame Geo", "value": "wholeFrameGeo"}, - {"label": "World Space", "value": "worldSpace"}, - {"label": "Write Color Sets", "value": "writeColorSets"}, - {"label": "Write Creases", "value": "writeCreases"}, - {"label": "Write Face Sets", "value": "writeFaceSets"}, - {"label": "Write Normals", "value": "writeNormals"}, - {"label": "Write UV Sets", "value": "writeUVSets"}, - {"label": "Write Visibility", "value": "writeVisibility"} - ] - - -class BasicValidateModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - - -class ValidateMeshUVSetMap1Model(BasicValidateModel): - """Validate model's default uv set exists and is named 'map1'.""" - pass - - -class ValidateNoAnimationModel(BasicValidateModel): - """Ensure no keyframes on nodes in the Instance.""" - pass - - -class ValidateRigOutSetNodeIdsModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateSkinclusterDeformerSet") - optional: bool = SettingsField(title="Optional") - allow_history_only: bool = SettingsField(title="Allow history only") - - -class 
ValidateModelNameModel(BaseSettingsModel):
-    enabled: bool = SettingsField(title="Enabled")
-    database: bool = SettingsField(
-        title="Use database shader name definitions"
-    )
-    material_file: MultiplatformPathModel = SettingsField(
-        default_factory=MultiplatformPathModel,
-        title="Material File",
-        description=(
-            "Path to material file defining list of material names to check."
-        )
-    )
-    regex: str = SettingsField(
-        "(.*)_(\\d)*_(?P<asset>.*)_(GEO)",
-        title="Validation regex",
-        description=(
-            "Regex for validating the name of the top level group. You can"
-            " use named capturing groups: (?P<asset>.*) for the asset name"
-        )
-    )
-    top_level_regex: str = SettingsField(
-        ".*_GRP",
-        title="Top level group name regex",
-        description=(
-            "To check for asset in name so *_some_asset_name_GRP"
-            " is valid, use: .*?_(?P<asset>.*)_GRP"
-        )
-    )
-
-
-class ValidateModelContentModel(BaseSettingsModel):
-    enabled: bool = SettingsField(title="Enabled")
-    optional: bool = SettingsField(title="Optional")
-    validate_top_group: bool = SettingsField(title="Validate one top group")
-
-
-class ValidateTransformNamingSuffixModel(BaseSettingsModel):
-    enabled: bool = SettingsField(title="Enabled")
-    optional: bool = SettingsField(title="Optional")
-    SUFFIX_NAMING_TABLE: str = SettingsField(
-        "{}",
-        title="Suffix Naming Tables",
-        widget="textarea",
-        description=(
-            "Validates transform suffix based on"
-            " the type of its children shapes."
-        )
-    )
-
-    @validator("SUFFIX_NAMING_TABLE")
-    def validate_json(cls, value):
-        if not value.strip():
-            return "{}"
-        try:
-            converted_value = json.loads(value)
-            success = isinstance(converted_value, dict)
-        except json.JSONDecodeError:
-            success = False
-
-        if not success:
-            raise BadRequestException(
-                "The text can't be parsed as a json object"
-            )
-        return value
-
-    ALLOW_IF_NOT_IN_SUFFIX_TABLE: bool = SettingsField(
-        title="Allow if suffix not in table"
-    )
-
-
-class CollectMayaRenderModel(BaseSettingsModel):
-    sync_workfile_version: bool = SettingsField(
-        title="Sync render version with workfile"
-    )
-
-
-class CollectFbxAnimationModel(BaseSettingsModel):
-    enabled: bool = SettingsField(title="Collect Fbx Animation")
-
-
-class CollectFbxCameraModel(BaseSettingsModel):
-    enabled: bool = SettingsField(title="CollectFbxCamera")
-
-
-class CollectGLTFModel(BaseSettingsModel):
-    enabled: bool = SettingsField(title="CollectGLTF")
-
-
-class ValidateFrameRangeModel(BaseSettingsModel):
-    enabled: bool = SettingsField(title="ValidateFrameRange")
-    optional: bool = SettingsField(title="Optional")
-    active: bool = SettingsField(title="Active")
-    exclude_product_types: list[str] = SettingsField(
-        default_factory=list,
-        title="Exclude product types"
-    )
-
-
-class ValidateShaderNameModel(BaseSettingsModel):
-    """
-    Shader name regex can use the named capture group `asset` to validate
-    against the current asset name.
- """ - enabled: bool = SettingsField(title="ValidateShaderName") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - regex: str = SettingsField( - "(?P.*)_(.*)_SHD", - title="Validation regex" - ) - - -class ValidateAttributesModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateAttributes") - attributes: str = SettingsField( - "{}", title="Attributes", widget="textarea") - - @validator("attributes") - def validate_json(cls, value): - if not value.strip(): - return "{}" - try: - converted_value = json.loads(value) - success = isinstance(converted_value, dict) - except json.JSONDecodeError: - success = False - - if not success: - raise BadRequestException( - "The attributes can't be parsed as json object" - ) - return value - - -class ValidateLoadedPluginModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateLoadedPlugin") - optional: bool = SettingsField(title="Optional") - whitelist_native_plugins: bool = SettingsField( - title="Whitelist Maya Native Plugins" - ) - authorized_plugins: list[str] = SettingsField( - default_factory=list, title="Authorized plugins" - ) - - -class ValidateMayaUnitsModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateMayaUnits") - optional: bool = SettingsField(title="Optional") - validate_linear_units: bool = SettingsField(title="Validate linear units") - linear_units: str = SettingsField( - enum_resolver=linear_unit_enum, title="Linear Units" - ) - validate_angular_units: bool = SettingsField( - title="Validate angular units" - ) - angular_units: str = SettingsField( - enum_resolver=angular_unit_enum, title="Angular units" - ) - validate_fps: bool = SettingsField(title="Validate fps") - - -class ValidateUnrealStaticMeshNameModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateUnrealStaticMeshName") - optional: bool = SettingsField(title="Optional") - validate_mesh: bool = SettingsField(title="Validate mesh names") - validate_collision: bool = SettingsField(title="Validate collision names") - - -class ValidateCycleErrorModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateCycleError") - optional: bool = SettingsField(title="Optional") - families: list[str] = SettingsField( - default_factory=list, title="Families" - ) - - -class ValidatePluginPathAttributesAttrModel(BaseSettingsModel): - name: str = SettingsField(title="Node type") - value: str = SettingsField(title="Attribute") - - -class ValidatePluginPathAttributesModel(BaseSettingsModel): - """Fill in the node types and attributes you want to validate. - -

e.g. AlembicNode.abc_file, the node type is AlembicNode - and the node attribute is abc_file - """ - - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - attribute: list[ValidatePluginPathAttributesAttrModel] = SettingsField( - default_factory=list, - title="File Attribute" - ) - - @validator("attribute") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -# Validate Render Setting -class RendererAttributesModel(BaseSettingsModel): - _layout = "compact" - type: str = SettingsField(title="Type") - value: str = SettingsField(title="Value") - - -class ValidateRenderSettingsModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - arnold_render_attributes: list[RendererAttributesModel] = SettingsField( - default_factory=list, title="Arnold Render Attributes") - vray_render_attributes: list[RendererAttributesModel] = SettingsField( - default_factory=list, title="VRay Render Attributes") - redshift_render_attributes: list[RendererAttributesModel] = SettingsField( - default_factory=list, title="Redshift Render Attributes") - renderman_render_attributes: list[RendererAttributesModel] = SettingsField( - default_factory=list, title="Renderman Render Attributes") - - -class BasicValidateModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - - -class ValidateCameraContentsModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - validate_shapes: bool = SettingsField(title="Validate presence of shapes") - - -class ExtractProxyAlembicModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - families: list[str] = SettingsField( - default_factory=list, - title="Families") - - -class ExtractAlembicModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - families: list[str] = SettingsField( - default_factory=list, - title="Families") - eulerFilter: bool = SettingsField( - title="Euler Filter", - description="Apply Euler filter while sampling rotations." - ) - renderableOnly: bool = SettingsField( - title="Renderable Only", - description="Only export renderable visible shapes." - ) - stripNamespaces: bool = SettingsField( - title="Strip Namespaces", - description=( - "Namespaces will be stripped off of the node before being written " - "to Alembic." - ) - ) - uvsOnly: bool = SettingsField( - title="UVs Only", - description=( - "If this flag is present, only uv data for PolyMesh and SubD " - "shapes will be written to the Alembic file." - ) - ) - uvWrite: bool = SettingsField( - title="UV Write", - description=( - "Uv data for PolyMesh and SubD shapes will be written to the " - "Alembic file." - ) - ) - verbose: bool = SettingsField( - title="Verbose", - description="Prints the current frame that is being evaluated." - ) - visibleOnly: bool = SettingsField( - title="Visible Only", - description="Only export dag objects visible during frame range." - ) - wholeFrameGeo: bool = SettingsField( - title="Whole Frame Geo", - description=( - "Data for geometry will only be written out on whole frames." - ) - ) - worldSpace: bool = SettingsField( - title="World Space", - description="Any root nodes will be stored in world space." 
-    )
-    writeColorSets: bool = SettingsField(
-        title="Write Color Sets",
-        description="Write vertex colors with the geometry."
-    )
-    writeCreases: bool = SettingsField(
-        title="Write Creases",
-        description="Write the geometry's edge and vertex crease information."
-    )
-    writeFaceSets: bool = SettingsField(
-        title="Write Face Sets",
-        description="Write face sets with the geometry."
-    )
-    writeNormals: bool = SettingsField(
-        title="Write Normals",
-        description="Write normals with the deforming geometry."
-    )
-    writeUVSets: bool = SettingsField(
-        title="Write UV Sets",
-        description=(
-            "Write all uv sets on MFnMeshes as vector 2 indexed geometry "
-            "parameters with face varying scope."
-        )
-    )
-    writeVisibility: bool = SettingsField(
-        title="Write Visibility",
-        description=(
-            "Visibility state will be stored in the Alembic file. Otherwise "
-            "everything written out is treated as visible."
-        )
-    )
-    preRoll: bool = SettingsField(
-        title="Pre Roll",
-        description=(
-            "When enabled, the pre roll start frame is used to begin the "
-            "evaluation of the mesh. From the pre roll start frame to the "
-            "alembic start frame, nothing will be written to disk. This can "
-            "be used for simulation run up."
-        )
-    )
-    preRollStartFrame: int = SettingsField(
-        title="Pre Roll Start Frame",
-        description=(
-            "The frame to start scene evaluation at. This is used to set the "
-            "starting frame for time dependent translations and can be used to"
-            " evaluate run-up that isn't actually translated.\n"
-            "NOTE: Pre Roll needs to be enabled for this start frame "
-            "to be considered."
-        )
-    )
-    dataFormat: str = SettingsField(
-        enum_resolver=extract_alembic_data_format_enum,
-        title="Data Format",
-        description="The data format to use to write the file."
-    )
-    bake_attributes: list[str] = SettingsField(
-        default_factory=list, title="Bake Attributes",
-        description="List of attributes that will be included in the alembic "
-                    "export.",
-    )
-    bake_attribute_prefixes: list[str] = SettingsField(
-        default_factory=list, title="Bake Attribute Prefixes",
-        description="List of attribute prefixes for attributes that will be "
-                    "included in the alembic export.",
-    )
-    attr: str = SettingsField(
-        title="Custom Attributes",
-        placeholder="attr1;attr2",
-        description=(
-            "Attributes matching by name will be included in the Alembic "
-            "export. Attributes should be separated by semi-colon `;`"
-        )
-    )
-    attrPrefix: str = SettingsField(
-        title="Custom Attributes Prefix",
-        placeholder="prefix1;prefix2",
-        description=(
-            "Attributes starting with these prefixes will be included in the "
-            "Alembic export. Attributes should be separated by semi-colon `;`"
-        )
-    )
-    userAttr: str = SettingsField(
-        title="User Attr",
-        placeholder="attr1;attr2",
-        description=(
-            "Attributes matching by name will be included in the Alembic "
-            "export. Attributes should be separated by semi-colon `;`"
-        )
-    )
-    userAttrPrefix: str = SettingsField(
-        title="User Attr Prefix",
-        placeholder="prefix1;prefix2",
-        description=(
-            "Attributes starting with these prefixes will be included in the "
-            "Alembic export. Attributes should be separated by semi-colon `;`"
-        )
-    )
-    melPerFrameCallback: str = SettingsField(
-        title="Mel Per Frame Callback",
-        description=(
-            "When each frame (and the static frame) is evaluated the string "
-            "specified is evaluated as a Mel command."
- ) - ) - melPostJobCallback: str = SettingsField( - title="Mel Post Job Callback", - description=( - "When the translation has finished the string specified is " - "evaluated as a Mel command." - ) - ) - pythonPerFrameCallback: str = SettingsField( - title="Python Per Frame Callback", - description=( - "When each frame (and the static frame) is evaluated the string " - "specified is evaluated as a python command." - ) - ) - pythonPostJobCallback: str = SettingsField( - title="Python Post Job Callback", - description=( - "When the translation has finished the string specified is " - "evaluated as a python command." - ) - ) - overrides: list[str] = SettingsField( - enum_resolver=extract_alembic_overrides_enum, - title="Exposed Overrides", - description=( - "Expose the attribute in this list to the user when publishing." - ) - ) - - -class ExtractObjModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - - -class ExtractModelModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - - -class ExtractMayaSceneRawModel(BaseSettingsModel): - """Add loaded instances to those published families:""" - enabled: bool = SettingsField(title="ExtractMayaSceneRaw") - add_for_families: list[str] = SettingsField( - default_factory=list, title="Families" - ) - - -class ExtractCameraAlembicModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ExtractCameraAlembic") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - bake_attributes: str = SettingsField( - "[]", title="Bake Attributes", widget="textarea", - description="List of attributes that will be included in the alembic " - "camera export. 
Needs to be written as a JSON list.", - ) - - @validator("bake_attributes") - def validate_json_list(cls, value): - if not value.strip(): - return "[]" - try: - converted_value = json.loads(value) - success = isinstance(converted_value, list) - except json.JSONDecodeError: - success = False - - if not success: - raise BadRequestException( - "The text can't be parsed as json object" - ) - return value - - -class ExtractGLBModel(BaseSettingsModel): - enabled: bool = True - active: bool = SettingsField(title="Active") - ogsfx_path: str = SettingsField(title="GLSL Shader Directory") - - -class ExtractLookArgsModel(BaseSettingsModel): - argument: str = SettingsField(title="Argument") - parameters: list[str] = SettingsField( - default_factory=list, title="Parameters" - ) - - -class ExtractLookModel(BaseSettingsModel): - maketx_arguments: list[ExtractLookArgsModel] = SettingsField( - default_factory=list, - title="Extra arguments for maketx command line" - ) - - -class ExtractGPUCacheModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - families: list[str] = SettingsField(default_factory=list, title="Families") - step: float = SettingsField(1.0, ge=1.0, title="Step") - stepSave: int = SettingsField(1, ge=1, title="Step Save") - optimize: bool = SettingsField(title="Optimize Hierarchy") - optimizationThreshold: int = SettingsField( - 1, ge=1, title="Optimization Threshold" - ) - optimizeAnimationsForMotionBlur: bool = SettingsField( - title="Optimize Animations For Motion Blur" - ) - writeMaterials: bool = SettingsField(title="Write Materials") - useBaseTessellation: bool = SettingsField(title="User Based Tessellation") - - -class PublishersModel(BaseSettingsModel): - CollectMayaRender: CollectMayaRenderModel = SettingsField( - default_factory=CollectMayaRenderModel, - title="Collect Render Layers", - section="Collectors" - ) - CollectFbxAnimation: CollectFbxAnimationModel = SettingsField( - default_factory=CollectFbxAnimationModel, - title="Collect FBX Animation", - ) - CollectFbxCamera: CollectFbxCameraModel = SettingsField( - default_factory=CollectFbxCameraModel, - title="Collect Camera for FBX export", - ) - CollectFbxModel: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Collect Model for FBX export", - ) - CollectGLTF: CollectGLTFModel = SettingsField( - default_factory=CollectGLTFModel, - title="Collect Assets for GLB/GLTF export" - ) - ValidateInstanceInContext: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Instance In Context", - section="Validators" - ) - ValidateFrameRange: ValidateFrameRangeModel = SettingsField( - default_factory=ValidateFrameRangeModel, - title="Validate Frame Range" - ) - ValidateShaderName: ValidateShaderNameModel = SettingsField( - default_factory=ValidateShaderNameModel, - title="Validate Shader Name" - ) - ValidateShadingEngine: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Look Shading Engine Naming" - ) - ValidateMayaColorSpace: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Colorspace" - ) - ValidateAttributes: ValidateAttributesModel = SettingsField( - default_factory=ValidateAttributesModel, - title="Validate Attributes" - ) - ValidateLoadedPlugin: ValidateLoadedPluginModel = SettingsField( - default_factory=ValidateLoadedPluginModel, - title="Validate Loaded Plugin" - 
) - ValidateMayaUnits: ValidateMayaUnitsModel = SettingsField( - default_factory=ValidateMayaUnitsModel, - title="Validate Maya Units" - ) - ValidateUnrealStaticMeshName: ValidateUnrealStaticMeshNameModel = ( - SettingsField( - default_factory=ValidateUnrealStaticMeshNameModel, - title="Validate Unreal Static Mesh Name" - ) - ) - ValidateCycleError: ValidateCycleErrorModel = SettingsField( - default_factory=ValidateCycleErrorModel, - title="Validate Cycle Error" - ) - ValidatePluginPathAttributes: ValidatePluginPathAttributesModel = ( - SettingsField( - default_factory=ValidatePluginPathAttributesModel, - title="Plug-in Path Attributes" - ) - ) - ValidateRenderSettings: ValidateRenderSettingsModel = SettingsField( - default_factory=ValidateRenderSettingsModel, - title="Validate Render Settings" - ) - ValidateResolution: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Resolution Setting" - ) - ValidateCurrentRenderLayerIsRenderable: BasicValidateModel = ( - SettingsField( - default_factory=BasicValidateModel, - title="Validate Current Render Layer Has Renderable Camera" - ) - ) - ValidateGLSLMaterial: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate GLSL Material" - ) - ValidateGLSLPlugin: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate GLSL Plugin" - ) - ValidateRenderImageRule: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Render Image Rule (Workspace)" - ) - ValidateRenderNoDefaultCameras: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate No Default Cameras Renderable" - ) - ValidateRenderSingleCamera: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Render Single Camera " - ) - ValidateRenderLayerAOVs: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Render Passes/AOVs Are Registered" - ) - ValidateStepSize: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Step Size" - ) - ValidateVRayDistributedRendering: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="VRay Distributed Rendering" - ) - ValidateVrayReferencedAOVs: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="VRay Referenced AOVs" - ) - ValidateVRayTranslatorEnabled: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="VRay Translator Settings" - ) - ValidateVrayProxy: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="VRay Proxy Settings" - ) - ValidateVrayProxyMembers: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="VRay Proxy Members" - ) - ValidateYetiRenderScriptCallbacks: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Yeti Render Script Callbacks" - ) - ValidateYetiRigCacheState: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Yeti Rig Cache State" - ) - ValidateYetiRigInputShapesInInstance: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Yeti Rig Input Shapes In Instance" - ) - ValidateYetiRigSettings: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Yeti Rig Settings" - ) - # Model - START - ValidateModelName: ValidateModelNameModel = SettingsField( - 
default_factory=ValidateModelNameModel, - title="Validate Model Name", - section="Model", - ) - ValidateModelContent: ValidateModelContentModel = SettingsField( - default_factory=ValidateModelContentModel, - title="Validate Model Content", - ) - ValidateTransformNamingSuffix: ValidateTransformNamingSuffixModel = ( - SettingsField( - default_factory=ValidateTransformNamingSuffixModel, - title="Validate Transform Naming Suffix", - ) - ) - ValidateColorSets: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Color Sets", - ) - ValidateMeshHasOverlappingUVs: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Has Overlapping UVs", - ) - ValidateMeshArnoldAttributes: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Arnold Attributes", - ) - ValidateMeshShaderConnections: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Shader Connections", - ) - ValidateMeshSingleUVSet: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Single UV Set", - ) - ValidateMeshHasUVs: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Has UVs", - ) - ValidateMeshLaminaFaces: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Lamina Faces", - ) - ValidateMeshNgons: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Ngons", - ) - ValidateMeshNonManifold: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Non-Manifold", - ) - ValidateMeshNoNegativeScale: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh No Negative Scale", - ) - ValidateMeshNonZeroEdgeLength: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Edge Length Non Zero", - ) - ValidateMeshNormalsUnlocked: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Normals Unlocked", - ) - ValidateMeshUVSetMap1: ValidateMeshUVSetMap1Model = SettingsField( - default_factory=ValidateMeshUVSetMap1Model, - title="Validate Mesh UV Set Map 1", - ) - ValidateMeshVerticesHaveEdges: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Vertices Have Edges", - ) - ValidateNoAnimation: ValidateNoAnimationModel = SettingsField( - default_factory=ValidateNoAnimationModel, - title="Validate No Animation", - ) - ValidateNoNamespace: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate No Namespace", - ) - ValidateNoNullTransforms: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate No Null Transforms", - ) - ValidateNoUnknownNodes: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate No Unknown Nodes", - ) - ValidateNodeNoGhosting: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Node No Ghosting", - ) - ValidateShapeDefaultNames: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Shape Default Names", - ) - ValidateShapeRenderStats: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Shape Render Stats", - ) - ValidateShapeZero: BasicValidateModel = SettingsField( 
- default_factory=BasicValidateModel, - title="Validate Shape Zero", - ) - ValidateTransformZero: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Transform Zero", - ) - ValidateUniqueNames: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Unique Names", - ) - ValidateNoVRayMesh: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate No V-Ray Proxies (VRayMesh)", - ) - ValidateUnrealMeshTriangulated: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate if Mesh is Triangulated", - ) - ValidateAlembicVisibleOnly: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Alembic Visible Node", - ) - ValidateAlembicDefaultsPointcache: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Alembic Defaults Pointcache" - ) - ValidateAlembicDefaultsAnimation: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Alembic Defaults Animation" - ) - ExtractProxyAlembic: ExtractProxyAlembicModel = SettingsField( - default_factory=ExtractProxyAlembicModel, - title="Extract Proxy Alembic", - section="Model Extractors", - ) - ExtractObj: ExtractObjModel = SettingsField( - default_factory=ExtractObjModel, - title="Extract OBJ" - ) - # Model - END - - # Rig - START - ValidateRigContents: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Rig Contents", - section="Rig", - ) - ValidateRigJointsHidden: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Rig Joints Hidden", - ) - ValidateRigControllers: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Rig Controllers", - ) - ValidateAnimationContent: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Animation Content", - ) - ValidateOutRelatedNodeIds: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Animation Out Set Related Node Ids", - ) - ValidateRigControllersArnoldAttributes: BasicValidateModel = ( - SettingsField( - default_factory=BasicValidateModel, - title="Validate Rig Controllers (Arnold Attributes)", - ) - ) - ValidateSkeletalMeshHierarchy: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Skeletal Mesh Top Node", - ) - ValidateSkeletonRigContents: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Skeleton Rig Contents" - ) - ValidateSkeletonRigControllers: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Skeleton Rig Controllers" - ) - ValidateSkinclusterDeformerSet: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Skincluster Deformer Relationships", - ) - ValidateSkeletonRigOutputIds: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Skeleton Rig Output Ids" - ) - ValidateSkeletonTopGroupHierarchy: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Skeleton Top Group Hierarchy", - ) - ValidateRigOutSetNodeIds: ValidateRigOutSetNodeIdsModel = SettingsField( - default_factory=ValidateRigOutSetNodeIdsModel, - title="Validate Rig Out Set Node Ids", - ) - ValidateSkeletonRigOutSetNodeIds: ValidateRigOutSetNodeIdsModel = ( - 
SettingsField( - default_factory=ValidateRigOutSetNodeIdsModel, - title="Validate Skeleton Rig Out Set Node Ids", - ) - ) - # Rig - END - ValidateCameraAttributes: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Camera Attributes" - ) - ValidateAssemblyName: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Assembly Name" - ) - ValidateAssemblyNamespaces: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Assembly Namespaces" - ) - ValidateAssemblyModelTransforms: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Assembly Model Transforms" - ) - ValidateAssRelativePaths: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Ass Relative Paths" - ) - ValidateNoDefaultCameras: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate No Default Cameras" - ) - ValidateUnrealUpAxis: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Unreal Up-Axis Check" - ) - ValidateCameraContents: ValidateCameraContentsModel = SettingsField( - default_factory=ValidateCameraContentsModel, - title="Validate Camera Content" - ) - ExtractPlayblast: ExtractPlayblastSetting = SettingsField( - default_factory=ExtractPlayblastSetting, - title="Extract Playblast Settings", - section="Extractors" - ) - ExtractMayaSceneRaw: ExtractMayaSceneRawModel = SettingsField( - default_factory=ExtractMayaSceneRawModel, - title="Maya Scene(Raw)" - ) - ExtractCameraAlembic: ExtractCameraAlembicModel = SettingsField( - default_factory=ExtractCameraAlembicModel, - title="Extract Camera Alembic" - ) - ExtractGLB: ExtractGLBModel = SettingsField( - default_factory=ExtractGLBModel, - title="Extract GLB" - ) - ExtractLook: ExtractLookModel = SettingsField( - default_factory=ExtractLookModel, - title="Extract Look" - ) - ExtractGPUCache: ExtractGPUCacheModel = SettingsField( - default_factory=ExtractGPUCacheModel, - title="Extract GPU Cache", - ) - ExtractModel: ExtractModelModel = SettingsField( - default_factory=ExtractModelModel, - title="Extract Model (Maya Scene)" - ) - ExtractAlembic: ExtractAlembicModel = SettingsField( - default_factory=ExtractAlembicModel, - title="Extract Alembic" - ) - - -DEFAULT_SUFFIX_NAMING = { - "mesh": ["_GEO", "_GES", "_GEP", "_OSD"], - "nurbsCurve": ["_CRV"], - "nurbsSurface": ["_NRB"], - "locator": ["_LOC"], - "group": ["_GRP"] -} - -DEFAULT_PUBLISH_SETTINGS = { - "CollectMayaRender": { - "sync_workfile_version": False - }, - "CollectFbxAnimation": { - "enabled": False - }, - "CollectFbxCamera": { - "enabled": False - }, - "CollectFbxModel": { - "enabled": False, - "optional": True, - "active": True - }, - "CollectGLTF": { - "enabled": False - }, - "ValidateInstanceInContext": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateFrameRange": { - "enabled": True, - "optional": True, - "active": True, - "exclude_product_types": [ - "model", - "rig", - "staticMesh" - ] - }, - "ValidateShaderName": { - "enabled": False, - "optional": True, - "active": True, - "regex": "(?P.*)_(.*)_SHD" - }, - "ValidateShadingEngine": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateMayaColorSpace": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateAttributes": { - "enabled": False, - "attributes": "{}" - }, - "ValidateLoadedPlugin": { - "enabled": False, - "optional": True, - 
"whitelist_native_plugins": False, - "authorized_plugins": [] - }, - "ValidateMayaUnits": { - "enabled": True, - "optional": False, - "validate_linear_units": True, - "linear_units": "cm", - "validate_angular_units": True, - "angular_units": "deg", - "validate_fps": True - }, - "ValidateUnrealStaticMeshName": { - "enabled": True, - "optional": True, - "validate_mesh": False, - "validate_collision": True - }, - "ValidateCycleError": { - "enabled": True, - "optional": False, - "families": [ - "rig" - ] - }, - "ValidatePluginPathAttributes": { - "enabled": False, - "optional": False, - "active": True, - "attribute": [ - {"name": "AlembicNode", "value": "abc_File"}, - {"name": "VRayProxy", "value": "fileName"}, - {"name": "RenderManArchive", "value": "filename"}, - {"name": "pgYetiMaya", "value": "cacheFileName"}, - {"name": "aiStandIn", "value": "dso"}, - {"name": "RedshiftSprite", "value": "tex0"}, - {"name": "RedshiftBokeh", "value": "dofBokehImage"}, - {"name": "RedshiftCameraMap", "value": "tex0"}, - {"name": "RedshiftEnvironment", "value": "tex2"}, - {"name": "RedshiftDomeLight", "value": "tex1"}, - {"name": "RedshiftIESLight", "value": "profile"}, - {"name": "RedshiftLightGobo", "value": "tex0"}, - {"name": "RedshiftNormalMap", "value": "tex0"}, - {"name": "RedshiftProxyMesh", "value": "fileName"}, - {"name": "RedshiftVolumeShape", "value": "fileName"}, - {"name": "VRayTexGLSL", "value": "fileName"}, - {"name": "VRayMtlGLSL", "value": "fileName"}, - {"name": "VRayVRmatMtl", "value": "fileName"}, - {"name": "VRayPtex", "value": "ptexFile"}, - {"name": "VRayLightIESShape", "value": "iesFile"}, - {"name": "VRayMesh", "value": "materialAssignmentsFile"}, - {"name": "VRayMtlOSL", "value": "fileName"}, - {"name": "VRayTexOSL", "value": "fileName"}, - {"name": "VRayTexOCIO", "value": "ocioConfigFile"}, - {"name": "VRaySettingsNode", "value": "pmap_autoSaveFile2"}, - {"name": "VRayScannedMtl", "value": "file"}, - {"name": "VRayScene", "value": "parameterOverrideFilePath"}, - {"name": "VRayMtlMDL", "value": "filename"}, - {"name": "VRaySimbiont", "value": "file"}, - {"name": "dlOpenVDBShape", "value": "filename"}, - {"name": "pgYetiMayaShape", "value": "liveABCFilename"}, - {"name": "gpuCache", "value": "cacheFileName"}, - ] - }, - "ValidateRenderSettings": { - "enabled": True, - "active": True, - "optional": False, - "arnold_render_attributes": [], - "vray_render_attributes": [], - "redshift_render_attributes": [], - "renderman_render_attributes": [] - }, - "ValidateResolution": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateCurrentRenderLayerIsRenderable": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateGLSLMaterial": { - "enabled": False, - "optional": False, - "active": True - }, - "ValidateGLSLPlugin": { - "enabled": False, - "optional": False, - "active": True - }, - "ValidateRenderImageRule": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateRenderNoDefaultCameras": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateRenderSingleCamera": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateRenderLayerAOVs": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateStepSize": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateVRayDistributedRendering": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateVrayReferencedAOVs": { - "enabled": True, - "optional": False, - "active": True - }, - 
"ValidateVRayTranslatorEnabled": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateVrayProxy": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateVrayProxyMembers": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateYetiRenderScriptCallbacks": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateYetiRigCacheState": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateYetiRigInputShapesInInstance": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateYetiRigSettings": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateModelName": { - "enabled": False, - "database": True, - "material_file": { - "windows": "", - "darwin": "", - "linux": "" - }, - "regex": "(.*)_(\\d)*_(?P.*)_(GEO)", - "top_level_regex": ".*_GRP" - }, - "ValidateModelContent": { - "enabled": True, - "optional": False, - "validate_top_group": True - }, - "ValidateTransformNamingSuffix": { - "enabled": True, - "optional": True, - "SUFFIX_NAMING_TABLE": json.dumps(DEFAULT_SUFFIX_NAMING, indent=4), - "ALLOW_IF_NOT_IN_SUFFIX_TABLE": True - }, - "ValidateColorSets": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateMeshHasOverlappingUVs": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshArnoldAttributes": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshShaderConnections": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateMeshSingleUVSet": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshHasUVs": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateMeshLaminaFaces": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshNgons": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshNonManifold": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshNoNegativeScale": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateMeshNonZeroEdgeLength": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateMeshNormalsUnlocked": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshUVSetMap1": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateMeshVerticesHaveEdges": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateNoAnimation": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateNoNamespace": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateNoNullTransforms": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateNoUnknownNodes": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateNodeNoGhosting": { - "enabled": False, - "optional": False, - "active": True - }, - "ValidateShapeDefaultNames": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateShapeRenderStats": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateShapeZero": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateTransformZero": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateUniqueNames": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateNoVRayMesh": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateUnrealMeshTriangulated": { - "enabled": False, - 
"optional": True, - "active": True - }, - "ValidateAlembicVisibleOnly": { - "enabled": True, - "optional": False, - "active": True - }, - "ExtractProxyAlembic": { - "enabled": False, - "families": [ - "proxyAbc" - ] - }, - "ExtractObj": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateRigContents": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateRigJointsHidden": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateRigControllers": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateAnimationContent": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateOutRelatedNodeIds": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateRigControllersArnoldAttributes": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateSkeletalMeshHierarchy": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateSkeletonRigContents": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateSkeletonRigControllers": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateSkinclusterDeformerSet": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateRigOutSetNodeIds": { - "enabled": True, - "optional": False, - "allow_history_only": False - }, - "ValidateSkeletonRigOutSetNodeIds": { - "enabled": False, - "optional": False, - "allow_history_only": False - }, - "ValidateSkeletonRigOutputIds": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateSkeletonTopGroupHierarchy": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateCameraAttributes": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateAssemblyName": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateAssemblyNamespaces": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateAssemblyModelTransforms": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateAssRelativePaths": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateNoDefaultCameras": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateUnrealUpAxis": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateCameraContents": { - "enabled": True, - "optional": False, - "validate_shapes": True - }, - "ValidateAlembicDefaultsPointcache": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateAlembicDefaultsAnimation": { - "enabled": True, - "optional": True, - "active": True - }, - "ExtractPlayblast": DEFAULT_PLAYBLAST_SETTING, - "ExtractMayaSceneRaw": { - "enabled": True, - "add_for_families": [ - "layout" - ] - }, - "ExtractCameraAlembic": { - "enabled": True, - "optional": True, - "active": True, - "bake_attributes": "[]" - }, - "ExtractGLB": { - "enabled": False, - "active": True, - "ogsfx_path": "/maya2glTF/PBR/shaders/glTF_PBR.ogsfx" - }, - "ExtractLook": { - "maketx_arguments": [] - }, - "ExtractGPUCache": { - "enabled": False, - "optional": False, - "active": True, - "families": [ - "model", - "animation", - "pointcache" - ], - "step": 1.0, - "stepSave": 1, - "optimize": True, - "optimizationThreshold": 40000, - "optimizeAnimationsForMotionBlur": True, - "writeMaterials": True, - "useBaseTessellation": True - }, - "ExtractModel": { - "enabled": True, - "optional": True, - "active": True - }, - "ExtractAlembic": { - "enabled": True, - "families": [ - 
"pointcache", - "model", - "vrayproxy.alembic" - ], - "attr": "", - "attrPrefix": "", - "bake_attributes": [], - "bake_attribute_prefixes": [], - "dataFormat": "ogawa", - "eulerFilter": False, - "melPerFrameCallback": "", - "melPostJobCallback": "", - "overrides": [ - "attr", - "attrPrefix", - "renderableOnly", - "visibleOnly", - "worldSpace", - "writeColorSets", - "writeFaceSets", - "writeNormals" - ], - "preRoll": False, - "preRollStartFrame": 0, - "pythonPerFrameCallback": "", - "pythonPostJobCallback": "", - "renderableOnly": False, - "stripNamespaces": True, - "uvsOnly": False, - "uvWrite": True, - "userAttr": "", - "userAttrPrefix": "", - "verbose": False, - "visibleOnly": False, - "wholeFrameGeo": False, - "worldSpace": True, - "writeColorSets": False, - "writeCreases": False, - "writeFaceSets": False, - "writeNormals": True, - "writeUVSets": False, - "writeVisibility": False - } -} diff --git a/server_addon/maya/server/settings/render_settings.py b/server_addon/maya/server/settings/render_settings.py deleted file mode 100644 index bc476ec49c..0000000000 --- a/server_addon/maya/server/settings/render_settings.py +++ /dev/null @@ -1,499 +0,0 @@ -"""Providing models and values for Maya Render Settings.""" -from ayon_server.settings import BaseSettingsModel, SettingsField - - -def aov_separators_enum(): - return [ - {"value": "dash", "label": "- (dash)"}, - {"value": "underscore", "label": "_ (underscore)"}, - {"value": "dot", "label": ". (dot)"} - ] - - -def arnold_image_format_enum(): - """Return enumerator for Arnold output formats.""" - return [ - {"label": "jpeg", "value": "jpeg"}, - {"label": "png", "value": "png"}, - {"label": "deepexr", "value": "deep exr"}, - {"label": "tif", "value": "tif"}, - {"label": "exr", "value": "exr"}, - {"label": "maya", "value": "maya"}, - {"label": "mtoa_shaders", "value": "mtoa_shaders"} - ] - - -def arnold_aov_list_enum(): - """Return enumerator for Arnold AOVs. - - Note: Key is value, Value in this case is Label. This - was taken from v3 settings. 
- """ - return [ - {"value": "empty", "label": "< empty >"}, - {"value": "ID", "label": "ID"}, - {"value": "N", "label": "N"}, - {"value": "P", "label": "P"}, - {"value": "Pref", "label": "Pref"}, - {"value": "RGBA", "label": "RGBA"}, - {"value": "Z", "label": "Z"}, - {"value": "albedo", "label": "albedo"}, - {"value": "background", "label": "background"}, - {"value": "coat", "label": "coat"}, - {"value": "coat_albedo", "label": "coat_albedo"}, - {"value": "coat_direct", "label": "coat_direct"}, - {"value": "coat_indirect", "label": "coat_indirect"}, - {"value": "cputime", "label": "cputime"}, - {"value": "crypto_asset", "label": "crypto_asset"}, - {"value": "crypto_material", "label": "cypto_material"}, - {"value": "crypto_object", "label": "crypto_object"}, - {"value": "diffuse", "label": "diffuse"}, - {"value": "diffuse_albedo", "label": "diffuse_albedo"}, - {"value": "diffuse_direct", "label": "diffuse_direct"}, - {"value": "diffuse_indirect", "label": "diffuse_indirect"}, - {"value": "direct", "label": "direct"}, - {"value": "emission", "label": "emission"}, - {"value": "highlight", "label": "highlight"}, - {"value": "indirect", "label": "indirect"}, - {"value": "motionvector", "label": "motionvector"}, - {"value": "opacity", "label": "opacity"}, - {"value": "raycount", "label": "raycount"}, - {"value": "rim_light", "label": "rim_light"}, - {"value": "shadow", "label": "shadow"}, - {"value": "shadow_diff", "label": "shadow_diff"}, - {"value": "shadow_mask", "label": "shadow_mask"}, - {"value": "shadow_matte", "label": "shadow_matte"}, - {"value": "sheen", "label": "sheen"}, - {"value": "sheen_albedo", "label": "sheen_albedo"}, - {"value": "sheen_direct", "label": "sheen_direct"}, - {"value": "sheen_indirect", "label": "sheen_indirect"}, - {"value": "specular", "label": "specular"}, - {"value": "specular_albedo", "label": "specular_albedo"}, - {"value": "specular_direct", "label": "specular_direct"}, - {"value": "specular_indirect", "label": "specular_indirect"}, - {"value": "sss", "label": "sss"}, - {"value": "sss_albedo", "label": "sss_albedo"}, - {"value": "sss_direct", "label": "sss_direct"}, - {"value": "sss_indirect", "label": "sss_indirect"}, - {"value": "transmission", "label": "transmission"}, - {"value": "transmission_albedo", "label": "transmission_albedo"}, - {"value": "transmission_direct", "label": "transmission_direct"}, - {"value": "transmission_indirect", "label": "transmission_indirect"}, - {"value": "volume", "label": "volume"}, - {"value": "volume_Z", "label": "volume_Z"}, - {"value": "volume_albedo", "label": "volume_albedo"}, - {"value": "volume_direct", "label": "volume_direct"}, - {"value": "volume_indirect", "label": "volume_indirect"}, - {"value": "volume_opacity", "label": "volume_opacity"}, - ] - - -def vray_image_output_enum(): - """Return output format for Vray enumerator.""" - return [ - {"label": "png", "value": "png"}, - {"label": "jpg", "value": "jpg"}, - {"label": "vrimg", "value": "vrimg"}, - {"label": "hdr", "value": "hdr"}, - {"label": "exr", "value": "exr"}, - {"label": "exr (multichannel)", "value": "exr (multichannel)"}, - {"label": "exr (deep)", "value": "exr (deep)"}, - {"label": "tga", "value": "tga"}, - {"label": "bmp", "value": "bmp"}, - {"label": "sgi", "value": "sgi"} - ] - - -def vray_aov_list_enum(): - """Return enumerator for Vray AOVs. - - Note: Key is value, Value in this case is Label. This - was taken from v3 settings. 
- """ - - return [ - {"value": "empty", "label": "< empty >"}, - {"value": "atmosphereChannel", "label": "atmosphere"}, - {"value": "backgroundChannel", "label": "background"}, - {"value": "bumpNormalsChannel", "label": "bumpnormals"}, - {"value": "causticsChannel", "label": "caustics"}, - {"value": "coatFilterChannel", "label": "coat_filter"}, - {"value": "coatGlossinessChannel", "label": "coatGloss"}, - {"value": "coatReflectionChannel", "label": "coat_reflection"}, - {"value": "vrayCoatChannel", "label": "coat_specular"}, - {"value": "CoverageChannel", "label": "coverage"}, - {"value": "cryptomatteChannel", "label": "cryptomatte"}, - {"value": "customColor", "label": "custom_color"}, - {"value": "drBucketChannel", "label": "DR"}, - {"value": "denoiserChannel", "label": "denoiser"}, - {"value": "diffuseChannel", "label": "diffuse"}, - {"value": "ExtraTexElement", "label": "extraTex"}, - {"value": "giChannel", "label": "GI"}, - {"value": "LightMixElement", "label": "None"}, - {"value": "lightingChannel", "label": "lighting"}, - {"value": "LightingAnalysisChannel", "label": "LightingAnalysis"}, - {"value": "materialIDChannel", "label": "materialID"}, - {"value": "MaterialSelectElement", "label": "materialSelect"}, - {"value": "matteShadowChannel", "label": "matteShadow"}, - {"value": "MultiMatteElement", "label": "multimatte"}, - {"value": "multimatteIDChannel", "label": "multimatteID"}, - {"value": "normalsChannel", "label": "normals"}, - {"value": "nodeIDChannel", "label": "objectId"}, - {"value": "objectSelectChannel", "label": "objectSelect"}, - {"value": "rawCoatFilterChannel", "label": "raw_coat_filter"}, - {"value": "rawCoatReflectionChannel", "label": "raw_coat_reflection"}, - {"value": "rawDiffuseFilterChannel", "label": "rawDiffuseFilter"}, - {"value": "rawGiChannel", "label": "rawGI"}, - {"value": "rawLightChannel", "label": "rawLight"}, - {"value": "rawReflectionChannel", "label": "rawReflection"}, - { - "value": "rawReflectionFilterChannel", - "label": "rawReflectionFilter" - }, - {"value": "rawRefractionChannel", "label": "rawRefraction"}, - { - "value": "rawRefractionFilterChannel", - "label": "rawRefractionFilter" - }, - {"value": "rawShadowChannel", "label": "rawShadow"}, - {"value": "rawSheenFilterChannel", "label": "raw_sheen_filter"}, - { - "value": "rawSheenReflectionChannel", - "label": "raw_sheen_reflection" - }, - {"value": "rawTotalLightChannel", "label": "rawTotalLight"}, - {"value": "reflectIORChannel", "label": "reflIOR"}, - {"value": "reflectChannel", "label": "reflect"}, - {"value": "reflectionFilterChannel", "label": "reflectionFilter"}, - {"value": "reflectGlossinessChannel", "label": "reflGloss"}, - {"value": "refractChannel", "label": "refract"}, - {"value": "refractionFilterChannel", "label": "refractionFilter"}, - {"value": "refractGlossinessChannel", "label": "refrGloss"}, - {"value": "renderIDChannel", "label": "renderId"}, - {"value": "FastSSS2Channel", "label": "SSS"}, - {"value": "sampleRateChannel", "label": "sampleRate"}, - {"value": "samplerInfo", "label": "samplerInfo"}, - {"value": "selfIllumChannel", "label": "selfIllum"}, - {"value": "shadowChannel", "label": "shadow"}, - {"value": "sheenFilterChannel", "label": "sheen_filter"}, - {"value": "sheenGlossinessChannel", "label": "sheenGloss"}, - {"value": "sheenReflectionChannel", "label": "sheen_reflection"}, - {"value": "vraySheenChannel", "label": "sheen_specular"}, - {"value": "specularChannel", "label": "specular"}, - {"value": "Toon", "label": "Toon"}, - {"value": "toonLightingChannel", 
"label": "toonLighting"}, - {"value": "toonSpecularChannel", "label": "toonSpecular"}, - {"value": "totalLightChannel", "label": "totalLight"}, - {"value": "unclampedColorChannel", "label": "unclampedColor"}, - {"value": "VRScansPaintMaskChannel", "label": "VRScansPaintMask"}, - {"value": "VRScansZoneMaskChannel", "label": "VRScansZoneMask"}, - {"value": "velocityChannel", "label": "velocity"}, - {"value": "zdepthChannel", "label": "zDepth"}, - {"value": "LightSelectElement", "label": "lightselect"}, - ] - - -def redshift_engine_enum(): - """Get Redshift engine type enumerator.""" - return [ - {"value": "0", "label": "None"}, - {"value": "1", "label": "Photon Map"}, - {"value": "2", "label": "Irradiance Cache"}, - {"value": "3", "label": "Brute Force"} - ] - - -def redshift_image_output_enum(): - """Return output format for Redshift enumerator.""" - return [ - {"value": "iff", "label": "Maya IFF"}, - {"value": "exr", "label": "OpenEXR"}, - {"value": "tif", "label": "TIFF"}, - {"value": "png", "label": "PNG"}, - {"value": "tga", "label": "Targa"}, - {"value": "jpg", "label": "JPEG"} - ] - - -def redshift_aov_list_enum(): - """Return enumerator for Vray AOVs. - - Note: Key is value, Value in this case is Label. This - was taken from v3 settings. - """ - return [ - {"value": "empty", "label": "< none >"}, - {"value": "AO", "label": "Ambient Occlusion"}, - {"value": "Background", "label": "Background"}, - {"value": "Beauty", "label": "Beauty"}, - {"value": "BumpNormals", "label": "Bump Normals"}, - {"value": "Caustics", "label": "Caustics"}, - {"value": "CausticsRaw", "label": "Caustics Raw"}, - {"value": "Cryptomatte", "label": "Cryptomatte"}, - {"value": "Custom", "label": "Custom"}, - {"value": "Z", "label": "Depth"}, - {"value": "DiffuseFilter", "label": "Diffuse Filter"}, - {"value": "DiffuseLighting", "label": "Diffuse Lighting"}, - {"value": "DiffuseLightingRaw", "label": "Diffuse Lighting Raw"}, - {"value": "Emission", "label": "Emission"}, - {"value": "GI", "label": "Global Illumination"}, - {"value": "GIRaw", "label": "Global Illumination Raw"}, - {"value": "Matte", "label": "Matte"}, - {"value": "MotionVectors", "label": "Ambient Occlusion"}, - {"value": "N", "label": "Normals"}, - {"value": "ID", "label": "ObjectID"}, - {"value": "ObjectBumpNormal", "label": "Object-Space Bump Normals"}, - {"value": "ObjectPosition", "label": "Object-Space Positions"}, - {"value": "PuzzleMatte", "label": "Puzzle Matte"}, - {"value": "Reflections", "label": "Reflections"}, - {"value": "ReflectionsFilter", "label": "Reflections Filter"}, - {"value": "ReflectionsRaw", "label": "Reflections Raw"}, - {"value": "Refractions", "label": "Refractions"}, - {"value": "RefractionsFilter", "label": "Refractions Filter"}, - {"value": "RefractionsRaw", "label": "Refractions Filter"}, - {"value": "Shadows", "label": "Shadows"}, - {"value": "SpecularLighting", "label": "Specular Lighting"}, - {"value": "SSS", "label": "Sub Surface Scatter"}, - {"value": "SSSRaw", "label": "Sub Surface Scatter Raw"}, - { - "value": "TotalDiffuseLightingRaw", - "label": "Total Diffuse Lighting Raw" - }, - { - "value": "TotalTransLightingRaw", - "label": "Total Translucency Filter" - }, - {"value": "TransTint", "label": "Translucency Filter"}, - {"value": "TransGIRaw", "label": "Translucency Lighting Raw"}, - {"value": "VolumeFogEmission", "label": "Volume Fog Emission"}, - {"value": "VolumeFogTint", "label": "Volume Fog Tint"}, - {"value": "VolumeLighting", "label": "Volume Lighting"}, - {"value": "P", "label": "World Position"}, - ] 
- - -class AdditionalOptionsModel(BaseSettingsModel): - """Additional Option""" - _layout = "compact" - - attribute: str = SettingsField("", title="Attribute name") - value: str = SettingsField("", title="Value") - - -class ArnoldSettingsModel(BaseSettingsModel): - image_prefix: str = SettingsField(title="Image prefix template") - image_format: str = SettingsField( - enum_resolver=arnold_image_format_enum, title="Output Image Format") - multilayer_exr: bool = SettingsField(title="Multilayer (exr)") - tiled: bool = SettingsField(title="Tiled (tif, exr)") - aov_list: list[str] = SettingsField( - default_factory=list, - enum_resolver=arnold_aov_list_enum, - title="AOVs to create" - ) - additional_options: list[AdditionalOptionsModel] = SettingsField( - default_factory=list, - title="Additional Arnold Options", - description=( - "Add additional options - put attribute and value, like AASamples" - " and 4" - ) - ) - - -class VraySettingsModel(BaseSettingsModel): - image_prefix: str = SettingsField(title="Image prefix template") - # engine was str because of JSON limitation (key must be string) - engine: str = SettingsField( - enum_resolver=lambda: [ - {"label": "V-Ray", "value": "1"}, - {"label": "V-Ray GPU", "value": "2"} - ], - title="Production Engine" - ) - image_format: str = SettingsField( - enum_resolver=vray_image_output_enum, - title="Output Image Format" - ) - aov_list: list[str] = SettingsField( - default_factory=list, - enum_resolver=vray_aov_list_enum, - title="AOVs to create" - ) - additional_options: list[AdditionalOptionsModel] = SettingsField( - default_factory=list, - title="Additional Vray Options", - description=( - "Add additional options - put attribute and value, like " - "aaFilterSize and 1.5" - ) - ) - - -class RedshiftSettingsModel(BaseSettingsModel): - image_prefix: str = SettingsField(title="Image prefix template") - # both engines are using the same enumerator, - # both were originally str because of JSON limitation. 
- primary_gi_engine: str = SettingsField( - enum_resolver=redshift_engine_enum, - title="Primary GI Engine" - ) - secondary_gi_engine: str = SettingsField( - enum_resolver=redshift_engine_enum, - title="Secondary GI Engine" - ) - image_format: str = SettingsField( - enum_resolver=redshift_image_output_enum, - title="Output Image Format" - ) - multilayer_exr: bool = SettingsField(title="Multilayer (exr)") - force_combine: bool = SettingsField(title="Force combine beauty and AOVs") - aov_list: list[str] = SettingsField( - default_factory=list, - enum_resolver=redshift_aov_list_enum, - title="AOVs to create" - ) - additional_options: list[AdditionalOptionsModel] = SettingsField( - default_factory=list, - title="Additional Redshift Options", - description=( - "Add additional options - put attribute and value, like " - "reflectionMaxTraceDepth and 3" - ) - ) - - -def renderman_display_filters(): - return [ - "PxrBackgroundDisplayFilter", - "PxrCopyAOVDisplayFilter", - "PxrEdgeDetect", - "PxrFilmicTonemapperDisplayFilter", - "PxrGradeDisplayFilter", - "PxrHalfBufferErrorFilter", - "PxrImageDisplayFilter", - "PxrLightSaturation", - "PxrShadowDisplayFilter", - "PxrStylizedHatching", - "PxrStylizedLines", - "PxrStylizedToon", - "PxrWhitePointDisplayFilter" - ] - - -def renderman_sample_filters_enum(): - return [ - "PxrBackgroundSampleFilter", - "PxrCopyAOVSampleFilter", - "PxrCryptomatte", - "PxrFilmicTonemapperSampleFilter", - "PxrGradeSampleFilter", - "PxrShadowFilter", - "PxrWatermarkFilter", - "PxrWhitePointSampleFilter" - ] - - -class RendermanSettingsModel(BaseSettingsModel): - image_prefix: str = SettingsField( - "", title="Image prefix template") - image_dir: str = SettingsField( - "", title="Image Output Directory") - display_filters: list[str] = SettingsField( - default_factory=list, - title="Display Filters", - enum_resolver=renderman_display_filters - ) - imageDisplay_dir: str = SettingsField( - "", title="Image Display Filter Directory") - sample_filters: list[str] = SettingsField( - default_factory=list, - title="Sample Filters", - enum_resolver=renderman_sample_filters_enum - ) - cryptomatte_dir: str = SettingsField( - "", title="Cryptomatte Output Directory") - watermark_dir: str = SettingsField( - "", title="Watermark Filter Directory") - additional_options: list[AdditionalOptionsModel] = SettingsField( - default_factory=list, - title="Additional Renderer Options" - ) - - -class RenderSettingsModel(BaseSettingsModel): - apply_render_settings: bool = SettingsField( - title="Apply Render Settings on creation" - ) - default_render_image_folder: str = SettingsField( - title="Default render image folder" - ) - enable_all_lights: bool = SettingsField( - title="Include all lights in Render Setup Layers by default" - ) - aov_separator: str = SettingsField( - "underscore", - title="AOV Separator character", - enum_resolver=aov_separators_enum - ) - reset_current_frame: bool = SettingsField( - title="Reset Current Frame") - remove_aovs: bool = SettingsField( - title="Remove existing AOVs") - arnold_renderer: ArnoldSettingsModel = SettingsField( - default_factory=ArnoldSettingsModel, - title="Arnold Renderer") - vray_renderer: VraySettingsModel = SettingsField( - default_factory=VraySettingsModel, - title="Vray Renderer") - redshift_renderer: RedshiftSettingsModel = SettingsField( - default_factory=RedshiftSettingsModel, - title="Redshift Renderer") - renderman_renderer: RendermanSettingsModel = SettingsField( - default_factory=RendermanSettingsModel, - title="Renderman Renderer") - - 
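The DEFAULT_RENDER_SETTINGS dict below mirrors RenderSettingsModel key for key, so, assuming the usual pydantic-style keyword construction of BaseSettingsModel subclasses, it should validate directly against the model. A brief illustrative sketch (not part of the original file):

    # hypothetical check, for illustration only
    settings = RenderSettingsModel(**DEFAULT_RENDER_SETTINGS)
    assert settings.aov_separator == "underscore"
    assert settings.vray_renderer.engine == "1"  # "1" maps to the V-Ray production engine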
-DEFAULT_RENDER_SETTINGS = { - "apply_render_settings": True, - "default_render_image_folder": "renders/maya", - "enable_all_lights": True, - "aov_separator": "underscore", - "reset_current_frame": False, - "remove_aovs": False, - "arnold_renderer": { - "image_prefix": "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>", - "image_format": "exr", - "multilayer_exr": True, - "tiled": True, - "aov_list": [], - "additional_options": [] - }, - "vray_renderer": { - "image_prefix": "<Scene>/<Layer>/<Layer>", - "engine": "1", - "image_format": "exr", - "aov_list": [], - "additional_options": [] - }, - "redshift_renderer": { - "image_prefix": "<Scene>/<RenderLayer>/<RenderLayer>", - "primary_gi_engine": "0", - "secondary_gi_engine": "0", - "image_format": "exr", - "multilayer_exr": True, - "force_combine": True, - "aov_list": [], - "additional_options": [] - }, - "renderman_renderer": { - "image_prefix": "<layer>{aov_separator}<aov>.<f4>.<ext>", - "image_dir": "<scene>/<layer>", - "display_filters": [], - "imageDisplay_dir": "<imagedir>/<layer>{aov_separator}imageDisplayFilter.<f4>.<ext>", - "sample_filters": [], - "cryptomatte_dir": "<imagedir>/<layer>{aov_separator}cryptomatte.<f4>.<ext>", - "watermark_dir": "<imagedir>/<layer>{aov_separator}watermarkFilter.<f4>.<ext>", - "additional_options": [] - } -} diff --git a/server_addon/maya/server/settings/scriptsmenu.py b/server_addon/maya/server/settings/scriptsmenu.py deleted file mode 100644 index 7b0ba7d831..0000000000 --- a/server_addon/maya/server/settings/scriptsmenu.py +++ /dev/null @@ -1,89 +0,0 @@ -import json - -from pydantic import validator -from ayon_server.exceptions import BadRequestException -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class ScriptsmenuSubmodel(BaseSettingsModel): - """Item Definition""" - _isGroup = True - type: str = SettingsField(title="Type") - command: str = SettingsField(title="Command") - sourcetype: str = SettingsField(title="Source Type") - title: str = SettingsField(title="Title") - tooltip: str = SettingsField(title="Tooltip") - tags: list[str] = SettingsField( - default_factory=list, title="A list of tags" - ) - - -_definition_mode_type = [ - {"value": "definition", "label": "Menu Builder"}, - {"value": "definition_json", "label": "Raw JSON (advanced)"} -] - - -class ScriptsmenuModel(BaseSettingsModel): - """Add a custom scripts menu to Maya""" - _isGroup = True - - name: str = SettingsField(title="Menu Name") - - definition_type: str = SettingsField( - title="Define menu using", - description="Choose the way to define the custom scripts menu " - "via settings", - enum_resolver=lambda: _definition_mode_type, - conditionalEnum=True, - default="definition" - ) - definition: list[ScriptsmenuSubmodel] = SettingsField( - default_factory=list, - title="Menu Definition", - description="Scriptmenu Items Definition" - ) - definition_json: str = SettingsField( - "[]", title="Menu Definition JSON", widget="textarea", - description=( - "Define the custom tools menu using a JSON list. " - "For more details on the JSON format, see " - "[here](https://github.com/Colorbleed/scriptsmenu?tab=readme-ov-file#configuration)."
# noqa: E501 - ) - ) - - @validator("definition_json") - def validate_json(cls, value): - if not value.strip(): - return "[]" - try: - converted_value = json.loads(value) - success = isinstance(converted_value, list) - except json.JSONDecodeError: - success = False - - if not success: - raise BadRequestException( - "The definition can't be parsed as json list object" - ) - return value - - -DEFAULT_SCRIPTSMENU_SETTINGS = { - "name": "Custom Tools", - "definition_type": "definition", - "definition": [ - { - "type": "action", - "command": "import openpype.hosts.maya.api.commands as op_cmds; op_cmds.edit_shader_definitions()", - "sourcetype": "python", - "title": "Edit shader name definitions", - "tooltip": "Edit shader name definitions used in validation and renaming.", - "tags": [ - "pipeline", - "shader" - ] - } - ], - "definition_json": "[]" -} diff --git a/server_addon/maya/server/settings/templated_workfile_settings.py b/server_addon/maya/server/settings/templated_workfile_settings.py deleted file mode 100644 index 1baa2c895c..0000000000 --- a/server_addon/maya/server/settings/templated_workfile_settings.py +++ /dev/null @@ -1,30 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - task_types_enum, -) - - -class WorkfileBuildProfilesModel(BaseSettingsModel): - _layout = "expanded" - task_types: list[str] = SettingsField( - default_factory=list, - title="Task types", - enum_resolver=task_types_enum - ) - task_names: list[str] = SettingsField( - default_factory=list, title="Task names" - ) - path: str = SettingsField("", title="Path to template") - - -class TemplatedProfilesModel(BaseSettingsModel): - profiles: list[WorkfileBuildProfilesModel] = SettingsField( - default_factory=list, - title="Profiles" - ) - - -DEFAULT_TEMPLATED_WORKFILE_SETTINGS = { - "profiles": [] -} diff --git a/server_addon/maya/server/settings/workfile_build_settings.py b/server_addon/maya/server/settings/workfile_build_settings.py deleted file mode 100644 index ee0b793405..0000000000 --- a/server_addon/maya/server/settings/workfile_build_settings.py +++ /dev/null @@ -1,134 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - task_types_enum, -) - - -class ContextItemModel(BaseSettingsModel): - _layout = "expanded" - product_name_filters: list[str] = SettingsField( - default_factory=list, title="Product name Filters") - product_types: list[str] = SettingsField( - default_factory=list, title="Product types") - repre_names: list[str] = SettingsField( - default_factory=list, title="Repre Names") - loaders: list[str] = SettingsField( - default_factory=list, title="Loaders") - - -class WorkfileSettingModel(BaseSettingsModel): - _layout = "expanded" - task_types: list[str] = SettingsField( - default_factory=list, - enum_resolver=task_types_enum, - title="Task types") - tasks: list[str] = SettingsField( - default_factory=list, - title="Task names") - current_context: list[ContextItemModel] = SettingsField( - default_factory=list, - title="Current Context") - linked_assets: list[ContextItemModel] = SettingsField( - default_factory=list, - title="Linked Assets") - - -class ProfilesModel(BaseSettingsModel): - profiles: list[WorkfileSettingModel] = SettingsField( - default_factory=list, - title="Profiles" - ) - - -DEFAULT_WORKFILE_SETTING = { - "profiles": [ - { - "task_types": [], - "tasks": [ - "Lighting" - ], - "current_context": [ - { - "product_name_filters": [ - ".+[Mm]ain" - ], - "product_types": [ - "model" - ], - "repre_names": [ - "abc", - "ma" - ], - 
"loaders": [ - "ReferenceLoader" - ] - }, - { - "product_name_filters": [], - "product_types": [ - "animation", - "pointcache", - "proxyAbc" - ], - "repre_names": [ - "abc" - ], - "loaders": [ - "ReferenceLoader" - ] - }, - { - "product_name_filters": [], - "product_types": [ - "rendersetup" - ], - "repre_names": [ - "json" - ], - "loaders": [ - "RenderSetupLoader" - ] - }, - { - "product_name_filters": [], - "product_types": [ - "camera" - ], - "repre_names": [ - "abc" - ], - "loaders": [ - "ReferenceLoader" - ] - } - ], - "linked_assets": [ - { - "product_name_filters": [], - "product_types": [ - "setdress" - ], - "repre_names": [ - "ma" - ], - "loaders": [ - "ReferenceLoader" - ] - }, - { - "product_name_filters": [], - "product_types": [ - "ArnoldStandin" - ], - "repre_names": [ - "ass" - ], - "loaders": [ - "assLoader" - ] - } - ] - } - ] -} From c62763ffbde226b6cc22ce24d1ab48214899d04a Mon Sep 17 00:00:00 2001 From: Jakub Trllo <43494761+iLLiCiTiT@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:17:30 +0200 Subject: [PATCH 05/10] removed resolve addon --- .../client/ayon_resolve/README.markdown | 28 - .../RESOLVE_API_v19.0B-build20.txt | 838 --------------- .../resolve/client/ayon_resolve/__init__.py | 13 - .../resolve/client/ayon_resolve/addon.py | 22 - .../client/ayon_resolve/api/__init__.py | 133 --- .../resolve/client/ayon_resolve/api/action.py | 52 - .../resolve/client/ayon_resolve/api/lib.py | 970 ------------------ .../resolve/client/ayon_resolve/api/menu.py | 184 ---- .../client/ayon_resolve/api/menu_style.qss | 71 -- .../client/ayon_resolve/api/pipeline.py | 325 ------ .../resolve/client/ayon_resolve/api/plugin.py | 910 ---------------- .../client/ayon_resolve/api/testing_utils.py | 71 -- .../client/ayon_resolve/api/todo-rendering.py | 134 --- .../resolve/client/ayon_resolve/api/utils.py | 83 -- .../resolve/client/ayon_resolve/api/workio.py | 96 -- .../hooks/pre_resolve_last_workfile.py | 35 - .../ayon_resolve/hooks/pre_resolve_setup.py | 138 --- .../ayon_resolve/hooks/pre_resolve_startup.py | 24 - .../client/ayon_resolve/otio/__init__.py | 0 .../ayon_resolve/otio/davinci_export.py | 326 ------ .../ayon_resolve/otio/davinci_import.py | 108 -- .../resolve/client/ayon_resolve/otio/utils.py | 70 -- .../plugins/create/create_shot_clip.py | 272 ----- .../remove_unused_media_pool_items.py | 31 - .../ayon_resolve/plugins/load/load_clip.py | 168 --- .../plugins/load/load_editorial_package.py | 52 - .../ayon_resolve/plugins/load/load_media.py | 533 ---------- .../plugins/publish/extract_workfile.py | 52 - .../plugins/publish/precollect_instances.py | 178 ---- .../plugins/publish/precollect_workfile.py | 54 - .../resolve/client/ayon_resolve/startup.py | 70 -- .../utility_scripts/AYON__Menu.py | 22 - .../utility_scripts/ayon_startup.scriptlib | 21 - .../utility_scripts/develop/OTIO_export.py | 83 -- .../utility_scripts/develop/OTIO_import.py | 72 -- .../develop/OpenPype_sync_util_scripts.py | 18 - .../resolve/client/ayon_resolve/utils.py | 71 -- .../resolve/client/ayon_resolve/version.py | 3 - server_addon/resolve/package.py | 10 - server_addon/resolve/server/__init__.py | 13 - server_addon/resolve/server/imageio.py | 79 -- server_addon/resolve/server/settings.py | 208 ---- 42 files changed, 6641 deletions(-) delete mode 100644 server_addon/resolve/client/ayon_resolve/README.markdown delete mode 100644 server_addon/resolve/client/ayon_resolve/RESOLVE_API_v19.0B-build20.txt delete mode 100644 server_addon/resolve/client/ayon_resolve/__init__.py delete mode 100644 
server_addon/resolve/client/ayon_resolve/addon.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/__init__.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/action.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/lib.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/menu.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/menu_style.qss delete mode 100644 server_addon/resolve/client/ayon_resolve/api/pipeline.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/plugin.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/testing_utils.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/todo-rendering.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/utils.py delete mode 100644 server_addon/resolve/client/ayon_resolve/api/workio.py delete mode 100644 server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_last_workfile.py delete mode 100644 server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_setup.py delete mode 100644 server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_startup.py delete mode 100644 server_addon/resolve/client/ayon_resolve/otio/__init__.py delete mode 100644 server_addon/resolve/client/ayon_resolve/otio/davinci_export.py delete mode 100644 server_addon/resolve/client/ayon_resolve/otio/davinci_import.py delete mode 100644 server_addon/resolve/client/ayon_resolve/otio/utils.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/create/create_shot_clip.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/inventory/remove_unused_media_pool_items.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/load/load_clip.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/load/load_editorial_package.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/load/load_media.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/publish/extract_workfile.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_instances.py delete mode 100644 server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_workfile.py delete mode 100644 server_addon/resolve/client/ayon_resolve/startup.py delete mode 100644 server_addon/resolve/client/ayon_resolve/utility_scripts/AYON__Menu.py delete mode 100644 server_addon/resolve/client/ayon_resolve/utility_scripts/ayon_startup.scriptlib delete mode 100644 server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_export.py delete mode 100644 server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_import.py delete mode 100644 server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py delete mode 100644 server_addon/resolve/client/ayon_resolve/utils.py delete mode 100644 server_addon/resolve/client/ayon_resolve/version.py delete mode 100644 server_addon/resolve/package.py delete mode 100644 server_addon/resolve/server/__init__.py delete mode 100644 server_addon/resolve/server/imageio.py delete mode 100644 server_addon/resolve/server/settings.py diff --git a/server_addon/resolve/client/ayon_resolve/README.markdown b/server_addon/resolve/client/ayon_resolve/README.markdown deleted file mode 100644 index 064e791f65..0000000000 --- a/server_addon/resolve/client/ayon_resolve/README.markdown +++ /dev/null @@ -1,28 +0,0 @@ -## Basic setup - -- Currently supported versions go up to v18 -- install Python 3.6.2 (latest
tested v17) or up to 3.9.13 (latest tested on v18) -- pip install PySide2: - - Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install PySide2` -- pip install OpenTimelineIO: - - Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install OpenTimelineIO` - - Python 3.6: open terminal and go to python.exe directory, then `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move the built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. I built it on a Win10 machine with Visual Studio Community 2019 and - ![image](https://user-images.githubusercontent.com/40640033/102792588-ffcb1c80-43a8-11eb-9c6b-bf2114ed578e.png) with CMake installed in PATH. -- make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6 - ![image](https://user-images.githubusercontent.com/40640033/102631545-280b0f00-414e-11eb-89fc-98ac268d209d.png) -- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add the platform-specific Python 3 path to `RESOLVE_PYTHON3_HOME`. - -## Editorial setup - -This is how it looks on my testing project timeline -![image](https://user-images.githubusercontent.com/40640033/102637638-96ec6600-4156-11eb-9656-6e8e3ce4baf8.png) -Notice I renamed the tracks to `main` (holding metadata markers) and `review` (used for generating review data with ffmpeg conversion to a jpg sequence). - -1. you need to start the AYON menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__** -2. then select any clips in the `main` track and change their color to `Chocolate` -3. in the OpenPype Menu select `Create` -4. in the Creator select `Create Publishable Clip [New]` (temporary name) -5. set `Rename clips` to True, Master Track to `main` and Use review track to `review` as in the picture - ![image](https://user-images.githubusercontent.com/40640033/102643773-0d419600-4160-11eb-919e-9c2be0aecab8.png) -6. after you hit `ok`, all clips are colored `pink` and marked with an OpenPype metadata tag -7. hit `Publish` in the OpenPype menu and check that everything was collected correctly. That is the last step for now, as the rest is work in progress. Next steps will follow. diff --git a/server_addon/resolve/client/ayon_resolve/RESOLVE_API_v19.0B-build20.txt b/server_addon/resolve/client/ayon_resolve/RESOLVE_API_v19.0B-build20.txt deleted file mode 100644 index a2f3fa6f73..0000000000 --- a/server_addon/resolve/client/ayon_resolve/RESOLVE_API_v19.0B-build20.txt +++ /dev/null @@ -1,838 +0,0 @@ -Last Updated: 1 April 2024 ----------------------------- -In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import -modules for scripting access (DaVinciResolveScript.py) and some representative examples. - -From v16.2.0 onwards, the nodeIndex parameters accepted by SetLUT() and SetCDL() are 1-based instead of 0-based, i.e. 1 <= nodeIndex <= total number of nodes. - -Overview -------- -As with Blackmagic Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page, -or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network.
Please be aware of the security implications when -allowing scripting access from outside of the Resolve application. - -Prerequisites ------------- -DaVinci Resolve scripting requires one of the following to be installed (for all users): - - Lua 5.1 - Python >= 3.6 64-bit - Python 2.7 64-bit - -Using a script -------------- -DaVinci Resolve needs to be running for a script to be invoked. - -For a Resolve script to be executed from an external folder, the script needs to know of the API location. -You may need to set these environment variables to allow for your Python installation to pick up the appropriate dependencies as shown below: - - Mac OS X: - RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting" - RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so" - PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/" - - Windows: - RESOLVE_SCRIPT_API="%PROGRAMDATA%\Blackmagic Design\DaVinci Resolve\Support\Developer\Scripting" - RESOLVE_SCRIPT_LIB="C:\Program Files\Blackmagic Design\DaVinci Resolve\fusionscript.dll" - PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\Modules\" - - Linux: - RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting" - RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so" - PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/" - (Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve) - -As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console. - -On startup, DaVinci Resolve scans the subfolders in the directories shown below and enumerates the scripts found in the Workspace application menu under Scripts. -Place your script under Utility to be listed in all pages, under Comp or Tool to be available in the Fusion page or under folders for individual pages (Edit, Color or Deliver). Scripts under Deliver are additionally listed under render jobs. -Placing your script here and invoking it from the menu is the easiest way to use scripts. - Mac OS X: - - All users: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts - - Specific user: /Users//Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts - Windows: - - All users: %PROGRAMDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts - - Specific user: %APPDATA%\Roaming\Blackmagic Design\DaVinci Resolve\Support\Fusion\Scripts - Linux: - - All users: /opt/resolve/Fusion/Scripts (or /home/resolve/Fusion/Scripts/ depending on installation) - - Specific user: $HOME/.local/share/DaVinciResolve/Fusion/Scripts - -The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6 -and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual. - -This example Python script creates a simple project: - - #!/usr/bin/env python - import DaVinciResolveScript as dvr_script - resolve = dvr_script.scriptapp("Resolve") - fusion = resolve.Fusion() - projectManager = resolve.GetProjectManager() - projectManager.CreateProject("Hello World")
As a native object, it can be inspected for further scriptable properties - using table iteration and "getmetatable" -in Lua and dir, help etc in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality. - - -Running DaVinci Resolve in headless mode ----------------------------------------- -DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled. -However, the various scripting APIs will continue to work as expected. - -DaVinci Resolve API -------------------- -Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions. - -Resolve - Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts. - GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations. - GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database. - OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver"). - GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None). - GetProductName() --> string # Returns product name. - GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format. - GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format. - LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'. - UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout. - ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'. - DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'. - SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'. - ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename. - Quit() --> None # Quits the Resolve App. - ImportRenderPreset(presetPath) --> Bool # Import a preset from presetPath (string) and set it as current preset for rendering. - ExportRenderPreset(presetName, exportPath) --> Bool # Export a preset to a given path (string) if presetName(string) exists. - ImportBurnInPreset(presetPath) --> Bool # Import a data burn in preset from a given presetPath (string) - ExportBurnInPreset(presetName, exportPath) --> Bool # Export a data burn in preset to a given path (string) if presetName (string) exists. - GetKeyframeMode() --> keyframeMode # Returns the currently set keyframe mode (int). Refer to section 'Keyframe Mode information' below for details. - SetKeyframeMode(keyframeMode) --> Bool # Returns True when 'keyframeMode'(enum) is successfully set. Refer to section 'Keyframe Mode information' below for details. 
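A short usage sketch built only from the Resolve calls listed above, assuming the DaVinciResolveScript module is importable as described in "Using a script":

    #!/usr/bin/env python
    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    print(resolve.GetVersionString())  # "major.minor.patch[suffix].build"
    # OpenPage() accepts: media, cut, edit, fusion, color, fairlight, deliver
    if resolve.GetCurrentPage() != "edit":
        resolve.OpenPage("edit")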
- -ProjectManager - ArchiveProject(projectName, - filePath, - isArchiveSrcMedia=True, - isArchiveRenderCache=True, - isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments - CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not. - DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded - LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project. - GetCurrentProject() --> Project # Returns the currently loaded Resolve project. - SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful. - CloseProject(project) --> Bool # Closes the specified project without saving. - CreateFolder(folderName) --> Bool # Creates a folder if folderName (string) is unique. - DeleteFolder(folderName) --> Bool # Deletes the specified folder if it exists. Returns True in case of success. - GetProjectListInCurrentFolder() --> [project names...] # Returns a list of project names in current folder. - GetFolderListInCurrentFolder() --> [folder names...] # Returns a list of folder names in current folder. - GotoRootFolder() --> Bool # Opens root folder in database. - GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent. - GetCurrentFolder() --> string # Returns the current folder name. - OpenFolder(folderName) --> Bool # Opens folder under given name. - ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful. - ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success. - RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful. - GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection - GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve - SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project. - # 'DbType': 'Disk' or 'PostgreSQL' (string) - # 'DbName': database name (string) - # 'IpAddress': IP address of the PostgreSQL server (string, optional key - defaults to '127.0.0.1') - CreateCloudProject({cloudSettings}) --> Project # Creates and returns a cloud project. - # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information. - ImportCloudProject(filePath, {cloudSettings}) --> Bool # Returns True if import cloud project is successful; False otherwise - # 'filePath': String; filePath of file to import - # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information. 
- RestoreCloudProject(folderPath, {cloudSettings}) --> Bool # Returns True if restore cloud project is successful; False otherwise - # 'folderPath': String; path of folder to restore - # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information. - -Project - GetMediaPool() --> MediaPool # Returns the Media Pool object. - GetTimelineCount() --> int # Returns the number of timelines currently present in the project. - GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount() - GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline. - SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful. - GetGallery() --> Gallery # Returns the Gallery object. - GetName() --> string # Returns project name. - SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique. - GetPresetList() --> [presets...] # Returns a list of presets and their information. - SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project. - AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job. - DeleteRenderJob(jobId) --> Bool # Deletes render job for input job id (string). - DeleteAllRenderJobs() --> Bool # Deletes all render jobs in the queue. - GetRenderJobList() --> [render jobs...] # Returns a list of render jobs and their information. - GetRenderPresetList() --> [presets...] # Returns a list of render presets and their information. - StartRendering(jobId1, jobId2, ...) --> Bool # Starts rendering jobs indicated by the input job ids. - StartRendering([jobIds...], isInteractiveMode=False) --> Bool # Starts rendering jobs indicated by the input job ids. - # The optional "isInteractiveMode", when set, enables error feedback in the UI during rendering. - StartRendering(isInteractiveMode=False) --> Bool # Starts rendering all queued render jobs. - # The optional "isInteractiveMode", when set, enables error feedback in the UI during rendering. - StopRendering() --> None # Stops any current render processes. - IsRenderingInProgress() --> Bool # Returns True if rendering is in progress. - LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists. - SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName(string) is unique. - SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys: - # Refer to "Looking up render settings" section for information for supported settings - GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string). - GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information. - SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). Check the section below for more information. - GetRenderFormats() --> {render formats..} # Returns a dict (format -> file extension) of available render formats. - GetRenderCodecs(renderFormat) --> {render codecs...} # Returns a dict (codec description -> codec name) of available codecs for given render format (string). 
- GetCurrentRenderFormatAndCodec() --> {format, codec} # Returns a dict with currently selected format 'format' and render codec 'codec'. - SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering. - GetCurrentRenderMode() --> int # Returns the render mode: 0 - Individual clips, 1 - Single clip. - SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip. - GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height". - RefreshLUTList() --> Bool # Refreshes LUT List - GetUniqueId() --> string # Returns a unique ID for the project item - InsertAudioToCurrentTrackAtPlayhead(mediaPath, --> Bool # Inserts the media specified by mediaPath (string) with startOffsetInSamples (int) and durationInSamples (int) at the playhead on a selected track on the Fairlight page. Returns True if successful, otherwise False. - startOffsetInSamples, durationInSamples) - LoadBurnInPreset(presetName) --> Bool # Loads user defined data burn in preset for project when supplied presetName (string). Returns true if successful. - ExportCurrentFrameAsStill(filePath) --> Bool # Exports current frame as still to supplied filePath. filePath must end in valid export file format. Returns True if succssful, False otherwise. - GetColorGroupsList() --> [ColorGroups...] # Returns a list of all group objects in the timeline. - AddColorGroup(groupName) --> ColorGroup # Creates a new ColorGroup. groupName must be a unique string. - DeleteColorGroup(colorGroup) --> Bool # Deletes the given color group and sets clips to ungrouped. - -MediaStorage - GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage. - GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path. - GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries. - RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolve’s Media Storage. - AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created. - AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created. - AddItemListToMediaPool([{itemInfo}, ...]) --> [clips...] # Adds list of itemInfos specified as dict of "media", "startFrame" (int), "endFrame" (int) from Media Storage into current Media Pool folder. Returns a list of the MediaPoolItems created. - AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful. 
- AddTimelineMattesToMediaPool([paths]) --> [MediaPoolItems] # Adds specified media files as timeline mattes in current media pool folder. Returns a list of created MediaPoolItems. - -MediaPool - GetRootFolder() --> Folder # Returns root Folder of Media Pool - AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name. - RefreshFolders() --> Bool # Updates the folders in collaboration mode - CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name. - AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems. - AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems. - AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only), "trackIndex" (int) and "recordFrame" (int). Returns the list of appended timelineItems. - CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects. - CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects. - CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), "recordFrame" (int). - ImportTimelineFromFile(filePath, {importOptions}) --> Timeline # Creates timeline based on parameters within given file (AAF/EDL/XML/FCPXML/DRT/ADL/OTIO) and optional importOptions dict, with support for the keys: - # "timelineName": string, specifies the name of the timeline to be created. Not valid for DRT import - # "importSourceClips": Bool, specifies whether source clips should be imported, True by default. Not valid for DRT import - # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True - # "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False. Not valid for DRT import - # "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. valid only for AAF import - DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool. - GetCurrentFolder() --> Folder # Returns currently selected Folder. - SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder. - DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool - ImportFolderFromFile(filePath, sourceClipsPath="") --> Bool # Returns true if import from given DRB filePath is successful, false otherwise - # sourceClipsPath is a string that specifies a filesystem path to search for source clips if the media is inaccessible in their original path, empty by default - DeleteFolders([subfolders]) --> Bool # Deletes specified subfolders in the media pool - MoveClips([clips], targetFolder) --> Bool # Moves specified clips to target folder. - MoveFolders([folders], targetFolder) --> Bool # Moves specified folders to target folder. 
- GetClipMatteList(MediaPoolItem) --> [paths] # Get mattes for specified MediaPoolItem, as a list of paths to the matte files. - GetTimelineMatteList(Folder) --> [MediaPoolItems] # Get mattes in specified Folder, as list of MediaPoolItems. - DeleteClipMattes(MediaPoolItem, [paths]) --> Bool # Delete mattes based on their file paths, for specified MediaPoolItem. Returns True on success. - RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path. - UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips. - ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created. - ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created. - # Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on. - # Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx". - ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format. - # If no clips are specified, all clips from media pool will be used. - GetUniqueId() --> string # Returns a unique ID for the media pool - CreateStereoClip(LeftMediaPoolItem, - RightMediaPoolItem) --> MediaPoolItem # Takes in two existing media pool items and creates a new 3D stereoscopic media pool entry replacing the input media in the media pool. - -Folder - GetClipList() --> [clips...] # Returns a list of clips (items) within the folder. - GetName() --> string # Returns the media folder name. - GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder. - GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise - GetUniqueId() --> string # Returns a unique ID for the media pool folder - Export(filePath) --> bool # Returns true if export of DRB folder to filePath is successful, false otherwise - TranscribeAudio() --> Bool # Transcribes audio of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise - ClearTranscription() --> Bool # Clears audio transcription of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise. - -MediaPoolItem - GetName() --> string # Returns the clip name. - GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'. - # If no argument is specified, a dict of all set metadata properties is returned. - SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful. - SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful. - GetMediaId() --> string # Returns the unique ID for the MediaPoolItem. - AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. - customData) - GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information. 
- # Example of output format: {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...} - # In the above example - there is one 'Green' marker at offset 96 (position of the marker) - GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData. - UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers. - GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position. - DeleteMarkersByColor(color) --> Bool # Delete all markers of the specified color from the media pool item. "All" as argument deletes all color markers. - DeleteMarkerAtFrame(frameNum) --> Bool # Delete marker at frame number from the media pool item. - DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData. - AddFlag(color) --> Bool # Adds a flag with given color (string). - GetFlagList() --> [colors...] # Returns a list of flag colors assigned to the item. - ClearFlags(color) --> Bool # Clears the flag of the given color if one exists. An "All" argument is supported and clears all flags. - GetClipColor() --> string # Returns the item color as a string. - SetClipColor(colorName) --> Bool # Sets the item color based on the colorName (string). - ClearClipColor() --> Bool # Clears the item color. - GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'. - # If no argument is specified, a dict of all clip properties is returned. Check the section below for more information. - SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information. - LinkProxyMedia(proxyMediaFilePath) --> Bool # Links proxy media located at path specified by arg 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be absolute clip path. - UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip. - ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path. - GetUniqueId() --> string # Returns a unique ID for the media pool item - TranscribeAudio() --> Bool # Transcribes audio of the MediaPoolItem. Returns True if successful; False otherwise - ClearTranscription() --> Bool # Clears audio transcription of the MediaPoolItem. Returns True if successful; False otherwise. - -Timeline - GetName() --> string # Returns the timeline name. - SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful. - GetStartFrame() --> int # Returns the frame number at the start of timeline. - GetEndFrame() --> int # Returns the frame number at the end of timeline. - SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise. - GetStartTimecode() --> string # Returns the start timecode for the timeline. - GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle"). - AddTrack(trackType, optionalSubTrackType) --> Bool # Adds track of trackType ("video", "subtitle", "audio"). 
Second argument optionalSubTrackType is required for "audio" - # optionalSubTrackType can be one of {"mono", "stereo", "5.1", "5.1film", "7.1", "7.1film", "adaptive1", ... , "adaptive24"} - DeleteTrack(trackType, trackIndex) --> Bool # Deletes track of trackType ("video", "subtitle", "audio") and given trackIndex. 1 <= trackIndex <= GetTrackCount(trackType). - SetTrackEnable(trackType, trackIndex, Bool) --> Bool # Enables/Disables track with given trackType and trackIndex - # trackType is one of {"audio", "video", "subtitle"} - # 1 <= trackIndex <= GetTrackCount(trackType). - GetIsTrackEnabled(trackType, trackIndex) --> Bool # Returns True if track with given trackType and trackIndex is enabled and False otherwise. - # trackType is one of {"audio", "video", "subtitle"} - # 1 <= trackIndex <= GetTrackCount(trackType). - SetTrackLock(trackType, trackIndex, Bool) --> Bool # Locks/Unlocks track with given trackType and trackIndex - # trackType is one of {"audio", "video", "subtitle"} - # 1 <= trackIndex <= GetTrackCount(trackType). - GetIsTrackLocked(trackType, trackIndex) --> Bool # Returns True if track with given trackType and trackIndex is locked and False otherwise. - # trackType is one of {"audio", "video", "subtitle"} - # 1 <= trackIndex <= GetTrackCount(trackType). - DeleteClips([timelineItems], Bool) --> Bool # Deletes specified TimelineItems from the timeline, performing ripple delete if the second argument is True. Second argument is optional (The default for this is False) - SetClipsLinked([timelineItems], Bool) --> Bool # Links or unlinks the specified TimelineItems depending on second argument. - GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType). - AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. - customData) - GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information. - # Example: a value of {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...} indicates a single green marker at timeline offset 96 - GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData. - UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers. - GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position. - DeleteMarkersByColor(color) --> Bool # Deletes all timeline markers of the specified color. An "All" argument is supported and deletes all timeline markers. - DeleteMarkerAtFrame(frameNum) --> Bool # Deletes the timeline marker at the given frame number. - DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData. - ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". 
- ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
- GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages.
- SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages.
- GetCurrentVideoItem() --> item # Returns the current video timeline item.
- GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page.
- # An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder.
- GetTrackName(trackType, trackIndex) --> string # Returns the track name for track indicated by trackType ("audio", "video" or "subtitle") and index. 1 <= trackIndex <= GetTrackCount(trackType).
- SetTrackName(trackType, trackIndex, name) --> Bool # Sets the track name (string) for track indicated by trackType ("audio", "video" or "subtitle") and index. 1 <= trackIndex <= GetTrackCount(trackType).
- DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success.
- CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item.
- CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item.
- ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys:
- # "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default
- # "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default
- # "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default
- # "useSizingInfo": Bool, specifies if sizing information should be used, False by default
- # "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default
- # "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default
- # "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False
- # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True
- # "sourceClipsFolders": list of Media Pool folder objects to search for source clips if the media is not present in current folder
-
- Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format.
- # Refer to section "Looking up timeline export properties" for information on the parameters. - GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information. - SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information. - InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline. - InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline. - InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline. - InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline. - InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline. - InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline. - GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object. - GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects. - GetUniqueId() --> string # Returns a unique ID for the timeline - CreateSubtitlesFromAudio({autoCaptionSettings}) --> Bool # Creates subtitles from audio for the timeline. - # Takes in optional dictionary {autoCaptionSettings}. Check 'Auto Caption Settings' subsection below for more information. - # Returns True on success, False otherwise. - DetectSceneCuts() --> Bool # Detects and makes scene cuts along the timeline. Returns True if successful, False otherwise. - ConvertTimelineToStereo() --> Bool # Converts timeline to stereo. Returns True if successful; False otherwise. - GetNodeGraph() --> Graph # Returns the timeline's node graph object. - -TimelineItem - GetName() --> string # Returns the item name. - GetDuration() --> int # Returns the item duration. - GetEnd() --> int # Returns the end frame position on the timeline. - GetFusionCompCount() --> int # Returns number of Fusion compositions associated with the timeline item. - GetFusionCompByIndex(compIndex) --> fusionComp # Returns the Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount() - GetFusionCompNameList() --> [names...] # Returns a list of Fusion composition names associated with the timeline item. - GetFusionCompByName(compName) --> fusionComp # Returns the Fusion composition object based on given name. - GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side. - GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side. - GetStart() --> int # Returns the start frame position on the timeline. 
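- # Example python code (a sketch, not part of the official listing) iterating the basic getters above for every item on video track 1;
- # it assumes Resolve is running and 'resolve' is obtained via the documented scripting bootstrap:
- #   import DaVinciResolveScript as dvr_script
- #   resolve = dvr_script.scriptapp("Resolve")
- #   timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()
- #   for item in timeline.GetItemListInTrack("video", 1):
- #       print(item.GetName(), item.GetStart(), item.GetEnd(), item.GetDuration())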
- SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue" - # Refer to "Looking up Timeline item properties" for more information - GetProperty(propertyKey) --> int/[key:value] # returns the value of the specified key - # if no key is specified, the method returns a dictionary(python) or table(lua) for all supported keys - AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. - customData) - GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information. - # Example: a value of {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...} indicates a single green marker at clip offset 96 - GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData. - UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers. - GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position. - DeleteMarkersByColor(color) --> Bool # Delete all markers of the specified color from the timeline item. "All" as argument deletes all color markers. - DeleteMarkerAtFrame(frameNum) --> Bool # Delete marker at frame number from the timeline item. - DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData. - AddFlag(color) --> Bool # Adds a flag with given color (string). - GetFlagList() --> [colors...] # Returns a list of flag colors assigned to the item. - ClearFlags(color) --> Bool # Clear flags of the specified color. An "All" argument is supported to clear all flags. - GetClipColor() --> string # Returns the item color as a string. - SetClipColor(colorName) --> Bool # Sets the item color based on the colorName (string). - ClearClipColor() --> Bool # Clears the item color. - AddFusionComp() --> fusionComp # Adds a new Fusion composition associated with the timeline item. - ImportFusionComp(path) --> fusionComp # Imports a Fusion composition from given file path by creating and adding a new composition for the item. - ExportFusionComp(path, compIndex) --> Bool # Exports the Fusion composition based on given index to the path provided. - DeleteFusionCompByName(compName) --> Bool # Deletes the named Fusion composition. - LoadFusionCompByName(compName) --> fusionComp # Loads the named Fusion composition as the active composition. - RenameFusionCompByName(oldName, newName) --> Bool # Renames the Fusion composition identified by oldName. - AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote). - GetCurrentVersion() --> {versionName...} # Returns the current version of the video clip. The returned value will have the keys versionName and versionType(0 - local, 1 - remote). - DeleteVersionByName(versionName, versionType) --> Bool # Deletes a color version by name and versionType (0 - local, 1 - remote). - LoadVersionByName(versionName, versionType) --> Bool # Loads a named color version as the active version. versionType: 0 - local, 1 - remote. 
- RenameVersionByName(oldName, newName, versionType)--> Bool # Renames the color version identified by oldName and versionType (0 - local, 1 - remote).
- GetVersionNameList(versionType) --> [names...] # Returns a list of all color versions for the given versionType (0 - local, 1 - remote).
- GetMediaPoolItem() --> MediaPoolItem # Returns the media pool item corresponding to the timeline item if one exists.
- GetStereoConvergenceValues() --> {keyframes...} # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
- GetStereoLeftFloatingWindowParams() --> {keyframes...} # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
- GetStereoRightFloatingWindowParams() --> {keyframes...} # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
- ApplyArriCdlLut() --> Bool # Applies ARRI CDL and LUT. Returns True if successful, False otherwise.
- SetCDL([CDL map]) --> Bool # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
- # Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
- AddTake(mediaPoolItem, startFrame, endFrame) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents are added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
- GetSelectedTakeIndex() --> int # Returns the index of the currently selected take, or 0 if the clip is not a take selector.
- GetTakesCount() --> int # Returns the number of takes in take selector, or 0 if the clip is not a take selector.
- GetTakeByIndex(idx) --> {takeInfo...} # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for specified index.
- DeleteTakeByIndex(idx) --> Bool # Deletes a take by index, 1 <= idx <= number of takes.
- SelectTakeByIndex(idx) --> Bool # Selects a take by index, 1 <= idx <= number of takes.
- FinalizeTake() --> Bool # Finalizes take selection.
- CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred.
- SetClipEnabled(Bool) --> Bool # Sets clip enabled based on argument.
- GetClipEnabled() --> Bool # Gets clip enabled status.
- UpdateSidecar() --> Bool # Updates sidecar file for BRAW clips or RMD file for R3D clips.
- GetUniqueId() --> string # Returns a unique ID for the timeline item
- LoadBurnInPreset(presetName) --> Bool # Loads user defined data burn in preset for clip when supplied presetName (string). Returns true if successful.
- CreateMagicMask(mode) --> Bool # Returns True if magic mask was created successfully, False otherwise. mode can be "F" (forward), "B" (backward), or "BI" (bidirectional)
- RegenerateMagicMask() --> Bool # Returns True if magic mask was regenerated successfully, False otherwise.
- Stabilize() --> Bool # Returns True if stabilization was successful, False otherwise
- SmartReframe() --> Bool # Performs Smart Reframe. Returns True if successful, False otherwise.
- GetNodeGraph() --> Graph # Returns the clip's node graph object.
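- # Example python code (a sketch) for the take selector functions above; 'item' is assumed to be
- # a TimelineItem and 'mp_item' a MediaPoolItem already obtained from the media pool:
- #   item.AddTake(mp_item)                             # full clip extents by default
- #   if item.GetTakesCount() > 0:
- #       item.SelectTakeByIndex(item.GetTakesCount())  # takes use 1-based indices
- #       item.FinalizeTake()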
- GetColorGroup() --> ColorGroup # Returns the clip's color group if one exists.
- AssignToColorGroup(ColorGroup) --> Bool # Returns True if the TiItem is successfully assigned to the given ColorGroup. ColorGroup must be an existing group in the current project.
- RemoveFromColorGroup() --> Bool # Returns True if the TiItem is successfully removed from the ColorGroup it is in.
- ExportLUT(exportType, path) --> Bool # Exports LUTs from tiItem referring to value passed in 'exportType' (enum) for LUT size. Refer to the 'ExportLUT notes' section for possible values.
- # Saves generated LUT in the provided 'path' (string). 'path' should include the intended file name.
- # If an empty or incorrect extension is provided, the appropriate extension (.cube/.vlt) will be appended at the end of the path.
-
-Gallery
- GetAlbumName(galleryStillAlbum) --> string # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
- SetAlbumName(galleryStillAlbum, albumName) --> Bool # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'.
- GetCurrentStillAlbum() --> galleryStillAlbum # Returns current album as a GalleryStillAlbum object.
- SetCurrentStillAlbum(galleryStillAlbum) --> Bool # Sets current album to GalleryStillAlbum object 'galleryStillAlbum'.
- GetGalleryStillAlbums() --> [galleryStillAlbum] # Returns the gallery albums as a list of GalleryStillAlbum objects.
-
-GalleryStillAlbum
- GetStills() --> [galleryStill] # Returns the list of GalleryStill objects in the album.
- GetLabel(galleryStill) --> string # Returns the label of the galleryStill.
- SetLabel(galleryStill, label) --> Bool # Sets the new 'label' to GalleryStill object 'galleryStill'.
- ImportStills([filePaths]) --> Bool # Imports GalleryStill from each filePath in [filePaths] list. True if at least one still is imported successfully. False otherwise.
- ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm, drx).
- DeleteStills([galleryStill]) --> Bool # Deletes specified list of GalleryStill objects '[galleryStill]'.
-
-GalleryStill # This class does not provide any API functions but the object type is used by functions in other classes.
-
-Graph
- GetNumNodes() --> int # Returns the number of nodes in the graph
- SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= self.GetNumNodes().
- # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
- # The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
- GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
- GetNodeLabel(nodeIndex) --> string # Returns the label of the node at nodeIndex.
-
-ColorGroup
- GetName() --> String # Returns the name (string) of the ColorGroup.
- SetName(groupName) --> Bool # Renames ColorGroup to groupName (string).
- GetClipsInTimeline(Timeline=CurrTimeline) --> [TimelineItem] # Returns a list of TimelineItem that are in colorGroup in the given Timeline. Timeline is Current Timeline by default.
- GetPreClipNodeGraph() --> Graph # Returns the ColorGroup Pre-clip graph.
- GetPostClipNodeGraph() --> Graph # Returns the ColorGroup Post-clip graph.
-
-List and Dict Data Structures
------------------------------
-Besides primitive data types, Resolve's Python API mainly uses list and dict data structures. Lists are denoted by [ ... ] and dicts are denoted by { ... } above.
-As Lua does not support list and dict data structures, the Lua API implements "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }.
-Similarly the Lua API implements "dict" as a table with the dictionary key as first element, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }.
-
-Keyframe Mode information
--------------------------
-This section covers additional notes for the functions Resolve.GetKeyframeMode() and Resolve.SetKeyframeMode(keyframeMode).
-
-'keyframeMode' can be one of the following enums:
- - resolve.KEYFRAME_MODE_ALL == 0
- - resolve.KEYFRAME_MODE_COLOR == 1
- - resolve.KEYFRAME_MODE_SIZING == 2
-
-Integer values returned by Resolve.GetKeyframeMode() will correspond to the enums above.
-
-Cloud Projects Settings
---------------------------------------
-This section covers additional notes for the functions "ProjectManager:CreateCloudProject", "ProjectManager:ImportCloudProject" and "ProjectManager:RestoreCloudProject".
-
-All three functions take in a {cloudSettings} dict that has the following keys:
-* resolve.CLOUD_SETTING_PROJECT_NAME: String, ["" by default]
-* resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH: String, ["" by default]
-* resolve.CLOUD_SETTING_IS_COLLAB: Bool, [False by default]
-* resolve.CLOUD_SETTING_SYNC_MODE: syncMode (see below), [resolve.CLOUD_SYNC_PROXY_ONLY by default]
-* resolve.CLOUD_SETTING_IS_CAMERA_ACCESS: Bool, [False by default]
-
-Where syncMode is one of the following values:
-* resolve.CLOUD_SYNC_NONE,
-* resolve.CLOUD_SYNC_PROXY_ONLY,
-* resolve.CLOUD_SYNC_PROXY_AND_ORIG
-
-All three "ProjectManager:CreateCloudProject", "ProjectManager:ImportCloudProject" and "ProjectManager:RestoreCloudProject" require resolve.PROJECT_MEDIA_PATH to be defined. "ProjectManager:CreateCloudProject" also requires resolve.PROJECT_NAME to be defined.
-
-Looking up Project and Clip properties
---------------------------------------
-This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and
-"MediaPoolItem:SetClipProperty". These functions are used to get and set properties otherwise available to the user through the Project Settings and the Clip Attributes dialogs.
-
-The functions follow a key-value pair format, where each property is identified by a key (the settingName or propertyName parameter) and possesses a value (typically a text value). Keys and values are
-designed to be easily correlated with parameter names and values in the Resolve UI. Explicitly enumerated values for some parameters are listed below.
-
-Some properties may be read only - these include intrinsic clip properties like date created or sample rate, and properties that can be disabled in specific application contexts (e.g. custom colorspaces
-in an ACES workflow, or output sizing parameters when behavior is set to match timeline).
-
-Getting values:
-Invoke "Project:GetSetting", "Timeline:GetSetting" or "MediaPoolItem:GetClipProperty" with the appropriate property key.
To get a snapshot of all queryable properties (keys and values), you can call -"Project:GetSetting", "Timeline:GetSetting" or "MediaPoolItem:GetClipProperty" without parameters (or with a NoneType or a blank property key). Using specific keys to query individual properties will -be faster. Note that getting a property using an invalid key will return a trivial result. - -Setting values: -Invoke "Project:SetSetting", "Timeline:SetSetting" or "MediaPoolItem:SetClipProperty" with the appropriate property key and a valid value. When setting a parameter, please check the return value to -ensure the success of the operation. You can troubleshoot the validity of keys and values by setting the desired result from the UI and checking property snapshots before and after the change. - -The following Project properties have specifically enumerated values: -"superScale" - the property value is an enumerated integer between 0 and 4 with these meanings: 0=Auto, 1=no scaling, and 2, 3 and 4 represent the Super Scale multipliers 2x, 3x and 4x. - for super scale multiplier '2x Enhanced', exactly 4 arguments must be passed as outlined below. If less than 4 arguments are passed, it will default to 2x. -Affects: -• x = Project:GetSetting('superScale') and Project:SetSetting('superScale', x) -• for '2x Enhanced' --> Project:SetSetting('superScale', 2, sharpnessValue, noiseReductionValue), where sharpnessValue is a float in the range [0.0, 1.0] and noiseReductionValue is a float in the range [0.0, 1.0] - -"timelineFrameRate" - the property value is one of the frame rates available to the user in project settings under "Timeline frame rate" option. Drop Frame can be configured for supported frame rates - by appending the frame rate with "DF", e.g. "29.97 DF" will enable drop frame and "29.97" will disable drop frame -Affects: -• x = Project:GetSetting('timelineFrameRate') and Project:SetSetting('timelineFrameRate', x) - -The following Clip properties have specifically enumerated values: -"Super Scale" - the property value is an enumerated integer between 1 and 4 with these meanings: 1=no scaling, and 2, 3 and 4 represent the Super Scale multipliers 2x, 3x and 4x. - for super scale multiplier '2x Enhanced', exactly 4 arguments must be passed as outlined below. If less than 4 arguments are passed, it will default to 2x. -Affects: -• x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x) -• for '2x Enhanced' --> MediaPoolItem:SetClipProperty('Super Scale', 2, sharpnessValue, noiseReductionValue), where sharpnessValue is a float in the range [0.0, 1.0] and noiseReductionValue is a float in the range [0.0, 1.0] - -Auto Caption Settings ----------------------- -This section covers the supported settings for the method Timeline.CreateSubtitlesFromAudio({autoCaptionSettings}) - -The parameter setting is a dictionary containing the following keys: -* resolve.SUBTITLE_LANGUAGE: languageID (see below), [resolve.AUTO_CAPTION_AUTO by default] -* resolve.SUBTITLE_CAPTION_PRESET: presetType (see below), [resolve.AUTO_CAPTION_SUBTITLE_DEFAULT by default] -* resolve.SUBTITLE_CHARS_PER_LINE: Number between 1 and 60 inclusive [42 by default] -* resolve.SUBTITLE_LINE_BREAK: lineBreakType (see below), [resolve.AUTO_CAPTION_LINE_SINGLE by default] -* resolve.SUBTITLE_GAP: Number between 0 and 10 inclusive [0 by default] - -Note that the default values for some keys may change based on values defined for other keys, as per the UI. 
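-A plain call might look like this in Python (a sketch; it assumes 'resolve' is the scripting entry object obtained via DaVinciResolveScript.scriptapp("Resolve") and 'timeline' is the current Timeline with audio content; key names are taken from the list above):
-    timeline.CreateSubtitlesFromAudio({
-        resolve.SUBTITLE_LANGUAGE: resolve.AUTO_CAPTION_ENGLISH,
-        resolve.SUBTITLE_CHARS_PER_LINE: 40,
-    })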
-For example, if the following dictionary is supplied, - CreateSubtitlesFromAudio( { resolve.SUBTITLE_LANGUAGE = resolve.AUTO_CAPTION_KOREAN, - resolve.SUBTITLE_CAPTION_PRESET = resolve.AUTO_CAPTION_NETFLIX } ) -the default value for resolve.SUBTITLE_CHARS_PER_LINE will be 16 instead of 42 - -languageIDs: -* resolve.AUTO_CAPTION_AUTO -* resolve.AUTO_CAPTION_DANISH -* resolve.AUTO_CAPTION_DUTCH -* resolve.AUTO_CAPTION_ENGLISH -* resolve.AUTO_CAPTION_FRENCH -* resolve.AUTO_CAPTION_GERMAN -* resolve.AUTO_CAPTION_ITALIAN -* resolve.AUTO_CAPTION_JAPANESE -* resolve.AUTO_CAPTION_KOREAN -* resolve.AUTO_CAPTION_MANDARIN_SIMPLIFIED -* resolve.AUTO_CAPTION_MANDARIN_TRADITIONAL -* resolve.AUTO_CAPTION_NORWEGIAN -* resolve.AUTO_CAPTION_PORTUGUESE -* resolve.AUTO_CAPTION_RUSSIAN -* resolve.AUTO_CAPTION_SPANISH -* resolve.AUTO_CAPTION_SWEDISH - -presetTypes: -* resolve.AUTO_CAPTION_SUBTITLE_DEFAULT -* resolve.AUTO_CAPTION_TELETEXT -* resolve.AUTO_CAPTION_NETFLIX - -lineBreakTypes: -* resolve.AUTO_CAPTION_LINE_SINGLE -* resolve.AUTO_CAPTION_LINE_DOUBLE - -Looking up Render Settings --------------------------- -This section covers the supported settings for the method SetRenderSettings({settings}) - -The parameter setting is a dictionary containing the following keys: - - "SelectAllFrames": Bool (when set True, the settings MarkIn and MarkOut are ignored) - - "MarkIn": int - - "MarkOut": int - - "TargetDir": string - - "CustomName": string - - "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix. - - "ExportVideo": Bool - - "ExportAudio": Bool - - "FormatWidth": int - - "FormatHeight": int - - "FrameRate": float (examples: 23.976, 24) - - "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope") - - "VideoQuality" possible values for current codec (if applicable): - - 0 (int) - will set quality to automatic - - [1 -> MAX] (int) - will set input bit rate - - ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level - - "AudioCodec": string (example: "aac") - - "AudioBitDepth": int - - "AudioSampleRate": int - - "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign") - - "GammaTag" : string (example: "Same as Project", "ACEScct") - - "ExportAlpha": Bool - - "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265. - - "MultiPassEncode": Bool. Can only be set for H.264. - - "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true. - - "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats. - -Looking up timeline export properties -------------------------------------- -This section covers the parameters for the argument Export(fileName, exportType, exportSubtype). 
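-A typical call might look like this in Python (a sketch; 'timeline' is assumed to be the current Timeline object, "/path/to/my_timeline.aaf" is a placeholder, and the constants come from the lists below):
-    timeline.Export("/path/to/my_timeline.aaf", resolve.EXPORT_AAF, resolve.EXPORT_AAF_NEW)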
-
-exportType can be one of the following constants:
- - resolve.EXPORT_AAF
- - resolve.EXPORT_DRT
- - resolve.EXPORT_EDL
- - resolve.EXPORT_FCP_7_XML
- - resolve.EXPORT_FCPXML_1_8
- - resolve.EXPORT_FCPXML_1_9
- - resolve.EXPORT_FCPXML_1_10
- - resolve.EXPORT_HDR_10_PROFILE_A
- - resolve.EXPORT_HDR_10_PROFILE_B
- - resolve.EXPORT_TEXT_CSV
- - resolve.EXPORT_TEXT_TAB
- - resolve.EXPORT_DOLBY_VISION_VER_2_9
- - resolve.EXPORT_DOLBY_VISION_VER_4_0
- - resolve.EXPORT_DOLBY_VISION_VER_5_1
- - resolve.EXPORT_OTIO
- - resolve.EXPORT_ALE
- - resolve.EXPORT_ALE_CDL
-exportSubtype can be one of the following enums:
- - resolve.EXPORT_NONE
- - resolve.EXPORT_AAF_NEW
- - resolve.EXPORT_AAF_EXISTING
- - resolve.EXPORT_CDL
- - resolve.EXPORT_SDL
- - resolve.EXPORT_MISSING_CLIPS
-Please note that exportSubtype is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the rest of the exportTypes, exportSubtype is ignored.
-When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
-When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
-Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used.
-
-Unsupported exportType types
----------------------------------
-Starting with DaVinci Resolve 18.1, the following export types are not supported:
- - resolve.EXPORT_FCPXML_1_3
- - resolve.EXPORT_FCPXML_1_4
- - resolve.EXPORT_FCPXML_1_5
- - resolve.EXPORT_FCPXML_1_6
- - resolve.EXPORT_FCPXML_1_7
-
-
-Looking up Timeline item properties
------------------------------------
-This section covers additional notes for the functions "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set the properties mentioned below.
- -The supported keys with their accepted values are: - "Pan" : floating point values from -4.0*width to 4.0*width - "Tilt" : floating point values from -4.0*height to 4.0*height - "ZoomX" : floating point values from 0.0 to 100.0 - "ZoomY" : floating point values from 0.0 to 100.0 - "ZoomGang" : a boolean value - "RotationAngle" : floating point values from -360.0 to 360.0 - "AnchorPointX" : floating point values from -4.0*width to 4.0*width - "AnchorPointY" : floating point values from -4.0*height to 4.0*height - "Pitch" : floating point values from -1.5 to 1.5 - "Yaw" : floating point values from -1.5 to 1.5 - "FlipX" : boolean value for flipping horizontally - "FlipY" : boolean value for flipping vertically - "CropLeft" : floating point values from 0.0 to width - "CropRight" : floating point values from 0.0 to width - "CropTop" : floating point values from 0.0 to height - "CropBottom" : floating point values from 0.0 to height - "CropSoftness" : floating point values from -100.0 to 100.0 - "CropRetain" : boolean value for "Retain Image Position" checkbox - "DynamicZoomEase" : A value from the following constants - - DYNAMIC_ZOOM_EASE_LINEAR = 0 - - DYNAMIC_ZOOM_EASE_IN - - DYNAMIC_ZOOM_EASE_OUT - - DYNAMIC_ZOOM_EASE_IN_AND_OUT - "CompositeMode" : A value from the following constants - - COMPOSITE_NORMAL = 0 - - COMPOSITE_ADD - - COMPOSITE_SUBTRACT - - COMPOSITE_DIFF - - COMPOSITE_MULTIPLY - - COMPOSITE_SCREEN - - COMPOSITE_OVERLAY - - COMPOSITE_HARDLIGHT - - COMPOSITE_SOFTLIGHT - - COMPOSITE_DARKEN - - COMPOSITE_LIGHTEN - - COMPOSITE_COLOR_DODGE - - COMPOSITE_COLOR_BURN - - COMPOSITE_EXCLUSION - - COMPOSITE_HUE - - COMPOSITE_SATURATE - - COMPOSITE_COLORIZE - - COMPOSITE_LUMA_MASK - - COMPOSITE_DIVIDE - - COMPOSITE_LINEAR_DODGE - - COMPOSITE_LINEAR_BURN - - COMPOSITE_LINEAR_LIGHT - - COMPOSITE_VIVID_LIGHT - - COMPOSITE_PIN_LIGHT - - COMPOSITE_HARD_MIX - - COMPOSITE_LIGHTER_COLOR - - COMPOSITE_DARKER_COLOR - - COMPOSITE_FOREGROUND - - COMPOSITE_ALPHA - - COMPOSITE_INVERTED_ALPHA - - COMPOSITE_LUM - - COMPOSITE_INVERTED_LUM - "Opacity" : floating point value from 0.0 to 100.0 - "Distortion" : floating point value from -1.0 to 1.0 - "RetimeProcess" : A value from the following constants - - RETIME_USE_PROJECT = 0 - - RETIME_NEAREST - - RETIME_FRAME_BLEND - - RETIME_OPTICAL_FLOW - "MotionEstimation" : A value from the following constants - - MOTION_EST_USE_PROJECT = 0 - - MOTION_EST_STANDARD_FASTER - - MOTION_EST_STANDARD_BETTER - - MOTION_EST_ENHANCED_FASTER - - MOTION_EST_ENHANCED_BETTER - - MOTION_EST_SPEED_WARP_BETTER - - MOTION_EST_SPEED_WARP_FASTER - "Scaling" : A value from the following constants - - SCALE_USE_PROJECT = 0 - - SCALE_CROP - - SCALE_FIT - - SCALE_FILL - - SCALE_STRETCH - "ResizeFilter" : A value from the following constants - - RESIZE_FILTER_USE_PROJECT = 0 - - RESIZE_FILTER_SHARPER - - RESIZE_FILTER_SMOOTHER - - RESIZE_FILTER_BICUBIC - - RESIZE_FILTER_BILINEAR - - RESIZE_FILTER_BESSEL - - RESIZE_FILTER_BOX - - RESIZE_FILTER_CATMULL_ROM - - RESIZE_FILTER_CUBIC - - RESIZE_FILTER_GAUSSIAN - - RESIZE_FILTER_LANCZOS - - RESIZE_FILTER_MITCHELL - - RESIZE_FILTER_NEAREST_NEIGHBOR - - RESIZE_FILTER_QUADRATIC - - RESIZE_FILTER_SINC - - RESIZE_FILTER_LINEAR -Values beyond the range will be clipped -width and height are same as the UI max limits - -The arguments can be passed as a key and value pair or they can be grouped together into a dictionary (for python) or table (for lua) and passed -as a single argument. 
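-For instance, in Python both calling forms below are valid ways to set values on a timeline item (a sketch; 'ti' is assumed to be any TimelineItem):
-    ti.SetProperty("ZoomX", 1.5)                      # one key and value per call
-    ti.SetProperty({"Pan": 20.0, "Opacity": 80.0})    # several keys grouped in a dict
-    ti.GetProperty("ZoomX")                           # returns 1.5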
-
-Getting the value for a key that uses constants will return the number defined by that constant.
-
-ExportLUT notes
----------------
-The following section covers additional notes for TimelineItem.ExportLUT(exportType, path).
-
-Supported values for 'exportType' (enum) are:
- - resolve.EXPORT_LUT_17PTCUBE
- - resolve.EXPORT_LUT_33PTCUBE
- - resolve.EXPORT_LUT_65PTCUBE
- - resolve.EXPORT_LUT_PANASONICVLUT
-
-Deprecated Resolve API Functions
---------------------------------
-The following API functions are deprecated.
-
-ProjectManager
- GetProjectsInCurrentFolder() --> {project names...} # Returns a dict of project names in current folder.
- GetFoldersInCurrentFolder() --> {folder names...} # Returns a dict of folder names in current folder.
-
-Project
- GetPresets() --> {presets...} # Returns a dict of presets and their information.
- GetRenderJobs() --> {render jobs...} # Returns a dict of render jobs and their information.
- GetRenderPresets() --> {presets...} # Returns a dict of render presets and their information.
-
-MediaStorage
- GetMountedVolumes() --> {paths...} # Returns a dict of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage.
- GetSubFolders(folderPath) --> {paths...} # Returns a dict of folder paths in the given absolute folder path.
- GetFiles(folderPath) --> {paths...} # Returns a dict of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
- AddItemsToMediaPool(item1, item2, ...) --> {clips...} # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a dict of the MediaPoolItems created.
- AddItemsToMediaPool([items...]) --> {clips...} # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a dict of the MediaPoolItems created.
-
-Folder
- GetClips() --> {clips...} # Returns a dict of clips (items) within the folder.
- GetSubFolders() --> {folders...} # Returns a dict of subfolders in the folder.
-
-MediaPoolItem
- GetFlags() --> {colors...} # Returns a dict of flag colors assigned to the item.
-
-Timeline
- GetItemsInTrack(trackType, index) --> {items...} # Returns a dict of Timeline items on the video or audio track (based on trackType) at specified index.
-
-TimelineItem
- GetFusionCompNames() --> {names...} # Returns a dict of Fusion composition names associated with the timeline item.
- GetFlags() --> {colors...} # Returns a dict of flag colors assigned to the item.
- GetVersionNames(versionType) --> {names...} # Returns a dict of version names by provided versionType: 0 - local, 1 - remote.
- GetNumNodes() --> int # Returns the number of nodes in the current graph for the timeline item
- SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
- # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
- # The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
- GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
- GetNodeLabel(nodeIndex) --> string # Returns the label of the node at nodeIndex.
-
-Unsupported Resolve API Functions
---------------------------------
-The following API (functions and parameters) are no longer supported.
Use job IDs instead of indices. - -Project - StartRendering(index1, index2, ...) --> Bool # Please use unique job ids (string) instead of indices. - StartRendering([idxs...]) --> Bool # Please use unique job ids (string) instead of indices. - DeleteRenderJobByIndex(idx) --> Bool # Please use unique job ids (string) instead of indices. - GetRenderJobStatus(idx) --> {status info} # Please use unique job ids (string) instead of indices. - GetSetting and SetSetting --> {} # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI. - # settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available. diff --git a/server_addon/resolve/client/ayon_resolve/__init__.py b/server_addon/resolve/client/ayon_resolve/__init__.py deleted file mode 100644 index ba9afb67d5..0000000000 --- a/server_addon/resolve/client/ayon_resolve/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - RESOLVE_ADDON_ROOT, - ResolveAddon, -) - - -__all__ = ( - "__version__", - - "RESOLVE_ADDON_ROOT", - "ResolveAddon", -) diff --git a/server_addon/resolve/client/ayon_resolve/addon.py b/server_addon/resolve/client/ayon_resolve/addon.py deleted file mode 100644 index 706d2802b0..0000000000 --- a/server_addon/resolve/client/ayon_resolve/addon.py +++ /dev/null @@ -1,22 +0,0 @@ -import os - -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ -from .utils import RESOLVE_ADDON_ROOT - - -class ResolveAddon(AYONAddon, IHostAddon): - name = "resolve" - version = __version__ - host_name = "resolve" - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(RESOLVE_ADDON_ROOT, "hooks") - ] - - def get_workfile_extensions(self): - return [".drp"] diff --git a/server_addon/resolve/client/ayon_resolve/api/__init__.py b/server_addon/resolve/client/ayon_resolve/api/__init__.py deleted file mode 100644 index 3359430ef5..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -""" -resolve api -""" -from .utils import ( - get_resolve_module -) - -from .pipeline import ( - ResolveHost, - ls, - containerise, - update_container, - maintained_selection, - remove_instance, - list_instances -) - -from .lib import ( - maintain_current_timeline, - publish_clip_color, - get_project_manager, - get_current_project, - get_current_timeline, - get_any_timeline, - get_new_timeline, - create_bin, - get_media_pool_item, - create_media_pool_item, - create_timeline_item, - get_timeline_item, - get_video_track_names, - get_current_timeline_items, - get_pype_timeline_item_by_name, - get_timeline_item_pype_tag, - set_timeline_item_pype_tag, - imprint, - set_publish_attribute, - get_publish_attribute, - create_compound_clip, - swap_clips, - get_pype_clip_metadata, - set_project_manager_to_folder_name, - get_otio_clip_instance_data, - get_reformated_path -) - -from .menu import launch_ayon_menu - -from .plugin import ( - ClipLoader, - TimelineItemLoader, - Creator, - PublishClip -) - -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root -) - -from .testing_utils import TestGUI - - -bmdvr = None -bmdvf = None - -__all__ = [ - "bmdvr", - "bmdvf", - - # pipeline - "ResolveHost", - "ls", - "containerise", - "update_container", - "maintained_selection", 
- "remove_instance", - "list_instances", - - # utils - "get_resolve_module", - - # lib - "maintain_current_timeline", - "publish_clip_color", - "get_project_manager", - "get_current_project", - "get_current_timeline", - "get_any_timeline", - "get_new_timeline", - "create_bin", - "get_media_pool_item", - "create_media_pool_item", - "create_timeline_item", - "get_timeline_item", - "get_video_track_names", - "get_current_timeline_items", - "get_pype_timeline_item_by_name", - "get_timeline_item_pype_tag", - "set_timeline_item_pype_tag", - "imprint", - "set_publish_attribute", - "get_publish_attribute", - "create_compound_clip", - "swap_clips", - "get_pype_clip_metadata", - "set_project_manager_to_folder_name", - "get_otio_clip_instance_data", - "get_reformated_path", - - # menu - "launch_ayon_menu", - - # plugin - "ClipLoader", - "TimelineItemLoader", - "Creator", - "PublishClip", - - # workio - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - - "TestGUI" -] diff --git a/server_addon/resolve/client/ayon_resolve/api/action.py b/server_addon/resolve/client/ayon_resolve/api/action.py deleted file mode 100644 index 620d51b2b3..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/action.py +++ /dev/null @@ -1,52 +0,0 @@ -# absolute_import is needed to counter the `module has no cmds error` in Maya -from __future__ import absolute_import - -import pyblish.api - - -from ayon_core.pipeline.publish import get_errored_instances_from_context - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid clips in Resolve timeline when plug-in failed. - - To retrieve the invalid nodes this assumes a static `get_invalid()` - method is available on the plugin. - - """ - label = "Select invalid" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Awesome Icon - - def process(self, context, plugin): - - try: - from .lib import get_project_manager - pm = get_project_manager() - self.log.debug(pm) - except ImportError: - raise ImportError("Current host is not Resolve") - - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid clips..") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") - - # Ensure unique (process each node only once) - invalid = list(set(invalid)) - - if invalid: - self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid)) - # TODO: select resolve timeline track items in current timeline - else: - self.log.info("No invalid nodes found.") diff --git a/server_addon/resolve/client/ayon_resolve/api/lib.py b/server_addon/resolve/client/ayon_resolve/api/lib.py deleted file mode 100644 index 829c72b80a..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/lib.py +++ /dev/null @@ -1,970 +0,0 @@ -import sys -import json -import re -import os -import contextlib -from opentimelineio import opentime - -from ayon_core.lib import Logger -from ayon_core.pipeline.editorial import ( - is_overlapping_otio_ranges, - frames_to_timecode -) - -from ..otio import davinci_export as otio_export - -log = Logger.get_logger(__name__) - -self = sys.modules[__name__] -self.project_manager = None -self.media_storage = None - -# OpenPype 
sequential rename variables
-self.rename_index = 0
-self.rename_add = 0
-
-self.publish_clip_color = "Pink"
-self.pype_marker_workflow = True
-
-# OpenPype compound clip workflow variable
-self.pype_tag_name = "VFX Notes"
-
-# OpenPype marker workflow variables
-self.pype_marker_name = "OpenPypeData"
-self.pype_marker_duration = 1
-self.pype_marker_color = "Mint"
-self.temp_marker_frame = None
-
-# OpenPype default timeline
-self.pype_timeline_name = "OpenPypeTimeline"
-
-
-@contextlib.contextmanager
-def maintain_current_timeline(to_timeline: object,
-                              from_timeline: object = None):
-    """Maintain current timeline selection during context
-
-    Args:
-        to_timeline (resolve.Timeline): timeline to set as current
-        from_timeline (resolve.Timeline)[optional]: timeline to restore
-            after the context exits
-    Example:
-        >>> print(from_timeline.GetName())
-        timeline1
-        >>> print(to_timeline.GetName())
-        timeline2
-
-        >>> with maintain_current_timeline(to_timeline):
-        ...     print(get_current_timeline().GetName())
-        timeline2
-
-        >>> print(get_current_timeline().GetName())
-        timeline1
-    """
-    project = get_current_project()
-    working_timeline = from_timeline or project.GetCurrentTimeline()
-
-    # switch to the input timeline
-    project.SetCurrentTimeline(to_timeline)
-
-    try:
-        # do the work
-        yield
-    finally:
-        # put the original working timeline back
-        project.SetCurrentTimeline(working_timeline)
-
-
-def get_project_manager():
-    from . import bmdvr
-    if not self.project_manager:
-        self.project_manager = bmdvr.GetProjectManager()
-    return self.project_manager
-
-
-def get_media_storage():
-    from . import bmdvr
-    if not self.media_storage:
-        self.media_storage = bmdvr.GetMediaStorage()
-    return self.media_storage
-
-
-def get_current_project():
-    """Get current project object.
-    """
-    return get_project_manager().GetCurrentProject()
-
-
-def get_current_timeline(new=False):
-    """Get current timeline object.
-
-    Args:
-        new (bool)[optional]: [DEPRECATED] if True it will create
-            new timeline if none exists
-
-    Returns:
-        TODO: will need to reflect future `None`
-        object: resolve.Timeline
-    """
-    project = get_current_project()
-    timeline = project.GetCurrentTimeline()
-
-    # return current timeline if any
-    if timeline:
-        return timeline
-
-    # TODO: [deprecated] and will be removed in future
-    if new:
-        return get_new_timeline()
-
-
-def get_any_timeline():
-    """Get any timeline object.
-
-    Returns:
-        object | None: resolve.Timeline
-    """
-    project = get_current_project()
-    timeline_count = project.GetTimelineCount()
-    if timeline_count > 0:
-        return project.GetTimelineByIndex(1)
-
-
-def get_new_timeline(timeline_name: str = None):
-    """Get new timeline object.
-
-    Arguments:
-        timeline_name (str): New timeline name.
-
-    Returns:
-        object: resolve.Timeline
-    """
-    project = get_current_project()
-    media_pool = project.GetMediaPool()
-    new_timeline = media_pool.CreateEmptyTimeline(
-        timeline_name or self.pype_timeline_name)
-    project.SetCurrentTimeline(new_timeline)
-    return new_timeline
-
-
-def create_bin(name: str,
-               root: object = None,
-               set_as_current: bool = True) -> object:
-    """
-    Create media pool's folder.
-
-    Returns the folder object; if a bin with the input name does not exist,
-    a new one is created. If the input name contains forward or backward
-    slashes, all parent bins are created and the last child bin object is
-    returned.
-
-    Args:
-        name (str): name of folder / bin, or hierarchical name "parent/name"
-        root (resolve.Folder)[optional]: root folder / bin object
-        set_as_current (bool)[optional]: Whether to set the
-            resulting bin as current folder or not.
- - Returns: - object: resolve.Folder - """ - # get all variables - media_pool = get_current_project().GetMediaPool() - root_bin = root or media_pool.GetRootFolder() - - # create hierarchy of bins in case there is slash in name - if "/" in name.replace("\\", "/"): - child_bin = None - for bname in name.split("/"): - child_bin = create_bin(bname, - root=child_bin or root_bin, - set_as_current=set_as_current) - if child_bin: - return child_bin - else: - # Find existing folder or create it - for subfolder in root_bin.GetSubFolderList(): - if subfolder.GetName() == name: - created_bin = subfolder - break - else: - created_bin = media_pool.AddSubFolder(root_bin, name) - - if set_as_current: - media_pool.SetCurrentFolder(created_bin) - - return created_bin - - -def remove_media_pool_item(media_pool_item: object) -> bool: - media_pool = get_current_project().GetMediaPool() - return media_pool.DeleteClips([media_pool_item]) - - -def create_media_pool_item( - files: list, - root: object = None, -) -> object: - """ - Create media pool item. - - Args: - files (list[str]): list of absolute paths to files - root (resolve.Folder)[optional]: root folder / bin object - - Returns: - object: resolve.MediaPoolItem - """ - # get all variables - media_pool = get_current_project().GetMediaPool() - root_bin = root or media_pool.GetRootFolder() - - # make sure files list is not empty and first available file exists - filepath = next((f for f in files if os.path.isfile(f)), None) - if not filepath: - raise FileNotFoundError("No file found in input files list") - - # try to search in bin if the clip does not exist - existing_mpi = get_media_pool_item(filepath, root_bin) - - if existing_mpi: - return existing_mpi - - # add all data in folder to media pool - media_pool_items = media_pool.ImportMedia(files) - - return media_pool_items.pop() if media_pool_items else False - - -def get_media_pool_item(filepath, root: object = None) -> object: - """ - Return clip if found in folder with use of input file path. - - Args: - filepath (str): absolute path to a file - root (resolve.Folder)[optional]: root folder / bin object - - Returns: - object: resolve.MediaPoolItem - """ - media_pool = get_current_project().GetMediaPool() - root = root or media_pool.GetRootFolder() - fname = os.path.basename(filepath) - - for _mpi in root.GetClipList(): - _mpi_name = _mpi.GetClipProperty("File Name") - _mpi_name = get_reformated_path(_mpi_name, first=True) - if fname in _mpi_name: - return _mpi - return None - - -def create_timeline_item( - media_pool_item: object, - timeline: object = None, - timeline_in: int = None, - source_start: int = None, - source_end: int = None, -) -> object: - """ - Add media pool item to current or defined timeline. 
-
-    Args:
-        media_pool_item (resolve.MediaPoolItem): resolve's object
-        timeline (Optional[resolve.Timeline]): resolve's object
-        timeline_in (Optional[int]): timeline input frame (sequence frame)
-        source_start (Optional[int]): media source input frame (sequence frame)
-        source_end (Optional[int]): media source output frame (sequence frame)
-
-    Returns:
-        object: resolve.TimelineItem
-    """
-    # get all variables
-    project = get_current_project()
-    media_pool = project.GetMediaPool()
-    clip_name = media_pool_item.GetClipProperty("File Name")
-    timeline = timeline or get_current_timeline()
-
-    # timing variables (frame 0 is valid, so compare against None explicitly)
-    if all(x is not None for x in (timeline_in, source_start, source_end)):
-        fps = timeline.GetSetting("timelineFrameRate")
-        duration = source_end - source_start
-        timecode_in = frames_to_timecode(timeline_in, fps)
-        timecode_out = frames_to_timecode(timeline_in + duration, fps)
-    else:
-        timecode_in = None
-        timecode_out = None
-
-    # if timeline was used then switch it to current timeline
-    with maintain_current_timeline(timeline):
-        # Add input mediaPoolItem to clip data
-        clip_data = {
-            "mediaPoolItem": media_pool_item,
-        }
-
-        if source_start is not None:
-            clip_data["startFrame"] = source_start
-        if source_end is not None:
-            clip_data["endFrame"] = source_end
-        if timecode_in is not None:
-            # Note: specifying a recordFrame will fail to place the timeline
-            # item if there's already an existing clip at that time on the
-            # active track.
-            clip_data["recordFrame"] = timeline_in
-
-        # add to timeline
-        output_timeline_item = media_pool.AppendToTimeline([clip_data])[0]
-
-        # Adding the item may fail whilst Resolve will still return a
-        # TimelineItem instance - however all `Get*` calls return None.
-        # Hence, we check whether the result is valid.
-        if output_timeline_item.GetDuration() is None:
-            output_timeline_item = None
-
-    assert output_timeline_item, (
-        "Clip name '{}' wasn't created on the timeline: '{}' \n\n"
-        "Please check if correct track position is activated, \n"
-        "or if a clip is not already at the timeline in \n"
-        "position: '{}' out: '{}'. \n\n"
-        "Clip data: {}"
-    ).format(
-        clip_name, timeline.GetName(), timecode_in, timecode_out, clip_data
-    )
-    return output_timeline_item
-
-
-def get_timeline_item(media_pool_item: object,
-                      timeline: object = None) -> object:
-    """
-    Return the timeline item related to the input media pool item.
-
-    Args:
-        media_pool_item (resolve.MediaPoolItem): resolve's object
-        timeline (resolve.Timeline)[optional]: resolve's object
-
-    Returns:
-        object: resolve.TimelineItem
-    """
-    clip_name = media_pool_item.GetClipProperty("File Name")
-    output_timeline_item = None
-    timeline = timeline or get_current_timeline()
-
-    with maintain_current_timeline(timeline):
-        # search the timeline for the added clip
-
-        for ti_data in get_current_timeline_items():
-            ti_clip_item = ti_data["clip"]["item"]
-            ti_media_pool_item = ti_clip_item.GetMediaPoolItem()
-
-            # Skip items that do not have a media pool item, like for example
-            # an "Adjustment Clip" or a "Fusion Composition" from the effects
-            # toolbox
-            if not ti_media_pool_item:
-                continue
-
-            if clip_name in ti_media_pool_item.GetClipProperty("File Name"):
-                output_timeline_item = ti_clip_item
-
-    return output_timeline_item
-
-
-def get_video_track_names() -> list:
-    tracks = list()
-    track_type = "video"
-    timeline = get_current_timeline()
-
-    # get all tracks count filtered by track type
-    selected_track_count = timeline.GetTrackCount(track_type)
-
-    # loop all tracks and get items
-    track_index: int
-    for track_index in range(1, (int(selected_track_count) + 1)):
-        track_name = timeline.GetTrackName(track_type, track_index)
-        tracks.append(track_name)
-
-    return tracks
-
-
-def get_current_timeline_items(
-        filter: bool = False,
-        track_type: str = None,
-        track_name: str = None,
-        selecting_color: str = None) -> list:
-    """ Gets all available current timeline track items
-    """
-    track_type = track_type or "video"
-    selecting_color = selecting_color or "Chocolate"
-    project = get_current_project()
-
-    # get timeline anyhow
-    timeline = (
-        get_current_timeline() or
-        get_any_timeline() or
-        get_new_timeline()
-    )
-    selected_clips = []
-
-    # get all tracks count filtered by track type
-    selected_track_count = timeline.GetTrackCount(track_type)
-
-    # loop all tracks and get items
-    _clips = {}
-    for track_index in range(1, (int(selected_track_count) + 1)):
-        _track_name = timeline.GetTrackName(track_type, track_index)
-
-        # filter out all unmatched track names
-        if track_name and _track_name not in track_name:
-            continue
-
-        timeline_items = timeline.GetItemListInTrack(
-            track_type, track_index)
-        _clips[track_index] = timeline_items
-
-        _data = {
-            "project": project,
-            "timeline": timeline,
-            "track": {
-                "name": _track_name,
-                "index": track_index,
-                "type": track_type}
-        }
-        # get track item object and its color
-        for clip_index, ti in enumerate(_clips[track_index]):
-            data = _data.copy()
-            data["clip"] = {
-                "item": ti,
-                "index": clip_index
-            }
-            ti_color = ti.GetClipColor()
-            if (filter and selecting_color in ti_color) or not filter:
-                selected_clips.append(data)
-    return selected_clips
-
-
-def get_pype_timeline_item_by_name(name: str) -> object:
-    """Get timeline item by name.
-
-    Args:
-        name (str): name of timeline item
-
-    Returns:
-        object: resolve.TimelineItem
-    """
-    for _ti_data in get_current_timeline_items():
-        _ti_clip = _ti_data["clip"]["item"]
-        tag_data = get_timeline_item_pype_tag(_ti_clip)
-        if not tag_data:
-            # skip timeline items without an OpenPype tag
-            continue
-        tag_name = tag_data.get("namespace")
-        if not tag_name:
-            continue
-        if tag_name in name:
-            return _ti_clip
-    return None
-
-
-def get_timeline_item_pype_tag(timeline_item):
-    """
-    Get openpype track item tag created by creator or loader plugin.
-def get_timeline_item_pype_tag(timeline_item):
-    """
-    Get openpype track item tag created by creator or loader plugin.
-
-    Args:
-        timeline_item (resolve.TimelineItem): resolve object
-
-    Returns:
-        dict: openpype tag data
-    """
-    return_tag = None
-
-    if self.pype_marker_workflow:
-        return_tag = get_pype_marker(timeline_item)
-    else:
-        media_pool_item = timeline_item.GetMediaPoolItem()
-
-        # get all tags from track item
-        _tags = media_pool_item.GetMetadata()
-        if not _tags:
-            return None
-        for key, data in _tags.items():
-            # return only correct tag defined by global name
-            if key in self.pype_tag_name:
-                return_tag = json.loads(data)
-
-    return return_tag
-
-
-def set_timeline_item_pype_tag(timeline_item, data=None):
-    """
-    Set openpype track item tag to input timeline_item.
-
-    Args:
-        timeline_item (resolve.TimelineItem): resolve api object
-
-    Returns:
-        dict: json loaded data
-    """
-    data = data or dict()
-
-    # get available openpype tag if any
-    tag_data = get_timeline_item_pype_tag(timeline_item)
-
-    if self.pype_marker_workflow:
-        # delete tag as it is not updatable
-        if tag_data:
-            delete_pype_marker(timeline_item)
-        tag_data = tag_data or {}
-        tag_data.update(data)
-        set_pype_marker(timeline_item, tag_data)
-    else:
-        if tag_data:
-            media_pool_item = timeline_item.GetMediaPoolItem()
-            # if a tag already exists then update it with the input data
-            tag_data.update(data)
-            media_pool_item.SetMetadata(
-                self.pype_tag_name, json.dumps(tag_data))
-        else:
-            tag_data = data
-            # no tag yet - create one on the input track item
-            timeline_item.SetMetadata(
-                self.pype_tag_name, json.dumps(tag_data))
-
-    return tag_data
-
-
-def imprint(timeline_item, data=None):
-    """
-    Adding `Avalon data` into a resolve timeline item tag.
-
-    Also including publish attribute into tag.
-
-    Arguments:
-        timeline_item (resolve.TimelineItem): resolve timeline item object
-        data (dict): Any data which needs to be imprinted
-
-    Examples:
-        data = {
-            'folderPath': 'sq020sh0280',
-            'productType': 'render',
-            'productName': 'productMain'
-        }
-    """
-    data = data or {}
-
-    set_timeline_item_pype_tag(timeline_item, data)
-
-    # add publish attribute
-    set_publish_attribute(timeline_item, True)
-
-
-def set_publish_attribute(timeline_item, value):
-    """ Set Publish attribute on the input timeline item's tag
-
-    Args:
-        timeline_item (resolve.TimelineItem): resolve timeline item
-        value (bool): True or False
-    """
-    tag_data = get_timeline_item_pype_tag(timeline_item)
-    tag_data["publish"] = value
-    # set data to the publish attribute
-    set_timeline_item_pype_tag(timeline_item, tag_data)
-
-
-def get_publish_attribute(timeline_item):
-    """ Get Publish attribute from the input timeline item's tag
-
-    Args:
-        timeline_item (resolve.TimelineItem): resolve timeline item
-
-    Returns:
-        bool: True or False
-    """
-    tag_data = get_timeline_item_pype_tag(timeline_item)
-    return tag_data["publish"]
-
-
-def set_pype_marker(timeline_item, tag_data):
-    source_start = timeline_item.GetLeftOffset()
-    item_duration = timeline_item.GetDuration()
-    frame = int(source_start + (item_duration / 2))
-
-    # marker attributes
-    # snap frame id and duration to multiples of ten (integer division)
-    frameId = (frame // 10) * 10
-    color = self.pype_marker_color
-    name = self.pype_marker_name
-    note = json.dumps(tag_data)
-    duration = (self.pype_marker_duration // 10) * 10
-
-    timeline_item.AddMarker(
-        frameId,
-        color,
-        name,
-        note,
-        duration
-    )
-
-
-def get_pype_marker(timeline_item):
-    timeline_item_markers = timeline_item.GetMarkers()
-    for marker_frame, marker in timeline_item_markers.items():
-        color = marker["color"]
-        name = marker["name"]
-        if name == self.pype_marker_name and color == self.pype_marker_color:
-            note = marker["note"]
-            self.temp_marker_frame = marker_frame
-            return json.loads(note)
-
-    return dict()
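
A hypothetical round trip over the tag helpers above, assuming a `timeline_item` obtained from `get_current_timeline_items()`:

    imprint(timeline_item, {
        "folderPath": "sq020sh0280",
        "productType": "render",
        "productName": "productMain",
    })
    set_publish_attribute(timeline_item, False)   # toggle publishing off
    print(get_publish_attribute(timeline_item))   # -> False
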
-def delete_pype_marker(timeline_item):
-    timeline_item.DeleteMarkerAtFrame(self.temp_marker_frame)
-    self.temp_marker_frame = None
-
-
-def create_compound_clip(clip_data, name, folder):
-    """
-    Convert timeline object into nested timeline object
-
-    Args:
-        clip_data (dict): timeline item object packed into dict
-            with project, timeline (sequence)
-        name (str): name for compound clip
-        folder (resolve.MediaPool.Folder): media pool folder object
-
-    Returns:
-        resolve.MediaPoolItem: media pool item with compound clip timeline(cct)
-    """
-    # get basic objects from data
-    project = clip_data["project"]
-    timeline = clip_data["timeline"]
-    clip = clip_data["clip"]
-
-    # get details of objects
-    clip_item = clip["item"]
-
-    mp = project.GetMediaPool()
-
-    # get clip attributes
-    clip_attributes = get_clip_attributes(clip_item)
-
-    mp_item = clip_item.GetMediaPoolItem()
-    _mp_props = mp_item.GetClipProperty
-
-    mp_first_frame = int(_mp_props("Start"))
-    mp_last_frame = int(_mp_props("End"))
-
-    # initialize basic source timing for otio
-    ci_l_offset = clip_item.GetLeftOffset()
-    ci_duration = clip_item.GetDuration()
-    rate = float(_mp_props("FPS"))
-
-    # source rational times
-    mp_in_rc = opentime.RationalTime((ci_l_offset), rate)
-    mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate)
-
-    # get frame in and out for clip swapping
-    in_frame = opentime.to_frames(mp_in_rc)
-    out_frame = opentime.to_frames(mp_out_rc)
-
-    # keep original sequence
-    tl_origin = timeline
-
-    # Set current folder to input media_pool_folder:
-    mp.SetCurrentFolder(folder)
-
-    # check if clip doesn't exist already:
-    clips = folder.GetClipList()
-    cct = next((c for c in clips
-                if c.GetName() in name), None)
-
-    if cct:
-        print(f"Compound clip exists: {cct}")
-    else:
-        # Create empty timeline in current folder and give name:
-        cct = mp.CreateEmptyTimeline(name)
-
-        # get the newly created clip object from the folder:
-        clips = folder.GetClipList()
-        cct = next((c for c in clips
-                    if c.GetName() in name), None)
-        print(f"Compound clip created: {cct}")
-
-    with maintain_current_timeline(cct, tl_origin):
-        # Add input clip to the current timeline:
-        mp.AppendToTimeline([{
-            "mediaPoolItem": mp_item,
-            "startFrame": mp_first_frame,
-            "endFrame": mp_last_frame
-        }])
-
-    # Add collected metadata and attributes to the compound clip:
-    if mp_item.GetMetadata(self.pype_tag_name):
-        clip_attributes[self.pype_tag_name] = mp_item.GetMetadata(
-            self.pype_tag_name)[self.pype_tag_name]
-
-    # stringify
-    clip_attributes = json.dumps(clip_attributes)
-
-    # add attributes to metadata
-    for k, v in mp_item.GetMetadata().items():
-        cct.SetMetadata(k, v)
-
-    # add metadata to cct
-    cct.SetMetadata(self.pype_tag_name, clip_attributes)
-
-    # reset start timecode of the compound clip
-    cct.SetClipProperty("Start TC", _mp_props("Start TC"))
-
-    # swap clips on timeline
-    swap_clips(clip_item, cct, in_frame, out_frame)
-
-    cct.SetClipColor("Pink")
-    return cct
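
A hedged usage sketch for the conversion above; `timeline_item_data` is one entry from `get_current_timeline_items()`, and `shot_folder` is an assumed media pool folder (e.g. created via `create_bin`):

    cct = create_compound_clip(
        timeline_item_data,
        name="sq020sh0280",
        folder=shot_folder,
    )
    print(cct.GetName())
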
-def swap_clips(from_clip, to_clip, to_in_frame, to_out_frame):
-    """
-    Swapping clips on timeline in timelineItem
-
-    It will add take and activate it to the frame range which is inputted
-
-    Args:
-        from_clip (resolve.TimelineItem)
-        to_clip (resolve.MediaPoolItem)
-        to_in_frame (float): cut in frame, usually `GetLeftOffset()`
-        to_out_frame (float): cut out frame, usually left offset plus duration
-
-    Returns:
-        bool: True if successfully replaced
-
-    """
-    # copy ACES input transform from timeline clip to new media item
-    mediapool_item_from_timeline = from_clip.GetMediaPoolItem()
-    _idt = mediapool_item_from_timeline.GetClipProperty('IDT')
-    to_clip.SetClipProperty('IDT', _idt)
-
-    _clip_prop = to_clip.GetClipProperty
-    to_clip_name = _clip_prop("File Name")
-    # add clip item as take to timeline
-    take = from_clip.AddTake(
-        to_clip,
-        float(to_in_frame),
-        float(to_out_frame)
-    )
-
-    if not take:
-        return False
-
-    for take_index in range(1, (int(from_clip.GetTakesCount()) + 1)):
-        take_item = from_clip.GetTakeByIndex(take_index)
-        take_mp_item = take_item["mediaPoolItem"]
-        if to_clip_name in take_mp_item.GetName():
-            from_clip.SelectTakeByIndex(take_index)
-            from_clip.FinalizeTake()
-            return True
-    return False
-
-
-def _validate_tc(x):
-    # Validate and reformat timecode string
-
-    if len(x) != 11:
-        print('Invalid timecode. Try again.')
-
-    c = ':'
-    colonized = x[:2] + c + x[3:5] + c + x[6:8] + c + x[9:]
-
-    if colonized.replace(':', '').isdigit():
-        print(f"_ colonized: {colonized}")
-        return colonized
-    else:
-        print('Invalid timecode. Try again.')
-
-
-def get_pype_clip_metadata(clip):
-    """
-    Get openpype metadata created by creator plugin
-
-    Args:
-        clip (resolve.TimelineItem): resolve's object
-
-    Returns:
-        dict: hierarchy, orig clip attributes
-    """
-    mp_item = clip.GetMediaPoolItem()
-    metadata = mp_item.GetMetadata()
-
-    return metadata.get(self.pype_tag_name)
-
-
-def get_clip_attributes(clip):
-    """
-    Collect basic attributes from resolve timeline item
-
-    Args:
-        clip (resolve.TimelineItem): timeline item object
-
-    Returns:
-        dict: all collected attributes as key: values
-    """
-    mp_item = clip.GetMediaPoolItem()
-
-    return {
-        "clipIn": clip.GetStart(),
-        "clipOut": clip.GetEnd(),
-        "clipLeftOffset": clip.GetLeftOffset(),
-        "clipRightOffset": clip.GetRightOffset(),
-        "clipMarkers": clip.GetMarkers(),
-        "clipFlags": clip.GetFlagList(),
-        "sourceId": mp_item.GetMediaId(),
-        "sourceProperties": mp_item.GetClipProperty()
-    }
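
For instance, `_validate_tc` above normalizes any 11-character timecode to colon separators, regardless of the original delimiter:

    _validate_tc("01;02;03;04")   # -> "01:02:03:04"
    _validate_tc("010203")        # prints 'Invalid timecode. Try again.'
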
- f"Error: {e}")) - return False - - -def _convert_resolve_list_type(resolve_list): - """ Resolve is using indexed dictionary as list type. - `{1.0: 'vaule'}` - This will convert it to normal list class - """ - assert isinstance(resolve_list, dict), ( - "Input argument should be dict() type") - - return [resolve_list[i] for i in sorted(resolve_list.keys())] - - -def create_otio_time_range_from_timeline_item_data(timeline_item_data): - timeline_item = timeline_item_data["clip"]["item"] - project = timeline_item_data["project"] - timeline = timeline_item_data["timeline"] - timeline_start = timeline.GetStartFrame() - - frame_start = int(timeline_item.GetStart() - timeline_start) - frame_duration = int(timeline_item.GetDuration()) - fps = project.GetSetting("timelineFrameRate") - - return otio_export.create_otio_time_range( - frame_start, frame_duration, fps) - - -def get_otio_clip_instance_data(otio_timeline, timeline_item_data): - """ - Return otio objects for timeline, track and clip - - Args: - timeline_item_data (dict): timeline_item_data from list returned by - resolve.get_current_timeline_items() - otio_timeline (otio.schema.Timeline): otio object - - Returns: - dict: otio clip object - - """ - - timeline_item = timeline_item_data["clip"]["item"] - track_name = timeline_item_data["track"]["name"] - timeline_range = create_otio_time_range_from_timeline_item_data( - timeline_item_data) - - for otio_clip in otio_timeline.each_clip(): - track_name = otio_clip.parent().name - parent_range = otio_clip.range_in_parent() - if track_name not in track_name: - continue - if otio_clip.name not in timeline_item.GetName(): - continue - if is_overlapping_otio_ranges( - parent_range, timeline_range, strict=True): - - # add pypedata marker to otio_clip metadata - for marker in otio_clip.markers: - if self.pype_marker_name in marker.name: - otio_clip.metadata.update(marker.metadata) - return {"otioClip": otio_clip} - - return None - - -def get_reformated_path(path, padded=False, first=False): - """ - Return fixed python expression path - - Args: - path (str): path url or simple file name - - Returns: - type: string with reformatted path - - Example: - get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr - - """ - first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]") - - if "[" in path: - padding_pattern = r"(\d+)(?=-)" - padding = len(re.findall(padding_pattern, path).pop()) - num_pattern = r"(\[\d+\-\d+\])" - if padded: - path = re.sub(num_pattern, f"%0{padding}d", path) - elif first: - first_frame = re.findall(first_frame_pattern, path, flags=0) - if len(first_frame) >= 1: - first_frame = first_frame[0] - path = re.sub(num_pattern, first_frame, path) - else: - path = re.sub(num_pattern, "%d", path) - return path - - -def iter_all_media_pool_clips(): - """Recursively iterate all media pool clips in current project""" - root = get_current_project().GetMediaPool().GetRootFolder() - queue = [root] - for folder in queue: - for clip in folder.GetClipList(): - yield clip - queue.extend(folder.GetSubFolderList()) diff --git a/server_addon/resolve/client/ayon_resolve/api/menu.py b/server_addon/resolve/client/ayon_resolve/api/menu.py deleted file mode 100644 index fc2c15ad6d..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/menu.py +++ /dev/null @@ -1,184 +0,0 @@ -import os -import sys - -from qtpy import QtWidgets, QtCore, QtGui - -from ayon_core.tools.utils import host_tools -from ayon_core.pipeline import registered_host - - -MENU_LABEL = os.environ["AYON_MENU_LABEL"] - - -def 
-    path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
-    if not os.path.exists(path):
-        print("Unable to load stylesheet, file not found in resources")
-        return ""
-
-    with open(path, "r") as file_stream:
-        stylesheet = file_stream.read()
-    return stylesheet
-
-
-class Spacer(QtWidgets.QWidget):
-    def __init__(self, height, *args, **kwargs):
-        super(Spacer, self).__init__(*args, **kwargs)
-
-        self.setFixedHeight(height)
-
-        real_spacer = QtWidgets.QWidget(self)
-        real_spacer.setObjectName("Spacer")
-        real_spacer.setFixedHeight(height)
-
-        layout = QtWidgets.QVBoxLayout(self)
-        layout.setContentsMargins(0, 0, 0, 0)
-        layout.addWidget(real_spacer)
-
-        self.setLayout(layout)
-
-
-class AYONMenu(QtWidgets.QWidget):
-    def __init__(self, *args, **kwargs):
-        super(AYONMenu, self).__init__(*args, **kwargs)
-
-        self.setObjectName(f"{MENU_LABEL}Menu")
-
-        self.setWindowFlags(
-            QtCore.Qt.Window
-            | QtCore.Qt.CustomizeWindowHint
-            | QtCore.Qt.WindowTitleHint
-            | QtCore.Qt.WindowMinimizeButtonHint
-            | QtCore.Qt.WindowCloseButtonHint
-            | QtCore.Qt.WindowStaysOnTopHint
-        )
-
-        self.setWindowTitle(f"{MENU_LABEL}")
-        save_current_btn = QtWidgets.QPushButton("Save current file", self)
-        workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
-        create_btn = QtWidgets.QPushButton("Create ...", self)
-        publish_btn = QtWidgets.QPushButton("Publish ...", self)
-        load_btn = QtWidgets.QPushButton("Load ...", self)
-        inventory_btn = QtWidgets.QPushButton("Manager ...", self)
-        subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self)
-        libload_btn = QtWidgets.QPushButton("Library ...", self)
-        experimental_btn = QtWidgets.QPushButton(
-            "Experimental tools ...", self
-        )
-        # rename_btn = QtWidgets.QPushButton("Rename", self)
-        # set_colorspace_btn = QtWidgets.QPushButton(
-        #     "Set colorspace from presets", self
-        # )
-        # reset_resolution_btn = QtWidgets.QPushButton(
-        #     "Set Resolution from presets", self
-        # )
-
-        layout = QtWidgets.QVBoxLayout(self)
-        layout.setContentsMargins(10, 20, 10, 20)
-
-        layout.addWidget(save_current_btn)
-
-        layout.addWidget(Spacer(15, self))
-
-        layout.addWidget(workfiles_btn)
-        layout.addWidget(create_btn)
-        layout.addWidget(publish_btn)
-        layout.addWidget(load_btn)
-        layout.addWidget(inventory_btn)
-        layout.addWidget(subsetm_btn)
-
-        layout.addWidget(Spacer(15, self))
-
-        layout.addWidget(libload_btn)
-
-        # layout.addWidget(Spacer(15, self))
-
-        # layout.addWidget(rename_btn)
-
-        # layout.addWidget(Spacer(15, self))
-
-        # layout.addWidget(set_colorspace_btn)
-        # layout.addWidget(reset_resolution_btn)
-        layout.addWidget(Spacer(15, self))
-        layout.addWidget(experimental_btn)
-
-        self.setLayout(layout)
-
-        save_current_btn.clicked.connect(self.on_save_current_clicked)
-        save_current_btn.setShortcut(QtGui.QKeySequence.Save)
-        workfiles_btn.clicked.connect(self.on_workfile_clicked)
-        create_btn.clicked.connect(self.on_create_clicked)
-        publish_btn.clicked.connect(self.on_publish_clicked)
-        load_btn.clicked.connect(self.on_load_clicked)
-        inventory_btn.clicked.connect(self.on_inventory_clicked)
-        subsetm_btn.clicked.connect(self.on_subsetm_clicked)
-        libload_btn.clicked.connect(self.on_libload_clicked)
-        # rename_btn.clicked.connect(self.on_rename_clicked)
-        # set_colorspace_btn.clicked.connect(self.on_set_colorspace_clicked)
-        # reset_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
-        experimental_btn.clicked.connect(self.on_experimental_clicked)
-
-    def on_save_current_clicked(self):
-        host = registered_host()
-        current_file = host.get_current_workfile()
-        if not current_file:
-            print("Current project is not saved. "
-                  "Please save once first via workfiles tool.")
-            host_tools.show_workfiles()
-            return
-
-        print(f"Saving current file to: {current_file}")
-        host.save_workfile(current_file)
-
-    def on_workfile_clicked(self):
-        print("Clicked Workfile")
-        host_tools.show_workfiles()
-
-    def on_create_clicked(self):
-        print("Clicked Create")
-        host_tools.show_creator()
-
-    def on_publish_clicked(self):
-        print("Clicked Publish")
-        host_tools.show_publish(parent=None)
-
-    def on_load_clicked(self):
-        print("Clicked Load")
-        host_tools.show_loader(use_context=True)
-
-    def on_inventory_clicked(self):
-        print("Clicked Inventory")
-        host_tools.show_scene_inventory()
-
-    def on_subsetm_clicked(self):
-        print("Clicked Subset Manager")
-        host_tools.show_subset_manager()
-
-    def on_libload_clicked(self):
-        print("Clicked Library")
-        host_tools.show_library_loader()
-
-    def on_rename_clicked(self):
-        print("Clicked Rename")
-
-    def on_set_colorspace_clicked(self):
-        print("Clicked Set Colorspace")
-
-    def on_set_resolution_clicked(self):
-        print("Clicked Set Resolution")
-
-    def on_experimental_clicked(self):
-        host_tools.show_experimental_tools_dialog()
-
-
-def launch_ayon_menu():
-    app = QtWidgets.QApplication(sys.argv)
-
-    ayon_menu = AYONMenu()
-
-    stylesheet = load_stylesheet()
-    ayon_menu.setStyleSheet(stylesheet)
-
-    ayon_menu.show()
-
-    sys.exit(app.exec_())
diff --git a/server_addon/resolve/client/ayon_resolve/api/menu_style.qss b/server_addon/resolve/client/ayon_resolve/api/menu_style.qss
deleted file mode 100644
index ad8932d881..0000000000
--- a/server_addon/resolve/client/ayon_resolve/api/menu_style.qss
+++ /dev/null
@@ -1,71 +0,0 @@
-QWidget {
-    background-color: #282828;
-    border-radius: 3;
-    font-size: 13px;
-}
-
-QComboBox {
-    border: 1px solid #090909;
-    background-color: #201f1f;
-    color: #ffffff;
-}
-
-QComboBox QAbstractItemView
-{
-    color: white;
-}
-
-QPushButton {
-    border: 1px solid #090909;
-    background-color: #201f1f;
-    color: #ffffff;
-    padding: 5;
-}
-
-QPushButton:focus {
-    background-color: "#171717";
-    color: #d0d0d0;
-}
-
-QPushButton:hover {
-    background-color: "#171717";
-    color: #e64b3d;
-}
-
-QSpinBox {
-    border: 1px solid #090909;
-    background-color: #201f1f;
-    color: #ffffff;
-    padding: 2;
-    max-width: 8em;
-    qproperty-alignment: AlignCenter;
-}
-
-QLineEdit {
-    border: 1px solid #090909;
-    border-radius: 3px;
-    background-color: #201f1f;
-    color: #ffffff;
-    padding: 2;
-    min-width: 10em;
-    qproperty-alignment: AlignCenter;
-}
-
-#AYONMenu {
-    qproperty-alignment: AlignLeft;
-    min-width: 10em;
-    border: 1px solid #fef9ef;
-}
-
-QVBoxLayout {
-    background-color: #282828;
-}
-
-#Divider {
-    border: 1px solid #090909;
-    background-color: #585858;
-}
-
-QLabel {
-    color: #77776b;
-}
diff --git a/server_addon/resolve/client/ayon_resolve/api/pipeline.py b/server_addon/resolve/client/ayon_resolve/api/pipeline.py
deleted file mode 100644
index 05d2c9bcd1..0000000000
--- a/server_addon/resolve/client/ayon_resolve/api/pipeline.py
+++ /dev/null
@@ -1,325 +0,0 @@
-"""
-Basic avalon integration
-"""
-import os
-import json
-import contextlib
-from collections import OrderedDict
-
-from pyblish import api as pyblish
-
-from ayon_core.lib import Logger
-from ayon_core.pipeline import (
-    schema,
-    register_loader_plugin_path,
-    register_creator_plugin_path,
-    register_inventory_action_path,
-    AVALON_CONTAINER_ID,
-)
-from ayon_core.host import (
-    HostBase,
-    IWorkfileHost,
-    ILoadHost
-)
-from . import lib
-from .utils import get_resolve_module
-from .workio import (
-    open_file,
-    save_file,
-    file_extensions,
-    has_unsaved_changes,
-    work_root,
-    current_file
-)
-
-log = Logger.get_logger(__name__)
-
-HOST_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
-PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
-PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
-LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
-CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
-
-AVALON_CONTAINERS = ":AVALON_CONTAINERS"
-
-
-class ResolveHost(HostBase, IWorkfileHost, ILoadHost):
-    name = "resolve"
-
-    def install(self):
-        """Install resolve-specific functionality of avalon-core.
-
-        This is where you install menus and register families, data
-        and loaders into resolve.
-
-        It is called automatically when installing via `api.install(resolve)`.
-
-        See the Maya equivalent for inspiration on how to implement this.
-        """
-
-        log.info("ayon_resolve installed")
-
-        pyblish.register_host(self.name)
-        pyblish.register_plugin_path(PUBLISH_PATH)
-        print("Registering DaVinci Resolve plug-ins..")
-
-        register_loader_plugin_path(LOAD_PATH)
-        register_creator_plugin_path(CREATE_PATH)
-        register_inventory_action_path(INVENTORY_PATH)
-
-        # register callback for switching publishable
-        pyblish.register_callback("instanceToggled",
-                                  on_pyblish_instance_toggled)
-
-        get_resolve_module()
-
-    def open_workfile(self, filepath):
-        return open_file(filepath)
-
-    def save_workfile(self, filepath=None):
-        return save_file(filepath)
-
-    def work_root(self, session):
-        return work_root(session)
-
-    def get_current_workfile(self):
-        return current_file()
-
-    def workfile_has_unsaved_changes(self):
-        return has_unsaved_changes()
-
-    def get_workfile_extensions(self):
-        return file_extensions()
-
-    def get_containers(self):
-        return ls()
-
-
-def containerise(timeline_item,
-                 name,
-                 namespace,
-                 context,
-                 loader=None,
-                 data=None):
-    """Bundle a Resolve timeline item into an assembly and imprint it
-    with metadata.
-
-    Containerisation enables a tracking of version, author and origin
-    for loaded assets.
-
-    Arguments:
-        timeline_item (resolve.TimelineItem): object to imprint as container
-        name (str): Name of resulting assembly
-        namespace (str): Namespace under which to host container
-        context (dict): Asset information
-        loader (str, optional): Name of node used to produce this container.
-
-    Returns:
-        timeline_item (resolve.TimelineItem): containerised object
-
-    """
-
-    data_imprint = OrderedDict({
-        "schema": "openpype:container-2.0",
-        "id": AVALON_CONTAINER_ID,
-        "name": str(name),
-        "namespace": str(namespace),
-        "loader": str(loader),
-        "representation": context["representation"]["id"],
-    })
-
-    if data:
-        data_imprint.update(data)
-
-    lib.set_timeline_item_pype_tag(timeline_item, data_imprint)
-
-    return timeline_item
-
-
-def ls():
-    """List available containers.
-
-    This function is used by the Scene Inventory tool. You'll
-    need to implement a for-loop that then *yields* one Container at
-    a time.
-
-    See the `container.json` schema for details on how it should look,
-    and the Maya equivalent, which is in `avalon.maya.pipeline`.
-    """
-
-    # Media Pool instances from Load Media loader
-    for clip in lib.iter_all_media_pool_clips():
-        data = clip.GetMetadata(lib.pype_tag_name)
-        if not data:
-            continue
-        data = json.loads(data)
-
-        # If not all required data, skip it
-        required = ['schema', 'id', 'loader', 'representation']
-        if not all(key in data for key in required):
-            continue
-
-        container = {key: data[key] for key in required}
-        container["objectName"] = clip.GetName()  # Get path in folders
-        container["namespace"] = clip.GetName()
-        container["name"] = clip.GetUniqueId()
-        container["_item"] = clip
-        yield container
-
-    # Timeline instances from Load Clip loader
-    # get all track items from current timeline
-    all_timeline_items = lib.get_current_timeline_items(filter=False)
-
-    for timeline_item_data in all_timeline_items:
-        timeline_item = timeline_item_data["clip"]["item"]
-        container = parse_container(timeline_item)
-        if container:
-            yield container
-
-
-def parse_container(timeline_item, validate=True):
-    """Return container data from timeline_item's openpype tag.
-
-    Args:
-        timeline_item (resolve.TimelineItem): A containerised timeline item.
-        validate (bool)[optional]: validate against the avalon schema
-
-    Returns:
-        dict: The container schema data for input containerized track item.
-
-    """
-    # convert tag metadata to normal keys names
-    data = lib.get_timeline_item_pype_tag(timeline_item)
-
-    if validate and data and data.get("schema"):
-        schema.validate(data)
-
-    if not isinstance(data, dict):
-        return
-
-    # If not all required data return the empty container
-    required = ['schema', 'id', 'name',
-                'namespace', 'loader', 'representation']
-
-    if not all(key in data for key in required):
-        return
-
-    container = {key: data[key] for key in required}
-
-    container["objectName"] = timeline_item.GetName()
-
-    # Store reference to the node object
-    container["_timeline_item"] = timeline_item
-
-    return container
-
-
-def update_container(timeline_item, data=None):
-    """Update container data to input timeline_item's openpype tag.
-
-    Args:
-        timeline_item (resolve.TimelineItem): A containerised timeline item.
-        data (dict)[optional]: dictionary with data to be updated
-
-    Returns:
-        bool: True if container was updated correctly
-
-    """
-    data = data or dict()
-
-    container = lib.get_timeline_item_pype_tag(timeline_item)
-
-    for _key, _value in container.items():
-        try:
-            container[_key] = data[_key]
-        except KeyError:
-            pass
-
-    log.info("Updating container: `{}`".format(timeline_item))
-    return bool(lib.set_timeline_item_pype_tag(timeline_item, container))
-
-
-@contextlib.contextmanager
-def maintained_selection():
-    """Maintain selection during context
-
-    Example:
-        >>> with maintained_selection():
-        ...     node['selected'].setValue(True)
-        >>> print(node['selected'].value())
-        False
-    """
-    try:
-        # do the operation
-        yield
-    finally:
-        pass
-
-
-def reset_selection():
-    """Deselect all selected nodes
-    """
-    pass
-
-
-def on_pyblish_instance_toggled(instance, old_value, new_value):
-    """Toggle node passthrough states on instance toggles."""
-
-    log.info("instance toggle: {}, old_value: {}, new_value: {}".format(
-        instance, old_value, new_value))
-
-    from ayon_resolve.api import set_publish_attribute
-
-    # Whether instances should be passthrough based on new value
-    timeline_item = instance.data["item"]
-    set_publish_attribute(timeline_item, new_value)
-
-
-def remove_instance(instance):
-    """Remove instance marker from track item."""
-    instance_id = instance.get("uuid")
-
-    selected_timeline_items = lib.get_current_timeline_items(
-        filter=True, selecting_color=lib.publish_clip_color)
-
-    found_ti = None
-    for timeline_item_data in selected_timeline_items:
-        timeline_item = timeline_item_data["clip"]["item"]
-
-        # get openpype tag data
-        tag_data = lib.get_timeline_item_pype_tag(timeline_item)
-        _ti_id = tag_data.get("uuid")
-        if _ti_id == instance_id:
-            found_ti = timeline_item
-            break
-
-    if found_ti is None:
-        return
-
-    # removing instance by marker color
-    print(f"Removing instance: {found_ti.GetName()}")
-    found_ti.DeleteMarkersByColor(lib.pype_marker_color)
-
-
-def list_instances():
-    """List all created instances from current workfile."""
-    listed_instances = []
-    selected_timeline_items = lib.get_current_timeline_items(
-        filter=True, selecting_color=lib.publish_clip_color)
-
-    for timeline_item_data in selected_timeline_items:
-        timeline_item = timeline_item_data["clip"]["item"]
-        ti_name = timeline_item.GetName().split(".")[0]
-
-        # get openpype tag data
-        tag_data = lib.get_timeline_item_pype_tag(timeline_item)
-
-        if tag_data:
-            asset = tag_data.get("asset")
-            product_name = tag_data.get("productName")
-            tag_data["label"] = f"{ti_name} [{asset}-{product_name}]"
-            listed_instances.append(tag_data)
-
-    return listed_instances
diff --git a/server_addon/resolve/client/ayon_resolve/api/plugin.py b/server_addon/resolve/client/ayon_resolve/api/plugin.py
deleted file mode 100644
index 0b339cdf7c..0000000000
--- a/server_addon/resolve/client/ayon_resolve/api/plugin.py
+++ /dev/null
@@ -1,910 +0,0 @@
-import re
-import uuid
-import copy
-
-import qargparse
-from qtpy import QtWidgets, QtCore
-
-from ayon_core.settings import get_current_project_settings
-from ayon_core.pipeline import (
-    LegacyCreator,
-    LoaderPlugin,
-    Anatomy
-)
-from . import lib
-from .menu import load_stylesheet
-
-
-class CreatorWidget(QtWidgets.QDialog):
-
-    # output items
-    items = {}
-
-    def __init__(self, name, info, ui_inputs, parent=None):
-        super(CreatorWidget, self).__init__(parent)
-
-        self.setObjectName(name)
-
-        self.setWindowFlags(
-            QtCore.Qt.Window
-            | QtCore.Qt.CustomizeWindowHint
-            | QtCore.Qt.WindowTitleHint
-            | QtCore.Qt.WindowCloseButtonHint
-            | QtCore.Qt.WindowStaysOnTopHint
-        )
-        self.setWindowTitle(name or "OpenPype Creator Input")
-        self.resize(500, 700)
-
-        # Where inputs and labels are set
-        self.content_widget = [QtWidgets.QWidget(self)]
-        top_layout = QtWidgets.QFormLayout(self.content_widget[0])
-        top_layout.setObjectName("ContentLayout")
-        top_layout.addWidget(Spacer(5, self))
-
-        # first add widget tag line
-        top_layout.addWidget(QtWidgets.QLabel(info))
-
-        # main dynamic layout
-        self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
-        self.scroll_area.setVerticalScrollBarPolicy(
-            QtCore.Qt.ScrollBarAsNeeded)
-        self.scroll_area.setVerticalScrollBarPolicy(
-            QtCore.Qt.ScrollBarAlwaysOn)
-        self.scroll_area.setHorizontalScrollBarPolicy(
-            QtCore.Qt.ScrollBarAlwaysOff)
-        self.scroll_area.setWidgetResizable(True)
-
-        self.content_widget.append(self.scroll_area)
-
-        scroll_widget = QtWidgets.QWidget(self)
-        in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
-        self.content_layout = [in_scroll_area]
-
-        # add preset data into input widget layout
-        self.items = self.populate_widgets(ui_inputs)
-        self.scroll_area.setWidget(scroll_widget)
-
-        # Confirmation buttons
-        btns_widget = QtWidgets.QWidget(self)
-        btns_layout = QtWidgets.QHBoxLayout(btns_widget)
-
-        cancel_btn = QtWidgets.QPushButton("Cancel")
-        btns_layout.addWidget(cancel_btn)
-
-        ok_btn = QtWidgets.QPushButton("Ok")
-        btns_layout.addWidget(ok_btn)
-
-        # Main layout of the dialog
-        main_layout = QtWidgets.QVBoxLayout(self)
-        main_layout.setContentsMargins(10, 10, 10, 10)
-        main_layout.setSpacing(0)
-
-        # adding content widget
-        for w in self.content_widget:
-            main_layout.addWidget(w)
-
-        main_layout.addWidget(btns_widget)
-
-        ok_btn.clicked.connect(self._on_ok_clicked)
-        cancel_btn.clicked.connect(self._on_cancel_clicked)
-
-        stylesheet = load_stylesheet()
-        self.setStyleSheet(stylesheet)
-
-    def _on_ok_clicked(self):
-        self.result = self.value(self.items)
-        self.close()
-
-    def _on_cancel_clicked(self):
-        self.result = None
-        self.close()
-
-    def value(self, data, new_data=None):
-        new_data = new_data or {}
-        for k, v in data.items():
-            new_data[k] = {
-                "target": None,
-                "value": None
-            }
-            if v["type"] == "dict":
-                new_data[k]["target"] = v["target"]
-                new_data[k]["value"] = self.value(v["value"])
-            if v["type"] == "section":
-                new_data.pop(k)
-                new_data = self.value(v["value"], new_data)
-            elif getattr(v["value"], "currentText", None):
-                new_data[k]["target"] = v["target"]
-                new_data[k]["value"] = v["value"].currentText()
-            elif getattr(v["value"], "isChecked", None):
-                new_data[k]["target"] = v["target"]
-                new_data[k]["value"] = v["value"].isChecked()
-            elif getattr(v["value"], "value", None):
-                new_data[k]["target"] = v["target"]
-                new_data[k]["value"] = v["value"].value()
-            elif getattr(v["value"], "text", None):
-                new_data[k]["target"] = v["target"]
-                new_data[k]["value"] = v["value"].text()
-
-        return new_data
-
-    def camel_case_split(self, text):
-        matches = re.finditer(
-            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
-        return " ".join([str(m.group(0)).capitalize() for m in matches])
-    def create_row(self, layout, type, text, **kwargs):
-        # get type attribute from qwidgets
-        attr = getattr(QtWidgets, type)
-
-        # convert label text to normal capitalized text with spaces
-        label_text = self.camel_case_split(text)
-
-        # assign the new text to label widget
-        label = QtWidgets.QLabel(label_text)
-        label.setObjectName("LineLabel")
-
-        # create attribute name text strip of spaces
-        attr_name = text.replace(" ", "")
-
-        # create attribute and assign default values
-        setattr(
-            self,
-            attr_name,
-            attr(parent=self))
-
-        # assign the created attribute to variable
-        item = getattr(self, attr_name)
-        for func, val in kwargs.items():
-            if getattr(item, func):
-                func_attr = getattr(item, func)
-                if isinstance(val, tuple):
-                    func_attr(*val)
-                else:
-                    func_attr(val)
-
-        # add to layout
-        layout.addRow(label, item)
-
-        return item
-
-    def populate_widgets(self, data, content_layout=None):
-        """
-        Populate widget from input dict.
-
-        Each plugin has its own set of widget rows defined in dictionary
-        each row values should have following keys: `type`, `target`,
-        `label`, `order`, `value` and optionally also `toolTip`.
-
-        Args:
-            data (dict): widget rows or organized groups defined
-                by types `dict` or `section`
-            content_layout (QtWidgets.QFormLayout)[optional]: used when
-                nesting
-
-        Returns:
-            dict: redefined data dict updated with created widgets
-
-        """
-
-        content_layout = content_layout or self.content_layout[-1]
-        # fix order of process by defined order value
-        ordered_keys = list(data.keys())
-        for k, v in data.items():
-            try:
-                # try removing a key from index which should
-                # be filled with new
-                ordered_keys.pop(v["order"])
-            except IndexError:
-                pass
-            # add key into correct order
-            ordered_keys.insert(v["order"], k)
-
-        # process ordered
-        for k in ordered_keys:
-            v = data[k]
-            tool_tip = v.get("toolTip", "")
-            if v["type"] == "dict":
-                # adding spacer between sections
-                self.content_layout.append(QtWidgets.QWidget(self))
-                content_layout.addWidget(self.content_layout[-1])
-                self.content_layout[-1].setObjectName("sectionHeadline")
-
-                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
-                headline.addWidget(Spacer(20, self))
-                headline.addWidget(QtWidgets.QLabel(v["label"]))
-
-                # adding nested layout with label
-                self.content_layout.append(QtWidgets.QWidget(self))
-                self.content_layout[-1].setObjectName("sectionContent")
-
-                nested_content_layout = QtWidgets.QFormLayout(
-                    self.content_layout[-1])
-                nested_content_layout.setObjectName("NestedContentLayout")
-                content_layout.addWidget(self.content_layout[-1])
-
-                # add nested key as label
-                data[k]["value"] = self.populate_widgets(
-                    v["value"], nested_content_layout)
-
-            if v["type"] == "section":
-                # adding spacer between sections
-                self.content_layout.append(QtWidgets.QWidget(self))
-                content_layout.addWidget(self.content_layout[-1])
-                self.content_layout[-1].setObjectName("sectionHeadline")
-
-                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
-                headline.addWidget(Spacer(20, self))
-                headline.addWidget(QtWidgets.QLabel(v["label"]))
-
-                # adding nested layout with label
-                self.content_layout.append(QtWidgets.QWidget(self))
-                self.content_layout[-1].setObjectName("sectionContent")
-
-                nested_content_layout = QtWidgets.QFormLayout(
-                    self.content_layout[-1])
-                nested_content_layout.setObjectName("NestedContentLayout")
-                content_layout.addWidget(self.content_layout[-1])
-
-                # add nested key as label
-                data[k]["value"] = self.populate_widgets(
-                    v["value"], nested_content_layout)
-            elif v["type"] == "QLineEdit":
-                data[k]["value"] = self.create_row(
-                    content_layout, "QLineEdit", v["label"],
-                    setText=v["value"], setToolTip=tool_tip)
-            elif v["type"] == "QComboBox":
-                data[k]["value"] = self.create_row(
-                    content_layout, "QComboBox", v["label"],
-                    addItems=v["value"], setToolTip=tool_tip)
-            elif v["type"] == "QCheckBox":
-                data[k]["value"] = self.create_row(
-                    content_layout, "QCheckBox", v["label"],
-                    setChecked=v["value"], setToolTip=tool_tip)
-            elif v["type"] == "QSpinBox":
-                data[k]["value"] = self.create_row(
-                    content_layout, "QSpinBox", v["label"],
-                    setRange=(0, 99999),
-                    setValue=v["value"],
-                    setToolTip=tool_tip)
-        return data
-
-
-class Spacer(QtWidgets.QWidget):
-    def __init__(self, height, *args, **kwargs):
-        super(Spacer, self).__init__(*args, **kwargs)
-
-        self.setFixedHeight(height)
-
-        real_spacer = QtWidgets.QWidget(self)
-        real_spacer.setObjectName("Spacer")
-        real_spacer.setFixedHeight(height)
-
-        layout = QtWidgets.QVBoxLayout(self)
-        layout.setContentsMargins(0, 0, 0, 0)
-        layout.addWidget(real_spacer)
-
-        self.setLayout(layout)
-
-
-class ClipLoader:
-
-    active_bin = None
-    data = {}
-
-    def __init__(self, loader_obj, context, **options):
-        """ Initialize object
-
-        Arguments:
-            loader_obj (ayon_core.pipeline.load.LoaderPlugin): plugin object
-            context (dict): loader plugin context
-            options (dict)[optional]: possible keys:
-                projectBinPath: "path/to/binItem"
-
-        """
-        self.__dict__.update(loader_obj.__dict__)
-        self.context = context
-        self.active_project = lib.get_current_project()
-
-        # try to get value from options or evaluate key value for `handles`
-        self.with_handles = options.get("handles") is True
-
-        # try to get value from options or evaluate key value for `load_to`
-        self.new_timeline = (
-            options.get("newTimeline") or
-            options.get("load_to") == "New timeline"
-        )
-        # try to get value from options or evaluate key value for `load_how`
-        self.sequential_load = (
-            options.get("sequentially") or
-            options.get("load_how") == "Sequentially in order"
-        )
-
-        assert self._populate_data(), str(
-            "Cannot Load selected data, look into database "
-            "or call your supervisor")
-
-        # inject folder data to representation dict
-        self._get_folder_attributes()
-
-        # add active components to class
-        if self.new_timeline:
-            loader_cls = loader_obj.__class__
-            if loader_cls.timeline:
-                # if multiselection is set then use options sequence
-                self.active_timeline = loader_cls.timeline
-            else:
-                # create new sequence
-                self.active_timeline = lib.get_new_timeline(
-                    "{}_{}".format(
-                        self.data["timeline_basename"],
-                        str(uuid.uuid4())[:8]
-                    )
-                )
-                loader_cls.timeline = self.active_timeline
-
-        else:
-            self.active_timeline = lib.get_current_timeline()
-
-    def _populate_data(self):
-        """ Gets context and convert it to self.data
-        data structure:
-            {
-                "name": "assetName_productName_representationName"
-                "binPath": "projectBinPath",
-            }
-        """
-        # create name
-        folder_entity = self.context["folder"]
-        product_name = self.context["product"]["name"]
-        repre_entity = self.context["representation"]
-
-        folder_name = folder_entity["name"]
-        folder_path = folder_entity["path"]
-        representation_name = repre_entity["name"]
-
-        self.data["clip_name"] = "_".join([
-            folder_name,
-            product_name,
-            representation_name
-        ])
-        self.data["versionAttributes"] = self.context["version"]["attrib"]
-
-        self.data["timeline_basename"] = "timeline_{}_{}".format(
-            product_name, representation_name)
-
-        # solve project bin structure path
-        hierarchy = "Loader{}".format(folder_path)
-
-        self.data["binPath"] = hierarchy
-
-        return True
-    def _get_folder_attributes(self):
-        """ Get all available folder attributes
-
-        Joins `data` key with the folder attribute dict into the
-        representation.
-
-        """
-
-        self.data["folderAttributes"] = copy.deepcopy(
-            self.context["folder"]["attrib"]
-        )
-
-    def load(self, files):
-        """Load clip into timeline
-
-        Arguments:
-            files (list[str]): list of files to load into timeline
-        """
-        # create project bin for the media to be imported into
-        self.active_bin = lib.create_bin(self.data["binPath"])
-
-        # create mediaItem in active project bin
-        # create clip media
-        media_pool_item = lib.create_media_pool_item(
-            files,
-            self.active_bin
-        )
-        _clip_property = media_pool_item.GetClipProperty
-        source_in = int(_clip_property("Start"))
-        source_out = int(_clip_property("End"))
-        source_duration = int(_clip_property("Frames"))
-
-        # Trim clip start if slate is present
-        if "slate" in self.data["versionAttributes"]["families"]:
-            source_in += 1
-            source_duration = source_out - source_in + 1
-
-        if not self.with_handles:
-            # Load file without the handles of the source media.
-            # We remove the handles from the source in and source out
-            # so that the handles are excluded in the timeline.
-
-            # get version frame data from db
-            version_attributes = self.data["versionAttributes"]
-            frame_start = version_attributes.get("frameStart")
-            frame_end = version_attributes.get("frameEnd")
-
-            # The version data usually stores the frame range + handles of
-            # the media; however, certain representations may be shorter
-            # because they exclude those handles intentionally.
-            # Unfortunately the representation does not store that in the
-            # database currently, so we should compensate for those cases.
-            # If the media is shorter than the frame range specified in the
-            # database we assume it is without handles and thus we do not
-            # need to remove the handles from source in and out.
-            if frame_start is not None and frame_end is not None:
-                # Version has frame range data, so we can compare media
-                # length
-                handle_start = version_attributes.get("handleStart", 0)
-                handle_end = version_attributes.get("handleEnd", 0)
-                frame_start_handle = frame_start - handle_start
-                frame_end_handle = frame_end + handle_end
-                database_frame_duration = int(
-                    frame_end_handle - frame_start_handle + 1
-                )
-                if source_duration >= database_frame_duration:
-                    source_in += handle_start
-                    source_out -= handle_end
-
-        # get timeline in
-        timeline_start = self.active_timeline.GetStartFrame()
-        if self.sequential_load:
-            # set timeline start frame
-            timeline_in = int(timeline_start)
-        else:
-            # set timeline start frame + original clip in frame
-            timeline_in = int(
-                timeline_start + self.data["folderAttributes"]["clipIn"])
-
-        # make track item from source in bin as item
-        timeline_item = lib.create_timeline_item(
-            media_pool_item,
-            self.active_timeline,
-            timeline_in,
-            source_in,
-            source_out,
-        )
-
-        print("Loading clips: `{}`".format(self.data["clip_name"]))
-        return timeline_item
-
-    def update(self, timeline_item, files):
-        # create project bin for the media to be imported into
-        self.active_bin = lib.create_bin(self.data["binPath"])
-
-        # create mediaItem in active project bin
-        # create clip media
-        media_pool_item = lib.create_media_pool_item(
-            files,
-            self.active_bin
-        )
-        _clip_property = media_pool_item.GetClipProperty
-
-        # Read trimming from timeline item
-        timeline_item_in = timeline_item.GetLeftOffset()
-        timeline_item_len = timeline_item.GetDuration()
-        timeline_item_out = timeline_item_in + timeline_item_len
-
-        lib.swap_clips(
-            timeline_item,
-            media_pool_item,
-            timeline_item_in,
-            timeline_item_out
-        )
-
-        print("Loading clips: `{}`".format(self.data["clip_name"]))
-        return timeline_item
-
-
-class TimelineItemLoader(LoaderPlugin):
-    """A basic SequenceLoader for Resolve
-
-    This will implement the basic behavior for a loader to inherit from that
-    will containerize the reference and will implement the `remove` and
-    `update` logic.
-
-    """
-
-    options = [
-        qargparse.Boolean(
-            "handles",
-            label="Include handles",
-            default=0,
-            help="Load with handles or without?"
-        ),
-        qargparse.Choice(
-            "load_to",
-            label="Where to load clips",
-            items=[
-                "Current timeline",
-                "New timeline"
-            ],
-            default=0,
-            help="Where do you want clips to be loaded?"
-        ),
-        qargparse.Choice(
-            "load_how",
-            label="How to load clips",
-            items=[
-                "Original timing",
-                "Sequentially in order"
-            ],
-            default="Original timing",
-            help="Would you like to place it at original timing?"
-        )
-    ]
-
-    def load(
-        self,
-        context,
-        name=None,
-        namespace=None,
-        options=None
-    ):
-        pass
-
-    def update(self, container, context):
-        """Update an existing `container`
-        """
-        pass
-
-    def remove(self, container):
-        """Remove an existing `container`
-        """
-        pass
-
-
-class Creator(LegacyCreator):
-    """Creator class wrapper
-    """
-    marker_color = "Purple"
-
-    def __init__(self, *args, **kwargs):
-        super(Creator, self).__init__(*args, **kwargs)
-
-        resolve_p_settings = get_current_project_settings().get("resolve")
-        self.presets = {}
-        if resolve_p_settings:
-            self.presets = resolve_p_settings["create"].get(
-                self.__class__.__name__, {})
-
-        # adding basic current context resolve objects
-        self.project = lib.get_current_project()
-        self.timeline = lib.get_current_timeline()
-
-        if (self.options or {}).get("useSelection"):
-            self.selected = lib.get_current_timeline_items(filter=True)
-        else:
-            self.selected = lib.get_current_timeline_items(filter=False)
-
-        self.widget = CreatorWidget
-
-
-class PublishClip:
-    """
-    Convert a timeline item to a publishable instance
-
-    Args:
-        timeline_item_data (dict): timeline item data as returned by
-            `lib.get_current_timeline_items()`
-        kwargs (optional): additional data needed for rename=True (presets)
-
-    Returns:
-        resolve.TimelineItem: timeline item object with openpype tag
-    """
-    vertical_clip_match = {}
-    tag_data = {}
-    types = {
-        "shot": "shot",
-        "folder": "folder",
-        "episode": "episode",
-        "sequence": "sequence",
-        "track": "sequence",
-    }
-
-    # parents search pattern
-    parents_search_pattern = r"\{([a-z]*?)\}"
-
-    # default templates for non-ui use
-    rename_default = False
-    hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
-    clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
-    base_product_name_default = ""
-    review_track_default = "< none >"
-    product_type_default = "plate"
-    count_from_default = 10
-    count_steps_default = 10
-    vertical_sync_default = False
-    driving_layer_default = ""
-
-    def __init__(self, cls, timeline_item_data, **kwargs):
-        # populate input cls attribute onto self.[attr]
-        self.__dict__.update(cls.__dict__)
-
-        # get main parent objects
-        self.timeline_item_data = timeline_item_data
-        self.timeline_item = timeline_item_data["clip"]["item"]
-        timeline_name = timeline_item_data["timeline"].GetName()
-        self.timeline_name = str(timeline_name).replace(" ", "_")
-
-        # track item (clip) main attributes
-        self.ti_name = self.timeline_item.GetName()
-        self.ti_index = int(timeline_item_data["clip"]["index"])
-
-        # get track name and index
-        track_name = timeline_item_data["track"]["name"]
-        self.track_name = str(track_name).replace(" ", "_")
-        self.track_index = int(timeline_item_data["track"]["index"])
-
-        if kwargs.get("avalon"):
-            self.tag_data.update(kwargs["avalon"])
-
-        # adding ui inputs if any
-        self.ui_inputs = kwargs.get("ui_inputs", {})
-
-        # adding media pool folder if any
-        self.mp_folder = kwargs.get("mp_folder")
-
-        # populate default data before we get other attributes
-        self._populate_timeline_item_default_data()
-
-        # use all populated default data to create all important attributes
-        self._populate_attributes()
-
-        # create parents with correct types
-        self._create_parents()
-
-    def convert(self):
-        # solve track item data and add them to tag data
-        self._convert_to_tag_data()
-
-        # if track name is in review track name and also if driving track name
-        # is not in review track name: skip tag creation
-        if (self.track_name in self.review_layer) and (
-                self.driving_layer not in self.review_layer):
-            return
-
-        # deal with clip name
-        new_name = self.tag_data.pop("newClipName")
-
-        if self.rename:
-            self.tag_data["asset_name"] = new_name
-        else:
-            self.tag_data["asset_name"] = self.ti_name
-
-        # AYON unique identifier
-        folder_path = "/{}/{}".format(
-            self.tag_data["hierarchy"],
-            self.tag_data["asset_name"]
-        )
-        self.tag_data["folder_path"] = folder_path
-
-        # create new name for track item
-        if not lib.pype_marker_workflow:
-            # create compound clip workflow
-            lib.create_compound_clip(
-                self.timeline_item_data,
-                self.tag_data["asset_name"],
-                self.mp_folder
-            )
-
-        # add timeline_item_data selection to tag
-        self.tag_data.update({
-            "track_data": self.timeline_item_data["track"]
-        })
-
-        # create openpype tag on timeline_item and add data
-        lib.imprint(self.timeline_item, self.tag_data)
-
-        return self.timeline_item
-
-    def _populate_timeline_item_default_data(self):
-        """ Populate default formatting data from track item. """
-
-        self.timeline_item_default_data = {
-            "_folder_": "shots",
-            "_sequence_": self.timeline_name,
-            "_track_": self.track_name,
-            "_clip_": self.ti_name,
-            "_trackIndex_": self.track_index,
-            "_clipIndex_": self.ti_index
-        }
""" - # track item frame range and parent track name for vertical sync check - self.clip_in = int(self.timeline_item.GetStart()) - self.clip_out = int(self.timeline_item.GetEnd()) - - # define ui inputs if non gui mode was used - self.shot_num = self.ti_index - - # ui_inputs data or default values if gui was not used - self.rename = self.ui_inputs.get( - "clipRename", {}).get("value") or self.rename_default - self.clip_name = self.ui_inputs.get( - "clipName", {}).get("value") or self.clip_name_default - self.hierarchy = self.ui_inputs.get( - "hierarchy", {}).get("value") or self.hierarchy_default - self.hierarchy_data = self.ui_inputs.get( - "hierarchyData", {}).get("value") or \ - self.timeline_item_default_data.copy() - self.count_from = self.ui_inputs.get( - "countFrom", {}).get("value") or self.count_from_default - self.count_steps = self.ui_inputs.get( - "countSteps", {}).get("value") or self.count_steps_default - self.base_product_name = self.ui_inputs.get( - "productName", {}).get("value") or self.base_product_name_default - self.product_type = self.ui_inputs.get( - "productType", {}).get("value") or self.product_type_default - self.vertical_sync = self.ui_inputs.get( - "vSyncOn", {}).get("value") or self.vertical_sync_default - self.driving_layer = self.ui_inputs.get( - "vSyncTrack", {}).get("value") or self.driving_layer_default - self.review_track = self.ui_inputs.get( - "reviewTrack", {}).get("value") or self.review_track_default - - # build product name from layer name - if self.base_product_name == "": - self.base_product_name = self.track_name - - # create product name for publishing - self.product_name = ( - self.product_type + self.base_product_name.capitalize() - ) - - def _replace_hash_to_expression(self, name, text): - """ Replace hash with number in correct padding. """ - _spl = text.split("#") - _len = (len(_spl) - 1) - _repl = "{{{0}:0>{1}}}".format(name, _len) - new_text = text.replace(("#" * _len), _repl) - return new_text - - def _convert_to_tag_data(self): - """ Convert internal data to tag data. 
-    def _convert_to_tag_data(self):
-        """ Convert internal data to tag data.
-
-        Populating the tag data into internal variable self.tag_data
-        """
-        # define vertical sync attributes
-        hero_track = True
-        self.review_layer = ""
-        if self.vertical_sync:
-            # check if track name is not in driving layer
-            if self.track_name not in self.driving_layer:
-                # if it is not then define vertical sync as None
-                hero_track = False
-
-        # increasing steps by index of rename iteration
-        self.count_steps *= self.rename_index
-
-        hierarchy_formatting_data = {}
-        _data = self.timeline_item_default_data.copy()
-        if self.ui_inputs:
-            # adding tag metadata from ui
-            for _k, _v in self.ui_inputs.items():
-                if _v["target"] == "tag":
-                    self.tag_data[_k] = _v["value"]
-
-            # driving layer is set as positive match
-            if hero_track or self.vertical_sync:
-                # mark review layer
-                if self.review_track and (
-                        self.review_track not in self.review_track_default):
-                    # if review layer is defined and not the same as default
-                    self.review_layer = self.review_track
-                # shot num calculate
-                if self.rename_index == 0:
-                    self.shot_num = self.count_from
-                else:
-                    self.shot_num = self.count_from + self.count_steps
-
-            # clip name sequence number
-            _data.update({"shot": self.shot_num})
-
-            # solve # in text to pythonic expression
-            for _k, _v in self.hierarchy_data.items():
-                if "#" not in _v["value"]:
-                    continue
-                self.hierarchy_data[
-                    _k]["value"] = self._replace_hash_to_expression(
-                        _k, _v["value"])
-
-            # fill up pythonic expressions in hierarchy data
-            for k, _v in self.hierarchy_data.items():
-                hierarchy_formatting_data[k] = _v["value"].format(**_data)
-        else:
-            # if no gui mode then just pass default data
-            hierarchy_formatting_data = self.hierarchy_data
-
-        tag_hierarchy_data = self._solve_tag_hierarchy_data(
-            hierarchy_formatting_data
-        )
-
-        tag_hierarchy_data.update({"heroTrack": True})
-        if hero_track and self.vertical_sync:
-            self.vertical_clip_match.update({
-                (self.clip_in, self.clip_out): tag_hierarchy_data
-            })
-
-        if not hero_track and self.vertical_sync:
-            # driving layer is set as negative match
-            for (_in, _out), hero_data in self.vertical_clip_match.items():
-                hero_data.update({"heroTrack": False})
-                if _in != self.clip_in or _out != self.clip_out:
-                    continue
-
-                data_product_name = hero_data["productName"]
-                # add track index in case duplicity of names in hero data
-                if self.product_name in data_product_name:
-                    hero_data["productName"] = self.product_name + str(
-                        self.track_index)
-                # in case track name and product name is the same then add
-                if self.base_product_name == self.track_name:
-                    hero_data["productName"] = self.product_name
-                # assign data to return hierarchy data to tag
-                tag_hierarchy_data = hero_data
-
-        # add data to return data dict
-        self.tag_data.update(tag_hierarchy_data)
-
-        # add uuid to tag data
-        self.tag_data["uuid"] = str(uuid.uuid4())
-
-        # add review track only to hero track
-        if hero_track and self.review_layer:
-            self.tag_data.update({"reviewTrack": self.review_layer})
-        else:
-            self.tag_data.update({"reviewTrack": None})
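
For instance, the `#` padding resolution above turns a templated value into a Python format expression before it is filled with `_data` (key name is illustrative):

    self._replace_hash_to_expression("shot", "sh###")   # -> "sh{shot:0>3}"
    "sh{shot:0>3}".format(shot=10)                      # -> "sh010"
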
""" - # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) - clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) - - return { - "newClipName": clip_name_filled, - "hierarchy": hierarchy_filled, - "parents": self.parents, - "hierarchyData": hierarchy_formatting_data, - "productName": self.product_name, - "productType": self.product_type - } - - def _convert_to_entity(self, key): - """ Converting input key to key with type. """ - # convert to entity type - folder_type = self.types.get(key) - - assert folder_type, "Missing folder type for `{}`".format( - key - ) - - return { - "folder_type": folder_type, - "entity_name": self.hierarchy_data[key]["value"].format( - **self.timeline_item_default_data - ) - } - - def _create_parents(self): - """ Create parents and return it in list. """ - self.parents = [] - - pattern = re.compile(self.parents_search_pattern) - par_split = [pattern.findall(t).pop() - for t in self.hierarchy.split("/")] - - for key in par_split: - parent = self._convert_to_entity(key) - self.parents.append(parent) - - -def get_representation_files(representation): - anatomy = Anatomy() - files = [] - for file_data in representation["files"]: - path = anatomy.fill_root(file_data["path"]) - files.append(path) - return files diff --git a/server_addon/resolve/client/ayon_resolve/api/testing_utils.py b/server_addon/resolve/client/ayon_resolve/api/testing_utils.py deleted file mode 100644 index 4aac66f4b7..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/testing_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -#! python3 - - -class TestGUI: - def __init__(self): - resolve = bmd.scriptapp("Resolve") # noqa - self.fu = resolve.Fusion() - ui = self.fu.UIManager - self.disp = bmd.UIDispatcher(self.fu.UIManager) # noqa - self.title_font = ui.Font({"PixelSize": 18}) - self._dialogue = self.disp.AddWindow( - { - "WindowTitle": "Get Testing folder", - "ID": "TestingWin", - "Geometry": [250, 250, 250, 100], - "Spacing": 0, - "Margin": 10 - }, - [ - ui.VGroup( - { - "Spacing": 2 - }, - [ - ui.Button( - { - "ID": "inputTestSourcesFolder", - "Text": "Select folder with testing media", - "Weight": 1.25, - "ToolTip": ( - "Chose folder with videos, sequences, " - "single images, nested folders with " - "media" - ), - "Flat": False - } - ), - ui.VGap(), - ui.Button( - { - "ID": "openButton", - "Text": "Process Test", - "Weight": 2, - "ToolTip": "Run the test...", - "Flat": False - } - ) - ] - ) - ] - ) - self._widgets = self._dialogue.GetItems() - self._dialogue.On.TestingWin.Close = self._close_window - self._dialogue.On.inputTestSourcesFolder.Clicked = self._open_dir_button_pressed # noqa - self._dialogue.On.openButton.Clicked = self.process - - def _close_window(self, event): - self.disp.ExitLoop() - - def process(self, event): - # placeholder function this supposed to be run from child class - pass - - def _open_dir_button_pressed(self, event): - # placeholder function this supposed to be run from child class - pass - - def show_gui(self): - self._dialogue.Show() - self.disp.RunLoop() - self._dialogue.Hide() diff --git a/server_addon/resolve/client/ayon_resolve/api/todo-rendering.py b/server_addon/resolve/client/ayon_resolve/api/todo-rendering.py deleted file mode 100644 index 5238d76dec..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/todo-rendering.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -# TODO: convert this script to be usable with OpenPype -""" -Example DaVinci Resolve script: -Load 
a still from DRX file, apply the still to all clips in all timelines. -Set render format and codec, add render jobs for all timelines, render -to specified path and wait for rendering completion. -Once render is complete, delete all jobs -""" -# clonned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py # noqa - -from python_get_resolve import GetResolve -import sys -import time - - -def AddTimelineToRender(project, timeline, presetName, - targetDirectory, renderFormat, renderCodec): - project.SetCurrentTimeline(timeline) - project.LoadRenderPreset(presetName) - - if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec): - return False - - project.SetRenderSettings( - {"SelectAllFrames": 1, "TargetDir": targetDirectory}) - return project.AddRenderJob() - - -def RenderAllTimelines(resolve, presetName, targetDirectory, - renderFormat, renderCodec): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - if not project: - return False - - resolve.OpenPage("Deliver") - timelineCount = project.GetTimelineCount() - - for index in range(0, int(timelineCount)): - if not AddTimelineToRender( - project, - project.GetTimelineByIndex(index + 1), - presetName, - targetDirectory, - renderFormat, - renderCodec): - return False - return project.StartRendering() - - -def IsRenderingInProgress(resolve): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - if not project: - return False - - return project.IsRenderingInProgress() - - -def WaitForRenderingCompletion(resolve): - while IsRenderingInProgress(resolve): - time.sleep(1) - return - - -def ApplyDRXToAllTimelineClips(timeline, path, gradeMode=0): - trackCount = timeline.GetTrackCount("video") - - clips = {} - for index in range(1, int(trackCount) + 1): - clips.update(timeline.GetItemsInTrack("video", index)) - return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips) - - -def ApplyDRXToAllTimelines(resolve, path, gradeMode=0): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - if not project: - return False - timelineCount = project.GetTimelineCount() - - for index in range(0, int(timelineCount)): - timeline = project.GetTimelineByIndex(index + 1) - project.SetCurrentTimeline(timeline) - if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode): - return False - return True - - -def DeleteAllRenderJobs(resolve): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - project.DeleteAllRenderJobs() - return - - -# Inputs: -# - DRX file to import grade still and apply it for clips -# - grade mode (0, 1 or 2) -# - preset name for rendering -# - render path -# - render format -# - render codec -if len(sys.argv) < 7: - print( - "input parameters for scripts are [drx file path] [grade mode] " - "[render preset name] [render path] [render format] [render codec]") - sys.exit() - -drxPath = sys.argv[1] -gradeMode = sys.argv[2] -renderPresetName = sys.argv[3] -renderPath = sys.argv[4] -renderFormat = sys.argv[5] -renderCodec = sys.argv[6] - -# Get currently open project -resolve = GetResolve() - -if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode): - print("Unable to apply a still from drx file to all timelines") - sys.exit() - -if not RenderAllTimelines(resolve, renderPresetName, renderPath, - renderFormat, renderCodec): - print("Unable to set all timelines for rendering") - sys.exit() - 
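-# Hypothetical example invocation (all argument values below are
-# placeholders, not shipped defaults):
-#   python3 todo-rendering.py grade.drx 0 "My Preset" /tmp/renders mp4 H264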
-WaitForRenderingCompletion(resolve) - -DeleteAllRenderJobs(resolve) - -print("Rendering is completed.") diff --git a/server_addon/resolve/client/ayon_resolve/api/utils.py b/server_addon/resolve/client/ayon_resolve/api/utils.py deleted file mode 100644 index d63ade9d51..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/utils.py +++ /dev/null @@ -1,83 +0,0 @@ -#! python3 - -""" -Resolve's tools for setting environment -""" - -import os -import sys - -from ayon_core.lib import Logger - -log = Logger.get_logger(__name__) - - -def get_resolve_module(): - from ayon_resolve import api - # dont run if already loaded - if api.bmdvr: - log.info(("resolve module is assigned to " - f"`ayon_resolve.api.bmdvr`: {api.bmdvr}")) - return api.bmdvr - try: - """ - The PYTHONPATH needs to be set correctly for this import - statement to work. An alternative is to import the - DaVinciResolveScript by specifying absolute path - (see ExceptionHandler logic) - """ - import DaVinciResolveScript as bmd - except ImportError: - if sys.platform.startswith("darwin"): - expected_path = ("/Library/Application Support/Blackmagic Design" - "/DaVinci Resolve/Developer/Scripting/Modules") - elif sys.platform.startswith("win") \ - or sys.platform.startswith("cygwin"): - expected_path = os.path.normpath( - os.getenv('PROGRAMDATA') + ( - "/Blackmagic Design/DaVinci Resolve/Support/Developer" - "/Scripting/Modules" - ) - ) - elif sys.platform.startswith("linux"): - expected_path = "/opt/resolve/libs/Fusion/Modules" - else: - raise NotImplementedError( - "Unsupported platform: {}".format(sys.platform) - ) - - # check if the default path has it... - print(("Unable to find module DaVinciResolveScript from " - "$PYTHONPATH - trying default locations")) - - module_path = os.path.normpath( - os.path.join( - expected_path, - "DaVinciResolveScript.py" - ) - ) - - try: - import imp - bmd = imp.load_source('DaVinciResolveScript', module_path) - except ImportError: - # No fallbacks ... 
report error: - log.error( - ("Unable to find module DaVinciResolveScript - please " - "ensure that the module DaVinciResolveScript is " - "discoverable by python") - ) - log.error( - ("For a default DaVinci Resolve installation, the " - f"module is expected to be located in: {expected_path}") - ) - sys.exit() - # assign global var and return - bmdvr = bmd.scriptapp("Resolve") - bmdvf = bmd.scriptapp("Fusion") - api.bmdvr = bmdvr - api.bmdvf = bmdvf - log.info(("Assigning resolve module to " - f"`ayon_resolve.api.bmdvr`: {api.bmdvr}")) - log.info(("Assigning resolve module to " - f"`ayon_resolve.api.bmdvf`: {api.bmdvf}")) diff --git a/server_addon/resolve/client/ayon_resolve/api/workio.py b/server_addon/resolve/client/ayon_resolve/api/workio.py deleted file mode 100644 index b6c2f63432..0000000000 --- a/server_addon/resolve/client/ayon_resolve/api/workio.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Host API required Work Files tool""" - -import os -from ayon_core.lib import Logger -from .lib import ( - get_project_manager, - get_current_project -) - - -log = Logger.get_logger(__name__) - - -def file_extensions(): - return [".drp"] - - -def has_unsaved_changes(): - get_project_manager().SaveProject() - return False - - -def save_file(filepath): - pm = get_project_manager() - file = os.path.basename(filepath) - fname, _ = os.path.splitext(file) - project = get_current_project() - name = project.GetName() - - response = False - if name == "Untitled Project": - response = pm.CreateProject(fname) - log.info("New project created: {}".format(response)) - pm.SaveProject() - elif name != fname: - response = project.SetName(fname) - log.info("Project renamed: {}".format(response)) - - exported = pm.ExportProject(fname, filepath) - log.info("Project exported: {}".format(exported)) - - -def open_file(filepath): - """ - Loading project - """ - - from . import bmdvr - - pm = get_project_manager() - page = bmdvr.GetCurrentPage() - if page is not None: - # Save current project only if Resolve has an active page, otherwise - # we consider Resolve being in a pre-launch state (no open UI yet) - project = pm.GetCurrentProject() - print(f"Saving current project: {project}") - pm.SaveProject() - - file = os.path.basename(filepath) - fname, _ = os.path.splitext(file) - - try: - # load project from input path - project = pm.LoadProject(fname) - log.info(f"Project {project.GetName()} opened...") - - except AttributeError: - log.warning((f"Project with name `{fname}` does not exist! 
It will " - f"be imported from {filepath} and then loaded...")) - if pm.ImportProject(filepath): - # load project from input path - project = pm.LoadProject(fname) - log.info(f"Project imported/loaded {project.GetName()}...") - return True - return False - return True - - -def current_file(): - pm = get_project_manager() - file_ext = file_extensions()[0] - workdir_path = os.getenv("AYON_WORKDIR") - project = pm.GetCurrentProject() - project_name = project.GetName() - file_name = project_name + file_ext - - # create current file path - current_file_path = os.path.join(workdir_path, file_name) - - # return current file path if it exists - if os.path.exists(current_file_path): - return os.path.normpath(current_file_path) - - -def work_root(session): - return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/") diff --git a/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_last_workfile.py b/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_last_workfile.py deleted file mode 100644 index cf9953bfe9..0000000000 --- a/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_last_workfile.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -from ayon_applications import PreLaunchHook, LaunchTypes - - -class PreLaunchResolveLastWorkfile(PreLaunchHook): - """Special hook to open last workfile for Resolve. - - Checks 'start_last_workfile', if set to False, it will not open last - workfile. This property is set explicitly in Launcher. - """ - order = 10 - app_groups = {"resolve"} - launch_types = {LaunchTypes.local} - - def execute(self): - if not self.data.get("start_last_workfile"): - self.log.info("It is set to not start last workfile on start.") - return - - last_workfile = self.data.get("last_workfile_path") - if not last_workfile: - self.log.warning("Last workfile was not collected.") - return - - if not os.path.exists(last_workfile): - self.log.info("Current context does not have any workfile yet.") - return - - # Add path to launch environment for the startup script to pick up - self.log.info( - "Setting AYON_RESOLVE_OPEN_ON_LAUNCH to launch " - f"last workfile: {last_workfile}" - ) - key = "AYON_RESOLVE_OPEN_ON_LAUNCH" - self.launch_context.env[key] = last_workfile diff --git a/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_setup.py b/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_setup.py deleted file mode 100644 index ffd34d7b8d..0000000000 --- a/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_setup.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -from pathlib import Path -import platform -from ayon_applications import PreLaunchHook, LaunchTypes -from ayon_resolve.utils import setup - - -class PreLaunchResolveSetup(PreLaunchHook): - """ - This hook will set up the Resolve scripting environment as described in - Resolve's documentation found with the installed application at - {resolve}/Support/Developer/Scripting/README.txt - - Prepares the following environment variables: - - `RESOLVE_SCRIPT_API` - - `RESOLVE_SCRIPT_LIB` - - It adds $RESOLVE_SCRIPT_API/Modules to PYTHONPATH. - - Additionally it sets up the Python home for Python 3 based on the - RESOLVE_PYTHON3_HOME in the environment (usually defined in OpenPype's - Application environment for Resolve by the admin). For this it sets - PYTHONHOME and PATH variables. - - It also defines: - - `RESOLVE_UTILITY_SCRIPTS_DIR`: Destination directory for OpenPype - Fusion scripts to be copied to for Resolve to pick them up. 
- - `AYON_LOG_NO_COLORS` to True to ensure OP doesn't try to - use logging with terminal colors as it fails in Resolve. - - """ - - app_groups = {"resolve"} - launch_types = {LaunchTypes.local} - - def execute(self): - current_platform = platform.system().lower() - - programdata = self.launch_context.env.get("PROGRAMDATA", "") - resolve_script_api_locations = { - "windows": ( - f"{programdata}/Blackmagic Design/" - "DaVinci Resolve/Support/Developer/Scripting" - ), - "darwin": ( - "/Library/Application Support/Blackmagic Design" - "/DaVinci Resolve/Developer/Scripting" - ), - "linux": "/opt/resolve/Developer/Scripting", - } - resolve_script_api = Path( - resolve_script_api_locations[current_platform] - ) - self.log.info( - f"setting RESOLVE_SCRIPT_API variable to {resolve_script_api}" - ) - self.launch_context.env[ - "RESOLVE_SCRIPT_API" - ] = resolve_script_api.as_posix() - - resolve_script_lib_dirs = { - "windows": ( - "C:/Program Files/Blackmagic Design" - "/DaVinci Resolve/fusionscript.dll" - ), - "darwin": ( - "/Applications/DaVinci Resolve/DaVinci Resolve.app" - "/Contents/Libraries/Fusion/fusionscript.so" - ), - "linux": "/opt/resolve/libs/Fusion/fusionscript.so", - } - resolve_script_lib = Path(resolve_script_lib_dirs[current_platform]) - self.launch_context.env[ - "RESOLVE_SCRIPT_LIB" - ] = resolve_script_lib.as_posix() - self.log.info( - f"setting RESOLVE_SCRIPT_LIB variable to {resolve_script_lib}" - ) - - # TODO: add OTIO installation from `openpype/requirements.py` - # making sure python <3.9.* is installed at provided path - python3_home = Path( - self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "") - ) - - assert python3_home.is_dir(), ( - "Python 3 is not installed at the provided folder path. Either " - "make sure the `environments\resolve.json` is having correctly " - "set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed " - f"in given path. 
\nRESOLVE_PYTHON3_HOME: `{python3_home}`" - ) - python3_home_str = python3_home.as_posix() - self.launch_context.env["PYTHONHOME"] = python3_home_str - self.log.info(f"Path to Resolve Python folder: `{python3_home_str}`") - - # add to the PYTHONPATH - env_pythonpath = self.launch_context.env["PYTHONPATH"] - modules_path = Path(resolve_script_api, "Modules").as_posix() - self.launch_context.env[ - "PYTHONPATH" - ] = f"{modules_path}{os.pathsep}{env_pythonpath}" - - self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}") - - # add the pythonhome folder to PATH because on Windows - # this is needed for Py3 to be correctly detected within Resolve - env_path = self.launch_context.env["PATH"] - self.log.info(f"Adding `{python3_home_str}` to the PATH variable") - self.launch_context.env[ - "PATH" - ] = f"{python3_home_str}{os.pathsep}{env_path}" - - self.log.debug(f"PATH: {self.launch_context.env['PATH']}") - - resolve_utility_scripts_dirs = { - "windows": ( - f"{programdata}/Blackmagic Design" - "/DaVinci Resolve/Fusion/Scripts/Comp" - ), - "darwin": ( - "/Library/Application Support/Blackmagic Design" - "/DaVinci Resolve/Fusion/Scripts/Comp" - ), - "linux": "/opt/resolve/Fusion/Scripts/Comp", - } - resolve_utility_scripts_dir = Path( - resolve_utility_scripts_dirs[current_platform] - ) - # setting utility scripts dir for scripts syncing - self.launch_context.env[ - "RESOLVE_UTILITY_SCRIPTS_DIR" - ] = resolve_utility_scripts_dir.as_posix() - - # remove terminal coloring tags - self.launch_context.env["AYON_LOG_NO_COLORS"] = "1" - - # Resolve Setup integration - setup(self.launch_context.env) diff --git a/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_startup.py b/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_startup.py deleted file mode 100644 index b357b10056..0000000000 --- a/server_addon/resolve/client/ayon_resolve/hooks/pre_resolve_startup.py +++ /dev/null @@ -1,24 +0,0 @@ -import os - -from ayon_applications import PreLaunchHook, LaunchTypes -from ayon_resolve import RESOLVE_ADDON_ROOT - - -class PreLaunchResolveStartup(PreLaunchHook): - """Special hook to configure startup script. - - """ - order = 11 - app_groups = {"resolve"} - launch_types = {LaunchTypes.local} - - def execute(self): - # Set the openpype prelaunch startup script path for easy access - # in the LUA .scriptlib code - script_path = os.path.join(RESOLVE_ADDON_ROOT, "startup.py") - key = "AYON_RESOLVE_STARTUP_SCRIPT" - self.launch_context.env[key] = script_path - - self.log.info( - f"Setting AYON_RESOLVE_STARTUP_SCRIPT to: {script_path}" - ) diff --git a/server_addon/resolve/client/ayon_resolve/otio/__init__.py b/server_addon/resolve/client/ayon_resolve/otio/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/resolve/client/ayon_resolve/otio/davinci_export.py b/server_addon/resolve/client/ayon_resolve/otio/davinci_export.py deleted file mode 100644 index 5f11c81fc5..0000000000 --- a/server_addon/resolve/client/ayon_resolve/otio/davinci_export.py +++ /dev/null @@ -1,326 +0,0 @@ -""" compatibility OpenTimelineIO 0.12.0 and older -""" - -import os -import re -import sys -import json -import opentimelineio as otio -from . 
import utils -import clique - -self = sys.modules[__name__] -self.track_types = { - "video": otio.schema.TrackKind.Video, - "audio": otio.schema.TrackKind.Audio -} -self.project_fps = None - - -def create_otio_rational_time(frame, fps): - return otio.opentime.RationalTime( - float(frame), - float(fps) - ) - - -def create_otio_time_range(start_frame, frame_duration, fps): - return otio.opentime.TimeRange( - start_time=create_otio_rational_time(start_frame, fps), - duration=create_otio_rational_time(frame_duration, fps) - ) - - -def create_otio_reference(media_pool_item): - metadata = _get_metadata_media_pool_item(media_pool_item) - print("media pool item: {}".format(media_pool_item.GetName())) - - _mp_clip_property = media_pool_item.GetClipProperty - - path = _mp_clip_property("File Path") - reformat_path = utils.get_reformated_path(path, padded=True) - padding = utils.get_padding_from_path(path) - - if padding: - metadata.update({ - "isSequence": True, - "padding": padding - }) - - # get clip property regarding to type - fps = float(_mp_clip_property("FPS")) - if _mp_clip_property("Type") == "Video": - frame_start = int(_mp_clip_property("Start")) - frame_duration = int(_mp_clip_property("Frames")) - else: - audio_duration = str(_mp_clip_property("Duration")) - frame_start = 0 - frame_duration = int(utils.timecode_to_frames( - audio_duration, float(fps))) - - otio_ex_ref_item = None - - if padding: - # if it is file sequence try to create `ImageSequenceReference` - # the OTIO might not be compatible so return nothing and do it old way - try: - dirname, filename = os.path.split(path) - collection = clique.parse(filename, '{head}[{ranges}]{tail}') - padding_num = len(re.findall("(\\d+)(?=-)", filename).pop()) - otio_ex_ref_item = otio.schema.ImageSequenceReference( - target_url_base=dirname + os.sep, - name_prefix=collection.format("{head}"), - name_suffix=collection.format("{tail}"), - start_frame=frame_start, - frame_zero_padding=padding_num, - rate=fps, - available_range=create_otio_time_range( - frame_start, - frame_duration, - fps - ) - ) - except AttributeError: - pass - - if not otio_ex_ref_item: - # in case old OTIO or video file create `ExternalReference` - otio_ex_ref_item = otio.schema.ExternalReference( - target_url=reformat_path, - available_range=create_otio_time_range( - frame_start, - frame_duration, - fps - ) - ) - - # add metadata to otio item - add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata) - - return otio_ex_ref_item - - -def create_otio_markers(track_item, fps): - track_item_markers = track_item.GetMarkers() - markers = [] - for marker_frame in track_item_markers: - note = track_item_markers[marker_frame]["note"] - if "{" in note and "}" in note: - metadata = json.loads(note) - else: - metadata = {"note": note} - markers.append( - otio.schema.Marker( - name=track_item_markers[marker_frame]["name"], - marked_range=create_otio_time_range( - marker_frame, - track_item_markers[marker_frame]["duration"], - fps - ), - color=track_item_markers[marker_frame]["color"].upper(), - metadata=metadata - ) - ) - return markers - - -def create_otio_clip(track_item): - media_pool_item = track_item.GetMediaPoolItem() - _mp_clip_property = media_pool_item.GetClipProperty - - if not self.project_fps: - fps = float(_mp_clip_property("FPS")) - else: - fps = self.project_fps - - name = track_item.GetName() - - media_reference = create_otio_reference(media_pool_item) - source_range = create_otio_time_range( - int(track_item.GetLeftOffset()), - int(track_item.GetDuration()), - 
fps - ) - - if _mp_clip_property("Type") == "Audio": - return_clips = list() - audio_chanels = _mp_clip_property("Audio Ch") - for channel in range(0, int(audio_chanels)): - clip = otio.schema.Clip( - name=f"{name}_{channel}", - source_range=source_range, - media_reference=media_reference - ) - for marker in create_otio_markers(track_item, fps): - clip.markers.append(marker) - return_clips.append(clip) - return return_clips - else: - clip = otio.schema.Clip( - name=name, - source_range=source_range, - media_reference=media_reference - ) - for marker in create_otio_markers(track_item, fps): - clip.markers.append(marker) - - return clip - - -def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): - return otio.schema.Gap( - source_range=create_otio_time_range( - gap_start, - (clip_start - tl_start_frame) - gap_start, - fps - ) - ) - - -def _create_otio_timeline(project, timeline, fps): - metadata = _get_timeline_metadata(project, timeline) - start_time = create_otio_rational_time( - timeline.GetStartFrame(), fps) - otio_timeline = otio.schema.Timeline( - name=timeline.GetName(), - global_start_time=start_time, - metadata=metadata - ) - return otio_timeline - - -def _get_timeline_metadata(project, timeline): - media_pool = project.GetMediaPool() - root_folder = media_pool.GetRootFolder() - ls_folder = root_folder.GetClipList() - timeline = project.GetCurrentTimeline() - timeline_name = timeline.GetName() - for tl in ls_folder: - if tl.GetName() not in timeline_name: - continue - return _get_metadata_media_pool_item(tl) - - -def _get_metadata_media_pool_item(media_pool_item): - data = dict() - data.update({k: v for k, v in media_pool_item.GetMetadata().items()}) - property = media_pool_item.GetClipProperty() or {} - for name, value in property.items(): - if "Resolution" in name and "" != value: - width, height = value.split("x") - data.update({ - "width": int(width), - "height": int(height) - }) - if "PAR" in name and "" != value: - try: - data.update({"pixelAspect": float(value)}) - except ValueError: - if "Square" in value: - data.update({"pixelAspect": float(1)}) - else: - data.update({"pixelAspect": float(1)}) - - return data - - -def create_otio_track(track_type, track_name): - return otio.schema.Track( - name=track_name, - kind=self.track_types[track_type] - ) - - -def add_otio_gap(clip_start, otio_track, track_item, timeline): - # if gap between track start and clip start - if clip_start > otio_track.available_range().duration.value: - # create gap and add it to track - otio_track.append( - create_otio_gap( - otio_track.available_range().duration.value, - track_item.GetStart(), - timeline.GetStartFrame(), - self.project_fps - ) - ) - - -def add_otio_metadata(otio_item, media_pool_item, **kwargs): - mp_metadata = media_pool_item.GetMetadata() - # add additional metadata from kwargs - if kwargs: - mp_metadata.update(kwargs) - - # add metadata to otio item metadata - for key, value in mp_metadata.items(): - otio_item.metadata.update({key: value}) - - -def create_otio_timeline(resolve_project): - - # get current timeline - self.project_fps = resolve_project.GetSetting("timelineFrameRate") - timeline = resolve_project.GetCurrentTimeline() - - # convert timeline to otio - otio_timeline = _create_otio_timeline( - resolve_project, timeline, self.project_fps) - - # loop all defined track types - for track_type in list(self.track_types.keys()): - # get total track count - track_count = timeline.GetTrackCount(track_type) - - # loop all tracks by track indexes - for track_index in range(1, 
int(track_count) + 1): - # get current track name - track_name = timeline.GetTrackName(track_type, track_index) - - # convert track to otio - otio_track = create_otio_track( - track_type, track_name) - - # get all track items in current track - current_track_items = timeline.GetItemListInTrack( - track_type, track_index) - - # loop available track items in current track items - for track_item in current_track_items: - # skip offline track items - if track_item.GetMediaPoolItem() is None: - continue - - # calculate real clip start - clip_start = track_item.GetStart() - timeline.GetStartFrame() - - add_otio_gap( - clip_start, otio_track, track_item, timeline) - - # create otio clip and add it to track - otio_clip = create_otio_clip(track_item) - - if not isinstance(otio_clip, list): - otio_track.append(otio_clip) - else: - for index, clip in enumerate(otio_clip): - if index == 0: - otio_track.append(clip) - else: - # add previous otio track to timeline - otio_timeline.tracks.append(otio_track) - # convert track to otio - otio_track = create_otio_track( - track_type, track_name) - add_otio_gap( - clip_start, otio_track, - track_item, timeline) - otio_track.append(clip) - - # add track to otio timeline - otio_timeline.tracks.append(otio_track) - - return otio_timeline - - -def write_to_file(otio_timeline, path): - otio.adapters.write_to_file(otio_timeline, path) diff --git a/server_addon/resolve/client/ayon_resolve/otio/davinci_import.py b/server_addon/resolve/client/ayon_resolve/otio/davinci_import.py deleted file mode 100644 index 3bbb007b25..0000000000 --- a/server_addon/resolve/client/ayon_resolve/otio/davinci_import.py +++ /dev/null @@ -1,108 +0,0 @@ -import sys -import json -import DaVinciResolveScript -import opentimelineio as otio - - -self = sys.modules[__name__] -self.resolve = DaVinciResolveScript.scriptapp('Resolve') -self.fusion = DaVinciResolveScript.scriptapp('Fusion') -self.project_manager = self.resolve.GetProjectManager() -self.current_project = self.project_manager.GetCurrentProject() -self.media_pool = self.current_project.GetMediaPool() -self.track_types = { - "video": otio.schema.TrackKind.Video, - "audio": otio.schema.TrackKind.Audio -} -self.project_fps = None - - -def build_timeline(otio_timeline): - # TODO: build timeline in mediapool `otioImport` folder - # TODO: loop otio tracks and build them in the new timeline - for clip in otio_timeline.each_clip(): - # TODO: create track item - print(clip.name) - print(clip.parent().name) - print(clip.range_in_parent()) - - -def _build_track(otio_track): - # TODO: _build_track - pass - - -def _build_media_pool_item(otio_media_reference): - # TODO: _build_media_pool_item - pass - - -def _build_track_item(otio_clip): - # TODO: _build_track_item - pass - - -def _build_gap(otio_clip): - # TODO: _build_gap - pass - - -def _build_marker(track_item, otio_marker): - frame_start = otio_marker.marked_range.start_time.value - frame_duration = otio_marker.marked_range.duration.value - - # marker attributes - frameId = (frame_start / 10) * 10 - color = otio_marker.color - name = otio_marker.name - note = otio_marker.metadata.get("note") or json.dumps(otio_marker.metadata) - duration = (frame_duration / 10) * 10 - - track_item.AddMarker( - frameId, - color, - name, - note, - duration - ) - - -def _build_media_pool_folder(name): - """ - Returns folder with input name and sets it as current folder. 
-
-    It will create a new media bin if none is found in the root media bin.
-
-    Args:
-        name (str): name of bin
-
-    Returns:
-        resolve.api.MediaPool.Folder: the now-current media pool folder
-
-    """
-
-    root_folder = self.media_pool.GetRootFolder()
-    sub_folders = root_folder.GetSubFolderList()
-    testing_names = list()
-
-    for subfolder in sub_folders:
-        subf_name = subfolder.GetName()
-        if name in subf_name:
-            testing_names.append(subfolder)
-        else:
-            testing_names.append(False)
-
-    matching = next((f for f in testing_names if f is not False), None)
-
-    if not matching:
-        new_folder = self.media_pool.AddSubFolder(root_folder, name)
-        self.media_pool.SetCurrentFolder(new_folder)
-    else:
-        self.media_pool.SetCurrentFolder(matching)
-
-    return self.media_pool.GetCurrentFolder()
-
-
-def read_from_file(otio_file):
-    otio_timeline = otio.adapters.read_from_file(otio_file)
-    build_timeline(otio_timeline)
diff --git a/server_addon/resolve/client/ayon_resolve/otio/utils.py b/server_addon/resolve/client/ayon_resolve/otio/utils.py
deleted file mode 100644
index c03305ff23..0000000000
--- a/server_addon/resolve/client/ayon_resolve/otio/utils.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import re
-import opentimelineio as otio
-
-
-def timecode_to_frames(timecode, framerate):
-    rt = otio.opentime.from_timecode(timecode, framerate)
-    return int(otio.opentime.to_frames(rt))
-
-
-def frames_to_timecode(frames, framerate):
-    rt = otio.opentime.from_frames(frames, framerate)
-    return otio.opentime.to_timecode(rt)
-
-
-def frames_to_secons(frames, framerate):
-    rt = otio.opentime.from_frames(frames, framerate)
-    return otio.opentime.to_seconds(rt)
-
-
-def get_reformated_path(path, padded=True, first=False):
-    """
-    Return path with the frame-range token replaced by a printf-style
-    frame expression (or by the first frame number).
-
-    Args:
-        path (str): path url or simple file name
-
-    Returns:
-        str: string with reformatted path
-
-    Example:
-        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr
-
-    """
-    num_pattern = r"(\[\d+\-\d+\])"
-    padding_pattern = r"(\d+)(?=-)"
-    first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]")
-
-    if "[" in path:
-        padding = len(re.findall(padding_pattern, path).pop())
-        if padded:
-            path = re.sub(num_pattern, f"%0{padding}d", path)
-        elif first:
-            first_frame = re.findall(first_frame_pattern, path, flags=0)
-            if len(first_frame) >= 1:
-                first_frame = first_frame[0]
-            path = re.sub(num_pattern, first_frame, path)
-        else:
-            path = re.sub(num_pattern, "%d", path)
-    return path
-
-
-def get_padding_from_path(path):
-    """
-    Return the padding length from a DaVinci Resolve style sequence path.
-
-    Args:
-        path (str): path url or simple file name
-
-    Returns:
-        int: padding number
-
-    Example:
-        get_padding_from_path("plate.[0001-1008].exr") > 4
-
-    """
-    padding_pattern = "(\\d+)(?=-)"
-    if "[" in path:
-        return len(re.findall(padding_pattern, path).pop())
-
-    return None
diff --git a/server_addon/resolve/client/ayon_resolve/plugins/create/create_shot_clip.py b/server_addon/resolve/client/ayon_resolve/plugins/create/create_shot_clip.py
deleted file mode 100644
index da98c8de7d..0000000000
--- a/server_addon/resolve/client/ayon_resolve/plugins/create/create_shot_clip.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# from pprint import pformat
-from ayon_resolve.api import plugin, lib
-from ayon_resolve.api.lib import (
-    get_video_track_names,
-    create_bin,
-)
-
-
-class CreateShotClip(plugin.Creator):
-    """Publishable clip"""
-
-    label = "Create Publishable Clip"
-    product_type = "clip"
-    icon = "film"
-    defaults = ["Main"]
-
-    gui_tracks = get_video_track_names()
-    gui_name = "AYON publish attributes creator"
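-    # NOTE: Each `gui_inputs` entry below appears to describe one widget of
-    # the creator dialog: `type` selects the Qt widget, `label` and `toolTip`
-    # are the UI texts, `target` marks whether a value stays in the UI ("ui")
-    # or is written into the clip tag ("tag"), and `order` drives the layout.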
-    gui_info = "Define sequential rename and fill hierarchy data."
-    gui_inputs = {
-        "renameHierarchy": {
-            "type": "section",
-            "label": "Shot Hierarchy And Rename Settings",
-            "target": "ui",
-            "order": 0,
-            "value": {
-                "hierarchy": {
-                    "value": "{folder}/{sequence}",
-                    "type": "QLineEdit",
-                    "label": "Shot Parent Hierarchy",
-                    "target": "tag",
-                    "toolTip": "Parent folders of the shot root folder; the template is filled from the `Hierarchy Data` section",  # noqa
-                    "order": 0},
-                "clipRename": {
-                    "value": False,
-                    "type": "QCheckBox",
-                    "label": "Rename clips",
-                    "target": "ui",
-                    "toolTip": "Rename selected clips on the fly",  # noqa
-                    "order": 1},
-                "clipName": {
-                    "value": "{sequence}{shot}",
-                    "type": "QLineEdit",
-                    "label": "Clip Name Template",
-                    "target": "ui",
-                    "toolTip": "Template for creating shot names, used for renaming (turn `Rename clips` on)",  # noqa
-                    "order": 2},
-                "countFrom": {
-                    "value": 10,
-                    "type": "QSpinBox",
-                    "label": "Count sequence from",
-                    "target": "ui",
-                    "toolTip": "Set the number the sequence count starts from",  # noqa
-                    "order": 3},
-                "countSteps": {
-                    "value": 10,
-                    "type": "QSpinBox",
-                    "label": "Stepping number",
-                    "target": "ui",
-                    "toolTip": "What number is added with every new step",  # noqa
-                    "order": 4},
-            }
-        },
-        "hierarchyData": {
-            "type": "dict",
-            "label": "Shot Template Keywords",
-            "target": "tag",
-            "order": 1,
-            "value": {
-                "folder": {
-                    "value": "shots",
-                    "type": "QLineEdit",
-                    "label": "{folder}",
-                    "target": "tag",
-                    "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 0},
-                "episode": {
-                    "value": "ep01",
-                    "type": "QLineEdit",
-                    "label": "{episode}",
-                    "target": "tag",
-                    "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 1},
-                "sequence": {
-                    "value": "sq01",
-                    "type": "QLineEdit",
-                    "label": "{sequence}",
-                    "target": "tag",
-                    "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 2},
-                "track": {
-                    "value": "{_track_}",
-                    "type": "QLineEdit",
-                    "label": "{track}",
-                    "target": "tag",
-                    "toolTip": "Name of parent track layer.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 3},
-                "shot": {
-                    "value": "sh###",
-                    "type": "QLineEdit",
-                    "label": "{shot}",
-                    "target": "tag",
-                    "toolTip": "Name of shot. `#` is converted to a padded number.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 4}
-            }
-        },
-        "verticalSync": {
-            "type": "section",
-            "label": "Vertical Synchronization Of Attributes",
-            "target": "ui",
-            "order": 2,
-            "value": {
-                "vSyncOn": {
-                    "value": True,
-                    "type": "QCheckBox",
-                    "label": "Enable Vertical Sync",
-                    "target": "ui",
-                    "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
-                    "order": 0},
-                "vSyncTrack": {
-                    "value": gui_tracks,  # noqa
-                    "type": "QComboBox",
-                    "label": "Hero track",
-                    "target": "ui",
-                    "toolTip": "Select the driving (hero) track whose attributes master all others",  # noqa
-                    "order": 1
-                }
-            }
-        },
-        "publishSettings": {
-            "type": "section",
-            "label": "Publish Settings",
-            "target": "ui",
-            "order": 3,
-            "value": {
-                "productName": {
-                    "value": ["<track_name>", "main", "bg", "fg", "bg",
-                              "animatic"],
-                    "type": "QComboBox",
-                    "label": "Product Name",
-                    "target": "ui",
-                    "toolTip": "Choose the product name pattern; if `<track_name>` is selected, the name of the track layer will be used",  # noqa
-                    "order": 0},
-                "productType": {
-                    "value": ["plate", "take"],
-                    "type": "QComboBox",
-                    "label": "Product type",
-                    "target": "ui",
-                    "toolTip": "What this product will be used for",  # noqa
-                    "order": 1},
-                "reviewTrack": {
-                    "value": ["< none >"] + gui_tracks,
-                    "type": "QComboBox",
-                    "label": "Use Review Track",
-                    "target": "ui",
-                    "toolTip": "Generate preview videos on the fly; if `< none >` is selected, nothing will be generated.",  # noqa
-                    "order": 2},
-                "audio": {
-                    "value": False,
-                    "type": "QCheckBox",
-                    "label": "Include audio",
-                    "target": "tag",
-                    "toolTip": "Process products with corresponding audio",  # noqa
-                    "order": 3},
-                "sourceResolution": {
-                    "value": False,
-                    "type": "QCheckBox",
-                    "label": "Source resolution",
-                    "target": "tag",
-                    "toolTip": "Is the resolution taken from the timeline or the source?",  # noqa
-                    "order": 4},
-            }
-        },
-        "shotAttr": {
-            "type": "section",
-            "label": "Shot Attributes",
-            "target": "ui",
-            "order": 4,
-            "value": {
-                "workfileFrameStart": {
-                    "value": 1001,
-                    "type": "QSpinBox",
-                    "label": "Workfiles Start Frame",
-                    "target": "tag",
-                    "toolTip": "Set workfile starting frame number",  # noqa
-                    "order": 0
-                },
-                "handleStart": {
-                    "value": 0,
-                    "type": "QSpinBox",
-                    "label": "Handle start (head)",
-                    "target": "tag",
-                    "toolTip": "Handle at start of clip",  # noqa
-                    "order": 1
-                },
-                "handleEnd": {
-                    "value": 0,
-                    "type": "QSpinBox",
-                    "label": "Handle end (tail)",
-                    "target": "tag",
-                    "toolTip": "Handle at end of clip",  # noqa
-                    "order": 2
-                }
-            }
-        }
-    }
-
-    presets = None
-
-    def process(self):
-        # get key pairs from presets and match them to ui inputs
-        for k, v in self.gui_inputs.items():
-            if v["type"] in ("dict", "section"):
-                # nested dictionary (only one level allowed
-                # for sections and dict)
-                for _k, _v in v["value"].items():
-                    if self.presets.get(_k) is not None:
-                        self.gui_inputs[k][
-                            "value"][_k]["value"] = self.presets[_k]
-            if self.presets.get(k):
-                self.gui_inputs[k]["value"] = self.presets[k]
-
-        # open widget for plugins inputs
-        widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
-        widget.exec_()
-
-        if len(self.selected) < 1:
-            return
-
-        if not widget.result:
-            print("Operation aborted")
-            return
-
-        self.rename_add = 0
-
-        # get ui output for track name for vertical sync
-        v_sync_track = widget.result["vSyncTrack"]["value"]
-
-        # sort selected track items: hero (vSync) track items first
-        sorted_selected_track_items = []
-        unsorted_selected_track_items = []
-        print("_____ selected ______")
-        print(self.selected)
-        for track_item_data in self.selected:
-            if track_item_data["track"]["name"] in v_sync_track:
-                sorted_selected_track_items.append(track_item_data)
-            else:
-                unsorted_selected_track_items.append(track_item_data)
-
-        sorted_selected_track_items.extend(unsorted_selected_track_items)
-
-        # sequence attrs
-        sq_frame_start = self.timeline.GetStartFrame()
-        sq_markers = self.timeline.GetMarkers()
-
-        # create media bin for compound clips (trackItems)
-        mp_folder = create_bin(self.timeline.GetName())
-
-        kwargs = {
-            "ui_inputs": widget.result,
-            "avalon": self.data,
-            "mp_folder": mp_folder,
-            "sq_frame_start": sq_frame_start,
-            "sq_markers": sq_markers
-        }
-        print(kwargs)
-        for i, track_item_data in enumerate(sorted_selected_track_items):
-            self.rename_index = i
-            self.log.info(track_item_data)
-            # convert track item to timeline media pool item
-            track_item = plugin.PublishClip(
-                self, track_item_data, **kwargs).convert()
-            track_item.SetClipColor(lib.publish_clip_color)
diff --git a/server_addon/resolve/client/ayon_resolve/plugins/inventory/remove_unused_media_pool_items.py b/server_addon/resolve/client/ayon_resolve/plugins/inventory/remove_unused_media_pool_items.py
deleted file mode 100644
index 7ea55dc1ff..0000000000
--- a/server_addon/resolve/client/ayon_resolve/plugins/inventory/remove_unused_media_pool_items.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from ayon_core.pipeline import (
-    InventoryAction,
-)
-from ayon_core.pipeline.load.utils import remove_container
-
-
-class RemoveUnusedMedia(InventoryAction):
-
-    label = "Remove Unused Selected Media"
-    icon = "trash"
-
-    @staticmethod
-    def is_compatible(container):
-        return (
-            container.get("loader") == "LoadMedia"
-        )
-
-    def process(self, containers):
-        any_removed = False
-        for container in containers:
-            media_pool_item = container["_item"]
-            usage = int(media_pool_item.GetClipProperty("Usage"))
-            name = media_pool_item.GetName()
-            if usage == 0:
-                print(f"Removing {name}")
-                remove_container(container)
-                any_removed = True
-            else:
-                print(f"Keeping {name} with usage: {usage}")
-
-        return any_removed
diff --git a/server_addon/resolve/client/ayon_resolve/plugins/load/load_clip.py b/server_addon/resolve/client/ayon_resolve/plugins/load/load_clip.py
deleted file mode 100644
index 7e3a5a254e..0000000000
--- a/server_addon/resolve/client/ayon_resolve/plugins/load/load_clip.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import ayon_api
-
-from ayon_resolve.api import lib, plugin
-from ayon_resolve.api.pipeline import (
-    containerise,
-    update_container,
-)
-from ayon_core.lib.transcoding import (
-    VIDEO_EXTENSIONS,
-    IMAGE_EXTENSIONS
-)
-
-
-class LoadClip(plugin.TimelineItemLoader):
-    """Load a product to the timeline as a clip.
-
-    Places the clip on the timeline at the original asset timings
-    collected while conforming to the project.
-    """
-
-    product_types = {"render2d", "source", "plate", "render", "review"}
-
-    representations = {"*"}
-    extensions = set(
-        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
-    )
-
-    label = "Load as clip"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    # for loader multiselection
-    timeline = None
-
-    # presets
-    clip_color_last = "Olive"
-    clip_color = "Orange"
-
-    def load(self, context, name, namespace, options):
-
-        # load clip to timeline and get main variables
-        files = plugin.get_representation_files(context["representation"])
-
-        timeline_item = plugin.ClipLoader(
-            self, context, **options).load(files)
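-        # ClipLoader (from ayon_resolve.api.plugin) is expected to place the
-        # media on the timeline and return the created timeline item.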
namespace = namespace or timeline_item.GetName() - - # update color of clip regarding the version order - self.set_item_color( - context["project"]["name"], - timeline_item, - context["version"] - ) - - data_imprint = self.get_tag_data(context, name, namespace) - return containerise( - timeline_item, - name, namespace, context, - self.__class__.__name__, - data_imprint) - - def switch(self, container, context): - self.update(container, context) - - def update(self, container, context): - """ Updating previously loaded clips - """ - - repre_entity = context["representation"] - name = container['name'] - namespace = container['namespace'] - timeline_item = container["_timeline_item"] - - media_pool_item = timeline_item.GetMediaPoolItem() - - files = plugin.get_representation_files(repre_entity) - - loader = plugin.ClipLoader(self, context) - timeline_item = loader.update(timeline_item, files) - - # update color of clip regarding the version order - self.set_item_color( - context["project"]["name"], - timeline_item, - context["version"] - ) - - # if original media pool item has no remaining usages left - # remove it from the media pool - if int(media_pool_item.GetClipProperty("Usage")) == 0: - lib.remove_media_pool_item(media_pool_item) - - data_imprint = self.get_tag_data(context, name, namespace) - return update_container(timeline_item, data_imprint) - - def get_tag_data(self, context, name, namespace): - """Return data to be imprinted on the timeline item marker""" - - repre_entity = context["representation"] - version_entity = context["version"] - version_attributes = version_entity["attrib"] - colorspace = version_attributes.get("colorSpace", None) - object_name = "{}_{}".format(name, namespace) - - # add additional metadata from the version to imprint Avalon knob - # move all version data keys to tag data - add_version_data_keys = [ - "frameStart", "frameEnd", "source", "author", - "fps", "handleStart", "handleEnd" - ] - data = { - key: version_attributes.get(key, "None") - for key in add_version_data_keys - } - - # add variables related to version context - data.update({ - "representation": repre_entity["id"], - "version": version_entity["version"], - "colorspace": colorspace, - "objectName": object_name - }) - return data - - @classmethod - def set_item_color(cls, project_name, timeline_item, version_entity): - """Color timeline item based on whether it is outdated or latest""" - # get all versions in list - last_version_entity = ayon_api.get_last_version_by_product_id( - project_name, - version_entity["productId"], - fields=["name"] - ) - last_version_id = None - if last_version_entity: - last_version_id = last_version_entity["id"] - - # set clip colour - if version_entity["id"] == last_version_id: - timeline_item.SetClipColor(cls.clip_color_last) - else: - timeline_item.SetClipColor(cls.clip_color) - - def remove(self, container): - timeline_item = container["_timeline_item"] - media_pool_item = timeline_item.GetMediaPoolItem() - timeline = lib.get_current_timeline() - - # DeleteClips function was added in Resolve 18.5+ - # by checking None we can detect whether the - # function exists in Resolve - if timeline.DeleteClips is not None: - timeline.DeleteClips([timeline_item]) - else: - # Resolve versions older than 18.5 can't delete clips via API - # so all we can do is just remove the pype marker to 'untag' it - if lib.get_pype_marker(timeline_item): - # Note: We must call `get_pype_marker` because - # `delete_pype_marker` uses a global variable set by - # `get_pype_marker` to delete 
-                # the right marker
-                # TODO: Improve code to avoid the global `temp_marker_frame`
-                lib.delete_pype_marker(timeline_item)
-
-        # if media pool item has no remaining usages left
-        # remove it from the media pool
-        if int(media_pool_item.GetClipProperty("Usage")) == 0:
-            lib.remove_media_pool_item(media_pool_item)
diff --git a/server_addon/resolve/client/ayon_resolve/plugins/load/load_editorial_package.py b/server_addon/resolve/client/ayon_resolve/plugins/load/load_editorial_package.py
deleted file mode 100644
index 234e7b7f71..0000000000
--- a/server_addon/resolve/client/ayon_resolve/plugins/load/load_editorial_package.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from pathlib import Path
-
-from ayon_core.pipeline import (
-    load,
-    get_representation_path,
-)
-
-from ayon_resolve.api import lib
-
-
-class LoadEditorialPackage(load.LoaderPlugin):
-    """Load an editorial package to the timeline.
-
-    Loads a timeline from an OTIO file, including its media sources
-    and timeline structure.
-    """
-
-    product_types = {"editorial_pkg"}
-
-    representations = {"*"}
-    extensions = {"otio"}
-
-    label = "Load as Timeline"
-    order = -10
-    icon = "ei.align-left"
-    color = "orange"
-
-    def load(self, context, name, namespace, data):
-        files = get_representation_path(context["representation"])
-
-        search_folder_path = Path(files).parent / "resources"
-
-        project = lib.get_current_project()
-        media_pool = project.GetMediaPool()
-
-        # create versioned bin for editorial package
-        version_name = context["version"]["name"]
-        bin_name = f"{name}_{version_name}"
-        lib.create_bin(bin_name)
-
-        import_options = {
-            "timelineName": "Editorial Package Timeline",
-            "importSourceClips": True,
-            "sourceClipsPath": search_folder_path.as_posix(),
-        }
-
-        timeline = media_pool.ImportTimelineFromFile(files, import_options)
-        print("Timeline imported: ", timeline)
-
-    def update(self, container, context):
-        # TODO: implement update method in future
-        pass
diff --git a/server_addon/resolve/client/ayon_resolve/plugins/load/load_media.py b/server_addon/resolve/client/ayon_resolve/plugins/load/load_media.py
deleted file mode 100644
index c1aaeca6bd..0000000000
--- a/server_addon/resolve/client/ayon_resolve/plugins/load/load_media.py
+++ /dev/null
@@ -1,533 +0,0 @@
-import json
-import contextlib
-from pathlib import Path
-from collections import defaultdict
-from typing import Union, List, Optional, TypedDict, Tuple
-
-from ayon_api import version_is_latest
-from ayon_core.lib import StringTemplate
-from ayon_core.pipeline.colorspace import get_remapped_colorspace_to_native
-from ayon_core.pipeline import (
-    Anatomy,
-    LoaderPlugin,
-    get_representation_path,
-    registered_host
-)
-from ayon_core.pipeline.load import get_representation_path_with_anatomy
-from ayon_core.lib.transcoding import (
-    VIDEO_EXTENSIONS,
-    IMAGE_EXTENSIONS
-)
-from ayon_core.lib import BoolDef
-from ayon_resolve.api import lib
-from ayon_resolve.api.pipeline import AVALON_CONTAINER_ID
-
-
-FRAME_SPLITTER = "__frame_splitter__"
-
-
-class MetadataEntry(TypedDict):
-    """Metadata entry is a dict of {"name": "key", "value": "value"}."""
-    name: str
-    value: str
-
-
-@contextlib.contextmanager
-def project_color_science_mode(project=None, mode="davinciYRGBColorManagedv2"):
-    """Set project color science mode during context.
- - This is especially useful as context for setting the colorspace for media - pool items, because when Resolve is not set to `davinciYRGBColorManagedv2` - it fails to set its "Input Color Space" clip property even though it is - accessible and settable via the Resolve User Interface. - - Args - project (Project): The active Resolve Project. - mode (Optional[str]): The color science mode to apply during the - context. Defaults to 'davinciYRGBColorManagedv2' - - See Also: - https://forum.blackmagicdesign.com/viewtopic.php?f=21&t=197441 - """ - - if project is None: - project = lib.get_current_project() - - original_mode = project.GetSetting("colorScienceMode") - if original_mode != mode: - project.SetSetting("colorScienceMode", mode) - try: - yield - finally: - if project.GetSetting("colorScienceMode") != original_mode: - project.SetSetting("colorScienceMode", original_mode) - - -def set_colorspace(media_pool_item, - colorspace, - mode="davinciYRGBColorManagedv2"): - """Set MediaPoolItem colorspace. - - This implements a workaround that you cannot set the input colorspace - unless the Resolve project's color science mode is set to - `davinciYRGBColorManagedv2`. - - Args: - media_pool_item (MediaPoolItem): The media pool item. - colorspace (str): The colorspace to apply. - mode (Optional[str]): The Resolve project color science mode to be in - while setting the colorspace. - Defaults to 'davinciYRGBColorManagedv2' - - Returns: - bool: Whether applying the colorspace succeeded. - """ - with project_color_science_mode(mode=mode): - return media_pool_item.SetClipProperty("Input Color Space", colorspace) - - -def find_clip_usage(media_pool_item, project=None): - """Return all Timeline Items in the project using the Media Pool Item. - - Each entry in the list is a tuple of Timeline and TimelineItem so that - it's easy to know which Timeline the TimelineItem belongs to. - - Arguments: - media_pool_item (MediaPoolItem): The Media Pool Item to search for. - project (Project): The resolve project the media pool item resides in. - - Returns: - List[Tuple[Timeline, TimelineItem]]: A 2-tuple of a timeline with - the timeline item. 
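-
-    Example:
-        >>> # Minimal usage sketch (illustrative only): print where a media
-        >>> # pool item is used across the project's timelines.
-        >>> for timeline, timeline_item in find_clip_usage(media_pool_item):
-        ...     print(timeline.GetName(), timeline_item.GetName())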
-
-    """
-    usage = int(media_pool_item.GetClipProperty("Usage"))
-    if not usage:
-        return []
-
-    if project is None:
-        project = lib.get_current_project()
-
-    matching_items = []
-    unique_id = media_pool_item.GetUniqueId()
-    for timeline_idx in range(project.GetTimelineCount()):
-        timeline = project.GetTimelineByIndex(timeline_idx + 1)
-
-        # Consider audio and video tracks
-        for track_type in ["video", "audio"]:
-            for track_idx in range(timeline.GetTrackCount(track_type)):
-                timeline_items = timeline.GetItemListInTrack(track_type,
-                                                             track_idx + 1)
-                for timeline_item in timeline_items:
-                    timeline_item_mpi = timeline_item.GetMediaPoolItem()
-                    if not timeline_item_mpi:
-                        continue
-
-                    if timeline_item_mpi.GetUniqueId() == unique_id:
-                        matching_items.append((timeline, timeline_item))
-                        usage -= 1
-                        if usage <= 0:
-                            # If no usage should be left after this found
-                            # entry, return early
-                            return matching_items
-
-    return matching_items
-
-
-class LoadMedia(LoaderPlugin):
-    """Load product as media pool item."""
-
-    product_types = {"render2d", "source", "plate", "render", "review"}
-
-    representations = ["*"]
-    extensions = set(
-        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
-    )
-
-    label = "Load media"
-    order = -20
-    icon = "code-fork"
-    color = "orange"
-
-    options = [
-        BoolDef(
-            "load_to_timeline",
-            label="Load to timeline",
-            default=True,
-            tooltip="Whether to automatically add the media to the "
-                    "current timeline on load"
-        ),
-        BoolDef(
-            "load_once",
-            label="Re-use existing",
-            default=True,
-            tooltip="When enabled - if this particular version is already "
-                    "loaded it will not be loaded again but will be re-used."
-        )
-    ]
-
-    # for loader multiselection
-    timeline = None
-
-    # presets
-    clip_color_last = "Olive"
-    clip_color_old = "Orange"
-
-    media_pool_bin_path = "Loader/{folder[path]}"
-
-    metadata: List[MetadataEntry] = []
-
-    # cached on apply settings
-    _host_imageio_settings = None
-
-    @classmethod
-    def apply_settings(cls, project_settings):
-        super(LoadMedia, cls).apply_settings(project_settings)
-        cls._host_imageio_settings = project_settings["resolve"]["imageio"]
-
-    def load(self, context, name, namespace, options):
-
-        # For loading multiselection, we store timeline before first load
-        # because the current timeline can change with the imported media.
- if self.timeline is None: - self.timeline = lib.get_current_timeline() - - representation = context["representation"] - self._project_name = context["project"]["name"] - - project = lib.get_current_project() - media_pool = project.GetMediaPool() - - # Allow to use an existing media pool item and re-use it - item = None - if options.get("load_once", True): - host = registered_host() - repre_id = context["representation"]["id"] - for container in host.ls(): - if container["representation"] != repre_id: - continue - - if container["loader"] != self.__class__.__name__: - continue - - print(f"Re-using existing container: {container}") - item = container["_item"] - - if item is None: - item = self._import_media_to_bin(context, media_pool, representation) - # Always update clip color - even if re-using existing clip - color = self.get_item_color(context) - item.SetClipColor(color) - - if options.get("load_to_timeline", True): - timeline = options.get("timeline", self.timeline) - if timeline: - # Add media to active timeline - lib.create_timeline_item( - media_pool_item=item, - timeline=timeline - ) - - def _import_media_to_bin( - self, context, media_pool, representation - ): - """Import media to Resolve Media Pool. - - Also create a bin if `media_pool_bin_path` is set. - - Args: - context (dict): The context dictionary. - media_pool (resolve.MediaPool): The Resolve Media Pool. - representation (dict): The representation data. - - Returns: - resolve.MediaPoolItem: The imported media pool item. - """ - # Create or set the bin folder, we add it in there - # If bin path is not set we just add into the current active bin - if self.media_pool_bin_path: - media_pool_bin_path = StringTemplate( - self.media_pool_bin_path).format_strict(context) - - folder = lib.create_bin( - # double slashes will create unconnected folders - name=media_pool_bin_path.replace("//", "/"), - root=media_pool.GetRootFolder(), - set_as_current=False - ) - media_pool.SetCurrentFolder(folder) - - # Import media - # Resolve API: ImportMedia function requires a list of dictionaries - # with keys "FilePath", "StartIndex" and "EndIndex" for sequences - # but only string with absolute path for single files. 
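-        # For illustration, a sequence import call would take the dict form
-        # (the frame range below is a made-up example):
-        #   media_pool.ImportMedia([{"FilePath": "shot.%04d.exr",
-        #                            "StartIndex": 1001,
-        #                            "EndIndex": 1100}])
-        # while a single file is imported from a plain path string:
-        #   media_pool.ImportMedia(["/path/to/plate.mov"])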
- is_sequence, file_info = self._get_file_info(context) - items = ( - media_pool.ImportMedia([file_info]) - if is_sequence - else media_pool.ImportMedia([file_info["FilePath"]]) - ) - assert len(items) == 1, "Must import only one media item" - - result = items[0] - - self._set_metadata(result, context) - self._set_colorspace_from_representation(result, representation) - - data = self._get_container_data(context) - - # Add containerise data only needed on first load - data.update({ - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "loader": str(self.__class__.__name__), - }) - - result.SetMetadata(lib.pype_tag_name, json.dumps(data)) - - return result - - def switch(self, container, context): - self.update(container, context) - - def update(self, container, context): - # Update MediaPoolItem filepath and metadata - item = container["_item"] - - # Get the existing metadata before we update because the - # metadata gets removed - data = json.loads(item.GetMetadata(lib.pype_tag_name)) - - # Get metadata to preserve after the clip replacement - # TODO: Maybe preserve more, like LUT, Alpha Mode, Input Sizing Preset - colorspace_before = item.GetClipProperty("Input Color Space") - - # Update path - path = get_representation_path(context["representation"]) - success = item.ReplaceClip(path) - if not success: - raise RuntimeError( - f"Failed to replace media pool item clip to filepath: {path}" - ) - - # Update the metadata - update_data = self._get_container_data(context) - data.update(update_data) - item.SetMetadata(lib.pype_tag_name, json.dumps(data)) - - self._set_metadata(media_pool_item=item, context=context) - self._set_colorspace_from_representation( - item, - representation=context["representation"] - ) - - # If no specific colorspace is set then we want to preserve the - # colorspace a user might have set before the clip replacement - if ( - item.GetClipProperty("Input Color Space") == "Project" - and colorspace_before != "Project" - ): - result = set_colorspace(item, colorspace_before) - if not result: - self.log.warning( - f"Failed to re-apply colorspace: {colorspace_before}." - ) - - # Update the clip color - color = self.get_item_color(context) - item.SetClipColor(color) - - def remove(self, container): - # Remove MediaPoolItem entry - project = lib.get_current_project() - media_pool = project.GetMediaPool() - item = container["_item"] - - # Delete any usages of the media pool item so there's no trail - # left in existing timelines. Currently only the media pool item - # gets removed which fits the Resolve workflow but is confusing - # artists - usage = find_clip_usage(media_pool_item=item, project=project) - if usage: - # Group all timeline items per timeline, so we can delete the clips - # in the timeline at once. 
The Resolve objects are not hashable, so - # we need to store them in the dict by id - usage_by_timeline = defaultdict(list) - timeline_by_id = {} - for timeline, timeline_item in usage: - timeline_id = timeline.GetUniqueId() - timeline_by_id[timeline_id] = timeline - usage_by_timeline[timeline.GetUniqueId()].append(timeline_item) - - for timeline_id, timeline_items in usage_by_timeline.items(): - timeline = timeline_by_id[timeline_id] - timeline.DeleteClips(timeline_items) - - # Delete the media pool item - media_pool.DeleteClips([item]) - - def _get_container_data(self, context: dict) -> dict: - """Return metadata related to the representation and version.""" - - # add additional metadata from the version to imprint AYON knob - version = context["version"] - data = {} - - # version.attrib - for key in [ - "frameStart", "frameEnd", - "handleStart", "handleEnd", - "source", "fps", "colorSpace" - ]: - data[key] = version["attrib"][key] - - # version.data - for key in ["author"]: - data[key] = version["data"][key] - - # add variables related to version context - data.update({ - "representation": context["representation"]["id"], - "version": version["name"], - }) - - return data - - @classmethod - def get_item_color(cls, context: dict) -> str: - """Return item color name. - - Coloring depends on whether representation is the latest version. - """ - # Compare version with last version - # set clip colour - if version_is_latest(project_name=context["project"]["name"], - version_id=context["version"]["id"]): - return cls.clip_color_last - else: - return cls.clip_color_old - - def _set_metadata(self, media_pool_item, context: dict): - """Set Media Pool Item Clip Properties""" - - # Set more clip metadata based on the loaded clip's context - for meta_item in self.metadata: - clip_property = meta_item["name"] - value = meta_item["value"] - value_formatted = StringTemplate(value).format_strict(context) - media_pool_item.SetClipProperty(clip_property, value_formatted) - - def _get_file_info(self, context: dict) -> Tuple[bool, Union[str, dict]]: - """Return file info for Resolve ImportMedia. - - Args: - context (dict): The context dictionary. - - Returns: - Tuple[bool, Union[str, dict]]: A tuple of whether the file is a - sequence and the file info dictionary. - """ - - representation = context["representation"] - anatomy = Anatomy(self._project_name) - - # Get path to representation with correct frame number - repre_path = get_representation_path_with_anatomy( - representation, anatomy) - - first_frame = representation["context"].get("frame") - - is_sequence = False - # is not sequence - if first_frame is None: - return ( - is_sequence, {"FilePath": repre_path} - ) - - # This is sequence - is_sequence = True - repre_files = [ - file["path"].format(root=anatomy.roots) - for file in representation["files"] - ] - - # Change frame in representation context to get path with frame - # splitter. 
- representation["context"]["frame"] = FRAME_SPLITTER - frame_repre_path = get_representation_path_with_anatomy( - representation, anatomy - ) - frame_repre_path = Path(frame_repre_path) - repre_dir, repre_filename = ( - frame_repre_path.parent, frame_repre_path.name) - # Get sequence prefix and suffix - file_prefix, file_suffix = repre_filename.split(FRAME_SPLITTER) - # Get frame number from path as string to get frame padding - frame_str = str(repre_path)[len(file_prefix):][:len(file_suffix)] - frame_padding = len(frame_str) - - file_name = f"{file_prefix}%0{frame_padding}d{file_suffix}" - - abs_filepath = Path(repre_dir, file_name) - - start_index = int(first_frame) - end_index = int(int(first_frame) + len(repre_files) - 1) - - # See Resolve API, to import for example clip "file_[001-100].dpx": - # ImportMedia([{"FilePath":"file_%03d.dpx", - # "StartIndex":1, - # "EndIndex":100}]) - return ( - is_sequence, - { - "FilePath": abs_filepath.as_posix(), - "StartIndex": start_index, - "EndIndex": end_index, - } - ) - - def _get_colorspace(self, representation: dict) -> Optional[str]: - """Return Resolve native colorspace from OCIO colorspace data. - - Returns: - Optional[str]: The Resolve native colorspace name, if any mapped. - """ - - data = representation.get("data", {}).get("colorspaceData", {}) - if not data: - return - - ocio_colorspace = data["colorspace"] - if not ocio_colorspace: - return - - resolve_colorspace = get_remapped_colorspace_to_native( - ocio_colorspace_name=ocio_colorspace, - host_name="resolve", - imageio_host_settings=self._host_imageio_settings - ) - if resolve_colorspace: - return resolve_colorspace - else: - self.log.warning( - f"No mapping from OCIO colorspace '{ocio_colorspace}' " - "found to a Resolve colorspace. " - "Ignoring colorspace." - ) - - def _set_colorspace_from_representation( - self, media_pool_item, representation: dict): - """Set the colorspace for the media pool item. - - Args: - media_pool_item (MediaPoolItem): The media pool item. - representation (dict): The representation data. - """ - # Set the Resolve Input Color Space for the media. - colorspace = self._get_colorspace(representation) - if colorspace: - result = set_colorspace(media_pool_item, colorspace) - if not result: - self.log.warning( - f"Failed to apply colorspace: {colorspace}." 
- ) diff --git a/server_addon/resolve/client/ayon_resolve/plugins/publish/extract_workfile.py b/server_addon/resolve/client/ayon_resolve/plugins/publish/extract_workfile.py deleted file mode 100644 index 77d14ccdc5..0000000000 --- a/server_addon/resolve/client/ayon_resolve/plugins/publish/extract_workfile.py +++ /dev/null @@ -1,52 +0,0 @@ -import os -import pyblish.api - -from ayon_core.pipeline import publish -from ayon_resolve.api.lib import get_project_manager - - -class ExtractWorkfile(publish.Extractor): - """ - Extractor export DRP workfile file representation - """ - - label = "Extract Workfile" - order = pyblish.api.ExtractorOrder - families = ["workfile"] - hosts = ["resolve"] - - def process(self, instance): - # create representation data - if "representations" not in instance.data: - instance.data["representations"] = [] - - name = instance.data["name"] - project = instance.context.data["activeProject"] - staging_dir = self.staging_dir(instance) - - resolve_workfile_ext = ".drp" - drp_file_name = name + resolve_workfile_ext - - drp_file_path = os.path.normpath( - os.path.join(staging_dir, drp_file_name)) - - # write out the drp workfile - get_project_manager().ExportProject( - project.GetName(), drp_file_path) - - # create drp workfile representation - representation_drp = { - 'name': resolve_workfile_ext[1:], - 'ext': resolve_workfile_ext[1:], - 'files': drp_file_name, - "stagingDir": staging_dir, - } - - instance.data["representations"].append(representation_drp) - - # add sourcePath attribute to instance - if not instance.data.get("sourcePath"): - instance.data["sourcePath"] = drp_file_path - - self.log.info("Added Resolve file representation: {}".format( - representation_drp)) diff --git a/server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_instances.py b/server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_instances.py deleted file mode 100644 index e2b6e7ba37..0000000000 --- a/server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_instances.py +++ /dev/null @@ -1,178 +0,0 @@ -from pprint import pformat - -import pyblish - -from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID -from ayon_resolve.api.lib import ( - get_current_timeline_items, - get_timeline_item_pype_tag, - publish_clip_color, - get_publish_attribute, - get_otio_clip_instance_data, -) - - -class PrecollectInstances(pyblish.api.ContextPlugin): - """Collect all Track items selection.""" - - order = pyblish.api.CollectorOrder - 0.49 - label = "Precollect Instances" - hosts = ["resolve"] - - def process(self, context): - otio_timeline = context.data["otioTimeline"] - selected_timeline_items = get_current_timeline_items( - filter=True, selecting_color=publish_clip_color) - - self.log.info( - "Processing enabled track items: {}".format( - len(selected_timeline_items))) - - for timeline_item_data in selected_timeline_items: - - data = {} - timeline_item = timeline_item_data["clip"]["item"] - - # get pype tag data - tag_data = get_timeline_item_pype_tag(timeline_item) - self.log.debug(f"__ tag_data: {pformat(tag_data)}") - - if not tag_data: - continue - - if tag_data.get("id") not in { - AYON_INSTANCE_ID, AVALON_INSTANCE_ID - }: - continue - - media_pool_item = timeline_item.GetMediaPoolItem() - source_duration = int(media_pool_item.GetClipProperty("Frames")) - - # solve handles length - handle_start = min( - tag_data["handleStart"], int(timeline_item.GetLeftOffset())) - handle_end = min( - tag_data["handleEnd"], int( - source_duration - 
timeline_item.GetRightOffset())) - - self.log.debug("Handles: <{}, {}>".format(handle_start, handle_end)) - - # add tag data to instance data - data.update({ - k: v for k, v in tag_data.items() - if k not in ("id", "applieswhole", "label") - }) - - folder_path = tag_data["folder_path"] - # Backward compatibility fix of 'entity_type' > 'folder_type' - if "parents" in data: - for parent in data["parents"]: - if "entity_type" in parent: - parent["folder_type"] = parent.pop("entity_type") - - # TODO: remove backward compatibility - product_name = tag_data.get("productName") - if product_name is None: - # backward compatibility: subset -> productName - product_name = tag_data.get("subset") - - # backward compatibility: product_name should not be missing - if not product_name: - self.log.error( - "Product name is not defined for: {}".format(folder_path)) - - # TODO: remove backward compatibility - product_type = tag_data.get("productType") - if product_type is None: - # backward compatibility: family -> productType - product_type = tag_data.get("family") - - # backward compatibility: product_type should not be missing - if not product_type: - self.log.error( - "Product type is not defined for: {}".format(folder_path)) - - data.update({ - "name": "{}_{}".format(folder_path, product_name), - "label": "{} {}".format(folder_path, product_name), - "folderPath": folder_path, - "item": timeline_item, - "publish": get_publish_attribute(timeline_item), - "fps": context.data["fps"], - "handleStart": handle_start, - "handleEnd": handle_end, - "newHierarchyIntegration": True, - # Backwards compatible (Deprecated since 24/06/06) - "newAssetPublishing": True, - "families": ["clip"], - "productType": product_type, - "productName": product_name, - "family": product_type - }) - - # otio clip data - otio_data = get_otio_clip_instance_data( - otio_timeline, timeline_item_data) or {} - data.update(otio_data) - - # add resolution - self.get_resolution_to_data(data, context) - - # create instance - instance = context.create_instance(**data) - - # create shot instance for shot attributes create/update - self.create_shot_instance(context, timeline_item, **data) - - self.log.info("Creating instance: {}".format(instance)) - self.log.debug( - "_ instance.data: {}".format(pformat(instance.data))) - - def get_resolution_to_data(self, data, context): - assert data.get("otioClip"), "Missing `otioClip` data" - - # solve source resolution option - if data.get("sourceResolution", None): - otio_clip_metadata = data[ - "otioClip"].media_reference.metadata - data.update({ - "resolutionWidth": otio_clip_metadata["width"], - "resolutionHeight": otio_clip_metadata["height"], - "pixelAspect": otio_clip_metadata["pixelAspect"] - }) - else: - otio_tl_metadata = context.data["otioTimeline"].metadata - data.update({ - "resolutionWidth": otio_tl_metadata["width"], - "resolutionHeight": otio_tl_metadata["height"], - "pixelAspect": otio_tl_metadata["pixelAspect"] - }) - - def create_shot_instance(self, context, timeline_item, **data): - hero_track = data.get("heroTrack") - hierarchy_data = data.get("hierarchyData") - - if not hero_track: - return - - if not hierarchy_data: - return - - folder_path = data["folderPath"] - product_name = "shotMain" - - # insert family into families - product_type = "shot" - - data.update({ - "name": "{}_{}".format(folder_path, product_name), - "label": "{} {}".format(folder_path, product_name), - "folderPath": folder_path, - "productName": product_name, - "productType": product_type, - "family": product_type, - 
"families": [product_type], - "publish": get_publish_attribute(timeline_item) - }) - - context.create_instance(**data) diff --git a/server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_workfile.py b/server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_workfile.py deleted file mode 100644 index a388d4bc59..0000000000 --- a/server_addon/resolve/client/ayon_resolve/plugins/publish/precollect_workfile.py +++ /dev/null @@ -1,54 +0,0 @@ -import pyblish.api -from pprint import pformat - -from ayon_core.pipeline import get_current_folder_path - -from ayon_resolve import api as rapi -from ayon_resolve.otio import davinci_export - - -class PrecollectWorkfile(pyblish.api.ContextPlugin): - """Precollect the current working file into context""" - - label = "Precollect Workfile" - order = pyblish.api.CollectorOrder - 0.5 - - def process(self, context): - current_folder_path = get_current_folder_path() - folder_name = current_folder_path.split("/")[-1] - - product_name = "workfileMain" - project = rapi.get_current_project() - fps = project.GetSetting("timelineFrameRate") - video_tracks = rapi.get_video_track_names() - - # adding otio timeline to context - otio_timeline = davinci_export.create_otio_timeline(project) - - instance_data = { - "name": "{}_{}".format(folder_name, product_name), - "label": "{} {}".format(current_folder_path, product_name), - "item": project, - "folderPath": current_folder_path, - "productName": product_name, - "productType": "workfile", - "family": "workfile", - "families": [] - } - - # create instance with workfile - instance = context.create_instance(**instance_data) - - # update context with main project attributes - context_data = { - "activeProject": project, - "otioTimeline": otio_timeline, - "videoTracks": video_tracks, - "currentFile": project.GetName(), - "fps": fps, - } - context.data.update(context_data) - - self.log.info("Creating instance: {}".format(instance)) - self.log.debug("__ instance.data: {}".format(pformat(instance.data))) - self.log.debug("__ context_data: {}".format(pformat(context_data))) diff --git a/server_addon/resolve/client/ayon_resolve/startup.py b/server_addon/resolve/client/ayon_resolve/startup.py deleted file mode 100644 index 7f0bd59055..0000000000 --- a/server_addon/resolve/client/ayon_resolve/startup.py +++ /dev/null @@ -1,70 +0,0 @@ -"""This script is used as a startup script in Resolve through a .scriptlib file - -It triggers directly after the launch of Resolve and it's recommended to keep -it optimized for fast performance since the Resolve UI is actually interactive -while this is running. As such, there's nothing ensuring the user isn't -continuing manually before any of the logic here runs. As such we also try -to delay any imports as much as possible. - -This code runs in a separate process to the main Resolve process. - -""" -import os -from ayon_core.lib import Logger -import ayon_resolve.api - -log = Logger.get_logger(__name__) - - -def ensure_installed_host(): - """Install resolve host with openpype and return the registered host. - - This function can be called multiple times without triggering an - additional install. 
- """ - from ayon_core.pipeline import install_host, registered_host - host = registered_host() - if host: - return host - - host = ayon_resolve.api.ResolveHost() - install_host(host) - return registered_host() - - -def launch_menu(): - print("Launching Resolve AYON menu..") - ensure_installed_host() - ayon_resolve.api.launch_ayon_menu() - - -def open_workfile(path): - # Avoid the need to "install" the host - host = ensure_installed_host() - host.open_workfile(path) - - -def main(): - # Open last workfile - workfile_path = os.environ.get("AYON_RESOLVE_OPEN_ON_LAUNCH") - - if workfile_path and os.path.exists(workfile_path): - log.info(f"Opening last workfile: {workfile_path}") - open_workfile(workfile_path) - else: - log.info("No last workfile set to open. Skipping..") - - # Launch AYON menu - from ayon_core.settings import get_project_settings - from ayon_core.pipeline.context_tools import get_current_project_name - project_name = get_current_project_name() - log.info(f"Current project name in context: {project_name}") - - settings = get_project_settings(project_name) - if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True): - log.info("Launching AYON menu..") - launch_menu() - - -if __name__ == "__main__": - main() diff --git a/server_addon/resolve/client/ayon_resolve/utility_scripts/AYON__Menu.py b/server_addon/resolve/client/ayon_resolve/utility_scripts/AYON__Menu.py deleted file mode 100644 index 670544d605..0000000000 --- a/server_addon/resolve/client/ayon_resolve/utility_scripts/AYON__Menu.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys - -from ayon_core.pipeline import install_host -from ayon_core.lib import Logger - -log = Logger.get_logger(__name__) - - -def main(env): - from ayon_resolve.api import ResolveHost, launch_ayon_menu - - # activate resolve from openpype - host = ResolveHost() - install_host(host) - - launch_ayon_menu() - - -if __name__ == "__main__": - result = main(os.environ) - sys.exit(not bool(result)) diff --git a/server_addon/resolve/client/ayon_resolve/utility_scripts/ayon_startup.scriptlib b/server_addon/resolve/client/ayon_resolve/utility_scripts/ayon_startup.scriptlib deleted file mode 100644 index 22253390a3..0000000000 --- a/server_addon/resolve/client/ayon_resolve/utility_scripts/ayon_startup.scriptlib +++ /dev/null @@ -1,21 +0,0 @@ --- Run OpenPype's Python launch script for resolve -function file_exists(name) - local f = io.open(name, "r") - return f ~= nil and io.close(f) -end - - -ayon_startup_script = os.getenv("AYON_RESOLVE_STARTUP_SCRIPT") -if ayon_startup_script ~= nil then - script = fusion:MapPath(ayon_startup_script) - - if file_exists(script) then - -- We must use RunScript to ensure it runs in a separate - -- process to Resolve itself to avoid a deadlock for - -- certain imports of OpenPype libraries or Qt - print("Running launch script: " .. script) - fusion:RunScript(script) - else - print("Launch script not found at: " .. 
script) - end -end \ No newline at end of file diff --git a/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_export.py b/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_export.py deleted file mode 100644 index 4572d1354d..0000000000 --- a/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_export.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python -import os -from ayon_resolve.otio import davinci_export as otio_export - -resolve = bmd.scriptapp("Resolve") # noqa -fu = resolve.Fusion() - -ui = fu.UIManager -disp = bmd.UIDispatcher(fu.UIManager) # noqa - - -title_font = ui.Font({"PixelSize": 18}) -dlg = disp.AddWindow( - { - "WindowTitle": "Export OTIO", - "ID": "OTIOwin", - "Geometry": [250, 250, 250, 100], - "Spacing": 0, - "Margin": 10 - }, - [ - ui.VGroup( - { - "Spacing": 2 - }, - [ - ui.Button( - { - "ID": "exportfilebttn", - "Text": "Select Destination", - "Weight": 1.25, - "ToolTip": "Choose where to save the otio", - "Flat": False - } - ), - ui.VGap(), - ui.Button( - { - "ID": "exportbttn", - "Text": "Export", - "Weight": 2, - "ToolTip": "Export the current timeline", - "Flat": False - } - ) - ] - ) - ] -) - -itm = dlg.GetItems() - - -def _close_window(event): - disp.ExitLoop() - - -def _export_button(event): - pm = resolve.GetProjectManager() - project = pm.GetCurrentProject() - timeline = project.GetCurrentTimeline() - otio_timeline = otio_export.create_otio_timeline(project) - otio_path = os.path.join( - itm["exportfilebttn"].Text, - timeline.GetName() + ".otio") - print(otio_path) - otio_export.write_to_file( - otio_timeline, - otio_path) - _close_window(None) - - -def _export_file_pressed(event): - selectedPath = fu.RequestDir(os.path.expanduser("~/Documents")) - itm["exportfilebttn"].Text = selectedPath - - -dlg.On.OTIOwin.Close = _close_window -dlg.On.exportfilebttn.Clicked = _export_file_pressed -dlg.On.exportbttn.Clicked = _export_button -dlg.Show() -disp.RunLoop() -dlg.Hide() diff --git a/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_import.py b/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_import.py deleted file mode 100644 index 17de1b6fc3..0000000000 --- a/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OTIO_import.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -import os -from ayon_resolve.otio import davinci_import as otio_import - -resolve = bmd.scriptapp("Resolve") # noqa -fu = resolve.Fusion() -ui = fu.UIManager -disp = bmd.UIDispatcher(fu.UIManager) # noqa - - -title_font = ui.Font({"PixelSize": 18}) -dlg = disp.AddWindow( - { - "WindowTitle": "Import OTIO", - "ID": "OTIOwin", - "Geometry": [250, 250, 250, 100], - "Spacing": 0, - "Margin": 10 - }, - [ - ui.VGroup( - { - "Spacing": 2 - }, - [ - ui.Button( - { - "ID": "importOTIOfileButton", - "Text": "Select OTIO File Path", - "Weight": 1.25, - "ToolTip": "Choose otio file to import from", - "Flat": False - } - ), - ui.VGap(), - ui.Button( - { - "ID": "importButton", - "Text": "Import", - "Weight": 2, - "ToolTip": "Import otio to new timeline", - "Flat": False - } - ) - ] - ) - ] -) - -itm = dlg.GetItems() - - -def _close_window(event): - disp.ExitLoop() - - -def _import_button(event): - otio_import.read_from_file(itm["importOTIOfileButton"].Text) - _close_window(None) - - -def _import_file_pressed(event): - selected_path = fu.RequestFile(os.path.expanduser("~/Documents")) - itm["importOTIOfileButton"].Text = selected_path - - -dlg.On.OTIOwin.Close = _close_window 
-dlg.On.importOTIOfileButton.Clicked = _import_file_pressed -dlg.On.importButton.Clicked = _import_button -dlg.Show() -disp.RunLoop() -dlg.Hide() diff --git a/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py b/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py deleted file mode 100644 index 5a069aff9e..0000000000 --- a/server_addon/resolve/client/ayon_resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -import os -import sys - -from ayon_core.pipeline import install_host - - -def main(env): - from ayon_resolve.utils import setup - import ayon_resolve.api as bmdvr - # Registers openpype's Global pyblish plugins - install_host(bmdvr) - setup(env) - - -if __name__ == "__main__": - result = main(os.environ) - sys.exit(not bool(result)) diff --git a/server_addon/resolve/client/ayon_resolve/utils.py b/server_addon/resolve/client/ayon_resolve/utils.py deleted file mode 100644 index d256fda18d..0000000000 --- a/server_addon/resolve/client/ayon_resolve/utils.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -import shutil -from ayon_core.lib import Logger, is_running_from_build - -RESOLVE_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__)) - - -def setup(env): - log = Logger.get_logger("ResolveSetup") - scripts = {} - util_scripts_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") - util_scripts_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"] - - util_scripts_paths = [os.path.join( - RESOLVE_ADDON_ROOT, - "utility_scripts" - )] - - # collect script dirs - if util_scripts_env: - log.info("Utility Scripts Env: `{}`".format(util_scripts_env)) - util_scripts_paths = util_scripts_env.split( - os.pathsep) + util_scripts_paths - - # collect scripts from dirs - for path in util_scripts_paths: - scripts.update({path: os.listdir(path)}) - - log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths)) - log.info("Utility Scripts: `{}`".format(scripts)) - - # Make sure scripts dir exists - os.makedirs(util_scripts_dir, exist_ok=True) - - # make sure no script file is in folder - for script in os.listdir(util_scripts_dir): - path = os.path.join(util_scripts_dir, script) - log.info("Removing `{}`...".format(path)) - if os.path.isdir(path): - shutil.rmtree(path, onerror=None) - else: - os.remove(path) - - # copy scripts into Resolve's utility scripts dir - for directory, scripts in scripts.items(): - for script in scripts: - if ( - is_running_from_build() - and script in ["tests", "develop"] - ): - # only copy those if started from build - continue - - src = os.path.join(directory, script) - dst = os.path.join(util_scripts_dir, script) - - # TODO: Make this a less hacky workaround - if script == "ayon_startup.scriptlib": - # Handle special case for scriptlib that needs to be a folder - # up from the Comp folder in the Fusion scripts - dst = os.path.join(os.path.dirname(util_scripts_dir), - script) - - log.info("Copying `{}` to `{}`...".format(src, dst)) - if os.path.isdir(src): - shutil.copytree( - src, dst, symlinks=False, - ignore=None, ignore_dangling_symlinks=False - ) - else: - shutil.copy2(src, dst) diff --git a/server_addon/resolve/client/ayon_resolve/version.py b/server_addon/resolve/client/ayon_resolve/version.py deleted file mode 100644 index 585f44b5a5..0000000000 --- a/server_addon/resolve/client/ayon_resolve/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'resolve' version.""" -__version__ = "0.2.2" 
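For context: the removed startup flow above is driven by two environment variables that appear in the deleted code, AYON_RESOLVE_STARTUP_SCRIPT (read by ayon_startup.scriptlib, which runs the script through fusion:RunScript() in a separate process) and AYON_RESOLVE_OPEN_ON_LAUNCH (read by startup.py main() to open the last workfile). A minimal sketch of how a launcher could wire them up; the concrete paths below are illustrative, not part of the removed code:

    import os

    env = dict(os.environ)
    # Read by ayon_startup.scriptlib; Resolve runs the script via
    # fusion:RunScript() to avoid deadlocks with Qt/library imports.
    env["AYON_RESOLVE_STARTUP_SCRIPT"] = "/path/to/ayon_resolve/startup.py"
    # Read by startup.py main() to open the last workfile on launch.
    env["AYON_RESOLVE_OPEN_ON_LAUNCH"] = "/projects/demo/work/edit_v001.drp"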
diff --git a/server_addon/resolve/package.py b/server_addon/resolve/package.py deleted file mode 100644 index 643e497253..0000000000 --- a/server_addon/resolve/package.py +++ /dev/null @@ -1,10 +0,0 @@ -name = "resolve" -title = "DaVinci Resolve" -version = "0.2.2" - -client_dir = "ayon_resolve" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} diff --git a/server_addon/resolve/server/__init__.py b/server_addon/resolve/server/__init__.py deleted file mode 100644 index 35d2db19e4..0000000000 --- a/server_addon/resolve/server/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Type - -from ayon_server.addons import BaseServerAddon - -from .settings import ResolveSettings, DEFAULT_VALUES - - -class ResolveAddon(BaseServerAddon): - settings_model: Type[ResolveSettings] = ResolveSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_VALUES) diff --git a/server_addon/resolve/server/imageio.py b/server_addon/resolve/server/imageio.py deleted file mode 100644 index 301e98e90c..0000000000 --- a/server_addon/resolve/server/imageio.py +++ /dev/null @@ -1,79 +0,0 @@ -from pydantic import validator -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.settings.validators import ensure_unique_names - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." 
- ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class ImageIORemappingRulesModel(BaseSettingsModel): - host_native_name: str = SettingsField( - title="Application native colorspace name" - ) - ocio_name: str = SettingsField(title="OCIO colorspace name") - - -class ImageIORemappingModel(BaseSettingsModel): - rules: list[ImageIORemappingRulesModel] = SettingsField( - default_factory=list) - - -class ResolveImageIOModel(BaseSettingsModel): - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - remapping: ImageIORemappingModel = SettingsField( - title="Remapping colorspace names", - default_factory=ImageIORemappingModel - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) diff --git a/server_addon/resolve/server/settings.py b/server_addon/resolve/server/settings.py deleted file mode 100644 index 4d363b1a8f..0000000000 --- a/server_addon/resolve/server/settings.py +++ /dev/null @@ -1,208 +0,0 @@ -from pydantic import validator -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - ensure_unique_names, -) - -from .imageio import ResolveImageIOModel - - -class CreateShotClipModels(BaseSettingsModel): - hierarchy: str = SettingsField( - "{folder}/{sequence}", - title="Shot parent hierarchy", - section="Shot Hierarchy And Rename Settings" - ) - clipRename: bool = SettingsField( - True, - title="Rename clips" - ) - clipName: str = SettingsField( - "{track}{sequence}{shot}", - title="Clip name template" - ) - countFrom: int = SettingsField( - 10, - title="Count sequence from" - ) - countSteps: int = SettingsField( - 10, - title="Stepping number" - ) - - folder: str = SettingsField( - "shots", - title="{folder}", - section="Shot Template Keywords" - ) - episode: str = SettingsField( - "ep01", - title="{episode}" - ) - sequence: str = SettingsField( - "sq01", - title="{sequence}" - ) - track: str = SettingsField( - "{_track_}", - title="{track}" - ) - shot: str = SettingsField( - "sh###", - title="{shot}" - ) - - vSyncOn: bool = SettingsField( - False, - title="Enable Vertical Sync", - section="Vertical Synchronization Of Attributes" - ) - - workfileFrameStart: int = SettingsField( - 1001, - title="Workfile Start Frame", - section="Shot Attributes" - ) - handleStart: int = SettingsField( - 10, - title="Handle start (head)" - ) - handleEnd: int = SettingsField( - 10, - title="Handle end (tail)" - ) - - -class CreatorPluginsModel(BaseSettingsModel): - CreateShotClip: CreateShotClipModels = SettingsField( - default_factory=CreateShotClipModels, - title="Create Shot Clip" - ) - - -class MetadataMappingModel(BaseSettingsModel): - """Metadata mapping - - Representation document context data are used for formatting of - anatomy tokens. 
Following are supported: - - version - - task - - asset - - """ - name: str = SettingsField( - "", - title="Metadata property name" - ) - value: str = SettingsField( - "", - title="Metadata value template" - ) - - -class LoadMediaModel(BaseSettingsModel): - clip_color_last: str = SettingsField( - "Olive", - title="Clip color for last version" - ) - clip_color_old: str = SettingsField( - "Orange", - title="Clip color for old version" - ) - media_pool_bin_path: str = SettingsField( - "Loader/{folder[path]}", - title="Media Pool bin path template" - ) - metadata: list[MetadataMappingModel] = SettingsField( - default_factory=list, - title="Metadata mapping", - description=( - "Set these media pool item metadata values on load and update. The" - " keys must match the exact Resolve metadata names like" - " 'Clip Name' or 'Shot'" - ) - ) - - @validator("metadata") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class LoaderPluginsModel(BaseSettingsModel): - LoadMedia: LoadMediaModel = SettingsField( - default_factory=LoadMediaModel, - title="Load Media" - ) - - -class ResolveSettings(BaseSettingsModel): - launch_openpype_menu_on_start: bool = SettingsField( - False, title="Launch OpenPype menu on start of Resolve" - ) - imageio: ResolveImageIOModel = SettingsField( - default_factory=ResolveImageIOModel, - title="Color Management (ImageIO)" - ) - create: CreatorPluginsModel = SettingsField( - default_factory=CreatorPluginsModel, - title="Creator plugins", - ) - load: LoaderPluginsModel = SettingsField( - default_factory=LoaderPluginsModel, - title="Loader plugins", - ) - - -DEFAULT_VALUES = { - "launch_openpype_menu_on_start": False, - "create": { - "CreateShotClip": { - "hierarchy": "{folder}/{sequence}", - "clipRename": True, - "clipName": "{track}{sequence}{shot}", - "countFrom": 10, - "countSteps": 10, - "folder": "shots", - "episode": "ep01", - "sequence": "sq01", - "track": "{_track_}", - "shot": "sh###", - "vSyncOn": False, - "workfileFrameStart": 1001, - "handleStart": 10, - "handleEnd": 10 - } - }, - "load": { - "LoadMedia": { - "clip_color_last": "Olive", - "clip_color_old": "Orange", - "media_pool_bin_path": ( - "Loader/{folder[path]}" - ), - "metadata": [ - { - "name": "Comments", - "value": "{version[attrib][comment]}" - }, - { - "name": "Shot", - "value": "{folder[path]}" - }, - { - "name": "Take", - "value": "{product[name]} {version[name]}" - }, - { - "name": "Clip Name", - "value": ( - "{folder[path]} {product[name]} " - "{version[name]} ({representation[name]})" - ) - } - ] - } - } -} From cc13a565c100efb4227a64ca2370e05a5fb7ba49 Mon Sep 17 00:00:00 2001 From: Jakub Trllo <43494761+iLLiCiTiT@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:41:45 +0200 Subject: [PATCH 06/10] removed houdini addon --- .../houdini/client/ayon_houdini/__init__.py | 13 - .../houdini/client/ayon_houdini/addon.py | 54 - .../client/ayon_houdini/api/__init__.py | 28 - .../houdini/client/ayon_houdini/api/action.py | 83 - .../client/ayon_houdini/api/colorspace.py | 69 - .../ayon_houdini/api/creator_node_shelves.py | 244 --- .../client/ayon_houdini/api/hda_utils.py | 593 ------- .../houdini/client/ayon_houdini/api/lib.py | 1365 ----------------- .../client/ayon_houdini/api/pipeline.py | 449 ------ .../houdini/client/ayon_houdini/api/plugin.py | 347 ----- .../client/ayon_houdini/api/shelves.py | 215 --- .../houdini/client/ayon_houdini/api/usd.py | 379 ----- .../hooks/set_default_display_and_view.py | 64 - .../client/ayon_houdini/hooks/set_paths.py | 18 - 
.../plugins/create/convert_legacy.py | 78 - .../plugins/create/create_alembic_camera.py | 57 - .../plugins/create/create_arnold_ass.py | 70 - .../plugins/create/create_arnold_rop.py | 110 -- .../plugins/create/create_bgeo.py | 108 -- .../plugins/create/create_composite.py | 61 - .../ayon_houdini/plugins/create/create_hda.py | 323 ---- .../plugins/create/create_karma_rop.py | 147 -- .../plugins/create/create_mantra_rop.py | 128 -- .../plugins/create/create_model.py | 141 -- .../plugins/create/create_pointcache.py | 124 -- .../plugins/create/create_redshift_proxy.py | 68 - .../plugins/create/create_redshift_rop.py | 172 --- .../plugins/create/create_review.py | 153 -- .../plugins/create/create_staticmesh.py | 155 -- .../ayon_houdini/plugins/create/create_usd.py | 55 - .../plugins/create/create_usd_look.py | 73 - .../plugins/create/create_usdrender.py | 165 -- .../plugins/create/create_vbd_cache.py | 122 -- .../plugins/create/create_vray_rop.py | 200 --- .../plugins/create/create_workfile.py | 121 -- .../inventory/set_camera_resolution.py | 26 - .../ayon_houdini/plugins/load/actions.py | 83 - .../ayon_houdini/plugins/load/load_alembic.py | 89 -- .../plugins/load/load_alembic_archive.py | 81 - .../ayon_houdini/plugins/load/load_ass.py | 91 -- .../plugins/load/load_asset_lop.py | 52 - .../ayon_houdini/plugins/load/load_bgeo.py | 111 -- .../ayon_houdini/plugins/load/load_camera.py | 212 --- .../ayon_houdini/plugins/load/load_fbx.py | 140 -- .../plugins/load/load_filepath.py | 130 -- .../ayon_houdini/plugins/load/load_hda.py | 121 -- .../ayon_houdini/plugins/load/load_image.py | 188 --- .../plugins/load/load_redshift_proxy.py | 113 -- .../plugins/load/load_usd_layer.py | 87 -- .../plugins/load/load_usd_reference.py | 87 -- .../ayon_houdini/plugins/load/load_usd_sop.py | 79 - .../ayon_houdini/plugins/load/load_vdb.py | 108 -- .../ayon_houdini/plugins/load/show_usdview.py | 48 - .../plugins/publish/collect_active_state.py | 42 - .../plugins/publish/collect_arnold_rop.py | 168 -- .../plugins/publish/collect_asset_handles.py | 122 -- .../plugins/publish/collect_cache_farm.py | 61 - .../plugins/publish/collect_chunk_size.py | 31 - .../plugins/publish/collect_current_file.py | 37 - .../plugins/publish/collect_farm_instances.py | 36 - .../publish/collect_files_for_cleaning_up.py | 98 -- .../plugins/publish/collect_frames.py | 63 - .../plugins/publish/collect_inputs.py | 137 -- .../plugins/publish/collect_instances_type.py | 24 - .../plugins/publish/collect_karma_rop.py | 113 -- .../publish/collect_local_render_instances.py | 138 -- .../plugins/publish/collect_mantra_rop.py | 159 -- .../plugins/publish/collect_output_node.py | 77 - .../plugins/publish/collect_redshift_rop.py | 185 --- .../publish/collect_render_products.py | 248 --- .../plugins/publish/collect_review_data.py | 85 - .../publish/collect_reviewable_instances.py | 24 - .../publish/collect_rop_frame_range.py | 37 - .../publish/collect_staticmesh_type.py | 20 - .../plugins/publish/collect_usd_layers.py | 158 -- .../publish/collect_usd_look_assets.py | 243 --- .../plugins/publish/collect_usd_render.py | 86 -- .../plugins/publish/collect_vray_rop.py | 154 -- .../plugins/publish/collect_workfile.py | 34 - .../plugins/publish/collect_workscene_fps.py | 15 - .../publish/extract_active_view_thumbnail.py | 59 - .../plugins/publish/extract_hda.py | 41 - .../plugins/publish/extract_render.py | 85 - .../plugins/publish/extract_rop.py | 150 -- .../plugins/publish/extract_usd.py | 104 -- .../publish/help/validate_vdb_output_node.xml | 28 - 
.../plugins/publish/increment_current_file.py | 54 - .../plugins/publish/save_scene.py | 27 - .../validate_abc_primitive_to_detail.py | 150 -- .../publish/validate_alembic_face_sets.py | 38 - .../publish/validate_alembic_input_node.py | 66 - .../publish/validate_animation_settings.py | 53 - .../plugins/publish/validate_bypass.py | 47 - .../plugins/publish/validate_camera_rop.py | 61 - .../publish/validate_cop_output_node.py | 69 - .../validate_export_is_a_single_frame.py | 59 - .../publish/validate_fbx_output_node.py | 140 -- .../publish/validate_file_extension.py | 64 - .../plugins/publish/validate_frame_range.py | 108 -- .../plugins/publish/validate_frame_token.py | 52 - .../validate_houdini_license_category.py | 41 - .../publish/validate_instance_in_context.py | 84 - .../publish/validate_mesh_is_static.py | 62 - .../publish/validate_mkpaths_toggled.py | 35 - .../plugins/publish/validate_no_errors.py | 77 - .../validate_primitive_hierarchy_paths.py | 183 --- .../publish/validate_render_products.py | 56 - .../publish/validate_review_colorspace.py | 140 -- .../plugins/publish/validate_scene_review.py | 87 -- .../publish/validate_sop_output_node.py | 88 -- .../plugins/publish/validate_subset_name.py | 120 -- .../validate_unreal_staticmesh_naming.py | 96 -- ...ate_usd_asset_contribution_default_prim.py | 102 -- .../publish/validate_usd_look_assignments.py | 95 -- .../publish/validate_usd_look_contents.py | 148 -- .../validate_usd_look_material_defs.py | 137 -- .../publish/validate_usd_output_node.py | 74 - .../publish/validate_usd_render_arnold.py | 311 ---- .../validate_usd_render_product_names.py | 35 - .../validate_usd_render_product_paths.py | 83 - .../publish/validate_usd_rop_default_prim.py | 110 -- .../publish/validate_vdb_output_node.py | 177 --- .../publish/validate_workfile_paths.py | 95 -- .../ayon_houdini/startup/MainMenuCommon.xml | 109 -- .../client/ayon_houdini/startup/OPmenu.xml | 29 - .../outputprocessors/ayon_uri_processor.py | 135 -- .../outputprocessors/remap_to_publish.py | 66 - .../otls/ayon_lop_import.hda/INDEX__SECTION | 13 - .../otls/ayon_lop_import.hda/Sections.list | 4 - .../AYON__icon.png | Bin 16907 -> 0 bytes .../Contents.dir/Contents.createtimes | 6 - .../Contents.dir/Contents.houdini_versions | 9 - .../Contents.dir/Contents.mime | 384 ----- .../Contents.dir/Contents.modtimes | 6 - .../Contents.dir/Sections.list | 2 - .../CreateScript | 15 - .../DialogScript | 345 ----- .../ExtraFileOptions | 122 -- .../ayon_8_8Lop_1lop__import_8_81.0/Help | 0 .../ayon_8_8Lop_1lop__import_8_81.0/IconImage | Bin 6939 -> 0 bytes .../InternalFileOptions | 10 - .../MessageNodes | 1 - .../ayon_8_8Lop_1lop__import_8_81.0/OnCreated | 6 - .../ayon_8_8Lop_1lop__import_8_81.0/OnDeleted | 6 - .../ayon_8_8Lop_1lop__import_8_81.0/OnLoaded | 14 - .../OnNameChanged | 8 - .../PythonModule | 10 - .../Sections.list | 17 - .../Tools.shelf | 18 - .../TypePropertiesOptions | 14 - .../ayon_lop_import.hda/houdini.hdalibrary | 0 .../startup/python2.7libs/pythonrc.py | 12 - .../startup/python3.10libs/pythonrc.py | 12 - .../startup/python3.7libs/pythonrc.py | 12 - .../startup/python3.9libs/pythonrc.py | 12 - .../houdini/client/ayon_houdini/version.py | 3 - server_addon/houdini/package.py | 10 - server_addon/houdini/server/__init__.py | 13 - .../houdini/server/settings/__init__.py | 10 - .../houdini/server/settings/create.py | 187 --- .../houdini/server/settings/general.py | 49 - .../houdini/server/settings/imageio.py | 114 -- server_addon/houdini/server/settings/main.py | 50 - 
.../houdini/server/settings/publish.py | 218 --- .../houdini/server/settings/shelves.py | 67 - 165 files changed, 17002 deletions(-) delete mode 100644 server_addon/houdini/client/ayon_houdini/__init__.py delete mode 100644 server_addon/houdini/client/ayon_houdini/addon.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/__init__.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/action.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/colorspace.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/creator_node_shelves.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/hda_utils.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/lib.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/pipeline.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/plugin.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/shelves.py delete mode 100644 server_addon/houdini/client/ayon_houdini/api/usd.py delete mode 100644 server_addon/houdini/client/ayon_houdini/hooks/set_default_display_and_view.py delete mode 100644 server_addon/houdini/client/ayon_houdini/hooks/set_paths.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/convert_legacy.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_alembic_camera.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_ass.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_bgeo.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_composite.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_hda.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_karma_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_mantra_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_model.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_pointcache.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_proxy.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_review.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_staticmesh.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_usd.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_usd_look.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_usdrender.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_vbd_cache.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_vray_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/create/create_workfile.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/inventory/set_camera_resolution.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/actions.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic.py delete mode 100644 
server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic_archive.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_ass.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_asset_lop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_bgeo.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_camera.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_fbx.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_filepath.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_hda.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_image.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_redshift_proxy.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_layer.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_reference.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_sop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/load_vdb.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/load/show_usdview.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_active_state.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_arnold_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_asset_handles.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_cache_farm.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_chunk_size.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_current_file.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_farm_instances.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_files_for_cleaning_up.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_frames.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_inputs.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_instances_type.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_karma_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_local_render_instances.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_mantra_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_output_node.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_redshift_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_render_products.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_review_data.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_reviewable_instances.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_rop_frame_range.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_staticmesh_type.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_layers.py delete mode 100644 
server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_look_assets.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_render.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_vray_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workfile.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workscene_fps.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/extract_hda.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/extract_render.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/extract_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/extract_usd.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/help/validate_vdb_output_node.xml delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/increment_current_file.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/save_scene.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_abc_primitive_to_detail.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_face_sets.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_input_node.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_animation_settings.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_bypass.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_camera_rop.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_cop_output_node.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_export_is_a_single_frame.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_fbx_output_node.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_file_extension.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_range.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_token.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_houdini_license_category.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_instance_in_context.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mesh_is_static.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mkpaths_toggled.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_no_errors.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_primitive_hierarchy_paths.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_render_products.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_review_colorspace.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_scene_review.py delete mode 100644 
server_addon/houdini/client/ayon_houdini/plugins/publish/validate_sop_output_node.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_subset_name.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_unreal_staticmesh_naming.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_asset_contribution_default_prim.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_assignments.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_contents.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_material_defs.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_output_node.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_arnold.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_names.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_paths.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_rop_default_prim.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_vdb_output_node.py delete mode 100644 server_addon/houdini/client/ayon_houdini/plugins/publish/validate_workfile_paths.py delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/MainMenuCommon.xml delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/OPmenu.xml delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/ayon_uri_processor.py delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/remap_to_publish.py delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/INDEX__SECTION delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/Sections.list delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/AYON__icon.png delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Contents.dir/Contents.createtimes delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Contents.dir/Contents.houdini_versions delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Contents.dir/Contents.mime delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Contents.dir/Contents.modtimes delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Contents.dir/Sections.list delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/CreateScript delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/DialogScript delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/ExtraFileOptions delete mode 100644 
server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Help delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/IconImage delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/InternalFileOptions delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/MessageNodes delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/OnCreated delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/OnDeleted delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/OnLoaded delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/OnNameChanged delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/PythonModule delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Sections.list delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Tools.shelf delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/TypePropertiesOptions delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/houdini.hdalibrary delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/python2.7libs/pythonrc.py delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/python3.10libs/pythonrc.py delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/python3.7libs/pythonrc.py delete mode 100644 server_addon/houdini/client/ayon_houdini/startup/python3.9libs/pythonrc.py delete mode 100644 server_addon/houdini/client/ayon_houdini/version.py delete mode 100644 server_addon/houdini/package.py delete mode 100644 server_addon/houdini/server/__init__.py delete mode 100644 server_addon/houdini/server/settings/__init__.py delete mode 100644 server_addon/houdini/server/settings/create.py delete mode 100644 server_addon/houdini/server/settings/general.py delete mode 100644 server_addon/houdini/server/settings/imageio.py delete mode 100644 server_addon/houdini/server/settings/main.py delete mode 100644 server_addon/houdini/server/settings/publish.py delete mode 100644 server_addon/houdini/server/settings/shelves.py diff --git a/server_addon/houdini/client/ayon_houdini/__init__.py b/server_addon/houdini/client/ayon_houdini/__init__.py deleted file mode 100644 index afb51f7315..0000000000 --- a/server_addon/houdini/client/ayon_houdini/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - HoudiniAddon, - HOUDINI_HOST_DIR, -) - - -__all__ = ( - "__version__", - - "HoudiniAddon", - "HOUDINI_HOST_DIR", -) diff --git a/server_addon/houdini/client/ayon_houdini/addon.py b/server_addon/houdini/client/ayon_houdini/addon.py deleted file mode 100644 index 4c23553008..0000000000 --- a/server_addon/houdini/client/ayon_houdini/addon.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import 
__version__
-
-HOUDINI_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-class HoudiniAddon(AYONAddon, IHostAddon):
-    name = "houdini"
-    version = __version__
-    host_name = "houdini"
-
-    def add_implementation_envs(self, env, _app):
-        # Add requirements to HOUDINI_PATH and HOUDINI_MENU_PATH
-        startup_path = os.path.join(HOUDINI_HOST_DIR, "startup")
-        new_houdini_path = [startup_path]
-        new_houdini_menu_path = [startup_path]
-
-        old_houdini_path = env.get("HOUDINI_PATH") or ""
-        old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or ""
-
-        for path in old_houdini_path.split(os.pathsep):
-            if not path:
-                continue
-
-            norm_path = os.path.normpath(path)
-            if norm_path not in new_houdini_path:
-                new_houdini_path.append(norm_path)
-
-        for path in old_houdini_menu_path.split(os.pathsep):
-            if not path:
-                continue
-
-            norm_path = os.path.normpath(path)
-            if norm_path not in new_houdini_menu_path:
-                new_houdini_menu_path.append(norm_path)
-
-        # Append "&" so Houdini still expands the variable's default value
-        # (in Houdini path variables "&" stands for the default path)
-        new_houdini_path.append("&")
-        new_houdini_menu_path.append("&")
-
-        env["HOUDINI_PATH"] = os.pathsep.join(new_houdini_path)
-        env["HOUDINI_MENU_PATH"] = os.pathsep.join(new_houdini_menu_path)
-
-    def get_launch_hook_paths(self, app):
-        if app.host_name != self.host_name:
-            return []
-        return [
-            os.path.join(HOUDINI_HOST_DIR, "hooks")
-        ]
-
-    def get_workfile_extensions(self):
-        return [".hip", ".hiplc", ".hipnc"]
diff --git a/server_addon/houdini/client/ayon_houdini/api/__init__.py b/server_addon/houdini/client/ayon_houdini/api/__init__.py
deleted file mode 100644
index 358113a555..0000000000
--- a/server_addon/houdini/client/ayon_houdini/api/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from .pipeline import (
-    HoudiniHost,
-    ls,
-    containerise
-)
-
-from .lib import (
-    lsattr,
-    lsattrs,
-    read,
-
-    maintained_selection
-)
-
-
-__all__ = [
-    "HoudiniHost",
-
-    "ls",
-    "containerise",
-
-    # Utility functions
-    "lsattr",
-    "lsattrs",
-    "read",
-
-    "maintained_selection"
-]
diff --git a/server_addon/houdini/client/ayon_houdini/api/action.py b/server_addon/houdini/client/ayon_houdini/api/action.py
deleted file mode 100644
index a14296950b..0000000000
--- a/server_addon/houdini/client/ayon_houdini/api/action.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import pyblish.api
-import hou
-
-from ayon_core.pipeline.publish import get_errored_instances_from_context
-
-
-class SelectInvalidAction(pyblish.api.Action):
-    """Select invalid nodes in Houdini when a plug-in failed.
-
-    To retrieve the invalid nodes this assumes a static `get_invalid()`
-    method is available on the plugin.
- - """ - label = "Select invalid" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Awesome Icon - - def process(self, context, plugin): - - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes..") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") - - hou.clearAllSelected() - if invalid: - self.log.info("Selecting invalid nodes: {}".format( - ", ".join(node.path() for node in invalid) - )) - for node in invalid: - node.setSelected(True) - node.setCurrent(True) - else: - self.log.info("No invalid nodes found.") - - -class SelectROPAction(pyblish.api.Action): - """Select ROP. - - It's used to select the associated ROPs with the errored instances. - """ - - label = "Select ROP" - on = "failed" # This action is only available on a failed plug-in - icon = "mdi.cursor-default-click" - - def process(self, context, plugin): - errored_instances = get_errored_instances_from_context(context, plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding ROP nodes..") - rop_nodes = list() - for instance in errored_instances: - node_path = instance.data.get("instance_node") - if not node_path: - continue - - node = hou.node(node_path) - if not node: - continue - - rop_nodes.append(node) - - hou.clearAllSelected() - if rop_nodes: - self.log.info("Selecting ROP nodes: {}".format( - ", ".join(node.path() for node in rop_nodes) - )) - for node in rop_nodes: - node.setSelected(True) - node.setCurrent(True) - else: - self.log.info("No ROP nodes found.") diff --git a/server_addon/houdini/client/ayon_houdini/api/colorspace.py b/server_addon/houdini/client/ayon_houdini/api/colorspace.py deleted file mode 100644 index ec6e4c2091..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/colorspace.py +++ /dev/null @@ -1,69 +0,0 @@ -import attr -import hou -from ayon_houdini.api.lib import get_color_management_preferences -from ayon_core.pipeline.colorspace import get_display_view_colorspace_name - -@attr.s -class LayerMetadata(object): - """Data class for Render Layer metadata.""" - frameStart = attr.ib() - frameEnd = attr.ib() - - -@attr.s -class RenderProduct(object): - """Getting Colorspace as - Specific Render Product Parameter for submitting - publish job. - - """ - colorspace = attr.ib() # colorspace - view = attr.ib() - productName = attr.ib(default=None) - - -class ARenderProduct(object): - - def __init__(self): - """Constructor.""" - # Initialize - self.layer_data = self._get_layer_data() - self.layer_data.products = self.get_colorspace_data() - - def _get_layer_data(self): - return LayerMetadata( - frameStart=int(hou.playbar.frameRange()[0]), - frameEnd=int(hou.playbar.frameRange()[1]), - ) - - def get_colorspace_data(self): - """To be implemented by renderer class. - - This should return a list of RenderProducts. - - Returns: - list: List of RenderProduct - - """ - data = get_color_management_preferences() - colorspace_data = [ - RenderProduct( - colorspace=data["display"], - view=data["view"], - productName="" - ) - ] - return colorspace_data - - -def get_default_display_view_colorspace(): - """Returns the colorspace attribute of the default (display, view) pair. 
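[Editor's note: as a rough illustration, with hypothetical display/view names, the colorspace data assembled above boils down to a single product entry built from the session's OCIO preferences.]

    prefs = {"display": "ACES", "view": "sRGB"}  # get_color_management_preferences()
    products = [RenderProduct(colorspace=prefs["display"],
                              view=prefs["view"],
                              productName="")]
    # This list feeds the render layer metadata for the publish job.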
- - It's used for 'ociocolorspace' parm in OpenGL Node.""" - - prefs = get_color_management_preferences() - return get_display_view_colorspace_name( - config_path=prefs["config"], - display=prefs["display"], - view=prefs["view"] - ) diff --git a/server_addon/houdini/client/ayon_houdini/api/creator_node_shelves.py b/server_addon/houdini/client/ayon_houdini/api/creator_node_shelves.py deleted file mode 100644 index 4d5a706749..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/creator_node_shelves.py +++ /dev/null @@ -1,244 +0,0 @@ -"""Library to register OpenPype Creators for Houdini TAB node search menu. - -This can be used to install custom houdini tools for the TAB search -menu which will trigger a publish instance to be created interactively. - -The Creators are automatically registered on launch of Houdini through the -Houdini integration's `host.install()` method. - -""" -import contextlib -import tempfile -import logging -import os - -import ayon_api - -from ayon_core.pipeline import registered_host -from ayon_core.pipeline.create import CreateContext -from ayon_core.resources import get_ayon_icon_filepath - -import hou -import stateutils -import soptoolutils -import loptoolutils -import cop2toolutils - - -log = logging.getLogger(__name__) - -CATEGORY_GENERIC_TOOL = { - hou.sopNodeTypeCategory(): soptoolutils.genericTool, - hou.cop2NodeTypeCategory(): cop2toolutils.genericTool, - hou.lopNodeTypeCategory(): loptoolutils.genericTool -} - - -CREATE_SCRIPT = """ -from ayon_houdini.api.creator_node_shelves import create_interactive -create_interactive("{identifier}", **kwargs) -""" - - -def create_interactive(creator_identifier, **kwargs): - """Create a Creator using its identifier interactively. - - This is used by the generated shelf tools as callback when a user selects - the creator from the node tab search menu. - - The `kwargs` should be what Houdini passes to the tool create scripts - context. For more information see: - https://www.sidefx.com/docs/houdini/hom/tool_script.html#arguments - - Args: - creator_identifier (str): The creator identifier of the Creator plugin - to create. - - Return: - list: The created instances. - - """ - host = registered_host() - context = CreateContext(host) - creator = context.manual_creators.get(creator_identifier) - if not creator: - raise RuntimeError("Invalid creator identifier: {}".format( - creator_identifier) - ) - - # TODO Use Qt instead - result, variant = hou.ui.readInput( - "Define variant name", - buttons=("Ok", "Cancel"), - initial_contents=creator.get_default_variant(), - title="Define variant", - help="Set the variant for the publish instance", - close_choice=1 - ) - - if result == 1: - # User interrupted - return - - variant = variant.strip() - if not variant: - raise RuntimeError("Empty variant value entered.") - - # TODO: Once more elaborate unique create behavior should exist per Creator - # instead of per network editor area then we should move this from here - # to a method on the Creators for which this could be the default - # implementation. 
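[Editor's note: for reference, the TAB-menu tool script generated from CREATE_SCRIPT expands like this for a hypothetical creator identifier; Houdini runs it with the tool's `kwargs` in scope when the entry is picked.]

    script = CREATE_SCRIPT.format(
        identifier="io.ayon.creators.houdini.pointcache")  # hypothetical id
    # Resulting script body:
    # from ayon_houdini.api.creator_node_shelves import create_interactive
    # create_interactive("io.ayon.creators.houdini.pointcache", **kwargs)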
- pane = stateutils.activePane(kwargs) - if isinstance(pane, hou.NetworkEditor): - pwd = pane.pwd() - project_name = context.get_current_project_name() - folder_path = context.get_current_folder_path() - task_name = context.get_current_task_name() - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = creator.get_product_name( - project_name=context.get_current_project_name(), - folder_entity=folder_entity, - task_entity=task_entity, - variant=variant, - host_name=context.host_name, - ) - - tool_fn = CATEGORY_GENERIC_TOOL.get(pwd.childTypeCategory()) - if tool_fn is not None: - out_null = tool_fn(kwargs, "null") - out_null.setName("OUT_{}".format(product_name), unique_name=True) - - before = context.instances_by_id.copy() - - # Create the instance - context.create( - creator_identifier=creator_identifier, - variant=variant, - pre_create_data={"use_selection": True} - ) - - # For convenience we set the new node as current since that's much more - # familiar to the artist when creating a node interactively - # TODO Allow to disable auto-select in studio settings or user preferences - after = context.instances_by_id - new = set(after) - set(before) - if new: - # Select the new instance - for instance_id in new: - instance = after[instance_id] - node = hou.node(instance.get("instance_node")) - node.setCurrent(True) - - return list(new) - - -@contextlib.contextmanager -def shelves_change_block(): - """Write shelf changes at the end of the context.""" - hou.shelves.beginChangeBlock() - try: - yield - finally: - hou.shelves.endChangeBlock() - - -def install(): - """Install the Creator plug-ins to show in Houdini's TAB node search menu. - - This function is re-entrant and can be called again to reinstall and - update the node definitions. 
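[Editor's note: the new-instance detection used above is a plain before/after set difference over instance ids; reduced to standalone Python with hypothetical ids.]

    before = {"id-a": "inst-a"}                    # context.instances_by_id.copy()
    after = {"id-a": "inst-a", "id-b": "inst-b"}   # after context.create(...)
    new = set(after) - set(before)
    assert new == {"id-b"}  # only the freshly created instance remains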
For example during development it can be - useful to call it manually: - >>> from ayon_houdini.api.creator_node_shelves import install - >>> install() - - Returns: - list: List of `hou.Tool` instances - - """ - - host = registered_host() - - # Store the filepath on the host - # TODO: Define a less hacky static shelf path for current houdini session - filepath_attr = "_creator_node_shelf_filepath" - filepath = getattr(host, filepath_attr, None) - if filepath is None: - f = tempfile.NamedTemporaryFile(prefix="houdini_creator_nodes_", - suffix=".shelf", - delete=False) - f.close() - filepath = f.name - setattr(host, filepath_attr, filepath) - elif os.path.exists(filepath): - # Remove any existing shelf file so that we can completey regenerate - # and update the tools file if creator identifiers change - os.remove(filepath) - - icon = get_ayon_icon_filepath() - tab_menu_label = os.environ.get("AYON_MENU_LABEL") or "AYON" - - # Create context only to get creator plugins, so we don't reset and only - # populate what we need to retrieve the list of creator plugins - create_context = CreateContext(host, reset=False) - create_context.reset_current_context() - create_context._reset_creator_plugins() - - log.debug("Writing OpenPype Creator nodes to shelf: {}".format(filepath)) - tools = [] - - with shelves_change_block(): - for identifier, creator in create_context.manual_creators.items(): - - # Allow the creator plug-in itself to override the categories - # for where they are shown with `Creator.get_network_categories()` - if not hasattr(creator, "get_network_categories"): - log.debug("Creator {} has no `get_network_categories` method " - "and will not be added to TAB search.") - continue - - network_categories = creator.get_network_categories() - if not network_categories: - continue - - key = "ayon_create.{}".format(identifier) - log.debug(f"Registering {key}") - script = CREATE_SCRIPT.format(identifier=identifier) - data = { - "script": script, - "language": hou.scriptLanguage.Python, - "icon": icon, - "help": "Create Ayon publish instance for {}".format( - creator.label - ), - "help_url": None, - "network_categories": network_categories, - "viewer_categories": [], - "cop_viewer_categories": [], - "network_op_type": None, - "viewer_op_type": None, - "locations": [tab_menu_label] - } - label = "Create {}".format(creator.label) - tool = hou.shelves.tool(key) - if tool: - tool.setData(**data) - tool.setLabel(label) - else: - tool = hou.shelves.newTool( - file_path=filepath, - name=key, - label=label, - **data - ) - - tools.append(tool) - - # Ensure the shelf is reloaded - hou.shelves.loadFile(filepath) - - return tools diff --git a/server_addon/houdini/client/ayon_houdini/api/hda_utils.py b/server_addon/houdini/client/ayon_houdini/api/hda_utils.py deleted file mode 100644 index 412364bc04..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/hda_utils.py +++ /dev/null @@ -1,593 +0,0 @@ -"""Helper functions for load HDA""" - -import os -import contextlib -import uuid -from typing import List - -import ayon_api -from ayon_api import ( - get_project, - get_representation_by_id, - get_versions, - get_folder_by_path, - get_product_by_name, - get_version_by_name, - get_representation_by_name -) -from ayon_core.pipeline.load import ( - get_representation_context, - get_representation_path_from_context -) -from ayon_core.pipeline.context_tools import ( - get_current_project_name, - get_current_folder_path -) -from ayon_core.tools.utils import SimpleFoldersWidget -from ayon_core.style import 
load_stylesheet - -from ayon_houdini.api import lib - -from qtpy import QtCore, QtWidgets -import hou - - -def is_valid_uuid(value) -> bool: - """Return whether value is a valid UUID""" - try: - uuid.UUID(value) - except ValueError: - return False - return True - - -@contextlib.contextmanager -def _unlocked_parm(parm): - """Unlock parm during context; will always lock after""" - try: - parm.lock(False) - yield - finally: - parm.lock(True) - - -def get_available_versions(node): - """Return the versions list for node. - - The versions are sorted with the latest version first and oldest lower - version last. - - Args: - node (hou.Node): Node to query selected products' versions for. - - Returns: - list[int]: Version numbers for the product - """ - - project_name = node.evalParm("project_name") or get_current_project_name() - folder_path = node.evalParm("folder_path") - product_name = node.evalParm("product_name") - - if not all([ - project_name, folder_path, product_name - ]): - return [] - - folder_entity = get_folder_by_path( - project_name, - folder_path, - fields={"id"}) - if not folder_entity: - return [] - product_entity = get_product_by_name( - project_name, - product_name=product_name, - folder_id=folder_entity["id"], - fields={"id"}) - if not product_entity: - return [] - - # TODO: Support hero versions - versions = get_versions( - project_name, - product_ids={product_entity["id"]}, - fields={"version"}, - hero=False) - version_names = [version["version"] for version in versions] - version_names.reverse() - return version_names - - -def update_info(node, context): - """Update project, folder, product, version, representation name parms. - - Arguments: - node (hou.Node): Node to update - context (dict): Context of representation - - """ - # TODO: Avoid 'duplicate' taking over the expression if originally - # it was $OS and by duplicating, e.g. the `folder` does not exist - # anymore since it is now `hero1` instead of `hero` - # TODO: Support hero versions - version = str(context["version"]["version"]) - - # We only set the values if the value does not match the currently - # evaluated result of the other parms, so that if the project name - # value was dynamically set by the user with an expression or alike - # then if it still matches the value of the current representation id - # we preserve it. In essence, only update the value if the current - # *evaluated* value of the parm differs. 
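[Editor's note: a short usage sketch, assuming `node` is an instance of this HDA; versions come back newest first, so index 0 pins the latest.]

    versions = get_available_versions(node)  # e.g. [12, 11, 3, 2, 1]
    if versions:
        node.parm("version").set(str(versions[0]))  # pin to latest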
- parms = { - "project_name": context["project"]["name"], - "folder_path": context["folder"]["path"], - "product_name": context["product"]["name"], - "version": version, - "representation_name": context["representation"]["name"], - } - parms = {key: value for key, value in parms.items() - if node.evalParm(key) != value} - parms["load_message"] = "" # clear any warnings/errors - - # Note that these never trigger any parm callbacks since we do not - # trigger the `parm.pressButton` and programmatically setting values - # in Houdini does not trigger callbacks automatically - node.setParms(parms) - - -def _get_thumbnail(project_name: str, version_id: str, thumbnail_dir: str): - folder = hou.text.expandString(thumbnail_dir) - path = os.path.join(folder, "{}_thumbnail.jpg".format(version_id)) - expanded_path = hou.text.expandString(path) - if os.path.isfile(expanded_path): - return path - - # Try and create a thumbnail cache file - data = ayon_api.get_thumbnail(project_name, - entity_type="version", - entity_id=version_id) - if data: - thumbnail_dir_expanded = hou.text.expandString(thumbnail_dir) - os.makedirs(thumbnail_dir_expanded, exist_ok=True) - with open(expanded_path, "wb") as f: - f.write(data.content) - return path - - -def set_representation(node, representation_id: str): - file_parm = node.parm("file") - if not representation_id: - # Clear filepath and thumbnail - with _unlocked_parm(file_parm): - file_parm.set("") - set_node_thumbnail(node, None) - return - - project_name = ( - node.evalParm("project_name") - or get_current_project_name() - ) - - # Ignore invalid representation ids silently - # TODO remove - added for backwards compatibility with OpenPype scenes - if not is_valid_uuid(representation_id): - return - - repre_entity = get_representation_by_id(project_name, representation_id) - if not repre_entity: - return - - context = get_representation_context(project_name, repre_entity) - update_info(node, context) - path = get_representation_path_from_context(context) - # Load fails on UNC paths with backslashes and also - # fails to resolve @sourcename var with backslashed - # paths correctly. So we force forward slashes - path = path.replace("\\", "/") - with _unlocked_parm(file_parm): - file_parm.set(path) - - if node.evalParm("show_thumbnail"): - # Update thumbnail - # TODO: Cache thumbnail path as well - version_id = repre_entity["versionId"] - thumbnail_dir = node.evalParm("thumbnail_cache_dir") - thumbnail_path = _get_thumbnail( - project_name, version_id, thumbnail_dir - ) - set_node_thumbnail(node, thumbnail_path) - - -def set_node_thumbnail(node, thumbnail: str): - """Update node thumbnail to thumbnail""" - if thumbnail is None: - lib.set_node_thumbnail(node, None) - - rect = compute_thumbnail_rect(node) - lib.set_node_thumbnail(node, thumbnail, rect) - - -def compute_thumbnail_rect(node): - """Compute thumbnail bounding rect based on thumbnail parms""" - offset_x = node.evalParm("thumbnail_offsetx") - offset_y = node.evalParm("thumbnail_offsety") - width = node.evalParm("thumbnail_size") - # todo: compute height from aspect of actual image file. 
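[Editor's note: for orientation, the cache layout `_get_thumbnail` writes is one JPEG per version id inside the configured directory; the directory and id here are hypothetical.]

    thumbnail_dir = "$HIP/.thumbnails"
    version_id = "1a2b3c4d"
    path = "{}/{}_thumbnail.jpg".format(thumbnail_dir, version_id)
    # -> "$HIP/.thumbnails/1a2b3c4d_thumbnail.jpg",
    # expanded on use via hou.text.expandString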
- aspect = 0.5625 # for now assume 16:9 - height = width * aspect - - center = 0.5 - half_width = (width * .5) - - return hou.BoundingRect( - offset_x + center - half_width, - offset_y, - offset_x + center + half_width, - offset_y + height - ) - - -def on_thumbnail_show_changed(node): - """Callback on thumbnail show parm changed""" - if node.evalParm("show_thumbnail"): - # For now, update all - on_representation_id_changed(node) - else: - lib.remove_all_thumbnails(node) - - -def on_thumbnail_size_changed(node): - """Callback on thumbnail offset or size parms changed""" - thumbnail = lib.get_node_thumbnail(node) - if thumbnail: - rect = compute_thumbnail_rect(node) - thumbnail.setRect(rect) - lib.set_node_thumbnail(node, thumbnail) - - -def on_representation_id_changed(node): - """Callback on representation id changed - - Args: - node (hou.Node): Node to update. - """ - repre_id = node.evalParm("representation") - set_representation(node, repre_id) - - -def on_representation_parms_changed(node): - """ - Usually used as callback to the project, folder, product, version and - representation parms which on change - would result in a different - representation id to be resolved. - - Args: - node (hou.Node): Node to update. - """ - project_name = node.evalParm("project_name") or get_current_project_name() - representation_id = get_representation_id( - project_name=project_name, - folder_path=node.evalParm("folder_path"), - product_name=node.evalParm("product_name"), - version=node.evalParm("version"), - representation_name=node.evalParm("representation_name"), - load_message_parm=node.parm("load_message") - ) - if representation_id is None: - representation_id = "" - else: - representation_id = str(representation_id) - - if node.evalParm("representation") != representation_id: - node.parm("representation").set(representation_id) - node.parm("representation").pressButton() # trigger callback - - -def get_representation_id( - project_name, - folder_path, - product_name, - version, - representation_name, - load_message_parm, -): - """Get representation id. - - Args: - project_name (str): Project name - folder_path (str): Folder name - product_name (str): Product name - version (str): Version name as string - representation_name (str): Representation name - load_message_parm (hou.Parm): A string message parm to report - any error messages to. - - Returns: - Optional[str]: Representation id or None if not found. - - """ - - if not all([ - project_name, folder_path, product_name, version, representation_name - ]): - labels = { - "project": project_name, - "folder": folder_path, - "product": product_name, - "version": version, - "representation": representation_name - } - missing = ", ".join(key for key, value in labels.items() if not value) - load_message_parm.set(f"Load info incomplete. 
Found empty: {missing}") - return - - try: - version = int(version.strip()) - except ValueError: - load_message_parm.set(f"Invalid version format: '{version}'\n" - "Make sure to set a valid version number.") - return - - folder_entity = get_folder_by_path(project_name, - folder_path=folder_path, - fields={"id"}) - if not folder_entity: - # This may be due to the project not existing - so let's validate - # that first - if not get_project(project_name): - load_message_parm.set(f"Project not found: '{project_name}'") - return - load_message_parm.set(f"Folder not found: '{folder_path}'") - return - - product_entity = get_product_by_name( - project_name, - product_name=product_name, - folder_id=folder_entity["id"], - fields={"id"}) - if not product_entity: - load_message_parm.set(f"Product not found: '{product_name}'") - return - version_entity = get_version_by_name( - project_name, - version, - product_id=product_entity["id"], - fields={"id"}) - if not version_entity: - load_message_parm.set(f"Version not found: '{version}'") - return - representation_entity = get_representation_by_name( - project_name, - representation_name, - version_id=version_entity["id"], - fields={"id"}) - if not representation_entity: - load_message_parm.set( - f"Representation not found: '{representation_name}'.") - return - return representation_entity["id"] - - -def setup_flag_changed_callback(node): - """Register flag changed callback (for thumbnail brightness)""" - node.addEventCallback( - (hou.nodeEventType.FlagChanged,), - on_flag_changed - ) - - -def on_flag_changed(node, **kwargs): - """On node flag changed callback. - - Updates the brightness of attached thumbnails - """ - # Showing thumbnail is disabled so can return early since - # there should be no thumbnail to update. - if not node.evalParm('show_thumbnail'): - return - - # Update node thumbnails brightness with the - # bypass state of the node. - parent = node.parent() - images = lib.get_background_images(parent) - if not images: - return - - brightness = 0.3 if node.isBypassed() else 1.0 - has_changes = False - node_path = node.path() - for image in images: - if image.relativeToPath() == node_path: - image.setBrightness(brightness) - has_changes = True - - if has_changes: - lib.set_background_images(parent, images) - - -def keep_background_images_linked(node, old_name): - """Reconnect background images to node from old name. 
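[Editor's note: the lookup chain above as a standalone sketch; the `ayon_api` functions are the ones imported at the top of this file, while the project and entity names are hypothetical.]

    folder = get_folder_by_path(
        "demo", folder_path="/assets/hero", fields={"id"})
    product = get_product_by_name(
        "demo", product_name="modelMain",
        folder_id=folder["id"], fields={"id"})
    version = get_version_by_name(
        "demo", 3, product_id=product["id"], fields={"id"})
    repre = get_representation_by_name(
        "demo", "abc", version_id=version["id"], fields={"id"})
    # repre["id"] is what ends up in the node's "representation" parm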
- - Used as callback on node name changes to keep thumbnails linked.""" - from ayon_houdini.api.lib import ( - get_background_images, - set_background_images - ) - - parent = node.parent() - images = get_background_images(parent) - if not images: - return - - changes = False - old_path = f"{node.parent().path()}/{old_name}" - for image in images: - if image.relativeToPath() == old_path: - image.setRelativeToPath(node.path()) - changes = True - - if changes: - set_background_images(parent, images) - - -class SelectFolderPathDialog(QtWidgets.QDialog): - """Simple dialog to allow a user to select project and asset.""" - - def __init__(self, parent=None): - super(SelectFolderPathDialog, self).__init__(parent) - self.setWindowTitle("Set project and folder path") - self.setStyleSheet(load_stylesheet()) - - project_widget = QtWidgets.QComboBox() - project_widget.addItems(self.get_projects()) - - filter_widget = QtWidgets.QLineEdit() - filter_widget.setPlaceholderText("Folder name filter...") - - folder_widget = SimpleFoldersWidget(parent=self) - - accept_button = QtWidgets.QPushButton("Accept") - - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.addWidget(project_widget, 0) - main_layout.addWidget(filter_widget, 0) - main_layout.addWidget(folder_widget, 1) - main_layout.addWidget(accept_button, 0) - - self.project_widget = project_widget - self.folder_widget = folder_widget - - project_widget.currentTextChanged.connect(self.on_project_changed) - filter_widget.textChanged.connect(folder_widget.set_name_filter) - folder_widget.double_clicked.connect(self.accept) - accept_button.clicked.connect(self.accept) - - def get_selected_folder_path(self) -> str: - return self.folder_widget.get_selected_folder_path() - - def get_selected_project_name(self) -> str: - return self.project_widget.currentText() - - def get_projects(self) -> List[str]: - projects = ayon_api.get_projects(fields=["name"]) - return [p["name"] for p in projects] - - def on_project_changed(self, project_name: str): - self.folder_widget.set_project_name(project_name) - - def set_project_name(self, project_name: str): - self.project_widget.setCurrentText(project_name) - - if self.project_widget.currentText() != project_name: - # Project does not exist - return - - # Force the set of widget because even though a callback exist on the - # project widget it may have been initialized to that value and hence - # detect no change. - self.folder_widget.set_project_name(project_name) - - -def select_folder_path(node): - """Show dialog to select folder path. - - When triggered it opens a dialog that shows the available - folder paths within a given project. - - Note: - This function should be refactored. - It currently shows the available - folder paths within the current project only. - - Args: - node (hou.OpNode): The HDA node. - """ - main_window = lib.get_main_window() - - project_name = node.evalParm("project_name") - folder_path = node.evalParm("folder_path") - - dialog = SelectFolderPathDialog(parent=main_window) - dialog.set_project_name(project_name) - if folder_path: - # We add a small delay to the setting of the selected folder - # because the folder widget's set project logic itself also runs - # with a bit of a delay, and unfortunately otherwise the project - # has not been selected yet and thus selection does not work. 
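[Editor's note: a minimal usage sketch of the dialog defined above; the project name is hypothetical.]

    dialog = SelectFolderPathDialog(parent=lib.get_main_window())
    dialog.set_project_name("demo")
    if dialog.exec_() == QtWidgets.QDialog.Accepted:
        print(dialog.get_selected_project_name(),
              dialog.get_selected_folder_path())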
- def _select_folder_path(): - dialog.folder_widget.set_selected_folder_path(folder_path) - QtCore.QTimer.singleShot(100, _select_folder_path) - - dialog.setStyleSheet(load_stylesheet()) - - result = dialog.exec_() - if result != QtWidgets.QDialog.Accepted: - return - - # Set project - selected_project_name = dialog.get_selected_project_name() - if selected_project_name == get_current_project_name(): - selected_project_name = '$AYON_PROJECT_NAME' - - project_parm = node.parm("project_name") - project_parm.set(selected_project_name) - project_parm.pressButton() # allow any callbacks to trigger - - # Set folder path - selected_folder_path = dialog.get_selected_folder_path() - if not selected_folder_path: - # Do nothing if user accepted with nothing selected - return - - if selected_folder_path == get_current_folder_path(): - selected_folder_path = '$AYON_FOLDER_PATH' - - folder_parm = node.parm("folder_path") - folder_parm.set(selected_folder_path) - folder_parm.pressButton() # allow any callbacks to trigger - - -def get_available_products(node): - """Return products menu items - It gets a list of available products of the specified product types - within the specified folder path with in the specified project. - Users can specify those in the HDA parameters. - - Args: - node (hou.OpNode): The HDA node. - - Returns: - list[str]: Product names for Products menu. - """ - project_name = node.evalParm("project_name") - folder_path = node.evalParm("folder_path") - product_type = node.evalParm("product_type") - - folder_entity = ayon_api.get_folder_by_path(project_name, - folder_path, - fields={"id"}) - if not folder_entity: - return [] - - products = ayon_api.get_products( - project_name, - folder_ids=[folder_entity["id"]], - product_types=[product_type] - ) - - return [product["name"] for product in products] - - -def set_to_latest_version(node): - """Callback on product name change - - Refresh version parameter value by setting its value to - the latest version of the selected product. - - Args: - node (hou.OpNode): The HDA node. 
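[Editor's note: assuming this helper backs a Python parm menu script, the flat token/label list that Houdini menus expect could be built like this; `product_menu_items` is a hypothetical wrapper name.]

    def product_menu_items(node):
        # Duplicate each name so it serves as both token and label,
        # the flat [token, label, ...] format Houdini menus expect.
        items = []
        for name in get_available_products(node):
            items.extend([name, name])
        return items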
- """ - - versions = get_available_versions(node) - if versions: - node.parm("version").set(str(versions[0])) diff --git a/server_addon/houdini/client/ayon_houdini/api/lib.py b/server_addon/houdini/client/ayon_houdini/api/lib.py deleted file mode 100644 index eec3995821..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/lib.py +++ /dev/null @@ -1,1365 +0,0 @@ -# -*- coding: utf-8 -*- -import sys -import os -import errno -import re -import logging -import json -from contextlib import contextmanager - -import six -import ayon_api - -from ayon_core.lib import StringTemplate -from ayon_core.settings import get_current_project_settings -from ayon_core.pipeline import ( - Anatomy, - get_current_project_name, - get_current_folder_path, - registered_host, - get_current_context, - get_current_host_name, -) -from ayon_core.pipeline.create import CreateContext -from ayon_core.pipeline.template_data import get_template_data -from ayon_core.pipeline.context_tools import get_current_folder_entity -from ayon_core.tools.utils import PopupUpdateKeys, SimplePopup -from ayon_core.tools.utils.host_tools import get_tool_by_name - -import hou - - -self = sys.modules[__name__] -self._parent = None -log = logging.getLogger(__name__) -JSON_PREFIX = "JSON:::" - - -def get_folder_fps(folder_entity=None): - """Return current folder fps.""" - - if folder_entity is None: - folder_entity = get_current_folder_entity(fields=["attrib.fps"]) - return folder_entity["attrib"]["fps"] - - -def get_output_parameter(node): - """Return the render output parameter of the given node - - Example: - root = hou.node("/obj") - my_alembic_node = root.createNode("alembic") - get_output_parameter(my_alembic_node) - >>> "filename" - - Notes: - I'm using node.type().name() to get on par with the creators, - Because the return value of `node.type().name()` is the - same string value used in creators - e.g. instance_data.update({"node_type": "alembic"}) - - Rop nodes in different network categories have - the same output parameter. - So, I took that into consideration as a hint for - future development. 
-
-    Args:
-        node(hou.Node): node instance
-
-    Returns:
-        hou.Parm
-    """
-
-    node_type = node.type().name()
-
-    # Figure out which type of node is being rendered
-    if node_type in {"alembic", "rop_alembic"}:
-        return node.parm("filename")
-    elif node_type == "arnold":
-        if node.evalParm("ar_ass_export_enable"):
-            return node.parm("ar_ass_file")
-        return node.parm("ar_picture")
-    elif node_type in {
-        "geometry",
-        "rop_geometry",
-        "filmboxfbx",
-        "rop_fbx"
-    }:
-        return node.parm("sopoutput")
-    elif node_type == "comp":
-        return node.parm("copoutput")
-    elif node_type in {"karma", "opengl"}:
-        return node.parm("picture")
-    elif node_type == "ifd":  # Mantra
-        if node.evalParm("soho_outputmode"):
-            return node.parm("soho_diskfile")
-        return node.parm("vm_picture")
-    elif node_type == "Redshift_Proxy_Output":
-        return node.parm("RS_archive_file")
-    elif node_type == "Redshift_ROP":
-        return node.parm("RS_outputFileNamePrefix")
-    elif node_type in {"usd", "usd_rop", "usdexport"}:
-        return node.parm("lopoutput")
-    elif node_type in {"usdrender", "usdrender_rop"}:
-        return node.parm("outputimage")
-    elif node_type == "vray_renderer":
-        return node.parm("SettingsOutput_img_file_path")
-
-    raise TypeError("Node type '%s' not supported" % node_type)
-
-
-def set_scene_fps(fps):
-    hou.setFps(fps)
-
-
-# Valid FPS
-def validate_fps():
-    """Validate current scene FPS and show pop-up when it is incorrect
-
-    Returns:
-        bool
-
-    """
-
-    fps = get_folder_fps()
-    current_fps = hou.fps()  # returns float
-
-    if current_fps != fps:
-
-        # Find main window
-        parent = hou.ui.mainQtWindow()
-        if parent is None:
-            pass
-        else:
-            dialog = PopupUpdateKeys(parent=parent)
-            dialog.setModal(True)
-            dialog.setWindowTitle("Houdini scene does not match project FPS")
-            dialog.set_message("Scene %i FPS does not match project %i FPS" %
-                               (current_fps, fps))
-            dialog.set_button_text("Fix")
-
-            # on_show is the Fix button clicked callback
-            dialog.on_clicked_state.connect(lambda: set_scene_fps(fps))
-
-            dialog.show()
-
-        return False
-
-    return True
-
-
-def render_rop(ropnode):
-    """Render ROP node utility for Publishing.
-
-    This renders a ROP node with the settings we want during Publishing.
-    """
-    # Print verbose when in batch mode without UI
-    verbose = not hou.isUIAvailable()
-
-    # Render
-    try:
-        ropnode.render(verbose=verbose,
-                       # Allow Deadline to capture completion percentage
-                       output_progress=verbose,
-                       # Render only this node
-                       # (do not render any of its dependencies)
-                       ignore_inputs=True)
-    except hou.Error as exc:
-        # The hou.Error is not inherited from a Python Exception class,
-        # so we explicitly capture the houdini error, otherwise pyblish
-        # will remain hanging.
-        import traceback
-        traceback.print_exc()
-        raise RuntimeError("Render failed: {0}".format(exc))
-
-
-def imprint(node, data, update=False):
-    """Store attributes with value on a node
-
-    Depending on the type of attribute it creates the correct parameter
-    template. Houdini uses a template per type, see the docs for more
-    information.
-
-    http://www.sidefx.com/docs/houdini/hom/hou/ParmTemplate.html
-
-    Because of some update glitch where you cannot overwrite existing
-    ParmTemplates on node using:
-    `setParmTemplates()` and `parmTuplesInFolder()`
-    update is done in another pass.
-
-    Args:
-        node(hou.Node): node object from Houdini
-        data(dict): collection of attributes and their value
-        update (bool, optional): flag if imprint should update
-            already existing data or leave them untouched and only
-            add new.
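[Editor's note: a hedged sketch of how `get_output_parameter` and `render_rop` above combine when publishing a single ROP; the node path is hypothetical.]

    ropnode = hou.node("/out/geometry1")         # hypothetical geometry ROP
    output_parm = get_output_parameter(ropnode)  # its "sopoutput" parm
    output_parm.set("$HIP/geo/publish.bgeo.sc")
    render_rop(ropnode)  # renders only this node, ignoring its inputs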
- - Returns: - None - - """ - if not data: - return - if not node: - self.log.error("Node is not set, calling imprint on invalid data.") - return - - current_parms = {p.name(): p for p in node.spareParms()} - update_parm_templates = [] - new_parm_templates = [] - - for key, value in data.items(): - if value is None: - continue - - parm_template = get_template_from_value(key, value) - - if key in current_parms: - if node.evalParm(key) == value: - continue - if not update: - log.debug(f"{key} already exists on {node}") - else: - log.debug(f"replacing {key}") - update_parm_templates.append(parm_template) - continue - - new_parm_templates.append(parm_template) - - if not new_parm_templates and not update_parm_templates: - return - - parm_group = node.parmTemplateGroup() - - # Add new parm templates - if new_parm_templates: - parm_folder = parm_group.findFolder("Extra") - - # if folder doesn't exist yet, create one and append to it, - # else append to existing one - if not parm_folder: - parm_folder = hou.FolderParmTemplate("folder", "Extra") - parm_folder.setParmTemplates(new_parm_templates) - parm_group.append(parm_folder) - else: - # Add to parm template folder instance then replace with updated - # one in parm template group - for template in new_parm_templates: - parm_folder.addParmTemplate(template) - parm_group.replace(parm_folder.name(), parm_folder) - - # Update existing parm templates - for parm_template in update_parm_templates: - parm_group.replace(parm_template.name(), parm_template) - - # When replacing a parm with a parm of the same name it preserves its - # value if before the replacement the parm was not at the default, - # because it has a value override set. Since we're trying to update the - # parm by using the new value as `default` we enforce the parm is at - # default state - node.parm(parm_template.name()).revertToDefaults() - - node.setParmTemplateGroup(parm_group) - - -def lsattr(attr, value=None, root="/"): - """Return nodes that have `attr` - When `value` is not None it will only return nodes matching that value - for the given attribute. - Args: - attr (str): Name of the attribute (hou.Parm) - value (object, Optional): The value to compare the attribute too. - When the default None is provided the value check is skipped. - root (str): The root path in Houdini to search in. - Returns: - list: Matching nodes that have attribute with value. - """ - if value is None: - # Use allSubChildren() as allNodes() errors on nodes without - # permission to enter without a means to continue of querying - # the rest - nodes = hou.node(root).allSubChildren() - return [n for n in nodes if n.parm(attr)] - return lsattrs({attr: value}) - - -def lsattrs(attrs, root="/"): - """Return nodes matching `key` and `value` - Arguments: - attrs (dict): collection of attribute: value - root (str): The root path in Houdini to search in. - Example: - >> lsattrs({"id": "myId"}) - ["myNode"] - >> lsattr("id") - ["myNode", "myOtherNode"] - Returns: - list: Matching nodes that have attribute with value. 
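[Editor's note: a small round-trip sketch: `imprint` stores plain values as matching parm templates and JSON-encodes containers with JSON_PREFIX, which `read` (defined below) decodes again.]

    node = hou.node("/out").createNode("geometry")  # any node works
    imprint(node, {"id": "ayon.create.instance",
                   "families": ["pointcache"]})
    print(read(node)["families"])  # -> ["pointcache"]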
- """ - - matches = set() - # Use allSubChildren() as allNodes() errors on nodes without - # permission to enter without a means to continue of querying - # the rest - nodes = hou.node(root).allSubChildren() - for node in nodes: - for attr in attrs: - if not node.parm(attr): - continue - elif node.evalParm(attr) != attrs[attr]: - continue - else: - matches.add(node) - - return list(matches) - - -def read(node): - """Read the container data in to a dict - - Args: - node(hou.Node): Houdini node - - Returns: - dict - - """ - # `spareParms` returns a tuple of hou.Parm objects - data = {} - if not node: - return data - for parameter in node.spareParms(): - value = parameter.eval() - # test if value is json encoded dict - if isinstance(value, six.string_types) and \ - value.startswith(JSON_PREFIX): - try: - value = json.loads(value[len(JSON_PREFIX):]) - except json.JSONDecodeError: - # not a json - pass - data[parameter.name()] = value - - return data - - -@contextmanager -def maintained_selection(): - """Maintain selection during context - Example: - >>> with maintained_selection(): - ... # Modify selection - ... node.setSelected(on=False, clear_all_selected=True) - >>> # Selection restored - """ - - previous_selection = hou.selectedNodes() - try: - yield - finally: - # Clear the selection - # todo: does hou.clearAllSelected() do the same? - for node in hou.selectedNodes(): - node.setSelected(on=False) - - if previous_selection: - for node in previous_selection: - node.setSelected(on=True) - - -@contextmanager -def parm_values(overrides): - """Override Parameter values during the context. - Arguments: - overrides (List[Tuple[hou.Parm, Any]]): The overrides per parm - that should be applied during context. - """ - - originals = [] - try: - for parm, value in overrides: - originals.append((parm, parm.eval())) - parm.set(value) - yield - finally: - for parm, value in originals: - # Parameter might not exist anymore so first - # check whether it's still valid - if hou.parm(parm.path()): - parm.set(value) - - -def reset_framerange(fps=True, frame_range=True): - """Set frame range and FPS to current folder.""" - - project_name = get_current_project_name() - folder_path = get_current_folder_path() - - folder_entity = ayon_api.get_folder_by_path(project_name, folder_path) - folder_attributes = folder_entity["attrib"] - - # Set FPS - if fps: - fps = get_folder_fps(folder_entity) - print("Setting scene FPS to {}".format(int(fps))) - set_scene_fps(fps) - - if frame_range: - - # Set Start and End Frames - frame_start = folder_attributes.get("frameStart") - frame_end = folder_attributes.get("frameEnd") - - if frame_start is None or frame_end is None: - log.warning("No edit information found for '%s'", folder_path) - return - - handle_start = folder_attributes.get("handleStart", 0) - handle_end = folder_attributes.get("handleEnd", 0) - - frame_start -= int(handle_start) - frame_end += int(handle_end) - - # Set frame range and FPS - hou.playbar.setFrameRange(frame_start, frame_end) - hou.playbar.setPlaybackRange(frame_start, frame_end) - hou.setFrame(frame_start) - - -def get_main_window(): - """Acquire Houdini's main window""" - if self._parent is None: - self._parent = hou.ui.mainQtWindow() - return self._parent - - -def get_template_from_value(key, value): - if isinstance(value, float): - parm = hou.FloatParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, bool): - parm = hou.ToggleParmTemplate(name=key, - label=key, - default_value=value) - elif 
isinstance(value, int): - parm = hou.IntParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, six.string_types): - parm = hou.StringParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, (dict, list, tuple)): - parm = hou.StringParmTemplate(name=key, - label=key, - num_components=1, - default_value=( - JSON_PREFIX + json.dumps(value),)) - else: - raise TypeError("Unsupported type: %r" % type(value)) - - return parm - - -def get_frame_data(node, log=None): - """Get the frame data: `frameStartHandle`, `frameEndHandle` - and `byFrameStep`. - - This function uses Houdini node's `trange`, `t1, `t2` and `t3` - parameters as the source of truth for the full inclusive frame - range to render, as such these are considered as the frame - range including the handles. - - The non-inclusive frame start and frame end without handles - can be computed by subtracting the handles from the inclusive - frame range. - - Args: - node (hou.Node): ROP node to retrieve frame range from, - the frame range is assumed to be the frame range - *including* the start and end handles. - - Returns: - dict: frame data for `frameStartHandle`, `frameEndHandle` - and `byFrameStep`. - - """ - - if log is None: - log = self.log - - data = {} - - if node.parm("trange") is None: - log.debug( - "Node has no 'trange' parameter: {}".format(node.path()) - ) - return data - - if node.evalParm("trange") == 0: - data["frameStartHandle"] = hou.intFrame() - data["frameEndHandle"] = hou.intFrame() - data["byFrameStep"] = 1.0 - - log.info( - "Node '{}' has 'Render current frame' set.\n" - "Folder Handles are ignored.\n" - "frameStart and frameEnd are set to the " - "current frame.".format(node.path()) - ) - else: - data["frameStartHandle"] = int(node.evalParm("f1")) - data["frameEndHandle"] = int(node.evalParm("f2")) - data["byFrameStep"] = node.evalParm("f3") - - return data - - -def splitext(name, allowed_multidot_extensions): - # type: (str, list) -> tuple - """Split file name to name and extension. - - Args: - name (str): File name to split. - allowed_multidot_extensions (list of str): List of allowed multidot - extensions. - - Returns: - tuple: Name and extension. - """ - - for ext in allowed_multidot_extensions: - if name.endswith(ext): - return name[:-len(ext)], ext - - return os.path.splitext(name) - - -def get_top_referenced_parm(parm): - - processed = set() # disallow infinite loop - while True: - if parm.path() in processed: - raise RuntimeError("Parameter references result in cycle.") - - processed.add(parm.path()) - - ref = parm.getReferencedParm() - if ref.path() == parm.path(): - # It returns itself when it doesn't reference - # another parameter - return ref - else: - parm = ref - - -def evalParmNoFrame(node, parm, pad_character="#"): - - parameter = node.parm(parm) - assert parameter, "Parameter does not exist: %s.%s" % (node, parm) - - # If the parameter has a parameter reference, then get that - # parameter instead as otherwise `unexpandedString()` fails. 
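[Editor's note: a worked example of `get_frame_data` above, for a hypothetical ROP with `trange=1`, `f1=1001`, `f2=1100` and `f3=2`.]

    data = get_frame_data(hou.node("/out/geometry1"))
    # -> {"frameStartHandle": 1001,
    #     "frameEndHandle": 1100,
    #     "byFrameStep": 2.0}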
- parameter = get_top_referenced_parm(parameter) - - # Substitute out the frame numbering with padded characters - try: - raw = parameter.unexpandedString() - except hou.Error as exc: - print("Failed: %s" % parameter) - raise RuntimeError(exc) - - def replace(match): - padding = 1 - n = match.group(2) - if n and int(n): - padding = int(n) - return pad_character * padding - - expression = re.sub(r"(\$F([0-9]*))", replace, raw) - - with hou.ScriptEvalContext(parameter): - return hou.expandStringAtFrame(expression, 0) - - -def get_color_management_preferences(): - """Get default OCIO preferences""" - return { - "config": hou.Color.ocio_configPath(), - "display": hou.Color.ocio_defaultDisplay(), - "view": hou.Color.ocio_defaultView() - } - - -def get_obj_node_output(obj_node): - """Find output node. - - If the node has any output node return the - output node with the minimum `outputidx`. - When no output is present return the node - with the display flag set. If no output node is - detected then None is returned. - - Arguments: - node (hou.Node): The node to retrieve a single - the output node for. - - Returns: - Optional[hou.Node]: The child output node. - - """ - - outputs = obj_node.subnetOutputs() - if not outputs: - return - - elif len(outputs) == 1: - return outputs[0] - - else: - return min(outputs, - key=lambda node: node.evalParm('outputidx')) - - -def get_output_children(output_node, include_sops=True): - """Recursively return a list of all output nodes - contained in this node including this node. - - It works in a similar manner to output_node.allNodes(). - """ - out_list = [output_node] - - if output_node.childTypeCategory() == hou.objNodeTypeCategory(): - for child in output_node.children(): - out_list += get_output_children(child, include_sops=include_sops) - - elif include_sops and \ - output_node.childTypeCategory() == hou.sopNodeTypeCategory(): - out = get_obj_node_output(output_node) - if out: - out_list += [out] - - return out_list - - -def get_resolution_from_folder(folder_entity): - """Get resolution from the given folder entity. - - Args: - folder_entity (dict[str, Any]): Folder entity. - - Returns: - Union[Tuple[int, int], None]: Resolution width and height. - - """ - if not folder_entity or "attrib" not in folder_entity: - print("Entered folder is not valid. \"{}\"".format( - str(folder_entity) - )) - return None - - folder_attributes = folder_entity["attrib"] - resolution_width = folder_attributes.get("resolutionWidth") - resolution_height = folder_attributes.get("resolutionHeight") - - # Make sure both width and height are set - if resolution_width is None or resolution_height is None: - print("No resolution information found for '{}'".format( - folder_entity["path"] - )) - return None - - return int(resolution_width), int(resolution_height) - - -def set_camera_resolution(camera, folder_entity=None): - """Apply resolution to camera from folder entity of the publish""" - - if not folder_entity: - folder_entity = get_current_folder_entity() - - resolution = get_resolution_from_folder(folder_entity) - - if resolution: - print("Setting camera resolution: {} -> {}x{}".format( - camera.name(), resolution[0], resolution[1] - )) - camera.parm("resx").set(resolution[0]) - camera.parm("resy").set(resolution[1]) - - -def get_camera_from_container(container): - """Get camera from container node. 
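[Editor's note: as an example of the padding substitution in `evalParmNoFrame`, a Mantra ROP whose `vm_picture` is set to a hypothetical "$HIP/render/$OS.$F4.exr" resolves with the frame token padded out.]

    evalParmNoFrame(rop, "vm_picture")
    # -> e.g. "/path/to/hip/render/mantra1.####.exr"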
""" - - cameras = container.recursiveGlob( - "*", - filter=hou.nodeTypeFilter.ObjCamera, - include_subnets=False - ) - - assert len(cameras) == 1, "Camera instance must have only one camera" - return cameras[0] - - -def get_current_context_template_data_with_folder_attrs(): - """ - - Output contains 'folderAttributes' key with folder attribute values. - - Returns: - dict[str, Any]: Template data to fill templates. - - """ - context = get_current_context() - project_name = context["project_name"] - folder_path = context["folder_path"] - task_name = context["task_name"] - host_name = get_current_host_name() - - project_entity = ayon_api.get_project(project_name) - anatomy = Anatomy(project_name, project_entity=project_entity) - folder_entity = ayon_api.get_folder_by_path(project_name, folder_path) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - - # get context specific vars - folder_attributes = folder_entity["attrib"] - - # compute `frameStartHandle` and `frameEndHandle` - frame_start = folder_attributes.get("frameStart") - frame_end = folder_attributes.get("frameEnd") - handle_start = folder_attributes.get("handleStart") - handle_end = folder_attributes.get("handleEnd") - if frame_start is not None and handle_start is not None: - folder_attributes["frameStartHandle"] = frame_start - handle_start - - if frame_end is not None and handle_end is not None: - folder_attributes["frameEndHandle"] = frame_end + handle_end - - template_data = get_template_data( - project_entity, folder_entity, task_entity, host_name - ) - template_data["root"] = anatomy.roots - template_data["folderAttributes"] = folder_attributes - - return template_data - - -def set_review_color_space(opengl_node, review_color_space="", log=None): - """Set ociocolorspace parameter for the given OpenGL node. - - Set `ociocolorspace` parameter of the given OpenGl node - to to the given review_color_space value. - If review_color_space is empty, a default colorspace corresponding to - the display & view of the current Houdini session will be used. - - Args: - opengl_node (hou.Node): ROP node to set its ociocolorspace parm. - review_color_space (str): Colorspace value for ociocolorspace parm. - log (logging.Logger): Logger to log to. 
- """ - - if log is None: - log = self.log - - # Set Color Correction parameter to OpenColorIO - colorcorrect_parm = opengl_node.parm("colorcorrect") - if colorcorrect_parm.eval() != 2: - colorcorrect_parm.set(2) - log.debug( - "'Color Correction' parm on '{}' has been set to" - " 'OpenColorIO'".format(opengl_node.path()) - ) - - opengl_node.setParms( - {"ociocolorspace": review_color_space} - ) - - log.debug( - "'OCIO Colorspace' parm on '{}' has been set to " - "the view color space '{}'" - .format(opengl_node, review_color_space) - ) - - -def get_context_var_changes(): - """get context var changes.""" - - houdini_vars_to_update = {} - - project_settings = get_current_project_settings() - houdini_vars_settings = \ - project_settings["houdini"]["general"]["update_houdini_var_context"] - - if not houdini_vars_settings["enabled"]: - return houdini_vars_to_update - - houdini_vars = houdini_vars_settings["houdini_vars"] - - # No vars specified - nothing to do - if not houdini_vars: - return houdini_vars_to_update - - # Get Template data - template_data = get_current_context_template_data_with_folder_attrs() - - # Set Houdini Vars - for item in houdini_vars: - # For consistency reasons we always force all vars to be uppercase - # Also remove any leading, and trailing whitespaces. - var = item["var"].strip().upper() - - # get and resolve template in value - item_value = StringTemplate.format_template( - item["value"], - template_data - ) - - if var == "JOB" and item_value == "": - # sync $JOB to $HIP if $JOB is empty - item_value = os.environ["HIP"] - - if item["is_directory"]: - item_value = item_value.replace("\\", "/") - - current_value = hou.hscript("echo -n `${}`".format(var))[0] - - if current_value != item_value: - houdini_vars_to_update[var] = ( - current_value, item_value, item["is_directory"] - ) - - return houdini_vars_to_update - - -def update_houdini_vars_context(): - """Update folder context variables""" - - for var, (_old, new, is_directory) in get_context_var_changes().items(): - if is_directory: - try: - os.makedirs(new) - except OSError as e: - if e.errno != errno.EEXIST: - print( - "Failed to create ${} dir. Maybe due to " - "insufficient permissions.".format(var) - ) - - hou.hscript("set {}={}".format(var, new)) - os.environ[var] = new - print("Updated ${} to {}".format(var, new)) - - -def update_houdini_vars_context_dialog(): - """Show pop-up to update folder context variables""" - update_vars = get_context_var_changes() - if not update_vars: - # Nothing to change - print("Nothing to change, Houdini vars are already up to date.") - return - - message = "\n".join( - "${}: {} -> {}".format(var, old or "None", new or "None") - for var, (old, new, _is_directory) in update_vars.items() - ) - - # TODO: Use better UI! - parent = hou.ui.mainQtWindow() - dialog = SimplePopup(parent=parent) - dialog.setModal(True) - dialog.setWindowTitle("Houdini scene has outdated folder variables") - dialog.set_message(message) - dialog.set_button_text("Fix") - - # on_show is the Fix button clicked callback - dialog.on_clicked.connect(update_houdini_vars_context) - - dialog.show() - - -def publisher_show_and_publish(comment=None): - """Open publisher window and trigger publishing action. - - Args: - comment (Optional[str]): Comment to set in publisher window. 
- """ - - main_window = get_main_window() - publisher_window = get_tool_by_name( - tool_name="publisher", - parent=main_window, - ) - publisher_window.show_and_publish(comment) - - -def find_rop_input_dependencies(input_tuple): - """Self publish from ROP nodes. - - Arguments: - tuple (hou.RopNode.inputDependencies) which can be a nested tuples - represents the input dependencies of the ROP node, consisting of ROPs, - and the frames that need to be be rendered prior to rendering the ROP. - - Returns: - list of the RopNode.path() that can be found inside - the input tuple. - """ - - out_list = [] - if isinstance(input_tuple[0], hou.RopNode): - return input_tuple[0].path() - - if isinstance(input_tuple[0], tuple): - for item in input_tuple: - out_list.append(find_rop_input_dependencies(item)) - - return out_list - - -def self_publish(): - """Self publish from ROP nodes. - - Firstly, it gets the node and its dependencies. - Then, it deactivates all other ROPs - And finally, it triggers the publishing action. - """ - - result, comment = hou.ui.readInput( - "Add Publish Comment", - buttons=("Publish", "Cancel"), - title="Publish comment", - close_choice=1 - ) - - if result: - return - - current_node = hou.node(".") - inputs_paths = find_rop_input_dependencies( - current_node.inputDependencies() - ) - inputs_paths.append(current_node.path()) - - host = registered_host() - context = CreateContext(host, reset=True) - - for instance in context.instances: - node_path = instance.data.get("instance_node") - instance["active"] = node_path and node_path in inputs_paths - - context.save_changes() - - publisher_show_and_publish(comment) - - -def add_self_publish_button(node): - """Adds a self publish button to the rop node.""" - - label = os.environ.get("AYON_MENU_LABEL") or "AYON" - - button_parm = hou.ButtonParmTemplate( - "ayon_self_publish", - "{} Publish".format(label), - script_callback="from ayon_houdini.api.lib import " - "self_publish; self_publish()", - script_callback_language=hou.scriptLanguage.Python, - join_with_next=True - ) - - template = node.parmTemplateGroup() - template.insertBefore((0,), button_parm) - node.setParmTemplateGroup(template) - - -def get_scene_viewer(visible_only=True): - """ - Return an instance of a visible viewport. - - There may be many, some could be closed, any visible are current - - Arguments: - visible_only (Optional[bool]): Only return viewers that currently - are the active tab (and hence are visible). - - Returns: - Optional[hou.SceneViewer]: A scene viewer, if any. - """ - panes = hou.ui.paneTabs() - panes = [x for x in panes if x.type() == hou.paneTabType.SceneViewer] - - if visible_only: - return next((pane for pane in panes if pane.isCurrentTab()), None) - - panes = sorted(panes, key=lambda x: x.isCurrentTab()) - if panes: - return panes[-1] - - return None - - -def sceneview_snapshot( - sceneview, - filepath="$HIP/thumbnails/$HIPNAME.$F4.jpg", - frame_start=None, - frame_end=None): - """Take a snapshot of your scene view. - - It takes snapshot of your scene view for the given frame range. - So, it's capable of generating snapshots image sequence. - It works in different Houdini context e.g. Objects, Solaris - - Example:: - >>> from ayon_houdini.api import lib - >>> sceneview = hou.ui.paneTabOfType(hou.paneTabType.SceneViewer) - >>> lib.sceneview_snapshot(sceneview) - - Notes: - .png output will render poorly, so use .jpg. 
- - How it works: - Get the current sceneviewer (may be more than one or hidden) - and screengrab the perspective viewport to a file in the - publish location to be picked up with the publish. - - Credits: - https://www.sidefx.com/forum/topic/42808/?page=1#post-354796 - - Args: - sceneview (hou.SceneViewer): The scene view pane from which you want - to take a snapshot. - filepath (str): thumbnail filepath. it expects `$F4` token - when frame_end is bigger than frame_star other wise - each frame will override its predecessor. - frame_start (int): the frame at which snapshot starts - frame_end (int): the frame at which snapshot ends - """ - - if frame_start is None: - frame_start = hou.frame() - if frame_end is None: - frame_end = frame_start - - if not isinstance(sceneview, hou.SceneViewer): - log.debug("Wrong Input. {} is not of type hou.SceneViewer." - .format(sceneview)) - return - viewport = sceneview.curViewport() - - flip_settings = sceneview.flipbookSettings().stash() - flip_settings.frameRange((frame_start, frame_end)) - flip_settings.output(filepath) - flip_settings.outputToMPlay(False) - sceneview.flipbook(viewport, flip_settings) - log.debug("A snapshot of sceneview has been saved to: {}".format(filepath)) - - -def get_background_images(node, raw=False): - """"Return background images defined inside node. - - Similar to `nodegraphutils.saveBackgroundImages` but this method also - allows to retrieve the data as JSON encodable data instead of - `hou.NetworkImage` instances when using `raw=True` - """ - - def _parse(image_data): - image = hou.NetworkImage(image_data["path"], - hou.BoundingRect(*image_data["rect"])) - if "relativetopath" in image_data: - image.setRelativeToPath(image_data["relativetopath"]) - if "brightness" in image_data: - image.setBrightness(image_data["brightness"]) - return image - - data = node.userData("backgroundimages") - if not data: - return [] - - try: - images = json.loads(data) - except json.decoder.JSONDecodeError: - images = [] - - if not raw: - images = [_parse(_data) for _data in images] - return images - - -def set_background_images(node, images): - """Set hou.NetworkImage background images under given hou.Node - - Similar to: `nodegraphutils.loadBackgroundImages` - - """ - - def _serialize(image): - """Return hou.NetworkImage as serialized dict""" - if isinstance(image, dict): - # Assume already serialized, only do some minor validations - if "path" not in image: - raise ValueError("Missing `path` key in image dictionary.") - if "rect" not in image: - raise ValueError("Missing `rect` key in image dictionary.") - if len(image["rect"]) != 4: - raise ValueError("`rect` value must be list of four floats.") - return image - - rect = image.rect() - rect_min = rect.min() - rect_max = rect.max() - data = { - "path": image.path(), - "rect": [rect_min.x(), rect_min.y(), rect_max.x(), rect_max.y()], - } - if image.brightness() != 1.0: - data["brightness"] = image.brightness() - if image.relativeToPath(): - data["relativetopath"] = image.relativeToPath() - return data - - with hou.undos.group('Edit Background Images'): - if images: - assert all(isinstance(image, (dict, hou.NetworkImage)) - for image in images) - data = json.dumps([_serialize(image) for image in images]) - node.setUserData("backgroundimages", data) - else: - node.destroyUserData("backgroundimages", must_exist=False) - - -def set_node_thumbnail(node, image_path, rect=None): - """Set hou.NetworkImage attached to node. 
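[Editor's note: the serialized shape that `get_background_images`/`set_background_images` round-trip through the node's "backgroundimages" user data looks roughly like this; paths are hypothetical, and "brightness"/"relativetopath" are only written when set.]

    images = [{
        "path": "$HIP/.thumbnails/1a2b3c4d_thumbnail.jpg",
        "rect": [-0.5, 0.0, 0.5, 0.5625],     # xmin, ymin, xmax, ymax
        "relativetopath": "/obj/ayon_load1",  # image follows this node
        "brightness": 0.3,                    # e.g. dimmed for a bypassed node
    }]
    set_background_images(node.parent(), images)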
-
-    If an existing connected image is found it assumes that is the existing
-    thumbnail and will update that particular instance instead.
-
-    When `image_path` is None an existing attached `hou.NetworkImage` will be
-    removed.
-
-    Arguments:
-        node (hou.Node): Node to set thumbnail for.
-        image_path (Union[str, None]): Path to image to set.
-            If None is set then the thumbnail will be removed if it exists.
-        rect (hou.BoundingRect): Bounding rect for the relative placement
-            to the node.
-
-    Returns:
-        hou.NetworkImage or None: The network image that was set, or None
-            if it was removed or not set.
-
-    """
-
-    parent = node.parent()
-    images = get_background_images(parent)
-
-    node_path = node.path()
-    # Find first existing image attached to node
-    index, image = next(
-        (
-            (index, image) for index, image in enumerate(images) if
-            image.relativeToPath() == node_path
-        ),
-        (None, None)
-    )
-    if image_path is None:
-        # Remove image if it exists
-        if image:
-            images.remove(image)
-            set_background_images(parent, images)
-        return
-
-    if rect is None:
-        rect = hou.BoundingRect(-1, -1, 1, 1)
-
-    if isinstance(image_path, hou.NetworkImage):
-        image = image_path
-        if index is not None:
-            images[index] = image
-        else:
-            images.append(image)
-    elif image is None:
-        # Create the image
-        image = hou.NetworkImage(image_path, rect)
-        image.setRelativeToPath(node.path())
-        images.append(image)
-    else:
-        # Update first existing image
-        image.setRect(rect)
-        image.setPath(image_path)
-
-    set_background_images(parent, images)
-
-    return image
-
-
-def remove_all_thumbnails(node):
-    """Remove all node thumbnails.
-
-    Removes all network background images that are linked to the given node.
-    """
-    parent = node.parent()
-    images = get_background_images(parent)
-    node_path = node.path()
-    images = [
-        image for image in images if image.relativeToPath() != node_path
-    ]
-    set_background_images(parent, images)
-
-
-def get_node_thumbnail(node, first_only=True):
-    """Return node thumbnails.
-
-    Return network background images that are linked to the given node.
-    By default, only the first one found is returned, unless `first_only`
-    is False.
-
-    Returns:
-        Union[hou.NetworkImage, List[hou.NetworkImage]]:
-            Connected network images.
-
-    """
-    parent = node.parent()
-    images = get_background_images(parent)
-    node_path = node.path()
-
-    def is_attached_to_node(image):
-        return image.relativeToPath() == node_path
-
-    attached_images = filter(is_attached_to_node, images)
-
-    # Find first existing image attached to node
-    if first_only:
-        return next(attached_images, None)
-
-    # Return all attached images as a list to match the documented type
-    return list(attached_images)
-
-
-def find_active_network(category, default):
-    """Find the first active network editor in the UI.
-
-    If no active network editor pane of the given category is found then
-    the `default` path will be used as fallback.
-
-    For example, to find an active LOPs network:
-        >>> network = find_active_network(
-        ...     category=hou.lopNodeTypeCategory(),
-        ...     default="/stage"
-        ... )
-        hou.Node("/stage/lopnet1")
-
-    Arguments:
-        category (hou.NodeTypeCategory): The node network category type.
-        default (str): The default path to fall back to if no active pane
-            is found with the given category.
-
-    Returns:
-        hou.Node: The matching network node, or the `default` fallback node.
- - """ - # Find network editors that are current tab of given category - index = 0 - while True: - pane = hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor, index) - if pane is None: - break - - index += 1 - if not pane.isCurrentTab(): - continue - - pwd = pane.pwd() - if pwd.type().category() != category: - continue - - if not pwd.isEditable(): - continue - - return pwd - - # Default to the fallback if no valid candidate was found - return hou.node(default) - - -def update_content_on_context_change(): - """Update all Creator instances to current asset""" - host = registered_host() - context = host.get_current_context() - - folder_path = context["folder_path"] - task = context["task_name"] - - create_context = CreateContext(host, reset=True) - - for instance in create_context.instances: - instance_folder_path = instance.get("folderPath") - if instance_folder_path and instance_folder_path != folder_path: - instance["folderPath"] = folder_path - instance_task = instance.get("task") - if instance_task and instance_task != task: - instance["task"] = task - - create_context.save_changes() - - -def prompt_reset_context(): - """Prompt the user what context settings to reset. - This prompt is used on saving to a different task to allow the scene to - get matched to the new context. - """ - # TODO: Cleanup this prototyped mess of imports and odd dialog - from ayon_core.tools.attribute_defs.dialog import ( - AttributeDefinitionsDialog - ) - from ayon_core.style import load_stylesheet - from ayon_core.lib import BoolDef, UILabelDef - - definitions = [ - UILabelDef( - label=( - "You are saving your workfile into a different folder or task." - "\n\n" - "Would you like to update some settings to the new context?\n" - ) - ), - BoolDef( - "fps", - label="FPS", - tooltip="Reset workfile FPS", - default=True - ), - BoolDef( - "frame_range", - label="Frame Range", - tooltip="Reset workfile start and end frame ranges", - default=True - ), - BoolDef( - "instances", - label="Publish instances", - tooltip="Update all publish instance's folder and task to match " - "the new folder and task", - default=True - ), - ] - - dialog = AttributeDefinitionsDialog(definitions) - dialog.setWindowTitle("Saving to different context.") - dialog.setStyleSheet(load_stylesheet()) - if not dialog.exec_(): - return None - - options = dialog.get_values() - if options["fps"] or options["frame_range"]: - reset_framerange( - fps=options["fps"], - frame_range=options["frame_range"] - ) - - if options["instances"]: - update_content_on_context_change() - - dialog.deleteLater() diff --git a/server_addon/houdini/client/ayon_houdini/api/pipeline.py b/server_addon/houdini/client/ayon_houdini/api/pipeline.py deleted file mode 100644 index c6fbbd5b62..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/pipeline.py +++ /dev/null @@ -1,449 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pipeline tools for OpenPype Houdini integration.""" -import os -import json -import logging - -import hou # noqa - -from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -from ayon_core.tools.utils import host_tools -import pyblish.api - -from ayon_core.pipeline import ( - register_creator_plugin_path, - register_loader_plugin_path, - register_inventory_action_path, - AVALON_CONTAINER_ID, - AYON_CONTAINER_ID, -) -from ayon_core.pipeline.load import any_outdated_containers -from ayon_houdini import HOUDINI_HOST_DIR -from ayon_houdini.api import lib, shelves, creator_node_shelves - -from ayon_core.lib import ( - register_event_callback, - 
emit_event, - env_value_to_bool, -) - -from .lib import JSON_PREFIX - - -log = logging.getLogger("ayon_houdini") - -AVALON_CONTAINERS = "/obj/AVALON_CONTAINERS" -CONTEXT_CONTAINER = "/obj/OpenPypeContext" -IS_HEADLESS = not hasattr(hou, "ui") - -PLUGINS_DIR = os.path.join(HOUDINI_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - -# Track whether the workfile tool is about to save -_about_to_save = False - - -class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "houdini" - - def __init__(self): - super(HoudiniHost, self).__init__() - self._op_events = {} - self._has_been_setup = False - - def install(self): - pyblish.api.register_host("houdini") - pyblish.api.register_host("hython") - pyblish.api.register_host("hpython") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - - log.info("Installing callbacks ... ") - # register_event_callback("init", on_init) - self._register_callbacks() - register_event_callback("workfile.save.before", before_workfile_save) - register_event_callback("before.save", before_save) - register_event_callback("save", on_save) - register_event_callback("open", on_open) - register_event_callback("new", on_new) - register_event_callback("taskChanged", on_task_changed) - - self._has_been_setup = True - - # Set folder settings for the empty scene directly after launch of - # Houdini so it initializes into the correct scene FPS, - # Frame Range, etc. - # TODO: make sure this doesn't trigger when - # opening with last workfile. - _set_context_settings() - - if not IS_HEADLESS: - import hdefereval # noqa, hdefereval is only available in ui mode - # Defer generation of shelves due to issue on Windows where shelf - # initialization during start up delays Houdini UI by minutes - # making it extremely slow to launch. - hdefereval.executeDeferred(shelves.generate_shelves) - hdefereval.executeDeferred(creator_node_shelves.install) - if env_value_to_bool("AYON_WORKFILE_TOOL_ON_START"): - hdefereval.executeDeferred(lambda: host_tools.show_workfiles(parent=hou.qt.mainWindow())) - - def workfile_has_unsaved_changes(self): - return hou.hipFile.hasUnsavedChanges() - - def get_workfile_extensions(self): - return [".hip", ".hiplc", ".hipnc"] - - def save_workfile(self, dst_path=None): - # Force forwards slashes to avoid segfault - if dst_path: - dst_path = dst_path.replace("\\", "/") - hou.hipFile.save(file_name=dst_path, - save_to_recent_files=True) - return dst_path - - def open_workfile(self, filepath): - # Force forwards slashes to avoid segfault - filepath = filepath.replace("\\", "/") - - hou.hipFile.load(filepath, - suppress_save_prompt=True, - ignore_load_warnings=False) - - return filepath - - def get_current_workfile(self): - current_filepath = hou.hipFile.path() - if (os.path.basename(current_filepath) == "untitled.hip" and - not os.path.exists(current_filepath)): - # By default a new scene in houdini is saved in the current - # working directory as "untitled.hip" so we need to capture - # that and consider it 'not saved' when it's in that state. 
- return None - - return current_filepath - - def get_containers(self): - return ls() - - def _register_callbacks(self): - for event in self._op_events.copy().values(): - if event is None: - continue - - try: - hou.hipFile.removeEventCallback(event) - except RuntimeError as e: - log.info(e) - - self._op_events[on_file_event_callback] = hou.hipFile.addEventCallback( - on_file_event_callback - ) - - @staticmethod - def create_context_node(): - """Helper for creating context holding node. - - Returns: - hou.Node: context node - - """ - obj_network = hou.node("/obj") - op_ctx = obj_network.createNode("subnet", - node_name="OpenPypeContext", - run_init_scripts=False, - load_contents=False) - - op_ctx.moveToGoodPosition() - op_ctx.setBuiltExplicitly(False) - op_ctx.setCreatorState("OpenPype") - op_ctx.setComment("OpenPype node to hold context metadata") - op_ctx.setColor(hou.Color((0.081, 0.798, 0.810))) - op_ctx.setDisplayFlag(False) - op_ctx.hide(True) - return op_ctx - - def update_context_data(self, data, changes): - op_ctx = hou.node(CONTEXT_CONTAINER) - if not op_ctx: - op_ctx = self.create_context_node() - - lib.imprint(op_ctx, data, update=True) - - def get_context_data(self): - op_ctx = hou.node(CONTEXT_CONTAINER) - if not op_ctx: - op_ctx = self.create_context_node() - return lib.read(op_ctx) - - def save_file(self, dst_path=None): - # Force forwards slashes to avoid segfault - dst_path = dst_path.replace("\\", "/") - - hou.hipFile.save(file_name=dst_path, - save_to_recent_files=True) - - -def on_file_event_callback(event): - if event == hou.hipFileEventType.AfterLoad: - emit_event("open") - elif event == hou.hipFileEventType.AfterSave: - emit_event("save") - elif event == hou.hipFileEventType.BeforeSave: - emit_event("before.save") - elif event == hou.hipFileEventType.AfterClear: - emit_event("new") - - -def containerise(name, - namespace, - nodes, - context, - loader=None, - suffix=""): - """Bundle `nodes` into a subnet and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - nodes (list): Long names of nodes to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. - - Returns: - container (str): Name of container assembly - - """ - - # Get AVALON_CONTAINERS subnet - subnet = get_or_create_avalon_container() - - # Create proper container name - container_name = "{}_{}".format(name, suffix or "CON") - container = hou.node("/obj/{}".format(name)) - container.setName(container_name, unique_name=True) - - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": context["representation"]["id"], - } - - lib.imprint(container, data) - - # "Parent" the container under the container network - hou.moveNodesTo([container], subnet) - - subnet.node(container_name).moveToGoodPosition() - - return container - - -def parse_container(container): - """Return the container node's full container data. - - Args: - container (hou.Node): A container node name. - - Returns: - dict: The container schema data for this container node. 
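-
-    Example (the container path and loader name are illustrative):
-        >>> node = hou.node("/obj/AVALON_CONTAINERS/modelMain_CON")
-        >>> parse_container(node)["loader"]
-        'AbcLoader'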
- - """ - # Read only relevant parms - # TODO: Clean up this hack replacing `lib.read(container)` - - data = {} - for name in ["name", "namespace", "loader", "representation", "id"]: - parm = container.parm(name) - if not parm: - return {} - - value = parm.eval() - - # test if value is json encoded dict - if isinstance(value, str) and value.startswith(JSON_PREFIX): - try: - value = json.loads(value[len(JSON_PREFIX):]) - except json.JSONDecodeError: - # not a json - pass - data[name] = value - - # Backwards compatibility pre-schemas for containers - data["schema"] = data.get("schema", "openpype:container-1.0") - - # Append transient data - data["objectName"] = container.path() - data["node"] = container - - return data - - -def ls(): - containers = [] - for identifier in ( - AYON_CONTAINER_ID, - AVALON_CONTAINER_ID, - "pyblish.mindbender.container" - ): - containers += lib.lsattr("id", identifier) - - for container in sorted(containers, - # Hou 19+ Python 3 hou.ObjNode are not - # sortable due to not supporting greater - # than comparisons - key=lambda node: node.path()): - yield parse_container(container) - - -def before_workfile_save(event): - global _about_to_save - _about_to_save = True - - -def before_save(): - return lib.validate_fps() - - -def on_save(): - - log.info("Running callback on save..") - - # update houdini vars - lib.update_houdini_vars_context_dialog() - - # We are now starting the actual save directly - global _about_to_save - _about_to_save = False - - -def on_task_changed(): - global _about_to_save - if not IS_HEADLESS and _about_to_save: - # Let's prompt the user to update the context settings or not - lib.prompt_reset_context() - - -def _show_outdated_content_popup(): - # Get main window - parent = lib.get_main_window() - if parent is None: - log.info("Skipping outdated content pop-up " - "because Houdini window can't be found.") - return - - from ayon_core.tools.utils import SimplePopup - - # Show outdated pop-up - def _on_show_inventory(): - from ayon_core.tools.utils import host_tools - host_tools.show_scene_inventory(parent=parent) - - dialog = SimplePopup(parent=parent) - dialog.setWindowTitle("Houdini scene has outdated content") - dialog.set_message("There are outdated containers in " - "your Houdini scene.") - dialog.on_clicked.connect(_on_show_inventory) - dialog.show() - - -def on_open(): - - if not hou.isUIAvailable(): - log.debug("Batch mode detected, ignoring `on_open` callbacks..") - return - - log.info("Running callback on open..") - - # update houdini vars - lib.update_houdini_vars_context_dialog() - - # Validate FPS after update_task_from_path to - # ensure it is using correct FPS for the folder - lib.validate_fps() - - if any_outdated_containers(): - parent = lib.get_main_window() - if parent is None: - # When opening Houdini with last workfile on launch the UI hasn't - # initialized yet completely when the `on_open` callback triggers. - # We defer the dialog popup to wait for the UI to become available. - # We assume it will open because `hou.isUIAvailable()` returns True - import hdefereval - hdefereval.executeDeferred(_show_outdated_content_popup) - else: - _show_outdated_content_popup() - - log.warning("Scene has outdated content.") - - -def on_new(): - """Set project resolution and fps when create a new file""" - - if hou.hipFile.isLoadingHipFile(): - # This event also triggers when Houdini opens a file due to the - # new event being registered to 'afterClear'. 
As such we can skip - # 'new' logic if the user is opening a file anyway - log.debug("Skipping on new callback due to scene being opened.") - return - - log.info("Running callback on new..") - _set_context_settings() - - # It seems that the current frame always gets reset to frame 1 on - # new scene. So we enforce current frame to be at the start of the playbar - # with execute deferred - def _enforce_start_frame(): - start = hou.playbar.playbackRange()[0] - hou.setFrame(start) - - if hou.isUIAvailable(): - import hdefereval - hdefereval.executeDeferred(_enforce_start_frame) - else: - # Run without execute deferred when no UI is available because - # without UI `hdefereval` is not available to import - _enforce_start_frame() - - -def get_or_create_avalon_container() -> "hou.OpNode": - avalon_container = hou.node(AVALON_CONTAINERS) - if avalon_container: - return avalon_container - - parent_path, name = AVALON_CONTAINERS.rsplit("/", 1) - parent = hou.node(parent_path) - return parent.createNode( - "subnet", node_name=name - ) - - -def _set_context_settings(): - """Apply the project settings from the project definition - - Settings can be overwritten by a folder if the folder.attrib contains - any information regarding those settings. - - Examples of settings: - fps - resolution - renderer - - Returns: - None - """ - - lib.reset_framerange() - lib.update_houdini_vars_context() diff --git a/server_addon/houdini/client/ayon_houdini/api/plugin.py b/server_addon/houdini/client/ayon_houdini/api/plugin.py deleted file mode 100644 index 8a2344febb..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/plugin.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- -"""Houdini specific Avalon/Pyblish plugin definitions.""" -import sys -from abc import ( - ABCMeta -) -import six -import hou - -import pyblish.api -from ayon_core.pipeline import ( - CreatorError, - Creator, - CreatedInstance, - AYON_INSTANCE_ID, - AVALON_INSTANCE_ID, - load, - publish -) -from ayon_core.lib import BoolDef - -from .lib import imprint, read, lsattr, add_self_publish_button - - -SETTINGS_CATEGORY = "houdini" - - -class HoudiniCreatorBase(object): - @staticmethod - def cache_instance_data(shared_data): - """Cache instances for Creators to shared data. - - Create `houdini_cached_instances` key when needed in shared data and - fill it with all collected instances from the scene under its - respective creator identifiers. - - Create `houdini_cached_legacy_instance` key for any legacy instances - detected in the scene as instances per family. - - Args: - Dict[str, Any]: Shared data. 
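-
-        Example (identifiers and node reprs are illustrative):
-            >>> shared_data = {}
-            >>> HoudiniCreatorBase.cache_instance_data(shared_data)
-            >>> shared_data["houdini_cached_instances"]
-            {'io.openpype.creators.houdini.pointcache': [<hou.RopNode ...>]}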
- - """ - if shared_data.get("houdini_cached_instances") is None: - cache = dict() - cache_legacy = dict() - - nodes = [] - for id_type in [AYON_INSTANCE_ID, AVALON_INSTANCE_ID]: - nodes.extend(lsattr("id", id_type)) - for node in nodes: - - creator_identifier_parm = node.parm("creator_identifier") - if creator_identifier_parm: - # creator instance - creator_id = creator_identifier_parm.eval() - cache.setdefault(creator_id, []).append(node) - - else: - # legacy instance - family_parm = node.parm("family") - if not family_parm: - # must be a broken instance - continue - - family = family_parm.eval() - cache_legacy.setdefault(family, []).append(node) - - shared_data["houdini_cached_instances"] = cache - shared_data["houdini_cached_legacy_instance"] = cache_legacy - - return shared_data - - @staticmethod - def create_instance_node( - folder_path, - node_name, - parent, - node_type="geometry", - pre_create_data=None - ): - """Create node representing instance. - - Arguments: - folder_path (str): Folder path. - node_name (str): Name of the new node. - parent (str): Name of the parent node. - node_type (str, optional): Type of the node. - pre_create_data (Optional[Dict]): Pre create data. - - Returns: - hou.Node: Newly created instance node. - - """ - parent_node = hou.node(parent) - instance_node = parent_node.createNode( - node_type, node_name=node_name) - instance_node.moveToGoodPosition() - return instance_node - - -@six.add_metaclass(ABCMeta) -class HoudiniCreator(Creator, HoudiniCreatorBase): - """Base class for most of the Houdini creator plugins.""" - selected_nodes = [] - settings_name = None - add_publish_button = False - - settings_category = SETTINGS_CATEGORY - - def create(self, product_name, instance_data, pre_create_data): - try: - self.selected_nodes = [] - - if pre_create_data.get("use_selection"): - self.selected_nodes = hou.selectedNodes() - - # Get the node type and remove it from the data, not needed - node_type = instance_data.pop("node_type", None) - if node_type is None: - node_type = "geometry" - - folder_path = instance_data["folderPath"] - - instance_node = self.create_instance_node( - folder_path, - product_name, - "/out", - node_type, - pre_create_data - ) - - self.customize_node_look(instance_node) - - instance_data["instance_node"] = instance_node.path() - instance_data["instance_id"] = instance_node.path() - instance_data["families"] = self.get_publish_families() - instance = CreatedInstance( - self.product_type, - product_name, - instance_data, - self) - self._add_instance_to_context(instance) - self.imprint(instance_node, instance.data_to_store()) - - if self.add_publish_button: - add_self_publish_button(instance_node) - - return instance - - except hou.Error as er: - six.reraise( - CreatorError, - CreatorError("Creator error: {}".format(er)), - sys.exc_info()[2]) - - def lock_parameters(self, node, parameters): - """Lock list of specified parameters on the node. - - Args: - node (hou.Node): Houdini node to lock parameters on. - parameters (list of str): List of parameter names. 
- - """ - for name in parameters: - try: - parm = node.parm(name) - parm.lock(True) - except AttributeError: - self.log.debug("missing lock pattern {}".format(name)) - - def collect_instances(self): - # cache instances if missing - self.cache_instance_data(self.collection_shared_data) - for instance in self.collection_shared_data[ - "houdini_cached_instances"].get(self.identifier, []): - - node_data = read(instance) - - # Node paths are always the full node path since that is unique - # Because it's the node's path it's not written into attributes - # but explicitly collected - node_path = instance.path() - node_data["instance_id"] = node_path - node_data["instance_node"] = node_path - node_data["families"] = self.get_publish_families() - if "AYON_productName" in node_data: - node_data["productName"] = node_data.pop("AYON_productName") - - created_instance = CreatedInstance.from_existing( - node_data, self - ) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, changes in update_list: - instance_node = hou.node(created_inst.get("instance_node")) - new_values = { - key: changes[key].new_value - for key in changes.changed_keys - } - # Update parm templates and values - self.imprint( - instance_node, - new_values, - update=True - ) - - def imprint(self, node, values, update=False): - # Never store instance node and instance id since that data comes - # from the node's path - if "productName" in values: - values["AYON_productName"] = values.pop("productName") - values.pop("instance_node", None) - values.pop("instance_id", None) - values.pop("families", None) - imprint(node, values, update=update) - - def remove_instances(self, instances): - """Remove specified instance from the scene. - - This is only removing `id` parameter so instance is no longer - instance, because it might contain valuable data for artist. - - """ - for instance in instances: - instance_node = hou.node(instance.data.get("instance_node")) - if instance_node: - instance_node.destroy() - - self._remove_instance_from_context(instance) - - def get_pre_create_attr_defs(self): - return [ - BoolDef("use_selection", label="Use selection") - ] - - @staticmethod - def customize_node_look( - node, color=None, - shape="chevron_down"): - """Set custom look for instance nodes. - - Args: - node (hou.Node): Node to set look. - color (hou.Color, Optional): Color of the node. - shape (str, Optional): Shape name of the node. - - Returns: - None - - """ - if not color: - color = hou.Color((0.616, 0.871, 0.769)) - node.setUserData('nodeshape', shape) - node.setColor(color) - - def get_publish_families(self): - """Return families for the instances of this creator. - - Allow a Creator to define multiple families so that a creator can - e.g. specify `usd` and `usdrop`. - - There is no need to override this method if you only have the - primary family defined by the `product_type` property as that will - always be set. - - Returns: - List[str]: families for instances of this creator - """ - return [] - - def get_network_categories(self): - """Return in which network view type this creator should show. - - The node type categories returned here will be used to define where - the creator will show up in the TAB search for nodes in Houdini's - Network View. - - This can be overridden in inherited classes to define where that - particular Creator should be visible in the TAB search. 
- - Returns: - list: List of houdini node type categories - - """ - return [hou.ropNodeTypeCategory()] - - def apply_settings(self, project_settings): - """Method called on initialization of plugin to apply settings.""" - - # Apply General Settings - houdini_general_settings = project_settings["houdini"]["general"] - self.add_publish_button = houdini_general_settings.get( - "add_self_publish_button", False) - - # Apply Creator Settings - settings_name = self.settings_name - if settings_name is None: - settings_name = self.__class__.__name__ - - settings = project_settings["houdini"]["create"] - settings = settings.get(settings_name) - if settings is None: - self.log.debug( - "No settings found for {}".format(self.__class__.__name__) - ) - return - - for key, value in settings.items(): - setattr(self, key, value) - - -class HoudiniLoader(load.LoaderPlugin): - """Base class for Houdini load plugins.""" - - hosts = ["houdini"] - settings_category = SETTINGS_CATEGORY - - -class HoudiniInstancePlugin(pyblish.api.InstancePlugin): - """Base class for Houdini instance publish plugins.""" - - hosts = ["houdini"] - settings_category = SETTINGS_CATEGORY - - -class HoudiniContextPlugin(pyblish.api.ContextPlugin): - """Base class for Houdini context publish plugins.""" - - hosts = ["houdini"] - settings_category = SETTINGS_CATEGORY - - -class HoudiniExtractorPlugin(publish.Extractor): - """Base class for Houdini extract plugins. - - Note: - The `HoudiniExtractorPlugin` is a subclass of `publish.Extractor`, - which in turn is a subclass of `pyblish.api.InstancePlugin`. - Should there be a requirement to create an extractor that operates - as a context plugin, it would be beneficial to incorporate - the functionalities present in `publish.Extractor`. - """ - - hosts = ["houdini"] - settings_category = SETTINGS_CATEGORY diff --git a/server_addon/houdini/client/ayon_houdini/api/shelves.py b/server_addon/houdini/client/ayon_houdini/api/shelves.py deleted file mode 100644 index 2987568af1..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/shelves.py +++ /dev/null @@ -1,215 +0,0 @@ -import os -import re -import logging -import platform - -from ayon_core.settings import get_project_settings -from ayon_core.pipeline import get_current_project_name - -from ayon_core.lib import StringTemplate - -import hou - -from .lib import get_current_context_template_data_with_folder_attrs - -log = logging.getLogger("ayon_houdini.shelves") - - -def generate_shelves(): - """This function generates complete shelves from shelf set to tools - in Houdini from openpype project settings houdini shelf definition. 
- """ - current_os = platform.system().lower() - - # load configuration of houdini shelves - project_name = get_current_project_name() - project_settings = get_project_settings(project_name) - shelves_configs = project_settings["houdini"]["shelves"] - - if not shelves_configs: - log.debug("No custom shelves found in project settings.") - return - - # Get Template data - template_data = get_current_context_template_data_with_folder_attrs() - - for config in shelves_configs: - selected_option = config["options"] - shelf_set_config = config[selected_option] - - shelf_set_filepath = shelf_set_config.get('shelf_set_source_path') - if shelf_set_filepath: - shelf_set_os_filepath = shelf_set_filepath[current_os] - if shelf_set_os_filepath: - shelf_set_os_filepath = get_path_using_template_data( - shelf_set_os_filepath, template_data - ) - if not os.path.isfile(shelf_set_os_filepath): - log.error("Shelf path doesn't exist - " - "{}".format(shelf_set_os_filepath)) - continue - - hou.shelves.loadFile(shelf_set_os_filepath) - continue - - shelf_set_name = shelf_set_config.get('shelf_set_name') - if not shelf_set_name: - log.warning("No name found in shelf set definition.") - continue - - shelves_definition = shelf_set_config.get('shelf_definition') - if not shelves_definition: - log.debug( - "No shelf definition found for shelf set named '{}'".format( - shelf_set_name - ) - ) - continue - - shelf_set = get_or_create_shelf_set(shelf_set_name) - for shelf_definition in shelves_definition: - shelf_name = shelf_definition.get('shelf_name') - if not shelf_name: - log.warning("No name found in shelf definition.") - continue - - shelf = get_or_create_shelf(shelf_name) - - if not shelf_definition.get('tools_list'): - log.debug( - "No tool definition found for shelf named {}".format( - shelf_name - ) - ) - continue - - mandatory_attributes = {'label', 'script'} - for tool_definition in shelf_definition.get('tools_list'): - # We verify that the name and script attributes of the tool - # are set - if not all( - tool_definition[key] for key in mandatory_attributes - ): - log.warning( - "You need to specify at least the name and the " - "script path of the tool.") - continue - - tool = get_or_create_tool( - tool_definition, shelf, template_data - ) - - if not tool: - continue - - # Add the tool to the shelf if not already in it - if tool not in shelf.tools(): - shelf.setTools(list(shelf.tools()) + [tool]) - - # Add the shelf in the shelf set if not already in it - if shelf not in shelf_set.shelves(): - shelf_set.setShelves(shelf_set.shelves() + (shelf,)) - - -def get_or_create_shelf_set(shelf_set_label): - """This function verifies if the shelf set label exists. If not, - creates a new shelf set. - - Arguments: - shelf_set_label (str): The label of the shelf set - - Returns: - hou.ShelfSet: The shelf set existing or the new one - """ - all_shelves_sets = hou.shelves.shelfSets().values() - - shelf_set = next((shelf for shelf in all_shelves_sets if - shelf.label() == shelf_set_label), None) - if shelf_set: - return shelf_set - - shelf_set_name = shelf_set_label.replace(' ', '_').lower() - new_shelf_set = hou.shelves.newShelfSet( - name=shelf_set_name, - label=shelf_set_label - ) - return new_shelf_set - - -def get_or_create_shelf(shelf_label): - """This function verifies if the shelf label exists. If not, creates - a new shelf. 
- - Arguments: - shelf_label (str): The label of the shelf - - Returns: - hou.Shelf: The shelf existing or the new one - """ - all_shelves = hou.shelves.shelves().values() - - shelf = next((s for s in all_shelves if s.label() == shelf_label), None) - if shelf: - return shelf - - shelf_name = shelf_label.replace(' ', '_').lower() - new_shelf = hou.shelves.newShelf( - name=shelf_name, - label=shelf_label - ) - return new_shelf - - -def get_or_create_tool(tool_definition, shelf, template_data): - """This function verifies if the tool exists and updates it. If not, creates - a new one. - - Arguments: - tool_definition (dict): Dict with label, script, icon and help - shelf (hou.Shelf): The parent shelf of the tool - - Returns: - hou.Tool: The tool updated or the new one - """ - - tool_label = tool_definition.get("label") - if not tool_label: - log.warning("Skipped shelf without label") - return - - script_path = tool_definition["script"] - script_path = get_path_using_template_data(script_path, template_data) - if not script_path or not os.path.exists(script_path): - log.warning("This path doesn't exist - {}".format(script_path)) - return - - icon_path = tool_definition["icon"] - if icon_path: - icon_path = get_path_using_template_data(icon_path, template_data) - tool_definition["icon"] = icon_path - - existing_tools = shelf.tools() - existing_tool = next( - (tool for tool in existing_tools if tool.label() == tool_label), - None - ) - - with open(script_path) as stream: - script = stream.read() - - tool_definition["script"] = script - - if existing_tool: - tool_definition.pop("label", None) - existing_tool.setData(**tool_definition) - return existing_tool - - tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower() - return hou.shelves.newTool(name=tool_name, **tool_definition) - - -def get_path_using_template_data(path, template_data): - path = StringTemplate.format_template(path, template_data) - path = path.replace("\\", "/") - - return path diff --git a/server_addon/houdini/client/ayon_houdini/api/usd.py b/server_addon/houdini/client/ayon_houdini/api/usd.py deleted file mode 100644 index a416d581c3..0000000000 --- a/server_addon/houdini/client/ayon_houdini/api/usd.py +++ /dev/null @@ -1,379 +0,0 @@ -"""Houdini-specific USD Library functions.""" - -import contextlib -import logging -import json -import itertools -from typing import List - -import hou -from pxr import Usd, Sdf, Tf, Vt, UsdRender - -log = logging.getLogger(__name__) - - -def add_usd_output_processor(ropnode, processor): - """Add USD Output Processor to USD Rop node. - - Args: - ropnode (hou.RopNode): The USD Rop node. - processor (str): The output processor name. This is the basename of - the python file that contains the Houdini USD Output Processor. - - """ - - import loputils - - loputils.handleOutputProcessorAdd( - { - "node": ropnode, - "parm": ropnode.parm("outputprocessors"), - "script_value": processor, - } - ) - - -def remove_usd_output_processor(ropnode, processor): - """Removes USD Output Processor from USD Rop node. - - Args: - ropnode (hou.RopNode): The USD Rop node. - processor (str): The output processor name. This is the basename of - the python file that contains the Houdini USD Output Processor. 
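-
-    Example (the ROP path is illustrative):
-        >>> ropnode = hou.node("/out/usd_rop1")
-        >>> remove_usd_output_processor(ropnode, "ayon_remap_paths")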
- - """ - import loputils - - parm = ropnode.parm(processor + "_remove") - if not parm: - raise RuntimeError( - "Output Processor %s does not " - "exist on %s" % (processor, ropnode.name()) - ) - - loputils.handleOutputProcessorRemove({"node": ropnode, "parm": parm}) - - -@contextlib.contextmanager -def outputprocessors(ropnode, processors=tuple(), disable_all_others=True): - """Context manager to temporarily add Output Processors to USD ROP node. - - Args: - ropnode (hou.RopNode): The USD Rop node. - processors (tuple or list): The processors to add. - disable_all_others (bool, Optional): Whether to disable all - output processors currently on the ROP node that are not in the - `processors` list passed to this function. - - """ - # TODO: Add support for forcing the correct Order of the processors - - original = [] - prefix = "enableoutputprocessor_" - processor_parms = ropnode.globParms(prefix + "*") - for parm in processor_parms: - original.append((parm, parm.eval())) - - if disable_all_others: - for parm in processor_parms: - parm.set(False) - - added = [] - for processor in processors: - - parm = ropnode.parm(prefix + processor) - if parm: - # If processor already exists, just enable it - parm.set(True) - - else: - # Else add the new processor - add_usd_output_processor(ropnode, processor) - added.append(processor) - - try: - yield - finally: - - # Remove newly added processors - for processor in added: - remove_usd_output_processor(ropnode, processor) - - # Revert to original values - for parm, value in original: - if parm: - parm.set(value) - - -def get_usd_rop_loppath(node): - - # Get sop path - node_type = node.type().name() - if node_type == "usd": - return node.parm("loppath").evalAsNode() - - elif node_type in {"usd_rop", "usdrender_rop"}: - # Inside Solaris e.g. /stage (not in ROP context) - # When incoming connection is present it takes it directly - inputs = node.inputs() - if inputs: - return inputs[0] - else: - return node.parm("loppath").evalAsNode() - - -def get_layer_save_path(layer, expand_string=True): - """Get custom HoudiniLayerInfo->HoudiniSavePath from SdfLayer. - - Args: - layer (pxr.Sdf.Layer): The Layer to retrieve the save pah data from. - expand_string (bool): Whether to expand any houdini vars in the save - path before computing the absolute path. - - Returns: - str or None: Path to save to when data exists. - - """ - hou_layer_info = layer.rootPrims.get("HoudiniLayerInfo") - if not hou_layer_info: - return - - save_path = hou_layer_info.customData.get("HoudiniSavePath", None) - if save_path: - # Unfortunately this doesn't actually resolve the full absolute path - if expand_string: - save_path = hou.text.expandString(save_path) - return layer.ComputeAbsolutePath(save_path) - - -def get_referenced_layers(layer): - """Return SdfLayers for all external references of the current layer - - Args: - layer (pxr.Sdf.Layer): The Layer to retrieve the save pah data from. - - Returns: - list: List of pxr.Sdf.Layer that are external references to this layer - - """ - - layers = [] - for layer_id in layer.GetExternalReferences(): - layer = Sdf.Layer.Find(layer_id) - if not layer: - # A file may not be in memory and is - # referenced from disk. As such it cannot - # be found. We will ignore those layers. 
-            continue
-
-        layers.append(layer)
-
-    return layers
-
-
-def iter_layer_recursive(layer):
-    """Recursively iterate all 'external' referenced layers"""
-
-    layers = get_referenced_layers(layer)
-    traversed = set(layers)  # Avoid recursion to itself (if even possible)
-    traverse = list(layers)
-    for layer in traverse:
-
-        # Include children layers (recursion)
-        children_layers = get_referenced_layers(layer)
-        children_layers = [x for x in children_layers if x not in traversed]
-        traverse.extend(children_layers)
-        traversed.update(children_layers)
-
-        yield layer
-
-
-def get_configured_save_layers(usd_rop, strip_above_layer_break=True):
-    """Retrieve the layer save paths from a USD ROP.
-
-    Arguments:
-        usd_rop (hou.RopNode): USD ROP node.
-        strip_above_layer_break (Optional[bool]): Whether to exclude any
-            layers that are above layer breaks. This defaults to True.
-
-    Returns:
-        List[Sdf.Layer]: The layers with configured save paths.
-
-    """
-
-    lop_node = get_usd_rop_loppath(usd_rop)
-    stage = lop_node.stage(apply_viewport_overrides=False)
-    if not stage:
-        raise RuntimeError(
-            "No valid USD stage for ROP node: "
-            "%s" % usd_rop.path()
-        )
-
-    root_layer = stage.GetRootLayer()
-
-    if strip_above_layer_break:
-        layers_above_layer_break = set(lop_node.layersAboveLayerBreak())
-    else:
-        layers_above_layer_break = set()
-
-    save_layers = []
-    for layer in iter_layer_recursive(root_layer):
-        if (
-            strip_above_layer_break and
-            layer.identifier in layers_above_layer_break
-        ):
-            continue
-
-        save_path = get_layer_save_path(layer)
-        if save_path is not None:
-            save_layers.append(layer)
-
-    return save_layers
-
-
-def setup_lop_python_layer(layer, node, savepath=None,
-                           apply_file_format_args=True):
-    """Set up Sdf.Layer with HoudiniLayerInfo prim for metadata.
-
-    This is the same as `loputils.createPythonLayer` but can be run on top
-    of `pxr.Sdf.Layer` instances that are already created in a Python LOP
-    node. That's useful if your layer creation itself is built to be DCC
-    agnostic; then we only need to run this afterwards per layer to make it
-    explicitly stored for Houdini.
-
-    By default, Houdini doesn't apply the FileFormatArguments supplied to
-    the created layer; however it does support USD's file save suffix
-    of `:SDF_FORMAT_ARGS:` to supply them. With `apply_file_format_args` any
-    file format args set on the layer's creation will be added to the
-    save path through that.
-
-    Note: The `node.addHeldLayer` call will only work from a LOP python node
-    whenever `node.editableStage()` or `node.editableLayer()` was called.
-
-    Arguments:
-        layer (Sdf.Layer): An existing layer (most likely just created
-            in the current runtime)
-        node (hou.LopNode): The Python LOP node to attach the layer to so
-            that it does not get garbage collected or mangled afterwards.
-        savepath (Optional[str]): When provided the HoudiniSaveControl
-            will be set to Explicit with HoudiniSavePath to this path.
-        apply_file_format_args (Optional[bool]): When enabled any
-            FileFormatArgs defined for the layer on creation will be set
-            in the HoudiniSavePath so the Houdini USD ROP will use them too.
-
-    Returns:
-        Sdf.PrimSpec: The created HoudiniLayerInfo prim spec.
-
-    """
-    # Add a Houdini Layer Info prim where we can put the save path.
-    p = Sdf.CreatePrimInLayer(layer, '/HoudiniLayerInfo')
-    p.specifier = Sdf.SpecifierDef
-    p.typeName = 'HoudiniLayerInfo'
-    if savepath:
-        if apply_file_format_args:
-            args = layer.GetFileFormatArguments()
-            savepath = Sdf.Layer.CreateIdentifier(savepath, args)
-
-        p.customData['HoudiniSavePath'] = savepath
-        p.customData['HoudiniSaveControl'] = 'Explicit'
-    # Let everyone know what node created this layer.
-    p.customData['HoudiniCreatorNode'] = node.sessionId()
-    p.customData['HoudiniEditorNodes'] = Vt.IntArray([node.sessionId()])
-    node.addHeldLayer(layer.identifier)
-
-    return p
-
-
-@contextlib.contextmanager
-def remap_paths(rop_node, mapping):
-    """Enable the AyonRemapPaths output processor with provided `mapping`"""
-    from ayon_houdini.api.lib import parm_values
-
-    if not mapping:
-        # Do nothing
-        yield
-        return
-
-    # Houdini string parms need to escape backslashes due to the support
-    # of expressions - as such we do so on the json data
-    value = json.dumps(mapping).replace("\\", "\\\\")
-    with outputprocessors(
-        rop_node,
-        processors=["ayon_remap_paths"],
-        disable_all_others=True,
-    ):
-        with parm_values([
-            (rop_node.parm("ayon_remap_paths_remap_json"), value)
-        ]):
-            yield
-
-
-def get_usd_render_rop_rendersettings(rop_node, stage=None, logger=None):
-    """Return the chosen UsdRender.Settings from the stage (if any).
-
-    Args:
-        rop_node (hou.Node): The Houdini USD Render ROP node.
-        stage (pxr.Usd.Stage): The USD stage to find the render settings
-            in. This is usually the stage from the LOP path the USD Render
-            ROP node refers to.
-        logger (logging.Logger): Logger to log warnings to if no render
-            settings were found in the stage.
-
-    Returns:
-        Optional[UsdRender.Settings]: Render Settings.
-
-    """
-    if logger is None:
-        logger = log
-
-    if stage is None:
-        lop_node = get_usd_rop_loppath(rop_node)
-        stage = lop_node.stage()
-
-    path = rop_node.evalParm("rendersettings")
-    if not path:
-        # Default behavior
-        path = "/Render/rendersettings"
-
-    prim = stage.GetPrimAtPath(path)
-    if not prim:
-        logger.warning("No render settings primitive found at: %s", path)
-        return
-
-    render_settings = UsdRender.Settings(prim)
-    if not render_settings:
-        logger.warning("Prim at %s is not a valid RenderSettings prim.", path)
-        return
-
-    return render_settings
-
-
-def get_schema_type_names(type_name: str) -> List[str]:
-    """Return schema type names for the type name and its derived types.
-
-    This can be useful for checking whether a `Sdf.PrimSpec`'s type name is of
-    a given type or any of its derived types.
-
-    Args:
-        type_name (str): The type name, like e.g. 'UsdGeomMesh'
-
-    Returns:
-        List[str]: List of schema type names and their derived types.
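-
-    Example (exact output depends on the schemas registered at runtime):
-        >>> get_schema_type_names("UsdGeomBoundable")
-        ['UsdGeomBoundable', 'UsdGeomGprim', 'UsdGeomMesh', ...]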
- - """ - schema_registry = Usd.SchemaRegistry - type_ = Tf.Type.FindByName(type_name) - - if type_ == Tf.Type.Unknown: - type_ = schema_registry.GetTypeFromSchemaTypeName(type_name) - if type_ == Tf.Type.Unknown: - # Type not found - return [] - - results = [] - derived = type_.GetAllDerivedTypes() - for derived_type in itertools.chain([type_], derived): - schema_type_name = schema_registry.GetSchemaTypeName(derived_type) - if schema_type_name: - results.append(schema_type_name) - - return results diff --git a/server_addon/houdini/client/ayon_houdini/hooks/set_default_display_and_view.py b/server_addon/houdini/client/ayon_houdini/hooks/set_default_display_and_view.py deleted file mode 100644 index 7d41979600..0000000000 --- a/server_addon/houdini/client/ayon_houdini/hooks/set_default_display_and_view.py +++ /dev/null @@ -1,64 +0,0 @@ -from ayon_applications import PreLaunchHook, LaunchTypes - - -class SetDefaultDisplayView(PreLaunchHook): - """Set default view and default display for houdini via OpenColorIO. - - Houdini's defaultDisplay and defaultView are set by - setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS' - environment variables respectively. - - More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up - """ - - app_groups = {"houdini"} - launch_types = {LaunchTypes.local} - - def execute(self): - - OCIO = self.launch_context.env.get("OCIO") - - # This is a cheap way to skip this hook if either global color - # management or houdini color management was disabled because the - # OCIO var would be set by the global OCIOEnvHook - if not OCIO: - return - - # workfile settings added in '0.2.13' - houdini_color_settings = \ - self.data["project_settings"]["houdini"]["imageio"].get("workfile") - - if not houdini_color_settings: - self.log.info("Hook 'SetDefaultDisplayView' requires Houdini " - "addon version >= '0.2.13'") - return - - if not houdini_color_settings["enabled"]: - self.log.info( - "Houdini workfile color management is disabled." - ) - return - - # 'OCIO_ACTIVE_DISPLAYS', 'OCIO_ACTIVE_VIEWS' are checked - # as Admins can add them in Ayon env vars or Ayon tools. - - default_display = houdini_color_settings["default_display"] - if default_display: - # get 'OCIO_ACTIVE_DISPLAYS' value if exists. - self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display) - - default_view = houdini_color_settings["default_view"] - if default_view: - # get 'OCIO_ACTIVE_VIEWS' value if exists. - self._set_context_env("OCIO_ACTIVE_VIEWS", default_view) - - def _set_context_env(self, env_var, default_value): - env_value = self.launch_context.env.get(env_var, "") - new_value = ":".join( - key for key in [default_value, env_value] if key - ) - self.log.info( - "Setting {} environment to: {}" - .format(env_var, new_value) - ) - self.launch_context.env[env_var] = new_value diff --git a/server_addon/houdini/client/ayon_houdini/hooks/set_paths.py b/server_addon/houdini/client/ayon_houdini/hooks/set_paths.py deleted file mode 100644 index 4b89ebe944..0000000000 --- a/server_addon/houdini/client/ayon_houdini/hooks/set_paths.py +++ /dev/null @@ -1,18 +0,0 @@ -from ayon_applications import PreLaunchHook, LaunchTypes - - -class SetPath(PreLaunchHook): - """Set current dir to workdir. - - Hook `GlobalHostDataHook` must be executed before this hook. 
- """ - app_groups = {"houdini"} - launch_types = {LaunchTypes.local} - - def execute(self): - workdir = self.launch_context.env.get("AYON_WORKDIR", "") - if not workdir: - self.log.warning("BUG: Workdir is not filled.") - return - - self.launch_context.kwargs["cwd"] = workdir diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/convert_legacy.py b/server_addon/houdini/client/ayon_houdini/plugins/create/convert_legacy.py deleted file mode 100644 index 4c8c8062ce..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/convert_legacy.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -"""Converter for legacy Houdini products.""" -from ayon_core.pipeline.create.creator_plugins import ProductConvertorPlugin -from ayon_houdini.api.lib import imprint - - -class HoudiniLegacyConvertor(ProductConvertorPlugin): - """Find and convert any legacy products in the scene. - - This Converter will find all legacy products in the scene and will - transform them to the current system. Since the old products doesn't - retain any information about their original creators, the only mapping - we can do is based on their families. - - Its limitation is that you can have multiple creators creating product - name of the same product type and there is no way to handle it. This code - should nevertheless cover all creators that came with AYON. - - """ - identifier = "io.openpype.creators.houdini.legacy" - product_type_to_id = { - "camera": "io.openpype.creators.houdini.camera", - "ass": "io.openpype.creators.houdini.ass", - "imagesequence": "io.openpype.creators.houdini.imagesequence", - "hda": "io.openpype.creators.houdini.hda", - "pointcache": "io.openpype.creators.houdini.pointcache", - "redshiftproxy": "io.openpype.creators.houdini.redshiftproxy", - "redshift_rop": "io.openpype.creators.houdini.redshift_rop", - "usd": "io.openpype.creators.houdini.usd", - "usdrender": "io.openpype.creators.houdini.usdrender", - "vdbcache": "io.openpype.creators.houdini.vdbcache" - } - - def __init__(self, *args, **kwargs): - super(HoudiniLegacyConvertor, self).__init__(*args, **kwargs) - self.legacy_instances = {} - - def find_instances(self): - """Find legacy products in the scene. - - Legacy products are the ones that doesn't have `creator_identifier` - parameter on them. - - This is using cached entries done in - :py:meth:`~HoudiniCreatorBase.cache_instance_data()` - - """ - self.legacy_instances = self.collection_shared_data.get( - "houdini_cached_legacy_instance") - if not self.legacy_instances: - return - self.add_convertor_item("Found {} incompatible product{}.".format( - len(self.legacy_instances), - "s" if len(self.legacy_instances) > 1 else "" - )) - - def convert(self): - """Convert all legacy products to current. - - It is enough to add `creator_identifier` and `instance_node`. 
- - """ - if not self.legacy_instances: - return - - for product_type, legacy_instances in self.legacy_instances.items(): - if product_type in self.product_type_to_id: - for instance in legacy_instances: - creator_id = self.product_type_to_id[product_type] - data = { - "creator_identifier": creator_id, - "instance_node": instance.path() - } - if product_type == "pointcache": - data["families"] = ["abc"] - self.log.info("Converting {} to {}".format( - instance.path(), creator_id)) - imprint(instance, data) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_alembic_camera.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_alembic_camera.py deleted file mode 100644 index 4a92e24671..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_alembic_camera.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating alembic camera products.""" -from ayon_houdini.api import plugin -from ayon_core.pipeline import CreatorError - -import hou - - -class CreateAlembicCamera(plugin.HoudiniCreator): - """Single baked camera from Alembic ROP.""" - - identifier = "io.openpype.creators.houdini.camera" - label = "Camera (Abc)" - product_type = "camera" - icon = "camera" - - def create(self, product_name, instance_data, pre_create_data): - import hou - - instance_data.pop("active", None) - instance_data.update({"node_type": "alembic"}) - - instance = super(CreateAlembicCamera, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - parms = { - "filename": hou.text.expandString( - "$HIP/pyblish/{}.abc".format(product_name)), - "use_sop_path": False, - } - - if self.selected_nodes: - if len(self.selected_nodes) > 1: - raise CreatorError("More than one item selected.") - path = self.selected_nodes[0].path() - # Split the node path into the first root and the remainder - # So we can set the root and objects parameters correctly - _, root, remainder = path.split("/", 2) - parms.update({"root": "/" + root, "objects": remainder}) - - instance_node.setParms(parms) - - # Lock the Use Sop Path setting so the - # user doesn't accidentally enable it. 
- to_lock = ["use_sop_path"] - self.lock_parameters(instance_node, to_lock) - - instance_node.parm("trange").set(1) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.objNodeTypeCategory() - ] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_ass.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_ass.py deleted file mode 100644 index 4f5fb5833e..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_ass.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating Arnold ASS files.""" -from ayon_houdini.api import plugin -from ayon_core.lib import BoolDef - - -class CreateArnoldAss(plugin.HoudiniCreator): - """Arnold .ass Archive""" - - identifier = "io.openpype.creators.houdini.ass" - label = "Arnold ASS" - product_type = "ass" - icon = "magic" - - # Default extension: `.ass` or `.ass.gz` - # however calling HoudiniCreator.create() - # will override it by the value in the project settings - ext = ".ass" - - def create(self, product_name, instance_data, pre_create_data): - import hou - - instance_data.pop("active", None) - instance_data.update({"node_type": "arnold"}) - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - creator_attributes["farm"] = pre_create_data["farm"] - - instance = super(CreateArnoldAss, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - # Hide Properties Tab on Arnold ROP since that's used - # for rendering instead of .ass Archive Export - parm_template_group = instance_node.parmTemplateGroup() - parm_template_group.hideFolder("Properties", True) - instance_node.setParmTemplateGroup(parm_template_group) - - filepath = "{}{}".format( - hou.text.expandString("$HIP/pyblish/"), - "{}.$F4{}".format(product_name, self.ext) - ) - parms = { - # Render frame range - "trange": 1, - # Arnold ROP settings - "ar_ass_file": filepath, - "ar_ass_export_enable": 1 - } - - instance_node.setParms(parms) - - # Lock any parameters in this list - to_lock = ["ar_ass_export_enable", "productType", "id"] - self.lock_parameters(instance_node, to_lock) - - def get_instance_attr_defs(self): - return [ - BoolDef("farm", - label="Submitting to Farm", - default=False) - ] - - def get_pre_create_attr_defs(self): - attrs = super().get_pre_create_attr_defs() - # Use same attributes as for instance attributes - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_rop.py deleted file mode 100644 index 43875ccbd6..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_arnold_rop.py +++ /dev/null @@ -1,110 +0,0 @@ -from ayon_houdini.api import plugin -from ayon_core.lib import EnumDef, BoolDef - - -class CreateArnoldRop(plugin.HoudiniCreator): - """Arnold ROP""" - - identifier = "io.openpype.creators.houdini.arnold_rop" - label = "Arnold ROP" - product_type = "arnold_rop" - icon = "magic" - - # Default extension - ext = "exr" - - # Default render target - render_target = "farm_split" - - def create(self, product_name, instance_data, pre_create_data): - import hou - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - for key in ["render_target", "review"]: - if key in pre_create_data: - 
creator_attributes[key] = pre_create_data[key] - - # Remove the active, we are checking the bypass flag of the nodes - instance_data.pop("active", None) - instance_data.update({"node_type": "arnold"}) - - # Add chunk size attribute - instance_data["chunkSize"] = 1 - - instance = super(CreateArnoldRop, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - ext = pre_create_data.get("image_format") - - filepath = "{renders_dir}{product_name}/{product_name}.$F4.{ext}".format( - renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), - product_name=product_name, - ext=ext, - ) - parms = { - # Render frame range - "trange": 1, - - # Arnold ROP settings - "ar_picture": filepath, - "ar_exr_half_precision": 1 # half precision - } - - if pre_create_data.get("render_target") == "farm_split": - ass_filepath = \ - "{export_dir}{product_name}/{product_name}.$F4.ass".format( - export_dir=hou.text.expandString("$HIP/pyblish/ass/"), - product_name=product_name, - ) - parms["ar_ass_export_enable"] = 1 - parms["ar_ass_file"] = ass_filepath - - instance_node.setParms(parms) - - # Lock any parameters in this list - to_lock = ["productType", "id"] - self.lock_parameters(instance_node, to_lock) - - def get_instance_attr_defs(self): - """get instance attribute definitions. - - Attributes defined in this method are exposed in - publish tab in the publisher UI. - """ - - render_target_items = { - "local": "Local machine rendering", - "local_no_render": "Use existing frames (local)", - "farm": "Farm Rendering", - "farm_split": "Farm Rendering - Split export & render jobs", - } - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - EnumDef("render_target", - items=render_target_items, - label="Render target", - default=self.render_target), - ] - - def get_pre_create_attr_defs(self): - image_format_enum = [ - "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", - "rad", "rat", "rta", "sgi", "tga", "tif", - ] - - attrs = [ - EnumDef("image_format", - image_format_enum, - default=self.ext, - label="Image Format Options"), - ] - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_bgeo.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_bgeo.py deleted file mode 100644 index 93cf0e0998..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_bgeo.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating pointcache bgeo files.""" -from ayon_houdini.api import plugin -from ayon_core.pipeline import CreatorError -import hou -from ayon_core.lib import EnumDef, BoolDef - - -class CreateBGEO(plugin.HoudiniCreator): - """BGEO pointcache creator.""" - identifier = "io.openpype.creators.houdini.bgeo" - label = "PointCache (Bgeo)" - product_type = "pointcache" - icon = "gears" - - def create(self, product_name, instance_data, pre_create_data): - - instance_data.pop("active", None) - - instance_data.update({"node_type": "geometry"}) - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - creator_attributes["farm"] = pre_create_data["farm"] - - instance = super(CreateBGEO, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - file_path = "{}{}".format( - hou.text.expandString("$HIP/pyblish/"), - "{}.$F4.{}".format( - product_name, - pre_create_data.get("bgeo_type") or 
"bgeo.sc") - ) - parms = { - "sopoutput": file_path - } - - instance_node.parm("trange").set(1) - if self.selected_nodes: - # if selection is on SOP level, use it - if isinstance(self.selected_nodes[0], hou.SopNode): - parms["soppath"] = self.selected_nodes[0].path() - else: - # try to find output node with the lowest index - outputs = [ - child for child in self.selected_nodes[0].children() - if child.type().name() == "output" - ] - if not outputs: - instance_node.setParms(parms) - raise CreatorError(( - "Missing output node in SOP level for the selection. " - "Please select correct SOP path in created instance." - )) - outputs.sort(key=lambda output: output.evalParm("outputidx")) - parms["soppath"] = outputs[0].path() - - instance_node.setParms(parms) - - def get_instance_attr_defs(self): - return [ - BoolDef("farm", - label="Submitting to Farm", - default=False) - ] - - def get_pre_create_attr_defs(self): - attrs = super().get_pre_create_attr_defs() - bgeo_enum = [ - { - "value": "bgeo", - "label": "uncompressed bgeo (.bgeo)" - }, - { - "value": "bgeosc", - "label": "BLOSC compressed bgeo (.bgeosc)" - }, - { - "value": "bgeo.sc", - "label": "BLOSC compressed bgeo (.bgeo.sc)" - }, - { - "value": "bgeo.gz", - "label": "GZ compressed bgeo (.bgeo.gz)" - }, - { - "value": "bgeo.lzma", - "label": "LZMA compressed bgeo (.bgeo.lzma)" - }, - { - "value": "bgeo.bz2", - "label": "BZip2 compressed bgeo (.bgeo.bz2)" - } - ] - - return attrs + [ - EnumDef("bgeo_type", bgeo_enum, label="BGEO Options"), - ] + self.get_instance_attr_defs() - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.sopNodeTypeCategory() - ] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_composite.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_composite.py deleted file mode 100644 index 8c0ee8a099..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_composite.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating composite sequences.""" -from ayon_houdini.api import plugin -from ayon_core.pipeline import CreatorError - -import hou - - -class CreateCompositeSequence(plugin.HoudiniCreator): - """Composite ROP to Image Sequence""" - - identifier = "io.openpype.creators.houdini.imagesequence" - label = "Composite (Image Sequence)" - product_type = "imagesequence" - icon = "gears" - - ext = ".exr" - - def create(self, product_name, instance_data, pre_create_data): - import hou # noqa - - instance_data.pop("active", None) - instance_data.update({"node_type": "comp"}) - - instance = super(CreateCompositeSequence, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - filepath = "{}{}".format( - hou.text.expandString("$HIP/pyblish/"), - "{}.$F4{}".format(product_name, self.ext) - ) - parms = { - "trange": 1, - "copoutput": filepath - } - - if self.selected_nodes: - if len(self.selected_nodes) > 1: - raise CreatorError("More than one item selected.") - path = self.selected_nodes[0].path() - parms["coppath"] = path - - instance_node.setParms(parms) - - # Manually set f1 & f2 to $FSTART and $FEND respectively - # to match other Houdini nodes default. 
- instance_node.parm("f1").setExpression("$FSTART") - instance_node.parm("f2").setExpression("$FEND") - - # Lock any parameters in this list - to_lock = ["prim_to_detail_pattern"] - self.lock_parameters(instance_node, to_lock) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.cop2NodeTypeCategory() - ] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_hda.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_hda.py deleted file mode 100644 index ed2fd980e7..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_hda.py +++ /dev/null @@ -1,323 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating publishable Houdini Digital Assets.""" -import hou - -import ayon_api -from ayon_core.pipeline import ( - CreatorError, - get_current_project_name -) -from ayon_core.lib import ( - get_ayon_username, - BoolDef -) - -from ayon_houdini.api import plugin - - -# region assettools -# logic based on Houdini 19.5.752 `assettools.py` because -# this logic was removed in Houdini 20+ -def get_tool_submenus(hda_def): - """Returns the tab submenu entries of this node. - - Note: A node could be placed in multiple entries at once. - - Arguments: - hda_def: the HDA Definition by hou.node.type().definition() - - Returns: - Optional[list[str]]: A list of submenus - """ - - import xml.etree.ElementTree as ET - if hda_def.hasSection('Tools.shelf'): - sections = hda_def.sections() - ts_section = sections['Tools.shelf'].contents() - try: - root = ET.fromstring(ts_section) - except ET.ParseError: - return None - tool = root[0] - submenus = tool.findall('toolSubmenu') - if submenus: - tool_submenus = [] - for submenu in submenus: - if submenu is not None: - text = submenu.text - if text: - tool_submenus.append(submenu.text) - if tool_submenus: - return tool_submenus - else: - return None - else: - return None - else: - return None - - -def set_tool_submenu(hda_def, - new_submenu='Digital Assets'): - """Sets the tab menu entry for a node. 
-
-    Arguments:
-        hda_def: the HDA Definition by hou.node.type().definition()
-        new_submenu (Optional[str]): This will be the new submenu, replacing
-                                     the old_submenu entry
-    """
-
-    context_dict = {
-        'Shop': 'SHOP',
-        'Cop2': 'COP2',
-        'Object': 'OBJ',
-        'Chop': 'CHOP',
-        'Sop': 'SOP',
-        'Vop': 'VOP',
-        'VopNet': 'VOPNET',
-        'Driver': 'ROP',
-        'TOP': 'TOP',
-        'Top': 'TOP',
-        'Lop': 'LOP',
-        'Dop': 'DOP'}
-
-    utils_dict = {
-        'Shop': 'shoptoolutils',
-        'Cop2': 'cop2toolutils',
-        'Object': 'objecttoolutils',
-        'Chop': 'choptoolutils',
-        'Sop': 'soptoolutils',
-        'Vop': 'voptoolutils',
-        'VopNet': 'vopnettoolutils',
-        'Driver': 'drivertoolutils',
-        'TOP': 'toptoolutils',
-        'Top': 'toptoolutils',
-        'Lop': 'loptoolutils',
-        'Dop': 'doptoolutils'}
-
-    if hda_def.hasSection('Tools.shelf'):
-        old_submenu = get_tool_submenus(hda_def)[0]
-    else:
-        # Add default tools shelf section
-        content = """<?xml version="1.0" encoding="UTF-8"?>
-<shelfDocument>
-  <tool name="$HDA_DEFAULT_TOOL" label="$HDA_LABEL" icon="$HDA_ICON">
-    <toolMenuContext name="viewer">
-      <contextNetType>SOP</contextNetType>
-    </toolMenuContext>
-    <toolMenuContext name="network">
-      <contextOpType>$HDA_TABLE_AND_NAME</contextOpType>
-    </toolMenuContext>
-    <toolSubmenu>Digital Assets</toolSubmenu>
-    <script scriptType="python"><![CDATA[import soptoolutils
-
-soptoolutils.genericTool(kwargs, '$HDA_NAME')]]></script>
-  </tool>
-</shelfDocument>
-    """
-
-        nodetype_category_name = hda_def.nodeType().category().name()
-        context = context_dict[nodetype_category_name]
-        util = utils_dict[nodetype_category_name]
-        content = content.replace(
-            "<contextNetType>SOP</contextNetType>",
-            f"<contextNetType>{context}</contextNetType>")
-        content = content.replace('soptoolutils', util)
-        hda_def.addSection('Tools.shelf', content)
-        old_submenu = 'Digital Assets'
-
-    # Replace submenu
-    tools = hda_def.sections()["Tools.shelf"]
-    content = tools.contents()
-    content = content.replace(
-        f"<toolSubmenu>{old_submenu}</toolSubmenu>",
-        f"<toolSubmenu>{new_submenu}</toolSubmenu>"
-    )
-
-    hda_def.addSection('Tools.shelf', content)
-# endregion
-
-
-class CreateHDA(plugin.HoudiniCreator):
-    """Publish Houdini Digital Asset file."""
-
-    identifier = "io.openpype.creators.houdini.hda"
-    label = "Houdini Digital Asset (Hda)"
-    product_type = "hda"
-    icon = "gears"
-    maintain_selection = False
-
-    def _check_existing(self, folder_path, product_name):
-        # type: (str, str) -> bool
-        """Check if a product with the same name already exists."""
-        # Get all products of the current folder
-        project_name = self.project_name
-        folder_entity = ayon_api.get_folder_by_path(
-            project_name, folder_path, fields={"id"}
-        )
-        product_entities = ayon_api.get_products(
-            project_name, folder_ids={folder_entity["id"]}, fields={"name"}
-        )
-        existing_product_names_low = {
-            product_entity["name"].lower()
-            for product_entity in product_entities
-        }
-        return product_name.lower() in existing_product_names_low
-
-    def create_instance_node(
-        self,
-        folder_path,
-        node_name,
-        parent,
-        node_type="geometry",
-        pre_create_data=None
-    ):
-        if pre_create_data is None:
-            pre_create_data = {}
-
-        if self.selected_nodes:
-            # if we have `use selection` enabled, and we have some
-            # selected nodes ...
-            if self.selected_nodes[0].type().name() == "subnet":
-                to_hda = self.selected_nodes[0]
-                to_hda.setName("{}_subnet".format(node_name), unique_name=True)
-            else:
-                parent_node = self.selected_nodes[0].parent()
-                subnet = parent_node.collapseIntoSubnet(
-                    self.selected_nodes,
-                    subnet_name="{}_subnet".format(node_name))
-                subnet.moveToGoodPosition()
-                to_hda = subnet
-        else:
-            # Use Obj as the default path
-            parent_node = hou.node("/obj")
-            # Find and return the NetworkEditor pane tab with the minimum index
-            pane = hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor)
-            if isinstance(pane, hou.NetworkEditor):
-                # Use the NetworkEditor pane path as the parent path.
-                parent_node = pane.pwd()
-
-            to_hda = parent_node.createNode(
-                "subnet", node_name="{}_subnet".format(node_name))
-        if not to_hda.type().definition():
-            # If the node type has no definition, it is not a user-created
-            # HDA. Test whether an HDA can be created from the node.
-            if not to_hda.canCreateDigitalAsset():
-                raise CreatorError(
-                    "Cannot create HDA from node {}".format(to_hda))
-
-            # Pick a unique type name for HDA product per folder path per project.
-            type_name = (
-                "{project_name}{folder_path}_{node_name}".format(
-                    project_name=get_current_project_name(),
-                    folder_path=folder_path.replace("/", "_"),
-                    node_name=node_name
-                )
-            )
-
-            hda_node = to_hda.createDigitalAsset(
-                name=type_name,
-                description=node_name,
-                hda_file_name="$HIP/{}.hda".format(node_name),
-                ignore_external_references=True
-            )
-            hda_node.layoutChildren()
-        elif self._check_existing(folder_path, node_name):
-            raise CreatorError(
-                ("Product {} is already published with a different HDA "
-                 "definition.").format(node_name))
-        else:
-            hda_node = to_hda
-
-        # If the user tries to create the same HDA instance more than
-        # once, then all of them will have the same product name and
-        # point to the same hda_file_name. But their node names will
-        # be incremented.
-        hda_node.setName(node_name, unique_name=True)
-        self.customize_node_look(hda_node)
-
-        # Set Custom settings.
-        hda_def = hda_node.type().definition()
-
-        if pre_create_data.get("set_user"):
-            hda_def.setUserInfo(get_ayon_username())
-
-        if pre_create_data.get("use_project"):
-            set_tool_submenu(hda_def, "AYON/{}".format(self.project_name))
-
-        return hda_node
-
-    def create(self, product_name, instance_data, pre_create_data):
-        instance_data.pop("active", None)
-
-        return super(CreateHDA, self).create(
-            product_name,
-            instance_data,
-            pre_create_data)
-
-    def get_network_categories(self):
-        # Houdini allows creating sub-network nodes inside
-        # these categories.
-        # Therefore this plugin can work in these categories.
-        return [
-            hou.chopNodeTypeCategory(),
-            hou.cop2NodeTypeCategory(),
-            hou.dopNodeTypeCategory(),
-            hou.ropNodeTypeCategory(),
-            hou.lopNodeTypeCategory(),
-            hou.objNodeTypeCategory(),
-            hou.sopNodeTypeCategory(),
-            hou.topNodeTypeCategory(),
-            hou.vopNodeTypeCategory()
-        ]
-
-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateHDA, self).get_pre_create_attr_defs()
-        return attrs + [
-            BoolDef("set_user",
-                    tooltip="Set current user as the author of the HDA",
-                    default=False,
-                    label="Set Current User"),
-            BoolDef("use_project",
-                    tooltip="Use project name as tab submenu path.\n"
-                            "The location in TAB Menu will be\n"
-                            "'AYON/project_name/your_HDA_name'",
-                    default=True,
-                    label="Use Project as menu entry"),
-        ]
-
-    def get_dynamic_data(
-        self,
-        project_name,
-        folder_entity,
-        task_entity,
-        variant,
-        host_name,
-        instance
-    ):
-        """
-        Pass product name from product name templates as dynamic data.
- """ - dynamic_data = super(CreateHDA, self).get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - - dynamic_data.update( - { - "asset": folder_entity["name"], - "folder": { - "label": folder_entity["label"], - "name": folder_entity["name"] - } - } - ) - - return dynamic_data diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_karma_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_karma_rop.py deleted file mode 100644 index 693e6295e2..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_karma_rop.py +++ /dev/null @@ -1,147 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin to create Karma ROP.""" -from ayon_houdini.api import plugin -from ayon_core.lib import BoolDef, EnumDef, NumberDef - - -class CreateKarmaROP(plugin.HoudiniCreator): - """Karma ROP""" - identifier = "io.openpype.creators.houdini.karma_rop" - label = "Karma ROP" - product_type = "karma_rop" - icon = "magic" - - # Default render target - render_target = "farm" - - def create(self, product_name, instance_data, pre_create_data): - import hou # noqa - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - - for key in ["render_target", "review"]: - if key in pre_create_data: - creator_attributes[key] = pre_create_data[key] - - instance_data.pop("active", None) - instance_data.update({"node_type": "karma"}) - # Add chunk size attribute - instance_data["chunkSize"] = 10 - - instance = super(CreateKarmaROP, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - ext = pre_create_data.get("image_format") - - filepath = "{renders_dir}{product_name}/{product_name}.$F4.{ext}".format( - renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), - product_name=product_name, - ext=ext, - ) - checkpoint = "{cp_dir}{product_name}.$F4.checkpoint".format( - cp_dir=hou.text.expandString("$HIP/pyblish/"), - product_name=product_name - ) - - usd_directory = "{usd_dir}{product_name}_$RENDERID".format( - usd_dir=hou.text.expandString("$HIP/pyblish/renders/usd_renders/"), # noqa - product_name=product_name - ) - - parms = { - # Render Frame Range - "trange": 1, - # Karma ROP Setting - "picture": filepath, - # Karma Checkpoint Setting - "productName": checkpoint, - # USD Output Directory - "savetodirectory": usd_directory, - } - - res_x = pre_create_data.get("res_x") - res_y = pre_create_data.get("res_y") - - if self.selected_nodes: - # If camera found in selection - # we will use as render camera - camera = None - for node in self.selected_nodes: - if node.type().name() == "cam": - camera = node.path() - has_camera = pre_create_data.get("cam_res") - if has_camera: - res_x = node.evalParm("resx") - res_y = node.evalParm("resy") - - if not camera: - self.log.warning("No render camera found in selection") - - parms.update({ - "camera": camera or "", - "resolutionx": res_x, - "resolutiony": res_y, - }) - - instance_node.setParms(parms) - - # Lock some Avalon attributes - to_lock = ["productType", "id"] - self.lock_parameters(instance_node, to_lock) - - def get_instance_attr_defs(self): - """get instance attribute definitions. - - Attributes defined in this method are exposed in - publish tab in the publisher UI. 
- """ - - render_target_items = { - "local": "Local machine rendering", - "local_no_render": "Use existing frames (local)", - "farm": "Farm Rendering", - } - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - EnumDef("render_target", - items=render_target_items, - label="Render target", - default=self.render_target) - ] - - - def get_pre_create_attr_defs(self): - image_format_enum = [ - "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", - "rad", "rat", "rta", "sgi", "tga", "tif", - ] - - attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs() - - attrs += [ - EnumDef("image_format", - image_format_enum, - default="exr", - label="Image Format Options"), - NumberDef("res_x", - label="width", - default=1920, - decimals=0), - NumberDef("res_y", - label="height", - default=720, - decimals=0), - BoolDef("cam_res", - label="Camera Resolution", - default=False), - ] - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_mantra_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_mantra_rop.py deleted file mode 100644 index ce1c96f8b2..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_mantra_rop.py +++ /dev/null @@ -1,128 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin to create Mantra ROP.""" -from ayon_houdini.api import plugin -from ayon_core.lib import EnumDef, BoolDef - - -class CreateMantraROP(plugin.HoudiniCreator): - """Mantra ROP""" - identifier = "io.openpype.creators.houdini.mantra_rop" - label = "Mantra ROP" - product_type = "mantra_rop" - icon = "magic" - - # Default render target - render_target = "farm_split" - - def create(self, product_name, instance_data, pre_create_data): - import hou # noqa - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - for key in ["render_target", "review"]: - if key in pre_create_data: - creator_attributes[key] = pre_create_data[key] - - instance_data.pop("active", None) - instance_data.update({"node_type": "ifd"}) - # Add chunk size attribute - instance_data["chunkSize"] = 10 - - instance = super(CreateMantraROP, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - ext = pre_create_data.get("image_format") - - filepath = "{renders_dir}{product_name}/{product_name}.$F4.{ext}".format( - renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), - product_name=product_name, - ext=ext, - ) - - parms = { - # Render Frame Range - "trange": 1, - # Mantra ROP Setting - "vm_picture": filepath, - } - - if pre_create_data.get("render_target") == "farm_split": - ifd_filepath = \ - "{export_dir}{product_name}/{product_name}.$F4.ifd".format( - export_dir=hou.text.expandString("$HIP/pyblish/ifd/"), - product_name=product_name, - ) - parms["soho_outputmode"] = 1 - parms["soho_diskfile"] = ifd_filepath - - if self.selected_nodes: - # If camera found in selection - # we will use as render camera - camera = None - for node in self.selected_nodes: - if node.type().name() == "cam": - camera = node.path() - - if not camera: - self.log.warning("No render camera found in selection") - - parms.update({"camera": camera or ""}) - - custom_res = pre_create_data.get("override_resolution") - if custom_res: - parms.update({"override_camerares": 1}) - instance_node.setParms(parms) - - # Lock some Avalon attributes - to_lock = ["productType", "id"] - 
self.lock_parameters(instance_node, to_lock)
-
-    def get_instance_attr_defs(self):
-        """Get instance attribute definitions.
-
-        Attributes defined in this method are exposed in
-        the publish tab of the publisher UI.
-        """
-
-        render_target_items = {
-            "local": "Local machine rendering",
-            "local_no_render": "Use existing frames (local)",
-            "farm": "Farm Rendering",
-            "farm_split": "Farm Rendering - Split export & render jobs",
-        }
-
-        return [
-            BoolDef("review",
-                    label="Review",
-                    tooltip="Mark as reviewable",
-                    default=True),
-            EnumDef("render_target",
-                    items=render_target_items,
-                    label="Render target",
-                    default=self.render_target)
-        ]
-
-    def get_pre_create_attr_defs(self):
-        image_format_enum = [
-            "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
-            "rad", "rat", "rta", "sgi", "tga", "tif",
-        ]
-
-        attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
-
-        attrs += [
-            EnumDef("image_format",
-                    image_format_enum,
-                    default="exr",
-                    label="Image Format Options"),
-            BoolDef("override_resolution",
-                    label="Override Camera Resolution",
-                    tooltip="Override the current camera "
-                            "resolution, recommended for IPR.",
-                    default=False),
-        ]
-        return attrs + self.get_instance_attr_defs()
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_model.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_model.py
deleted file mode 100644
index ed6b2096c5..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_model.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Creator plugin for creating the Model product type.
-
-Note:
-    Currently, this creator plugin is the same as 'create_pointcache.py',
-    but renames the product type to 'model'.
-
-    Its purpose is to support
-    Maya (load/publish a model from Maya to/from Houdini).
-
-    It is intended to support multiple representations in the future.
-"""
-
-from ayon_houdini.api import plugin
-from ayon_core.lib import BoolDef
-
-import hou
-
-
-
-class CreateModel(plugin.HoudiniCreator):
-    """Create Model"""
-    identifier = "io.openpype.creators.houdini.model"
-    label = "Model"
-    product_type = "model"
-    icon = "cube"
-
-    def create(self, product_name, instance_data, pre_create_data):
-        instance_data.pop("active", None)
-        instance_data.update({"node_type": "alembic"})
-        creator_attributes = instance_data.setdefault(
-            "creator_attributes", dict())
-        creator_attributes["farm"] = pre_create_data["farm"]
-
-        instance = super(CreateModel, self).create(
-            product_name,
-            instance_data,
-            pre_create_data)
-
-        instance_node = hou.node(instance.get("instance_node"))
-        parms = {
-            "use_sop_path": True,
-            "build_from_path": True,
-            "path_attrib": "path",
-            "prim_to_detail_pattern": "cbId",
-            "format": 2,
-            "facesets": 0,
-            "filename": hou.text.expandString(
-                "$HIP/pyblish/{}.abc".format(product_name))
-        }
-
-        if self.selected_nodes:
-            selected_node = self.selected_nodes[0]
-
-            # Although Houdini allows an ObjNode path in `sop_path` for the
-            # ROP node, we prefer it set to the SopNode path explicitly
-
-            # Allow sop level paths (e.g. /obj/geo1/box1)
-            if isinstance(selected_node, hou.SopNode):
-                parms["sop_path"] = selected_node.path()
-                self.log.debug(
-                    "Valid SopNode selection, 'SOP Path' in ROP will be set to '%s'."
-                    % selected_node.path()
-                )
-
-            # Allow object level paths to Geometry nodes (e.g. /obj/geo1)
-            # but do not allow other object level node types like cameras, etc.
- elif isinstance(selected_node, hou.ObjNode) and \ - selected_node.type().name() in ["geo"]: - - # get the output node with the minimum - # 'outputidx' or the node with display flag - sop_path = self.get_obj_output(selected_node) - - if sop_path: - parms["sop_path"] = sop_path.path() - self.log.debug( - "Valid ObjNode selection, 'SOP Path' in ROP will be set to " - "the child path '%s'." - % sop_path.path() - ) - - if not parms.get("sop_path", None): - self.log.debug( - "Selection isn't valid. 'SOP Path' in ROP will be empty." - ) - else: - self.log.debug( - "No Selection. 'SOP Path' in ROP will be empty." - ) - - instance_node.setParms(parms) - instance_node.parm("trange").set(1) - - # Explicitly set f1 and f2 to frame start. - # Which forces the rop node to export one frame. - instance_node.parmTuple('f').deleteAllKeyframes() - fstart = int(hou.hscriptExpression("$FSTART")) - instance_node.parmTuple('f').set((fstart, fstart, 1)) - - # Lock any parameters in this list - to_lock = ["prim_to_detail_pattern"] - self.lock_parameters(instance_node, to_lock) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.sopNodeTypeCategory() - ] - - def get_obj_output(self, obj_node): - """Find output node with the smallest 'outputidx'.""" - - outputs = obj_node.subnetOutputs() - - # if obj_node is empty - if not outputs: - return - - # if obj_node has one output child whether its - # sop output node or a node with the render flag - elif len(outputs) == 1: - return outputs[0] - - # if there are more than one, then it have multiple output nodes - # return the one with the minimum 'outputidx' - else: - return min(outputs, - key=lambda node: node.evalParm('outputidx')) - - def get_instance_attr_defs(self): - return [ - BoolDef("farm", - label="Submitting to Farm", - default=False) - ] - - def get_pre_create_attr_defs(self): - attrs = super().get_pre_create_attr_defs() - # Use same attributes as for instance attributes - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_pointcache.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_pointcache.py deleted file mode 100644 index 6a63659053..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_pointcache.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating pointcache alembics.""" -from ayon_houdini.api import plugin -from ayon_core.lib import BoolDef - -import hou - - - -class CreatePointCache(plugin.HoudiniCreator): - """Alembic ROP to pointcache""" - identifier = "io.openpype.creators.houdini.pointcache" - label = "PointCache (Abc)" - product_type = "pointcache" - icon = "gears" - - def create(self, product_name, instance_data, pre_create_data): - instance_data.pop("active", None) - instance_data.update({"node_type": "alembic"}) - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - creator_attributes["farm"] = pre_create_data["farm"] - - instance = super(CreatePointCache, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - parms = { - "use_sop_path": True, - "build_from_path": True, - "path_attrib": "path", - "prim_to_detail_pattern": "cbId", - "format": 2, - "facesets": 0, - "filename": hou.text.expandString( - "$HIP/pyblish/{}.abc".format(product_name)) - } - - if self.selected_nodes: - selected_node = self.selected_nodes[0] - - # Although Houdini allows ObjNode path on 
`sop_path` for the - # the ROP node we prefer it set to the SopNode path explicitly - - # Allow sop level paths (e.g. /obj/geo1/box1) - if isinstance(selected_node, hou.SopNode): - parms["sop_path"] = selected_node.path() - self.log.debug( - "Valid SopNode selection, 'SOP Path' in ROP will be set to '%s'." - % selected_node.path() - ) - - # Allow object level paths to Geometry nodes (e.g. /obj/geo1) - # but do not allow other object level nodes types like cameras, etc. - elif isinstance(selected_node, hou.ObjNode) and \ - selected_node.type().name() in ["geo"]: - - # get the output node with the minimum - # 'outputidx' or the node with display flag - sop_path = self.get_obj_output(selected_node) - - if sop_path: - parms["sop_path"] = sop_path.path() - self.log.debug( - "Valid ObjNode selection, 'SOP Path' in ROP will be set to " - "the child path '%s'." - % sop_path.path() - ) - - if not parms.get("sop_path", None): - self.log.debug( - "Selection isn't valid. 'SOP Path' in ROP will be empty." - ) - else: - self.log.debug( - "No Selection. 'SOP Path' in ROP will be empty." - ) - - instance_node.setParms(parms) - instance_node.parm("trange").set(1) - - # Lock any parameters in this list - to_lock = ["prim_to_detail_pattern"] - self.lock_parameters(instance_node, to_lock) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.sopNodeTypeCategory() - ] - - def get_obj_output(self, obj_node): - """Find output node with the smallest 'outputidx'.""" - - outputs = obj_node.subnetOutputs() - - # if obj_node is empty - if not outputs: - return - - # if obj_node has one output child whether its - # sop output node or a node with the render flag - elif len(outputs) == 1: - return outputs[0] - - # if there are more than one, then it have multiple output nodes - # return the one with the minimum 'outputidx' - else: - return min(outputs, - key=lambda node: node.evalParm('outputidx')) - - def get_instance_attr_defs(self): - return [ - BoolDef("farm", - label="Submitting to Farm", - default=False) - ] - - def get_pre_create_attr_defs(self): - attrs = super().get_pre_create_attr_defs() - # Use same attributes as for instance attributes - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_proxy.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_proxy.py deleted file mode 100644 index 0e3eb03ddd..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_proxy.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating Redshift proxies.""" -from ayon_houdini.api import plugin -import hou -from ayon_core.lib import BoolDef - - -class CreateRedshiftProxy(plugin.HoudiniCreator): - """Redshift Proxy""" - identifier = "io.openpype.creators.houdini.redshiftproxy" - label = "Redshift Proxy" - product_type = "redshiftproxy" - icon = "magic" - - def create(self, product_name, instance_data, pre_create_data): - - # Remove the active, we are checking the bypass flag of the nodes - instance_data.pop("active", None) - - # Redshift provides a `Redshift_Proxy_Output` node type which shows - # a limited set of parameters by default and is set to extract a - # Redshift Proxy. However when "imprinting" extra parameters needed - # for OpenPype it starts showing all its parameters again. It's unclear - # why this happens. 
- # TODO: Somehow enforce so that it only shows the original limited - # attributes of the Redshift_Proxy_Output node type - instance_data.update({"node_type": "Redshift_Proxy_Output"}) - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - creator_attributes["farm"] = pre_create_data["farm"] - - instance = super(CreateRedshiftProxy, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - parms = { - "RS_archive_file": '$HIP/pyblish/{}.$F4.rs'.format(product_name), - } - - if self.selected_nodes: - parms["RS_archive_sopPath"] = self.selected_nodes[0].path() - - instance_node.setParms(parms) - - # Lock some Avalon attributes - to_lock = ["productType", "id", "prim_to_detail_pattern"] - self.lock_parameters(instance_node, to_lock) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.sopNodeTypeCategory() - ] - - def get_instance_attr_defs(self): - return [ - BoolDef("farm", - label="Submitting to Farm", - default=False) - ] - - def get_pre_create_attr_defs(self): - attrs = super().get_pre_create_attr_defs() - # Use same attributes as for instance attributes - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_rop.py deleted file mode 100644 index d63e584692..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_redshift_rop.py +++ /dev/null @@ -1,172 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin to create Redshift ROP.""" -import hou # noqa - -from ayon_core.pipeline import CreatorError -from ayon_houdini.api import plugin -from ayon_core.lib import EnumDef, BoolDef - - -class CreateRedshiftROP(plugin.HoudiniCreator): - """Redshift ROP""" - - identifier = "io.openpype.creators.houdini.redshift_rop" - label = "Redshift ROP" - product_type = "redshift_rop" - icon = "magic" - ext = "exr" - multi_layered_mode = "No Multi-Layered EXR File" - - # Default render target - render_target = "farm_split" - - def create(self, product_name, instance_data, pre_create_data): - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - for key in ["render_target", "review"]: - if key in pre_create_data: - creator_attributes[key] = pre_create_data[key] - - instance_data.pop("active", None) - instance_data.update({"node_type": "Redshift_ROP"}) - # Add chunk size attribute - instance_data["chunkSize"] = 10 - - instance = super(CreateRedshiftROP, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - basename = instance_node.name() - - # Also create the linked Redshift IPR Rop - try: - ipr_rop = instance_node.parent().createNode( - "Redshift_IPR", node_name=f"{basename}_IPR" - ) - except hou.OperationFailed as e: - raise CreatorError( - ( - "Cannot create Redshift node. Is Redshift " - "installed and enabled?" 
- ) - ) from e - - # Move it to directly under the Redshift ROP - ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1)) - - # Set the linked rop to the Redshift ROP - ipr_rop.parm("linked_rop").set(instance_node.path()) - ext = pre_create_data.get("image_format") - multi_layered_mode = pre_create_data.get("multi_layered_mode") - - ext_format_index = {"exr": 0, "tif": 1, "jpg": 2, "png": 3} - multilayer_mode_index = {"No Multi-Layered EXR File": "1", - "Full Multi-Layered EXR File": "2" } - - filepath = "{renders_dir}{product_name}/{product_name}.{fmt}".format( - renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), - product_name=product_name, - fmt="$AOV.$F4.{ext}".format(ext=ext) - ) - - if multilayer_mode_index[multi_layered_mode] == "1": - multipart = False - - elif multilayer_mode_index[multi_layered_mode] == "2": - multipart = True - - parms = { - # Render frame range - "trange": 1, - # Redshift ROP settings - "RS_outputFileNamePrefix": filepath, - "RS_outputBeautyAOVSuffix": "beauty", - "RS_outputFileFormat": ext_format_index[ext], - } - if ext == "exr": - parms["RS_outputMultilayerMode"] = multilayer_mode_index[multi_layered_mode] - parms["RS_aovMultipart"] = multipart - - if self.selected_nodes: - # set up the render camera from the selected node - camera = None - for node in self.selected_nodes: - if node.type().name() == "cam": - camera = node.path() - parms["RS_renderCamera"] = camera or "" - - export_dir = hou.text.expandString("$HIP/pyblish/rs/") - rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs" - parms["RS_archive_file"] = rs_filepath - - if pre_create_data.get("render_target") == "farm_split": - parms["RS_archive_enable"] = 1 - - instance_node.setParms(parms) - - # Lock some Avalon attributes - to_lock = ["productType", "id"] - self.lock_parameters(instance_node, to_lock) - - def remove_instances(self, instances): - for instance in instances: - node = instance.data.get("instance_node") - - ipr_node = hou.node(f"{node}_IPR") - if ipr_node: - ipr_node.destroy() - - return super(CreateRedshiftROP, self).remove_instances(instances) - - def get_instance_attr_defs(self): - """get instance attribute definitions. - - Attributes defined in this method are exposed in - publish tab in the publisher UI. 
- """ - - render_target_items = { - "local": "Local machine rendering", - "local_no_render": "Use existing frames (local)", - "farm": "Farm Rendering", - "farm_split": "Farm Rendering - Split export & render jobs", - } - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - EnumDef("render_target", - items=render_target_items, - label="Render target", - default=self.render_target) - ] - - def get_pre_create_attr_defs(self): - - image_format_enum = [ - "exr", "tif", "jpg", "png", - ] - - multi_layered_mode = [ - "No Multi-Layered EXR File", - "Full Multi-Layered EXR File" - ] - - attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs() - attrs += [ - EnumDef("image_format", - image_format_enum, - default=self.ext, - label="Image Format Options"), - EnumDef("multi_layered_mode", - multi_layered_mode, - default=self.multi_layered_mode, - label="Multi-Layered EXR"), - ] - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_review.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_review.py deleted file mode 100644 index b27264f400..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_review.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating openGL reviews.""" -from ayon_houdini.api import lib, plugin -from ayon_core.lib import EnumDef, BoolDef, NumberDef - -import os -import hou - - -class CreateReview(plugin.HoudiniCreator): - """Review with OpenGL ROP""" - - identifier = "io.openpype.creators.houdini.review" - label = "Review" - product_type = "review" - icon = "video-camera" - review_color_space = "" - - def apply_settings(self, project_settings): - super(CreateReview, self).apply_settings(project_settings) - # workfile settings added in '0.2.13' - color_settings = project_settings["houdini"]["imageio"].get( - "workfile", {} - ) - if color_settings.get("enabled"): - self.review_color_space = color_settings.get("review_color_space") - - def create(self, product_name, instance_data, pre_create_data): - - instance_data.pop("active", None) - instance_data.update({"node_type": "opengl"}) - instance_data["imageFormat"] = pre_create_data.get("imageFormat") - instance_data["keepImages"] = pre_create_data.get("keepImages") - - instance = super(CreateReview, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - frame_range = hou.playbar.frameRange() - - filepath = "{root}/{product_name}/{product_name}.$F4.{ext}".format( - root=hou.text.expandString("$HIP/pyblish"), - # keep dynamic link to product name - product_name="`chs(\"AYON_productName\")`", - ext=pre_create_data.get("image_format") or "png" - ) - - parms = { - "picture": filepath, - - "trange": 1, - - # Unlike many other ROP nodes the opengl node does not default - # to expression of $FSTART and $FEND so we preserve that behavior - # but do set the range to the frame range of the playbar - "f1": frame_range[0], - "f2": frame_range[1], - } - - override_resolution = pre_create_data.get("override_resolution") - if override_resolution: - parms.update({ - "tres": override_resolution, - "res1": pre_create_data.get("resx"), - "res2": pre_create_data.get("resy"), - "aspect": pre_create_data.get("aspect"), - }) - - if self.selected_nodes: - # The first camera found in selection we will use as camera - # Other node types we set in force objects - camera = None - 
force_objects = [] - for node in self.selected_nodes: - path = node.path() - if node.type().name() == "cam": - if camera: - continue - camera = path - else: - force_objects.append(path) - - if not camera: - self.log.warning("No camera found in selection.") - - parms.update({ - "camera": camera or "", - "scenepath": "/obj", - "forceobjects": " ".join(force_objects), - "vobjects": "" # clear candidate objects from '*' value - }) - - instance_node.setParms(parms) - - # Set OCIO Colorspace to the default colorspace - # if there's OCIO - if os.getenv("OCIO"): - # Fall to the default value if cls.review_color_space is empty. - if not self.review_color_space: - # cls.review_color_space is an empty string - # when the imageio/workfile setting is disabled or - # when the Review colorspace setting is empty. - from ayon_houdini.api.colorspace import get_default_display_view_colorspace # noqa - self.review_color_space = get_default_display_view_colorspace() - - lib.set_review_color_space(instance_node, - self.review_color_space, - self.log) - - to_lock = ["id", "productType"] - - self.lock_parameters(instance_node, to_lock) - - def get_pre_create_attr_defs(self): - attrs = super(CreateReview, self).get_pre_create_attr_defs() - - image_format_enum = [ - "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", - "rad", "rat", "rta", "sgi", "tga", "tif", - ] - - return attrs + [ - BoolDef("keepImages", - label="Keep Image Sequences", - default=False), - EnumDef("imageFormat", - image_format_enum, - default="png", - label="Image Format Options"), - BoolDef("override_resolution", - label="Override resolution", - tooltip="When disabled the resolution set on the camera " - "is used instead.", - default=True), - NumberDef("resx", - label="Resolution Width", - default=1280, - minimum=2, - decimals=0), - NumberDef("resy", - label="Resolution Height", - default=720, - minimum=2, - decimals=0), - NumberDef("aspect", - label="Aspect Ratio", - default=1.0, - minimum=0.0001, - decimals=3) - ] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_staticmesh.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_staticmesh.py deleted file mode 100644 index 17b646040c..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_staticmesh.py +++ /dev/null @@ -1,155 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator for Unreal Static Meshes.""" -from ayon_houdini.api import plugin -from ayon_core.lib import BoolDef, EnumDef - -import hou - - -class CreateStaticMesh(plugin.HoudiniCreator): - """Static Meshes as FBX. 
""" - - identifier = "io.openpype.creators.houdini.staticmesh.fbx" - label = "Static Mesh (FBX)" - product_type = "staticMesh" - icon = "fa5s.cubes" - - default_variants = ["Main"] - - def create(self, product_name, instance_data, pre_create_data): - - instance_data.update({"node_type": "filmboxfbx"}) - - instance = super(CreateStaticMesh, self).create( - product_name, - instance_data, - pre_create_data) - - # get the created rop node - instance_node = hou.node(instance.get("instance_node")) - - # prepare parms - output_path = hou.text.expandString( - "$HIP/pyblish/{}.fbx".format(product_name) - ) - - parms = { - "startnode": self.get_selection(), - "sopoutput": output_path, - # vertex cache format - "vcformat": pre_create_data.get("vcformat"), - "convertunits": pre_create_data.get("convertunits"), - # set render range to use frame range start-end frame - "trange": 1, - "createsubnetroot": pre_create_data.get("createsubnetroot") - } - - # set parms - instance_node.setParms(parms) - - # Lock any parameters in this list - to_lock = ["productType", "id"] - self.lock_parameters(instance_node, to_lock) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.objNodeTypeCategory(), - hou.sopNodeTypeCategory() - ] - - def get_pre_create_attr_defs(self): - """Add settings for users. """ - - attrs = super(CreateStaticMesh, self).get_pre_create_attr_defs() - createsubnetroot = BoolDef("createsubnetroot", - tooltip="Create an extra root for the " - "Export node when it's a " - "subnetwork. This causes the " - "exporting subnetwork node to be " - "represented in the FBX file.", - default=False, - label="Create Root for Subnet") - vcformat = EnumDef("vcformat", - items={ - 0: "Maya Compatible (MC)", - 1: "3DS MAX Compatible (PC2)" - }, - default=0, - label="Vertex Cache Format") - convert_units = BoolDef("convertunits", - tooltip="When on, the FBX is converted" - "from the current Houdini " - "system units to the native " - "FBX unit of centimeters.", - default=False, - label="Convert Units") - - return attrs + [createsubnetroot, vcformat, convert_units] - - def get_dynamic_data( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ): - """ - The default prodcut name templates for Unreal include {asset} and thus - we should pass that along as dynamic data. - """ - dynamic_data = super(CreateStaticMesh, self).get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - dynamic_data["asset"] = folder_entity["name"] - return dynamic_data - - def get_selection(self): - """Selection Logic. - - how self.selected_nodes should be processed to get - the desirable node from selection. - - Returns: - str : node path - """ - - selection = "" - - if self.selected_nodes: - selected_node = self.selected_nodes[0] - - # Accept sop level nodes (e.g. /obj/geo1/box1) - if isinstance(selected_node, hou.SopNode): - selection = selected_node.path() - self.log.debug( - "Valid SopNode selection, 'Export' in filmboxfbx" - " will be set to '%s'.", selected_node - ) - - # Accept object level nodes (e.g. /obj/geo1) - elif isinstance(selected_node, hou.ObjNode): - selection = selected_node.path() - self.log.debug( - "Valid ObjNode selection, 'Export' in filmboxfbx " - "will be set to the child path '%s'.", selection - ) - - else: - self.log.debug( - "Selection isn't valid. 'Export' in " - "filmboxfbx will be empty." - ) - else: - self.log.debug( - "No Selection. 'Export' in filmboxfbx will be empty." 
- ) - - return selection diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_usd.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_usd.py deleted file mode 100644 index b6c0aa8895..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_usd.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating USDs.""" -from ayon_houdini.api import plugin - -import hou - - -class CreateUSD(plugin.HoudiniCreator): - """Universal Scene Description""" - identifier = "io.openpype.creators.houdini.usd" - label = "USD" - product_type = "usd" - icon = "cubes" - enabled = False - description = "Create USD" - - def create(self, product_name, instance_data, pre_create_data): - - instance_data.pop("active", None) - instance_data.update({"node_type": "usd"}) - - instance = super(CreateUSD, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - parms = { - "lopoutput": "$HIP/pyblish/{}.usd".format(product_name), - "enableoutputprocessor_simplerelativepaths": False, - } - - if self.selected_nodes: - parms["loppath"] = self.selected_nodes[0].path() - - instance_node.setParms(parms) - - # Lock any parameters in this list - to_lock = [ - "fileperframe", - # Lock some Avalon attributes - "productType", - "id", - ] - self.lock_parameters(instance_node, to_lock) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.lopNodeTypeCategory() - ] - - def get_publish_families(self): - return ["usd", "usdrop"] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_usd_look.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_usd_look.py deleted file mode 100644 index 58a7aa77be..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_usd_look.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating USD looks with textures.""" -import inspect - -from ayon_houdini.api import plugin - -import hou - - -class CreateUSDLook(plugin.HoudiniCreator): - """Universal Scene Description Look""" - - identifier = "io.openpype.creators.houdini.usd.look" - label = "Look" - product_type = "look" - icon = "paint-brush" - enabled = True - description = "Create USD Look" - - def create(self, product_name, instance_data, pre_create_data): - - instance_data.pop("active", None) - instance_data.update({"node_type": "usd"}) - - instance = super(CreateUSDLook, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - parms = { - "lopoutput": "$HIP/pyblish/{}.usd".format(product_name), - "enableoutputprocessor_simplerelativepaths": False, - - # Set the 'default prim' by default to the folder name being - # published to - "defaultprim": '/`strsplit(chs("folderPath"), "/", -1)`', - } - - if self.selected_nodes: - parms["loppath"] = self.selected_nodes[0].path() - - instance_node.setParms(parms) - - # Lock any parameters in this list - to_lock = [ - "fileperframe", - # Lock some Avalon attributes - "family", - "id", - ] - self.lock_parameters(instance_node, to_lock) - - def get_detail_description(self): - return inspect.cleandoc("""Publish looks in USD data. - - From the Houdini Solaris context (LOPs) this will publish the look for - an asset as a USD file with the used textures. 
- - Any assets used by the look will be relatively remapped to the USD - file and integrated into the publish as `resources`. - - """) - - def get_network_categories(self): - return [ - hou.ropNodeTypeCategory(), - hou.lopNodeTypeCategory() - ] - - def get_publish_families(self): - return ["usd", "look", "usdrop"] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_usdrender.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_usdrender.py deleted file mode 100644 index 9c7bc0fd3e..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_usdrender.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating USD renders.""" -from ayon_houdini.api import plugin -from ayon_core.lib import BoolDef, EnumDef - -import hou - - -def get_usd_rop_renderers(): - """Return all available renderers supported by USD Render ROP. - Note that the USD Render ROP does not include all Hydra renderers, because - it excludes the GL ones like Houdini GL and Storm. USD Render ROP only - lists the renderers that have `aovsupport` enabled. Also see: - https://www.sidefx.com/docs/houdini/nodes/out/usdrender.html#list - Returns: - dict[str, str]: Plug-in name to display name mapping. - """ - return { - info["name"]: info["displayname"] for info - in hou.lop.availableRendererInfo() if info.get('aovsupport') - } - - -class CreateUSDRender(plugin.HoudiniCreator): - """USD Render ROP in /stage""" - identifier = "io.openpype.creators.houdini.usdrender" - label = "USD Render" - product_type = "usdrender" - icon = "magic" - description = "Create USD Render" - - default_renderer = "Karma CPU" - # Default render target - render_target = "farm_split" - - def create(self, product_name, instance_data, pre_create_data): - - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - - for key in ["render_target", "review"]: - if key in pre_create_data: - creator_attributes[key] = pre_create_data[key] - - # TODO: Support creation in /stage if wanted by user - # pre_create_data["parent"] = "/stage" - - # Remove the active, we are checking the bypass flag of the nodes - instance_data.pop("active", None) - instance_data.update({"node_type": "usdrender"}) - - # Override default value for the Export Chunk Size because if the - # a single USD file is written as opposed to per frame we want to - # ensure only one machine picks up that sequence - # TODO: Probably better to change the default somehow for just this - # Creator on the HoudiniSubmitDeadline plug-in, if possible? - ( - instance_data - .setdefault("publish_attributes", {}) - .setdefault("HoudiniSubmitDeadlineUsdRender", {})["export_chunk"] - ) = 1000 - - instance = super(CreateUSDRender, self).create( - product_name, - instance_data, - pre_create_data) - - instance_node = hou.node(instance.get("instance_node")) - - parms = { - # Render frame range - "trange": 1 - } - if self.selected_nodes: - parms["loppath"] = self.selected_nodes[0].path() - - if pre_create_data.get("render_target") == "farm_split": - # Do not trigger the husk render, only trigger the USD export - parms["runcommand"] = False - # By default, the render ROP writes out the render file to a - # temporary directory. But if we want to render the USD file on - # the farm we instead want it in the project available - # to all machines. So we ensure all USD files are written to a - # folder to our choice. 
The - # `__render__.usd` (default name, defined by `lopoutput` parm) - # in that folder will then be the file to render. - parms["savetodirectory_directory"] = "$HIP/render/usd/$HIPNAME/$OS" - parms["lopoutput"] = "__render__.usd" - parms["allframesatonce"] = True - - # By default strip any Houdini custom data from the output file - # since the renderer doesn't care about it - parms["clearhoudinicustomdata"] = True - - # Use the first selected LOP node if "Use Selection" is enabled - # and the user had any nodes selected - if self.selected_nodes: - for node in self.selected_nodes: - if node.type().category() == hou.lopNodeTypeCategory(): - parms["loppath"] = node.path() - break - - # Set default renderer if defined in settings - if pre_create_data.get("renderer"): - parms["renderer"] = pre_create_data.get("renderer") - - instance_node.setParms(parms) - - # Lock some AYON attributes - to_lock = ["productType", "id"] - self.lock_parameters(instance_node, to_lock) - - def get_instance_attr_defs(self): - """get instance attribute definitions. - Attributes defined in this method are exposed in - publish tab in the publisher UI. - """ - - render_target_items = { - "local": "Local machine rendering", - "local_no_render": "Use existing frames (local)", - "farm": "Farm Rendering", - "farm_split": "Farm Rendering - Split export & render jobs", - } - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - EnumDef("render_target", - items=render_target_items, - label="Render target", - default=self.render_target) - ] - - def get_pre_create_attr_defs(self): - - # Retrieve available renderers and convert default renderer to - # plug-in name if settings provided the display name - renderer_plugin_to_display_name = get_usd_rop_renderers() - default_renderer = self.default_renderer or None - if ( - default_renderer - and default_renderer not in renderer_plugin_to_display_name - ): - # Map default renderer display name to plugin name - for name, display_name in renderer_plugin_to_display_name.items(): - if default_renderer == display_name: - default_renderer = name - break - else: - # Default renderer not found in available renderers - default_renderer = None - - attrs = super(CreateUSDRender, self).get_pre_create_attr_defs() - attrs += [ - EnumDef("renderer", - label="Renderer", - default=default_renderer, - items=renderer_plugin_to_display_name), - ] - - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_vbd_cache.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_vbd_cache.py deleted file mode 100644 index e8c0920ec8..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_vbd_cache.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating VDB Caches.""" -from ayon_houdini.api import plugin -from ayon_core.lib import BoolDef - -import hou - - -class CreateVDBCache(plugin.HoudiniCreator): - """OpenVDB from Geometry ROP""" - identifier = "io.openpype.creators.houdini.vdbcache" - name = "vbdcache" - label = "VDB Cache" - product_type = "vdbcache" - icon = "cloud" - - def create(self, product_name, instance_data, pre_create_data): - import hou - - instance_data.pop("active", None) - instance_data.update({"node_type": "geometry"}) - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - creator_attributes["farm"] = pre_create_data["farm"] - instance = super(CreateVDBCache, self).create( - 
product_name,
-            instance_data,
-            pre_create_data)
-
-        instance_node = hou.node(instance.get("instance_node"))
-        file_path = "{}{}".format(
-            hou.text.expandString("$HIP/pyblish/"),
-            "{}.$F4.vdb".format(product_name))
-        parms = {
-            "sopoutput": file_path,
-            "initsim": True,
-            "trange": 1
-        }
-
-        if self.selected_nodes:
-            parms["soppath"] = self.get_sop_node_path(self.selected_nodes[0])
-
-        instance_node.setParms(parms)
-
-    def get_network_categories(self):
-        return [
-            hou.ropNodeTypeCategory(),
-            hou.objNodeTypeCategory(),
-            hou.sopNodeTypeCategory()
-        ]
-
-    def get_sop_node_path(self, selected_node):
-        """Get the SOP Path of the selected node.
-
-        Although Houdini allows an ObjNode path on `sop_path` for
-        the ROP node, we prefer to set it to the SopNode path explicitly.
-        """
-
-        # Allow sop level paths (e.g. /obj/geo1/box1)
-        if isinstance(selected_node, hou.SopNode):
-            self.log.debug(
-                "Valid SopNode selection, 'SOP Path' in ROP will"
-                " be set to '%s'.", selected_node.path()
-            )
-            return selected_node.path()
-
-        # Allow object level paths to Geometry nodes (e.g. /obj/geo1)
-        # but do not allow other object level node types like cameras, etc.
-        elif isinstance(selected_node, hou.ObjNode) and \
-                selected_node.type().name() == "geo":
-
-            # Try to find output node.
-            sop_node = self.get_obj_output(selected_node)
-            if sop_node:
-                self.log.debug(
-                    "Valid ObjNode selection, 'SOP Path' in ROP will "
-                    "be set to the child path '%s'.", sop_node.path()
-                )
-                return sop_node.path()
-
-        self.log.debug(
-            "Selection isn't valid. 'SOP Path' in ROP will be empty."
-        )
-        return ""
-
-    def get_obj_output(self, obj_node):
-        """Try to find the output node.
-
-        If any output nodes are present, return the output node with
-        the minimum 'outputidx'.
-        If no output nodes are present, return the node with the display flag.
-        If no nodes are present at all, return None.
-        """
-
-        outputs = obj_node.subnetOutputs()
-
-        # if obj_node is empty
-        if not outputs:
-            return
-
-        # if obj_node has a single output child, whether it's a
-        # sop output node or a node with the render flag
-        elif len(outputs) == 1:
-            return outputs[0]
-
-        # if there is more than one output node,
-        # return the one with the minimum 'outputidx'
-        else:
-            return min(outputs,
-                       key=lambda node: node.evalParm('outputidx'))
-
-    def get_instance_attr_defs(self):
-        return [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=False)
-        ]
-
-    def get_pre_create_attr_defs(self):
-        attrs = super().get_pre_create_attr_defs()
-        # Use same attributes as for instance attributes
-        return attrs + self.get_instance_attr_defs()
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_vray_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_vray_rop.py
deleted file mode 100644
index d15ee23825..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_vray_rop.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Creator plugin to create VRay ROP."""
-import hou
-
-from ayon_houdini.api import plugin
-from ayon_core.pipeline import CreatorError
-from ayon_core.lib import EnumDef, BoolDef
-
-
-class CreateVrayROP(plugin.HoudiniCreator):
-    """VRay ROP"""
-
-    identifier = "io.openpype.creators.houdini.vray_rop"
-    label = "VRay ROP"
-    product_type = "vray_rop"
-    icon = "magic"
-    ext = "exr"
-
-    # Default render target
-    render_target = "farm_split"
-
-    def create(self, product_name, instance_data, pre_create_data):
-        # Transfer settings from pre create to instance
-        creator_attributes = instance_data.setdefault(
-            "creator_attributes", dict())
-        for key in ["render_target", "review"]:
-            if key in pre_create_data:
-                creator_attributes[key] = pre_create_data[key]
-
-        instance_data.pop("active", None)
-        instance_data.update({"node_type": "vray_renderer"})
-        # Add chunk size attribute
-        instance_data["chunkSize"] = 10
-
-        instance = super(CreateVrayROP, self).create(
-            product_name,
-            instance_data,
-            pre_create_data)
-
-        instance_node = hou.node(instance.get("instance_node"))
-
-        # Add IPR for Vray
-        basename = instance_node.name()
-        try:
-            ipr_rop = instance_node.parent().createNode(
-                "vray", node_name=basename + "_IPR"
-            )
-        except hou.OperationFailed:
-            raise CreatorError(
-                "Cannot create Vray render node. "
-                "Make sure Vray is installed and enabled!"
-            )
-
-        ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1))
-        ipr_rop.parm("rop").set(instance_node.path())
-
-        parms = {
-            "trange": 1,
-            "SettingsEXR_bits_per_channel": "16"  # half precision
-        }
-
-        if pre_create_data.get("render_target") == "farm_split":
-            scene_filepath = \
-                "{export_dir}{product_name}/{product_name}.$F4.vrscene".format(
-                    export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
-                    product_name=product_name,
-                )
-            # Setting render_export_mode to "2" because that's for
-            # "Export only" ("1" is for "Export & Render")
-            parms["render_export_mode"] = "2"
-            parms["render_export_filepath"] = scene_filepath
-
-        if self.selected_nodes:
-            # set up the render camera from the selected node
-            camera = None
-            for node in self.selected_nodes:
-                if node.type().name() == "cam":
-                    camera = node.path()
-            parms.update({
-                "render_camera": camera or ""
-            })
-
-        # Enable render element
-        ext = pre_create_data.get("image_format")
-        instance_data["RenderElement"] = pre_create_data.get("render_element_enabled")  # noqa
-        if pre_create_data.get("render_element_enabled", True):
-            # Vray has its own tag for AOV file output
-            filepath = "{renders_dir}{product_name}/{product_name}.{fmt}".format(
-                renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
-                product_name=product_name,
-                fmt="${aov}.$F4.{ext}".format(aov="AOV",
-                                              ext=ext)
-            )
-            filepath = "{}{}".format(
-                hou.text.expandString("$HIP/pyblish/renders/"),
-                "{}/{}.${}.$F4.{}".format(product_name,
-                                          product_name,
-                                          "AOV",
-                                          ext)
-            )
-            re_rop = instance_node.parent().createNode(
-                "vray_render_channels",
-                node_name=basename + "_render_element"
-            )
-            # move the render element node next to the vray renderer node
-            re_rop.setPosition(instance_node.position() + hou.Vector2(0, 1))
-            re_path = re_rop.path()
-            parms.update({
-                "use_render_channels": 1,
-                "SettingsOutput_img_file_path": filepath,
-                "render_network_render_channels": re_path
-            })
-
-        else:
-            filepath = "{renders_dir}{product_name}/{product_name}.{fmt}".format(
-                renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
-                product_name=product_name,
-                fmt="$F4.{ext}".format(ext=ext)
-            )
-            parms.update({
-                "use_render_channels": 0,
-                "SettingsOutput_img_file_path": filepath
-            })
-
-        custom_res = pre_create_data.get("override_resolution")
-        if custom_res:
-            parms.update({"override_camerares": 1})
-
-        instance_node.setParms(parms)
-
-        # lock parameters from AVALON
-        to_lock = ["productType", "id"]
-        self.lock_parameters(instance_node, to_lock)
-
-    def remove_instances(self, instances):
-        for instance in instances:
-            node = instance.data.get("instance_node")
-            # also remove the extra render nodes created by plugins
-            # such as vray and redshift
-            ipr_node = hou.node("{}{}".format(node, "_IPR"))
-            if ipr_node:
ipr_node.destroy() - re_node = hou.node("{}{}".format(node, - "_render_element")) - if re_node: - re_node.destroy() - - return super(CreateVrayROP, self).remove_instances(instances) - - def get_instance_attr_defs(self): - """get instance attribute definitions. - - Attributes defined in this method are exposed in - publish tab in the publisher UI. - """ - - - render_target_items = { - "local": "Local machine rendering", - "local_no_render": "Use existing frames (local)", - "farm": "Farm Rendering", - "farm_split": "Farm Rendering - Split export & render jobs", - } - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - EnumDef("render_target", - items=render_target_items, - label="Render target", - default=self.render_target) - ] - - def get_pre_create_attr_defs(self): - image_format_enum = [ - "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", - "rad", "rat", "rta", "sgi", "tga", "tif", - ] - - attrs = super(CreateVrayROP, self).get_pre_create_attr_defs() - - attrs += [ - EnumDef("image_format", - image_format_enum, - default=self.ext, - label="Image Format Options"), - BoolDef("override_resolution", - label="Override Camera Resolution", - tooltip="Override the current camera " - "resolution, recommended for IPR.", - default=False), - BoolDef("render_element_enabled", - label="Render Element", - tooltip="Create Render Element Node " - "if enabled", - default=False) - ] - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/create/create_workfile.py b/server_addon/houdini/client/ayon_houdini/plugins/create/create_workfile.py deleted file mode 100644 index babf602855..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/create/create_workfile.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating workfiles.""" -import ayon_api - -from ayon_houdini.api import plugin -from ayon_houdini.api.lib import read, imprint -from ayon_houdini.api.pipeline import CONTEXT_CONTAINER -from ayon_core.pipeline import CreatedInstance, AutoCreator -import hou - - -class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator): - """Workfile auto-creator.""" - identifier = "io.openpype.creators.houdini.workfile" - label = "Workfile" - product_type = "workfile" - icon = "fa5.file" - - default_variant = "Main" - - def create(self): - variant = self.default_variant - current_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), None) - - project_name = self.project_name - folder_path = self.create_context.get_current_folder_path() - task_name = self.create_context.get_current_task_name() - host_name = self.host_name - - if current_instance is None: - current_folder_path = None - else: - current_folder_path = current_instance["folderPath"] - - if current_instance is None: - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": variant, - } - - data.update( - self.get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - current_instance) - ) - self.log.info("Auto-creating workfile instance...") - current_instance = CreatedInstance( - self.product_type, 
product_name, data, self - ) - self._add_instance_to_context(current_instance) - elif ( - current_folder_path != folder_path - or current_instance["task"] != task_name - ): - # Update instance context if is not the same - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - current_instance["folderPath"] = folder_path - current_instance["task"] = task_name - current_instance["productName"] = product_name - - # write workfile information to context container. - op_ctx = hou.node(CONTEXT_CONTAINER) - if not op_ctx: - op_ctx = self.host.create_context_node() - - workfile_data = {"workfile": current_instance.data_to_store()} - imprint(op_ctx, workfile_data) - - def collect_instances(self): - op_ctx = hou.node(CONTEXT_CONTAINER) - instance = read(op_ctx) - if not instance: - return - workfile = instance.get("workfile") - if not workfile: - return - created_instance = CreatedInstance.from_existing( - workfile, self - ) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - op_ctx = hou.node(CONTEXT_CONTAINER) - for created_inst, _changes in update_list: - if created_inst["creator_identifier"] == self.identifier: - workfile_data = {"workfile": created_inst.data_to_store()} - imprint(op_ctx, workfile_data, update=True) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/inventory/set_camera_resolution.py b/server_addon/houdini/client/ayon_houdini/plugins/inventory/set_camera_resolution.py deleted file mode 100644 index e2f8fcfa9b..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/inventory/set_camera_resolution.py +++ /dev/null @@ -1,26 +0,0 @@ -from ayon_core.pipeline import InventoryAction -from ayon_houdini.api.lib import ( - get_camera_from_container, - set_camera_resolution -) -from ayon_core.pipeline.context_tools import get_current_folder_entity - - -class SetCameraResolution(InventoryAction): - - label = "Set Camera Resolution" - icon = "desktop" - color = "orange" - - @staticmethod - def is_compatible(container): - return ( - container.get("loader") == "CameraLoader" - ) - - def process(self, containers): - folder_entity = get_current_folder_entity() - for container in containers: - node = container["node"] - camera = get_camera_from_container(node) - set_camera_resolution(camera, folder_entity) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/actions.py b/server_addon/houdini/client/ayon_houdini/plugins/load/actions.py deleted file mode 100644 index 5fe545ced9..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/actions.py +++ /dev/null @@ -1,83 +0,0 @@ -"""A module containing generic loader actions that will display in the Loader. 
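-
-These actions set the Houdini playbar frame range and playback range from
-the loaded version's frame data, with or without handles.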
- -""" - -from ayon_houdini.api import plugin - - -class SetFrameRangeLoader(plugin.HoudiniLoader): - """Set frame range excluding pre- and post-handles""" - - product_types = { - "animation", - "camera", - "pointcache", - "vdbcache", - "usd", - } - representations = {"abc", "vdb", "usd"} - - label = "Set frame range" - order = 11 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import hou - - version_attributes = context["version"]["attrib"] - - start = version_attributes.get("frameStart") - end = version_attributes.get("frameEnd") - - if start is None or end is None: - print( - "Skipping setting frame range because start or " - "end frame data is missing.." - ) - return - - hou.playbar.setFrameRange(start, end) - hou.playbar.setPlaybackRange(start, end) - - -class SetFrameRangeWithHandlesLoader(plugin.HoudiniLoader): - """Set frame range including pre- and post-handles""" - - product_types = { - "animation", - "camera", - "pointcache", - "vdbcache", - "usd", - } - representations = {"abc", "vdb", "usd"} - - label = "Set frame range (with handles)" - order = 12 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import hou - - version_attributes = context["version"]["attrib"] - - start = version_attributes.get("frameStart") - end = version_attributes.get("frameEnd") - - if start is None or end is None: - print( - "Skipping setting frame range because start or " - "end frame data is missing.." - ) - return - - # Include handles - start -= version_attributes.get("handleStart", 0) - end += version_attributes.get("handleEnd", 0) - - hou.playbar.setFrameRange(start, end) - hou.playbar.setPlaybackRange(start, end) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic.py deleted file mode 100644 index 7db2fe93ed..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic.py +++ /dev/null @@ -1,89 +0,0 @@ -import os -from ayon_core.pipeline import get_representation_path -from ayon_houdini.api import ( - pipeline, - plugin -) - - -class AbcLoader(plugin.HoudiniLoader): - """Load Alembic""" - - product_types = {"model", "animation", "pointcache", "gpuCache"} - label = "Load Alembic" - representations = {"*"} - extensions = {"abc"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - import hou - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - container = obj.createNode("geo", node_name=node_name) - - # Remove the file node, it only loads static meshes - # Houdini 17 has removed the file node from the geo node - file_node = container.node("file1") - if file_node: - file_node.destroy() - - # Create an alembic node (supports animation) - alembic = container.createNode("alembic", node_name=node_name) - alembic.setParms({"fileName": file_path}) - - # Position nodes nicely - container.moveToGoodPosition() - container.layoutChildren() - - nodes = [container, alembic] - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - 
self.__class__.__name__, - suffix="", - ) - - def update(self, container, context): - repre_entity = context["representation"] - node = container["node"] - try: - alembic_node = next( - n for n in node.children() if n.type().name() == "alembic" - ) - except StopIteration: - self.log.error("Could not find node of type `alembic`") - return - - # Update the file path - file_path = get_representation_path(repre_entity) - file_path = file_path.replace("\\", "/") - - alembic_node.setParms({"fileName": file_path}) - - # Update attribute - node.setParms({"representation": repre_entity["id"]}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic_archive.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic_archive.py deleted file mode 100644 index a34a43e48a..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_alembic_archive.py +++ /dev/null @@ -1,81 +0,0 @@ - -import os -from ayon_core.pipeline import get_representation_path -from ayon_houdini.api import ( - pipeline, - plugin -) - - -class AbcArchiveLoader(plugin.HoudiniLoader): - """Load Alembic as full geometry network hierarchy """ - - product_types = {"model", "animation", "pointcache", "gpuCache"} - label = "Load Alembic as Archive" - representations = {"*"} - extensions = {"abc"} - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - import hou - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create an Alembic archive node - node = obj.createNode("alembicarchive", node_name=node_name) - node.moveToGoodPosition() - - # TODO: add FPS of project / folder - node.setParms({"fileName": file_path, - "channelRef": True}) - - # Apply some magic - node.parm("buildHierarchy").pressButton() - node.moveToGoodPosition() - - nodes = [node] - - self[:] = nodes - - return pipeline.containerise(node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="") - - def update(self, container, context): - repre_entity = context["representation"] - node = container["node"] - - # Update the file path - file_path = get_representation_path(repre_entity) - file_path = file_path.replace("\\", "/") - - # Update attributes - node.setParms({"fileName": file_path, - "representation": repre_entity["id"]}) - - # Rebuild - node.parm("buildHierarchy").pressButton() - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_ass.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_ass.py deleted file mode 100644 index 5fd97bc2a6..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_ass.py +++ /dev/null @@ -1,91 +0,0 @@ -import os -import re - -from ayon_core.pipeline import get_representation_path -from ayon_houdini.api import ( - pipeline, - plugin -) - - -class AssLoader(plugin.HoudiniLoader): - """Load .ass with 
Arnold Procedural""" - - product_types = {"ass"} - label = "Load Arnold Procedural" - representations = {"ass"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - import hou - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - procedural = obj.createNode("arnold::procedural", node_name=node_name) - - procedural.setParms( - { - "ar_filename": self.format_path(context["representation"]) - }) - - nodes = [procedural] - self[:] = nodes - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - def update(self, container, context): - # Update the file path - repre_entity = context["representation"] - procedural = container["node"] - procedural.setParms({"ar_filename": self.format_path(repre_entity)}) - - # Update attribute - procedural.setParms({"representation": repre_entity["id"]}) - - def remove(self, container): - node = container["node"] - node.destroy() - - @staticmethod - def format_path(representation): - """Format file path correctly for single ass.* or ass.* sequence. - - Args: - representation (dict): representation to be loaded. - - Returns: - str: Formatted path to be used by the input node. - - """ - path = get_representation_path(representation) - if not os.path.exists(path): - raise RuntimeError("Path does not exist: {}".format(path)) - - is_sequence = bool(representation["context"].get("frame")) - # The path is either a single file or sequence in a folder. - if is_sequence: - dir_path, file_name = os.path.split(path) - path = os.path.join( - dir_path, - re.sub(r"(.*)\.(\d+)\.(ass.*)", "\\1.$F4.\\3", file_name) - ) - - return os.path.normpath(path).replace("\\", "/") - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_asset_lop.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_asset_lop.py deleted file mode 100644 index d9ab438d6d..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_asset_lop.py +++ /dev/null @@ -1,52 +0,0 @@ -from ayon_core.pipeline import load -from ayon_houdini.api.lib import find_active_network - -import hou - - -class LOPLoadAssetLoader(load.LoaderPlugin): - """Load reference/payload into Solaris using AYON `lop_import` LOP""" - - product_types = {"*"} - label = "Load Asset (LOPs)" - representations = ["usd", "abc", "usda", "usdc"] - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create node - network = find_active_network( - category=hou.lopNodeTypeCategory(), - default="/stage" - ) - node = network.createNode("ayon::lop_import", node_name=node_name) - node.moveToGoodPosition() - - # Set representation id - parm = node.parm("representation") - parm.set(context["representation"]["id"]) - parm.pressButton() # trigger callbacks - - nodes = [node] - self[:] = nodes - - def update(self, container, context): - node = container["node"] - - # Set representation id - parm = node.parm("representation") - parm.set(context["representation"]["id"]) - parm.pressButton() # trigger 
callbacks - - def remove(self, container): - node = container["node"] - node.destroy() - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_bgeo.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_bgeo.py deleted file mode 100644 index 7119612cda..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_bgeo.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import re - -from ayon_core.pipeline import get_representation_path -from ayon_houdini.api import ( - pipeline, - plugin -) - - -class BgeoLoader(plugin.HoudiniLoader): - """Load bgeo files to Houdini.""" - - label = "Load bgeo" - product_types = {"model", "pointcache", "bgeo"} - representations = { - "bgeo", "bgeosc", "bgeogz", - "bgeo.sc", "bgeo.gz", "bgeo.lzma", "bgeo.bz2"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - import hou - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - container = obj.createNode("geo", node_name=node_name) - - # Remove the file node, it only loads static meshes - # Houdini 17 has removed the file node from the geo node - file_node = container.node("file1") - if file_node: - file_node.destroy() - - # Explicitly create a file node - path = self.filepath_from_context(context) - file_node = container.createNode("file", node_name=node_name) - file_node.setParms( - {"file": self.format_path(path, context["representation"])}) - - # Set display on last node - file_node.setDisplayFlag(True) - - nodes = [container, file_node] - self[:] = nodes - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - @staticmethod - def format_path(path, representation): - """Format file path correctly for single bgeo or bgeo sequence.""" - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - is_sequence = bool(representation["context"].get("frame")) - # The path is either a single file or sequence in a folder. 
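-        # e.g. a hypothetical "cache.0001.bgeo.sc" becomes "cache.$F4.bgeo.sc"
-        # so Houdini evaluates the frame number per frame.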
- if not is_sequence: - filename = path - else: - filename = re.sub(r"(.*)\.(\d+)\.(bgeo.*)", "\\1.$F4.\\3", path) - - filename = os.path.join(path, filename) - - filename = os.path.normpath(filename) - filename = filename.replace("\\", "/") - - return filename - - def update(self, container, context): - repre_entity = context["representation"] - node = container["node"] - try: - file_node = next( - n for n in node.children() if n.type().name() == "file" - ) - except StopIteration: - self.log.error("Could not find node of type `alembic`") - return - - # Update the file path - file_path = get_representation_path(repre_entity) - file_path = self.format_path(file_path, repre_entity) - - file_node.setParms({"file": file_path}) - - # Update attribute - node.setParms({"representation": repre_entity["id"]}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_camera.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_camera.py deleted file mode 100644 index b597519813..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_camera.py +++ /dev/null @@ -1,212 +0,0 @@ -import hou -from ayon_core.pipeline import get_representation_path - -from ayon_houdini.api import ( - pipeline, - plugin -) -from ayon_houdini.api.lib import ( - set_camera_resolution, - get_camera_from_container -) - - -ARCHIVE_EXPRESSION = ('__import__("_alembic_hom_extensions")' - '.alembicGetCameraDict') - - -def transfer_non_default_values(src, dest, ignore=None): - """Copy parm from src to dest. - - Because the Alembic Archive rebuilds the entire node - hierarchy on triggering "Build Hierarchy" we want to - preserve any local tweaks made by the user on the camera - for ease of use. That could be a background image, a - resolution change or even Redshift camera parameters. - - We try to do so by finding all Parms that exist on both - source and destination node, include only those that both - are not at their default value, they must be visible, - we exclude those that have the special "alembic archive" - channel expression and ignore certain Parm types. - - """ - - ignore_types = { - hou.parmTemplateType.Toggle, - hou.parmTemplateType.Menu, - hou.parmTemplateType.Button, - hou.parmTemplateType.FolderSet, - hou.parmTemplateType.Separator, - hou.parmTemplateType.Label, - } - - src.updateParmStates() - - for parm in src.allParms(): - - if ignore and parm.name() in ignore: - continue - - # If destination parm does not exist, ignore.. - dest_parm = dest.parm(parm.name()) - if not dest_parm: - continue - - # Ignore values that are currently at default - if parm.isAtDefault() and dest_parm.isAtDefault(): - continue - - if not parm.isVisible(): - # Ignore hidden parameters, assume they - # are implementation details - continue - - expression = None - try: - expression = parm.expression() - except hou.OperationFailed: - # No expression present - pass - - if expression is not None and ARCHIVE_EXPRESSION in expression: - # Assume it's part of the automated connections that the - # Alembic Archive makes on loading of the camera and thus we do - # not want to transfer the expression - continue - - # Ignore folders, separators, etc. 
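-        # (e.g. buttons, separators and folder sets carry no value to copy)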
-        if parm.parmTemplate().type() in ignore_types:
-            continue
-
-        print("Preserving attribute: %s" % parm.name())
-        dest_parm.setFromParm(parm)
-
-
-class CameraLoader(plugin.HoudiniLoader):
-    """Load camera from an Alembic file"""
-
-    product_types = {"camera"}
-    label = "Load Camera (abc)"
-    representations = {"abc"}
-    order = -10
-
-    icon = "code-fork"
-    color = "orange"
-
-    def load(self, context, name=None, namespace=None, data=None):
-
-        # Format file name, Houdini only wants forward slashes
-        file_path = self.filepath_from_context(context).replace("\\", "/")
-
-        # Get the root node
-        obj = hou.node("/obj")
-
-        # Define node name
-        namespace = namespace if namespace else context["folder"]["name"]
-        node_name = "{}_{}".format(namespace, name) if namespace else name
-
-        # Create an Alembic Archive node
-        node = self.create_and_connect(obj, "alembicarchive", node_name)
-
-        # TODO: add FPS of project / folder
-        node.setParms({"fileName": file_path, "channelRef": True})
-
-        # Build the node hierarchy from the Alembic archive
-        node.parm("buildHierarchy").pressButton()
-        node.moveToGoodPosition()
-
-        # Keep track of the created node for containerisation
-        nodes = [node]
-
-        camera = get_camera_from_container(node)
-        self._match_maya_render_mask(camera)
-        set_camera_resolution(camera, folder_entity=context["folder"])
-        self[:] = nodes
-
-        return pipeline.containerise(node_name,
-                                     namespace,
-                                     nodes,
-                                     context,
-                                     self.__class__.__name__,
-                                     suffix="")
-
-    def update(self, container, context):
-        repre_entity = context["representation"]
-        node = container["node"]
-
-        # Update the file path
-        file_path = get_representation_path(repre_entity)
-        file_path = file_path.replace("\\", "/")
-
-        # Update attributes
-        node.setParms({"fileName": file_path,
-                       "representation": repre_entity["id"]})
-
-        # Store the cam temporarily next to the Alembic Archive
-        # so that we can preserve parm values the user set on it
-        # after the hierarchy build was triggered.
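-        # Flow: copy the old camera aside, rebuild the hierarchy, copy the
-        # non-default parm values onto the rebuilt camera and then delete
-        # the temporary copy.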
-        old_camera = get_camera_from_container(node)
-        temp_camera = old_camera.copyTo(node.parent())
-
-        # Rebuild
-        node.parm("buildHierarchy").pressButton()
-
-        # Apply values to the new camera
-        new_camera = get_camera_from_container(node)
-        transfer_non_default_values(temp_camera,
-                                    new_camera,
-                                    # The hidden uniform scale attribute
-                                    # gets a default connection to
-                                    # "icon_scale"; just skip that completely
-                                    ignore={"scale"})
-
-        self._match_maya_render_mask(new_camera)
-        set_camera_resolution(new_camera)
-
-        temp_camera.destroy()
-
-    def switch(self, container, context):
-        self.update(container, context)
-
-    def remove(self, container):
-
-        node = container["node"]
-        node.destroy()
-
-    def create_and_connect(self, node, node_type, name=None):
-        """Create a node of the given type inside the given parent node.
-
-        Args:
-            node(hou.Node): parent of the new node
-            node_type(str): name of the node type, e.g. 'alembic'
-            name(str, Optional): name of the node
-
-        Returns:
-            hou.Node
-
-        """
-        if name:
-            new_node = node.createNode(node_type, node_name=name)
-        else:
-            new_node = node.createNode(node_type)
-
-        new_node.moveToGoodPosition()
-        return new_node
-
-    def _match_maya_render_mask(self, camera):
-        """Workaround to match Maya render mask in Houdini"""
-
-        parm = camera.parm("aperture")
-        expression = parm.expression()
-        expression = expression.replace("return ", "aperture = ")
-        expression += """
-# Match maya render mask (logic from Houdini's own FBX importer)
-node = hou.pwd()
-resx = node.evalParm('resx')
-resy = node.evalParm('resy')
-aspect = node.evalParm('aspect')
-aperture *= min(1, (resx / resy * aspect) / 1.5)
-return aperture
-"""
-        parm.setExpression(expression, language=hou.exprLanguage.Python)
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_fbx.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_fbx.py
deleted file mode 100644
index 273ca43bc4..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_fbx.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# -*- coding: utf-8 -*-
-"""FBX loader for Houdini."""
-from ayon_core.pipeline import get_representation_path
-from ayon_houdini.api import (
-    pipeline,
-    plugin
-)
-
-
-class FbxLoader(plugin.HoudiniLoader):
-    """Load FBX files.
-    """
-
-    label = "Load FBX"
-    icon = "code-fork"
-    color = "orange"
-
-    order = -10
-
-    product_types = {"*"}
-    representations = {"*"}
-    extensions = {"fbx"}
-
-    def load(self, context, name=None, namespace=None, data=None):
-
-        # get file path from context
-        file_path = self.filepath_from_context(context)
-        file_path = file_path.replace("\\", "/")
-
-        # get necessary data
-        namespace, node_name = self.get_node_name(context, name, namespace)
-
-        # create load tree
-        nodes = self.create_load_node_tree(file_path, node_name, name)
-
-        self[:] = nodes
-
-        # Call the containerise function, which automates some things for you
-        # like moving created nodes to the AVALON_CONTAINERS subnetwork
-        containerised_nodes = pipeline.containerise(
-            node_name,
-            namespace,
-            nodes,
-            context,
-            self.__class__.__name__,
-            suffix="",
-        )
-
-        return containerised_nodes
-
-    def update(self, container, context):
-        repre_entity = context["representation"]
-        node = container["node"]
-        try:
-            file_node = next(
-                n for n in node.children() if n.type().name() == "file"
-            )
-        except StopIteration:
-            self.log.error("Could not find node of type `file`")
-            return
-
-        # Update the file path from representation
-        file_path = get_representation_path(repre_entity)
-        file_path = file_path.replace("\\", "/")
-
-        file_node.setParms({"file": file_path})
-
-        # Update attribute
-        node.setParms({"representation": repre_entity["id"]})
-
-    def remove(self, container):
-
-        node = container["node"]
-        node.destroy()
-
-    def switch(self, container, context):
-        self.update(container, context)
-
-    def get_node_name(self, context, name=None, namespace=None):
-        """Define node name."""
-
-        if not namespace:
-            namespace = context["folder"]["name"]
-
-        if namespace:
-            node_name = "{}_{}".format(namespace, name)
-        else:
-            node_name = name
-
-        return namespace, node_name
-
-    def create_load_node_tree(self, file_path, node_name, product_name):
-        """Create Load network.
-
-        You can start building your tree at any obj level, but
-        it's much easier to build it at the root obj level.
-
-        Afterwards, your tree will be automatically moved to
-        the '/obj/AVALON_CONTAINERS' subnetwork.
-        """
-        import hou
-
-        # Get the root obj level
-        obj = hou.node("/obj")
-
-        # Create a new obj geo node
-        parent_node = obj.createNode("geo", node_name=node_name)
-
-        # In older Houdini versions,
-        # when creating a new obj geo node, a default file node is
-        # automatically created.
-        # So, we delete it if it exists.
-        file_node = parent_node.node("file1")
-        if file_node:
-            file_node.destroy()
-
-        # Create a new file node
-        file_node = parent_node.createNode("file", node_name=node_name)
-        file_node.setParms({"file": file_path})
-
-        # Create attribute delete
-        attribdelete_name = "attribdelete_{}".format(product_name)
-        attribdelete = parent_node.createNode("attribdelete",
-                                              node_name=attribdelete_name)
-        attribdelete.setParms({"ptdel": "fbx_*"})
-        attribdelete.setInput(0, file_node)
-
-        # Create a Null node
-        null_name = "OUT_{}".format(product_name)
-        null = parent_node.createNode("null", node_name=null_name)
-        null.setInput(0, attribdelete)
-
-        # Ensure display flag is on the file_node input node and not on the OUT
-        # node to optimize "debug" displaying in the viewport.
-        file_node.setDisplayFlag(True)
-
-        # Set new position for children nodes
-        parent_node.layoutChildren()
-
-        # Return all the nodes
-        return [parent_node, file_node, attribdelete, null]
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_filepath.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_filepath.py
deleted file mode 100644
index 2ce9bd7ffb..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_filepath.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import os
-import re
-import hou
-
-from ayon_houdini.api import (
-    pipeline,
-    plugin
-)
-
-
-class FilePathLoader(plugin.HoudiniLoader):
    """Load a managed filepath to a null node.
-
-    This is useful if there is no existing loader for a particular workflow
-    yet. A Houdini artist can load it with the generic filepath loader and
-    then reference the relevant Houdini parm to use the exact value. The
-    benefit is that this filepath will be managed and can be updated as usual.
-
-    """
-
-    label = "Load filepath to node"
-    order = 9
-    icon = "link"
-    color = "white"
-    product_types = {"*"}
-    representations = {"*"}
-
-    def load(self, context, name=None, namespace=None, data=None):
-
-        # Get the root node
-        obj = hou.node("/obj")
-
-        # Define node name
-        namespace = namespace if namespace else context["folder"]["name"]
-        node_name = "{}_{}".format(namespace, name) if namespace else name
-
-        # Create a null node
-        container = obj.createNode("null", node_name=node_name)
-
-        # Destroy any children
-        for node in container.children():
-            node.destroy()
-
-        # Add filepath attribute, set its value as the default value
-        filepath = self.format_path(
-            path=self.filepath_from_context(context),
-            representation=context["representation"]
-        )
-        parm_template_group = container.parmTemplateGroup()
-        attr_folder = hou.FolderParmTemplate("attributes_folder", "Attributes")
-        parm = hou.StringParmTemplate(name="filepath",
-                                      label="Filepath",
-                                      num_components=1,
-                                      default_value=(filepath,))
-        attr_folder.addParmTemplate(parm)
-        parm_template_group.append(attr_folder)
-
-        # Hide some default labels
-        for folder_label in ["Transform", "Render", "Misc", "Redshift OBJ"]:
-            folder = parm_template_group.findFolder(folder_label)
-            if not folder:
-                continue
-            parm_template_group.hideFolder(folder_label, True)
-
-        container.setParmTemplateGroup(parm_template_group)
-
-        container.setDisplayFlag(False)
-        container.setSelectableInViewport(False)
-        container.useXray(False)
-
-        nodes = [container]
-
-        self[:] = nodes
-
-        return pipeline.containerise(
-            node_name,
-            namespace,
-            nodes,
-            context,
-            self.__class__.__name__,
-            suffix="",
-        )
-
-    def update(self, container, context):
-
-        # Update the file path
-        representation_entity = context["representation"]
-        file_path = self.format_path(
-            path=self.filepath_from_context(context),
-            representation=representation_entity
-        )
-
-        node = container["node"]
-        node.setParms({
-            "filepath": file_path,
-            "representation": str(representation_entity["id"])
-        })
-
-        # Update the parameter default value (cosmetics)
-        parm_template_group = node.parmTemplateGroup()
-        parm = parm_template_group.find("filepath")
-        parm.setDefaultValue((file_path,))
-        parm_template_group.replace(parm_template_group.find("filepath"),
-                                    parm)
-        node.setParmTemplateGroup(parm_template_group)
-
-    def switch(self, container, context):
-        self.update(container, context)
-
-    def remove(self, container):
-
-        node = container["node"]
-        node.destroy()
-
-    @staticmethod
-    def format_path(path: str, representation: dict) -> str:
"""Format file path for sequence with $F.""" - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - # The path is either a single file or sequence in a folder. - frame = representation["context"].get("frame") - if frame is not None: - # Substitute frame number in sequence with $F with padding - ext = representation.get("ext", representation["name"]) - token = "$F{}".format(len(frame)) # e.g. $F4 - pattern = r"\.(\d+)\.{ext}$".format(ext=re.escape(ext)) - path = re.sub(pattern, ".{}.{}".format(token, ext), path) - - return os.path.normpath(path).replace("\\", "/") diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_hda.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_hda.py deleted file mode 100644 index fcf0e834f8..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_hda.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import hou -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID -) -from ayon_core.pipeline.load import LoadError -from ayon_houdini.api import ( - lib, - pipeline, - plugin -) - - -class HdaLoader(plugin.HoudiniLoader): - """Load Houdini Digital Asset file.""" - - product_types = {"hda"} - label = "Load Hda" - representations = {"hda"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - - namespace = namespace or context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - hou.hda.installFile(file_path) - - hda_defs = hou.hda.definitionsInFile(file_path) - if not hda_defs: - raise LoadError(f"No HDA definitions found in file: {file_path}") - - parent_node = self._create_dedicated_parent_node(hda_defs[-1]) - - # Get the type name from the HDA definition. - type_name = hda_defs[-1].nodeTypeName() - hda_node = parent_node.createNode(type_name, node_name) - hda_node.moveToGoodPosition() - - # Imprint it manually - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": node_name, - "namespace": namespace, - "loader": self.__class__.__name__, - "representation": context["representation"]["id"], - } - - lib.imprint(hda_node, data) - - return hda_node - - def update(self, container, context): - - repre_entity = context["representation"] - hda_node = container["node"] - file_path = get_representation_path(repre_entity) - file_path = file_path.replace("\\", "/") - hou.hda.installFile(file_path) - defs = hda_node.type().allInstalledDefinitions() - def_paths = [d.libraryFilePath() for d in defs] - new = def_paths.index(file_path) - defs[new].setIsPreferred(True) - hda_node.setParms({ - "representation": repre_entity["id"] - }) - - def remove(self, container): - node = container["node"] - parent = node.parent() - node.destroy() - - if parent.path() == pipeline.AVALON_CONTAINERS: - return - - # Remove parent if empty. 
- if not parent.children(): - parent.destroy() - - def _create_dedicated_parent_node(self, hda_def): - - # Get the root node - parent_node = pipeline.get_or_create_avalon_container() - node = None - node_type = None - if hda_def.nodeTypeCategory() == hou.objNodeTypeCategory(): - return parent_node - elif hda_def.nodeTypeCategory() == hou.chopNodeTypeCategory(): - node_type, node_name = "chopnet", "MOTION" - elif hda_def.nodeTypeCategory() == hou.cop2NodeTypeCategory(): - node_type, node_name = "cop2net", "IMAGES" - elif hda_def.nodeTypeCategory() == hou.dopNodeTypeCategory(): - node_type, node_name = "dopnet", "DOPS" - elif hda_def.nodeTypeCategory() == hou.ropNodeTypeCategory(): - node_type, node_name = "ropnet", "ROPS" - elif hda_def.nodeTypeCategory() == hou.lopNodeTypeCategory(): - node_type, node_name = "lopnet", "LOPS" - elif hda_def.nodeTypeCategory() == hou.sopNodeTypeCategory(): - node_type, node_name = "geo", "SOPS" - elif hda_def.nodeTypeCategory() == hou.topNodeTypeCategory(): - node_type, node_name = "topnet", "TOPS" - # TODO: Create a dedicated parent node based on Vop Node vex context. - elif hda_def.nodeTypeCategory() == hou.vopNodeTypeCategory(): - node_type, node_name = "matnet", "MATSandVOPS" - - node = parent_node.node(node_name) - if not node: - node = parent_node.createNode(node_type, node_name) - - node.moveToGoodPosition() - return node diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_image.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_image.py deleted file mode 100644 index 9d4cd2fb18..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_image.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import re -import hou - -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from ayon_houdini.api import ( - pipeline, - plugin, - lib -) - - -def get_image_avalon_container(): - """The COP2 files must be in a COP2 network. - - So we maintain a single entry point within AVALON_CONTAINERS, - just for ease of use. - - """ - - path = pipeline.AVALON_CONTAINERS - avalon_container = hou.node(path) - if not avalon_container: - # Let's create avalon container secretly - # but make sure the pipeline still is built the - # way we anticipate it was built, asserting it. 
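-        # (fail loudly if the expected container path ever changes)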
- assert path == "/obj/AVALON_CONTAINERS" - - parent = hou.node("/obj") - avalon_container = parent.createNode( - "subnet", node_name="AVALON_CONTAINERS" - ) - - image_container = hou.node(path + "/IMAGES") - if not image_container: - image_container = avalon_container.createNode( - "cop2net", node_name="IMAGES" - ) - image_container.moveToGoodPosition() - - return image_container - - -class ImageLoader(plugin.HoudiniLoader): - """Load images into COP2""" - - product_types = { - "imagesequence", - "review", - "render", - "plate", - "image", - "online", - } - label = "Load Image (COP2)" - representations = {"*"} - order = -10 - - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - # Format file name, Houdini only wants forward slashes - path = self.filepath_from_context(context) - path = self.format_path(path, representation=context["representation"]) - - # Get the root node - parent = get_image_avalon_container() - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - node = parent.createNode("file", node_name=node_name) - node.moveToGoodPosition() - - parms = {"filename1": path} - parms.update(self.get_colorspace_parms(context["representation"])) - - node.setParms(parms) - - # Imprint it manually - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": node_name, - "namespace": namespace, - "loader": str(self.__class__.__name__), - "representation": context["representation"]["id"], - } - - # todo: add folder="Avalon" - lib.imprint(node, data) - - return node - - def update(self, container, context): - repre_entity = context["representation"] - node = container["node"] - - # Update the file path - file_path = get_representation_path(repre_entity) - file_path = self.format_path(file_path, repre_entity) - - parms = { - "filename1": file_path, - "representation": repre_entity["id"], - } - - parms.update(self.get_colorspace_parms(repre_entity)) - - # Update attributes - node.setParms(parms) - - def remove(self, container): - - node = container["node"] - - # Let's clean up the IMAGES COP2 network - # if it ends up being empty and we deleted - # the last file node. Store the parent - # before we delete the node. - parent = node.parent() - - node.destroy() - - if not parent.children(): - parent.destroy() - - @staticmethod - def format_path(path, representation): - """Format file path correctly for single image or sequence.""" - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - ext = os.path.splitext(path)[-1] - - is_sequence = bool(representation["context"].get("frame")) - # The path is either a single file or sequence in a folder. - if not is_sequence: - filename = path - else: - filename = re.sub(r"(.*)\.(\d+){}$".format(re.escape(ext)), - "\\1.$F4{}".format(ext), - path) - - filename = os.path.join(path, filename) - - filename = os.path.normpath(filename) - filename = filename.replace("\\", "/") - - return filename - - def get_colorspace_parms(self, representation: dict) -> dict: - """Return the color space parameters. - - Returns the values for the colorspace parameters on the node if there - is colorspace data on the representation. - - Arguments: - representation (dict): The representation entity. - - Returns: - dict: Parm to value mapping if colorspace data is defined. 
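-                An empty dict is returned when the representation has no
-                colorspace data or when the Houdini version predates OCIO
-                support on the COP2 File node (Houdini 20+).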
- - """ - # Using OCIO colorspace on COP2 File node is only supported in Hou 20+ - major, _, _ = hou.applicationVersion() - if major < 20: - return {} - - data = representation.get("data", {}).get("colorspaceData", {}) - if not data: - return {} - - colorspace = data["colorspace"] - if colorspace: - return { - "colorspace": 3, # Use OpenColorIO - "ocio_space": colorspace - } - - def switch(self, container, representation): - self.update(container, representation) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_redshift_proxy.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_redshift_proxy.py deleted file mode 100644 index 514dbe109f..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_redshift_proxy.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -import re -import hou - -from ayon_core.pipeline import get_representation_path -from ayon_core.pipeline.load import LoadError - -from ayon_houdini.api import ( - pipeline, - plugin -) - - -class RedshiftProxyLoader(plugin.HoudiniLoader): - """Load Redshift Proxy""" - - product_types = {"redshiftproxy"} - label = "Load Redshift Proxy" - representations = {"rs"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - container = obj.createNode("geo", node_name=node_name) - - # Check whether the Redshift parameters exist - if not, then likely - # redshift is not set up or initialized correctly - if not container.parm("RS_objprop_proxy_enable"): - container.destroy() - raise LoadError("Unable to initialize geo node with Redshift " - "attributes. Make sure you have the Redshift " - "plug-in set up correctly for Houdini.") - - # Enable by default - container.setParms({ - "RS_objprop_proxy_enable": True, - "RS_objprop_proxy_file": self.format_path( - self.filepath_from_context(context), - context["representation"]) - }) - - # Remove the file node, it only loads static meshes - # Houdini 17 has removed the file node from the geo node - file_node = container.node("file1") - if file_node: - file_node.destroy() - - # Add this stub node inside so it previews ok - proxy_sop = container.createNode("redshift_proxySOP", - node_name=node_name) - proxy_sop.setDisplayFlag(True) - - nodes = [container, proxy_sop] - - self[:] = nodes - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - def update(self, container, context): - repre_entity = context["representation"] - # Update the file path - file_path = get_representation_path(repre_entity) - - node = container["node"] - node.setParms({ - "RS_objprop_proxy_file": self.format_path( - file_path, repre_entity) - }) - - # Update attribute - node.setParms({"representation": repre_entity["id"]}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - @staticmethod - def format_path(path, representation): - """Format file path correctly for single redshift proxy - or redshift proxy sequence.""" - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - is_sequence = bool(representation["context"].get("frame")) - # The path is either a single file or sequence in a folder. 
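-        # e.g. a hypothetical "fx.0001.rs" becomes "fx.$F4.rs" so the proxy
-        # file is resolved per frame at render time.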
- if is_sequence: - filename = re.sub(r"(.*)\.(\d+)\.(rs.*)", "\\1.$F4.\\3", path) - filename = os.path.join(path, filename) - else: - filename = path - - filename = os.path.normpath(filename) - filename = filename.replace("\\", "/") - - return filename diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_layer.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_layer.py deleted file mode 100644 index fb302fd943..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_layer.py +++ /dev/null @@ -1,87 +0,0 @@ -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from ayon_houdini.api import ( - plugin, - lib -) - - -class USDSublayerLoader(plugin.HoudiniLoader): - """Sublayer USD file in Solaris""" - - product_types = { - "usd", - "usdCamera", - } - label = "Sublayer USD" - representations = {"usd", "usda", "usdlc", "usdnc", "abc"} - order = 1 - - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - import os - import hou - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - - # Get the root node - stage = hou.node("/stage") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create USD reference - container = stage.createNode("sublayer", node_name=node_name) - container.setParms({"filepath1": file_path}) - container.moveToGoodPosition() - - # Imprint it manually - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": node_name, - "namespace": namespace, - "loader": str(self.__class__.__name__), - "representation": context["representation"]["id"], - } - - # todo: add folder="Avalon" - lib.imprint(container, data) - - return container - - def update(self, container, context): - repre_entity = context["representation"] - node = container["node"] - - # Update the file path - file_path = get_representation_path(repre_entity) - file_path = file_path.replace("\\", "/") - - # Update attributes - node.setParms( - { - "filepath1": file_path, - "representation": repre_entity["id"], - } - ) - - # Reload files - node.parm("reload").pressButton() - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_reference.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_reference.py deleted file mode 100644 index 690f6ce187..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_reference.py +++ /dev/null @@ -1,87 +0,0 @@ -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from ayon_houdini.api import ( - plugin, - lib -) - - -class USDReferenceLoader(plugin.HoudiniLoader): - """Reference USD file in Solaris""" - - product_types = { - "usd", - "usdCamera", - } - label = "Reference USD" - representations = {"usd", "usda", "usdlc", "usdnc", "abc"} - order = -8 - - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - import os - import hou - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = 
os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - - # Get the root node - stage = hou.node("/stage") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create USD reference - container = stage.createNode("reference", node_name=node_name) - container.setParms({"filepath1": file_path}) - container.moveToGoodPosition() - - # Imprint it manually - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": node_name, - "namespace": namespace, - "loader": str(self.__class__.__name__), - "representation": context["representation"]["id"], - } - - # todo: add folder="Avalon" - lib.imprint(container, data) - - return container - - def update(self, container, context): - repre_entity = context["representation"] - node = container["node"] - - # Update the file path - file_path = get_representation_path(repre_entity) - file_path = file_path.replace("\\", "/") - - # Update attributes - node.setParms( - { - "filepath1": file_path, - "representation": repre_entity["id"], - } - ) - - # Reload files - node.parm("reload").pressButton() - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_sop.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_sop.py deleted file mode 100644 index 347e3283de..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_usd_sop.py +++ /dev/null @@ -1,79 +0,0 @@ -import os - -from ayon_houdini.api import ( - pipeline, - plugin -) - - -class SopUsdImportLoader(plugin.HoudiniLoader): - """Load USD to SOPs via `usdimport`""" - - label = "Load USD to SOPs" - product_types = {"*"} - representations = {"usd"} - order = -6 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - import hou - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - container = obj.createNode("geo", node_name=node_name) - - # Create a usdimport node - usdimport = container.createNode("usdimport", node_name=node_name) - usdimport.setParms({"filepath1": file_path}) - - # Set new position for unpack node else it gets cluttered - nodes = [container, usdimport] - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - def update(self, container, context): - - node = container["node"] - try: - usdimport_node = next( - n for n in node.children() if n.type().name() == "usdimport" - ) - except StopIteration: - self.log.error("Could not find node of type `usdimport`") - return - - # Update the file path - file_path = self.filepath_from_context(context) - file_path = file_path.replace("\\", "/") - - usdimport_node.setParms({"filepath1": file_path}) - - # Update attribute - node.setParms({"representation": context["representation"]["id"]}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, 
representation): - self.update(container, representation) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/load_vdb.py b/server_addon/houdini/client/ayon_houdini/plugins/load/load_vdb.py deleted file mode 100644 index 9014f4c5e2..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/load_vdb.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import re - -from ayon_core.pipeline import get_representation_path -from ayon_houdini.api import ( - pipeline, - plugin -) - - -class VdbLoader(plugin.HoudiniLoader): - """Load VDB""" - - product_types = {"vdbcache"} - label = "Load VDB" - representations = {"vdb"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - import hou - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["folder"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - container = obj.createNode("geo", node_name=node_name) - - # Remove the file node, it only loads static meshes - # Houdini 17 has removed the file node from the geo node - file_node = container.node("file1") - if file_node: - file_node.destroy() - - # Explicitly create a file node - file_node = container.createNode("file", node_name=node_name) - path = self.filepath_from_context(context) - file_node.setParms( - {"file": self.format_path(path, context["representation"])}) - - # Set display on last node - file_node.setDisplayFlag(True) - - nodes = [container, file_node] - self[:] = nodes - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - @staticmethod - def format_path(path, representation): - """Format file path correctly for single vdb or vdb sequence.""" - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - is_sequence = bool(representation["context"].get("frame")) - # The path is either a single file or sequence in a folder. 
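-        # e.g. a hypothetical "smoke.0001.vdb" becomes "smoke.$F4.vdb" so the
-        # file SOP evaluates the frame number per frame.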
- if not is_sequence: - filename = path - else: - filename = re.sub(r"(.*)\.(\d+)\.vdb$", "\\1.$F4.vdb", path) - - filename = os.path.join(path, filename) - - filename = os.path.normpath(filename) - filename = filename.replace("\\", "/") - - return filename - - def update(self, container, context): - repre_entity = context["representation"] - node = container["node"] - try: - file_node = next( - n for n in node.children() if n.type().name() == "file" - ) - except StopIteration: - self.log.error("Could not find node of type `alembic`") - return - - # Update the file path - file_path = get_representation_path(repre_entity) - file_path = self.format_path(file_path, repre_entity) - - file_node.setParms({"file": file_path}) - - # Update attribute - node.setParms({"representation": repre_entity["id"]}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, context): - self.update(container, context) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/load/show_usdview.py b/server_addon/houdini/client/ayon_houdini/plugins/load/show_usdview.py deleted file mode 100644 index 4e18bc038a..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/load/show_usdview.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import platform -import subprocess - -from ayon_core.lib.vendor_bin_utils import find_executable -from ayon_houdini.api import plugin - - -class ShowInUsdview(plugin.HoudiniLoader): - """Open USD file in usdview""" - - label = "Show in usdview" - representations = {"*"} - product_types = {"*"} - extensions = {"usd", "usda", "usdlc", "usdnc", "abc"} - order = 15 - - icon = "code-fork" - color = "white" - - def load(self, context, name=None, namespace=None, data=None): - from pathlib import Path - - if platform.system() == "Windows": - executable = "usdview.bat" - else: - executable = "usdview" - - usdview = find_executable(executable) - if not usdview: - raise RuntimeError("Unable to find usdview") - - # For some reason Windows can return the path like: - # C:/PROGRA~1/SIDEEF~1/HOUDIN~1.435/bin/usdview - # convert to resolved path so `subprocess` can take it - usdview = str(Path(usdview).resolve().as_posix()) - - filepath = self.filepath_from_context(context) - filepath = os.path.normpath(filepath) - filepath = filepath.replace("\\", "/") - - if not os.path.exists(filepath): - self.log.error("File does not exist: %s" % filepath) - return - - self.log.info("Start houdini variant of usdview...") - - subprocess.Popen([usdview, filepath, "--renderer", "GL"]) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_active_state.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_active_state.py deleted file mode 100644 index e09a347e9f..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_active_state.py +++ /dev/null @@ -1,42 +0,0 @@ -import hou - -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectInstanceActiveState(plugin.HoudiniInstancePlugin): - """Collect default active state for instance from its node bypass state. - - This is done at the very end of the CollectorOrder so that any required - collecting of data iterating over instances (with InstancePlugin) will - actually collect the data for when the user enables the state in the UI. - Otherwise potentially required data might have skipped collecting. 
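-
-    For example (illustrative): a ROP whose bypass flag is set is
-    collected below with active=False (and "publish": False), while the
-    earlier collectors have still gathered its data as if it were enabled.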
- - """ - - order = pyblish.api.CollectorOrder + 0.299 - families = ["*"] - label = "Instance Active State" - - def process(self, instance): - - # Must have node to check for bypass state - if len(instance) == 0: - return - - # Check bypass state and reverse - active = True - node = hou.node(instance.data.get("instance_node")) - if hasattr(node, "isBypassed"): - active = not node.isBypassed() - - # Set instance active state - instance.data.update( - { - "active": active, - # temporarily translation of `active` to `publish` till - # issue has been resolved: - # https://github.com/pyblish/pyblish-base/issues/307 - "publish": active, - } - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_arnold_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_arnold_rop.py deleted file mode 100644 index 10c6d91d26..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_arnold_rop.py +++ /dev/null @@ -1,168 +0,0 @@ -import os -import re - -import hou -import pyblish.api - -from ayon_houdini.api import colorspace, plugin -from ayon_houdini.api.lib import ( - get_color_management_preferences, - evalParmNoFrame -) - - -class CollectArnoldROPRenderProducts(plugin.HoudiniInstancePlugin): - """Collect Arnold ROP Render Products - - Collects the instance.data["files"] for the render products. - - Provides: - instance -> files - - """ - - label = "Arnold ROP Render Products" - # This specific order value is used so that - # this plugin runs after CollectFrames - order = pyblish.api.CollectorOrder + 0.11 - families = ["arnold_rop"] - - def process(self, instance): - - rop = hou.node(instance.data.get("instance_node")) - - # Collect chunkSize - chunk_size_parm = rop.parm("chunkSize") - if chunk_size_parm: - chunk_size = int(chunk_size_parm.eval()) - instance.data["chunkSize"] = chunk_size - self.log.debug("Chunk Size: %s" % chunk_size) - - default_prefix = evalParmNoFrame(rop, "ar_picture") - render_products = [] - - export_prefix = None - export_products = [] - if instance.data["splitRender"]: - export_prefix = evalParmNoFrame( - rop, "ar_ass_file", pad_character="0" - ) - beauty_export_product = self.get_render_product_name( - prefix=export_prefix, - suffix=None) - export_products.append(beauty_export_product) - self.log.debug( - "Found export product: {}".format(beauty_export_product) - ) - instance.data["ifdFile"] = beauty_export_product - instance.data["exportFiles"] = list(export_products) - - # Default beauty AOV - beauty_product = self.get_render_product_name(prefix=default_prefix, - suffix=None) - render_products.append(beauty_product) - - files_by_aov = { - "": self.generate_expected_files(instance, beauty_product) - } - - # Assume it's a multipartExr Render. - multipartExr = True - - num_aovs = rop.evalParm("ar_aovs") - # TODO: Check the following logic. - # as it always assumes that all AOV are not merged. - for index in range(1, num_aovs + 1): - # Skip disabled AOVs - if not rop.evalParm("ar_enable_aov{}".format(index)): - continue - - if rop.evalParm("ar_aov_exr_enable_layer_name{}".format(index)): - label = rop.evalParm("ar_aov_exr_layer_name{}".format(index)) - else: - label = evalParmNoFrame(rop, "ar_aov_label{}".format(index)) - - aov_product = self.get_render_product_name(default_prefix, - suffix=label) - render_products.append(aov_product) - files_by_aov[label] = self.generate_expected_files(instance, - aov_product) - - # Set to False as soon as we have a separated aov. 
- multipartExr = False - - # Review Logic expects this key to exist and be True - # if render is a multipart Exr. - # As long as we have one AOV then multipartExr should be True. - instance.data["multipartExr"] = multipartExr - - for product in render_products: - self.log.debug("Found render product: {}".format(product)) - - instance.data["files"] = list(render_products) - instance.data["renderProducts"] = colorspace.ARenderProduct() - - # For now by default do NOT try to publish the rendered output - instance.data["publishJobState"] = "Suspended" - instance.data["attachTo"] = [] # stub required data - - if "expectedFiles" not in instance.data: - instance.data["expectedFiles"] = list() - instance.data["expectedFiles"].append(files_by_aov) - - # update the colorspace data - colorspace_data = get_color_management_preferences() - instance.data["colorspaceConfig"] = colorspace_data["config"] - instance.data["colorspaceDisplay"] = colorspace_data["display"] - instance.data["colorspaceView"] = colorspace_data["view"] - - def get_render_product_name(self, prefix, suffix): - """Return the output filename using the AOV prefix and suffix""" - - # When AOV is explicitly defined in prefix we just swap it out - # directly with the AOV suffix to embed it. - # Note: ${AOV} seems to be evaluated in the parameter as %AOV% - if "%AOV%" in prefix: - # It seems that when some special separator characters are present - # before the %AOV% token that Redshift will secretly remove it if - # there is no suffix for the current product, for example: - # foo_%AOV% -> foo.exr - pattern = "%AOV%" if suffix else "[._-]?%AOV%" - product_name = re.sub(pattern, - suffix, - prefix, - flags=re.IGNORECASE) - else: - if suffix: - # Add ".{suffix}" before the extension - prefix_base, ext = os.path.splitext(prefix) - product_name = prefix_base + "." + suffix + ext - else: - product_name = prefix - - return product_name - - def generate_expected_files(self, instance, path): - """Create expected files in instance data""" - - dir = os.path.dirname(path) - file = os.path.basename(path) - - if "#" in file: - def replace(match): - return "%0{}d".format(len(match.group())) - - file = re.sub("#+", replace, file) - - if "%" not in file: - return path - - expected_files = [] - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - for i in range(int(start), (int(end) + 1)): - expected_files.append( - os.path.join(dir, (file % i)).replace("\\", "/")) - - return expected_files diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_asset_handles.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_asset_handles.py deleted file mode 100644 index db9bde8595..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_asset_handles.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collector plugin for frames data on ROP instances.""" -import pyblish.api -from ayon_core.lib import BoolDef -from ayon_core.pipeline import AYONPyblishPluginMixin -from ayon_houdini.api import plugin - - -class CollectAssetHandles(plugin.HoudiniInstancePlugin, - AYONPyblishPluginMixin): - """Apply folder handles. - - If instance does not have: - - frameStart - - frameEnd - - handleStart - - handleEnd - But it does have: - - frameStartHandle - - frameEndHandle - - Then we will retrieve the folder's handles to compute - the exclusive frame range and actual handle ranges. 
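-
-    In short: frameStart = frameStartHandle + handleStart and
-    frameEnd = frameEndHandle - handleEnd; e.g. frameStartHandle=996,
-    frameEndHandle=1005 with handles 5/0 gives 1001-1005.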
- """ - - # This specific order value is used so that - # this plugin runs after CollectAnatomyInstanceData - order = pyblish.api.CollectorOrder + 0.499 - - label = "Collect Folder Handles" - use_asset_handles = True - - def process(self, instance): - # Only process instances without already existing handles data - # but that do have frameStartHandle and frameEndHandle defined - # like the data collected from CollectRopFrameRange - if "frameStartHandle" not in instance.data: - return - if "frameEndHandle" not in instance.data: - return - - has_existing_data = { - "handleStart", - "handleEnd", - "frameStart", - "frameEnd" - }.issubset(instance.data) - if has_existing_data: - return - - attr_values = self.get_attr_values_from_data(instance.data) - if attr_values.get("use_handles", self.use_asset_handles): - folder_attributes = instance.data["folderEntity"]["attrib"] - handle_start = folder_attributes.get("handleStart", 0) - handle_end = folder_attributes.get("handleEnd", 0) - else: - handle_start = 0 - handle_end = 0 - - frame_start = instance.data["frameStartHandle"] + handle_start - frame_end = instance.data["frameEndHandle"] - handle_end - - instance.data.update({ - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": frame_start, - "frameEnd": frame_end - }) - - # Log debug message about the collected frame range - if attr_values.get("use_handles", self.use_asset_handles): - self.log.debug( - "Full Frame range with Handles " - "[{frame_start_handle} - {frame_end_handle}]" - .format( - frame_start_handle=instance.data["frameStartHandle"], - frame_end_handle=instance.data["frameEndHandle"] - ) - ) - else: - self.log.debug( - "Use handles is deactivated for this instance, " - "start and end handles are set to 0." - ) - - # Log collected frame range to the user - message = "Frame range [{frame_start} - {frame_end}]".format( - frame_start=frame_start, - frame_end=frame_end - ) - if handle_start or handle_end: - message += " with handles [{handle_start}]-[{handle_end}]".format( - handle_start=handle_start, - handle_end=handle_end - ) - self.log.info(message) - - if instance.data.get("byFrameStep", 1.0) != 1.0: - self.log.info( - "Frame steps {}".format(instance.data["byFrameStep"])) - - # Add frame range to label if the instance has a frame range. 
- label = instance.data.get("label", instance.data["name"]) - instance.data["label"] = ( - "{label} [{frame_start_handle} - {frame_end_handle}]" - .format( - label=label, - frame_start_handle=instance.data["frameStartHandle"], - frame_end_handle=instance.data["frameEndHandle"] - ) - ) - - @classmethod - def get_attribute_defs(cls): - return [ - BoolDef("use_handles", - tooltip="Disable this if you want the publisher to" - " ignore start and end handles specified in the" - " folder attributes for this publish instance", - default=cls.use_asset_handles, - label="Use asset handles") - ] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_cache_farm.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_cache_farm.py deleted file mode 100644 index b7c3b55cae..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_cache_farm.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import hou -import pyblish.api -from ayon_houdini.api import ( - lib, - plugin -) - - -class CollectDataforCache(plugin.HoudiniInstancePlugin): - """Collect data for caching to Deadline.""" - - # Run after Collect Frames - order = pyblish.api.CollectorOrder + 0.11 - families = ["ass", "pointcache", "redshiftproxy", "vdbcache", "model"] - targets = ["local", "remote"] - label = "Collect Data for Cache" - - def process(self, instance): - creator_attribute = instance.data["creator_attributes"] - farm_enabled = creator_attribute["farm"] - instance.data["farm"] = farm_enabled - if not farm_enabled: - self.log.debug("Caching on farm is disabled. " - "Skipping farm collecting.") - return - # Why do we need this particular collector to collect the expected - # output files from a ROP node. Don't we have a dedicated collector - # for that yet? - # Answer: No, we don't have a generic expected file collector. - # Because different product types needs different logic. - # e.g. 
check CollectMantraROPRenderProducts - # and CollectKarmaROPRenderProducts - # Collect expected files - ropnode = hou.node(instance.data["instance_node"]) - output_parm = lib.get_output_parameter(ropnode) - expected_filepath = output_parm.eval() - instance.data.setdefault("files", list()) - instance.data.setdefault("expectedFiles", list()) - - frames = instance.data.get("frames", "") - if isinstance(frames, str): - # single file - instance.data["files"].append(expected_filepath) - else: - # list of files - staging_dir, _ = os.path.split(expected_filepath) - instance.data["files"].extend( - ["{}/{}".format(staging_dir, f) for f in frames] - ) - - cache_files = {"cache": instance.data["files"]} - - instance.data.update({ - "plugin": "Houdini", - "publish": True - }) - instance.data["families"].append("publish.hou") - instance.data["expectedFiles"].append(cache_files) - - self.log.debug("Caching on farm expected files: {}".format(instance.data["expectedFiles"])) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_chunk_size.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_chunk_size.py deleted file mode 100644 index cd94827ba7..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_chunk_size.py +++ /dev/null @@ -1,31 +0,0 @@ -import pyblish.api -from ayon_core.lib import NumberDef -from ayon_core.pipeline import AYONPyblishPluginMixin -from ayon_houdini.api import plugin - - -class CollectChunkSize(plugin.HoudiniInstancePlugin, - AYONPyblishPluginMixin): - """Collect chunk size for cache submission to Deadline.""" - - order = pyblish.api.CollectorOrder + 0.05 - families = ["ass", "pointcache", "vdbcache", "redshiftproxy", "model"] - targets = ["local", "remote"] - label = "Collect Chunk Size" - chunk_size = 999999 - - def process(self, instance): - # need to get the chunk size info from the setting - attr_values = self.get_attr_values_from_data(instance.data) - instance.data["chunkSize"] = attr_values.get("chunkSize") - - @classmethod - def get_attribute_defs(cls): - return [ - NumberDef("chunkSize", - minimum=1, - maximum=999999, - decimals=0, - default=cls.chunk_size, - label="Frame Per Task") - ] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_current_file.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_current_file.py deleted file mode 100644 index 8e339e0e04..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_current_file.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import hou - -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectHoudiniCurrentFile(plugin.HoudiniContextPlugin): - """Inject the current working file into context""" - - order = pyblish.api.CollectorOrder - 0.1 - label = "Houdini Current File" - - def process(self, context): - """Inject the current working file""" - - current_file = hou.hipFile.path() - if not os.path.exists(current_file): - # By default, Houdini will even point a new scene to a path. - # However if the file is not saved at all and does not exist, - # we assume the user never set it. - current_file = "" - - elif os.path.basename(current_file) == "untitled.hip": - # Due to even a new file being called 'untitled.hip' we are unable - # to confirm the current scene was ever saved because the file - # could have existed already. We will allow it if the file exists, - # but show a warning for this edge case to clarify the potential - # false positive. 
- self.log.warning( - "Current file is 'untitled.hip' and we are " - "unable to detect whether the current scene is " - "saved correctly." - ) - - context.data["currentFile"] = current_file - self.log.info('Current workfile path: {}'.format(current_file)) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_farm_instances.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_farm_instances.py deleted file mode 100644 index f14ff65518..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_farm_instances.py +++ /dev/null @@ -1,36 +0,0 @@ -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectFarmInstances(plugin.HoudiniInstancePlugin): - """Collect instances for farm render.""" - - order = pyblish.api.CollectorOrder - families = ["mantra_rop", - "karma_rop", - "redshift_rop", - "arnold_rop", - "vray_rop", - "usdrender"] - - targets = ["local", "remote"] - label = "Collect farm instances" - - def process(self, instance): - - creator_attribute = instance.data["creator_attributes"] - - # Collect Render Target - if creator_attribute.get("render_target") not in { - "farm_split", "farm" - }: - instance.data["farm"] = False - instance.data["splitRender"] = False - self.log.debug("Render on farm is disabled. " - "Skipping farm collecting.") - return - - instance.data["farm"] = True - instance.data["splitRender"] = ( - creator_attribute.get("render_target") == "farm_split" - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_files_for_cleaning_up.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_files_for_cleaning_up.py deleted file mode 100644 index 3ab03babf4..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_files_for_cleaning_up.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -from typing import List - -import pyblish.api -from ayon_core.pipeline import AYONPyblishPluginMixin -from ayon_houdini.api import plugin - - -class CollectFilesForCleaningUp(plugin.HoudiniInstancePlugin, - AYONPyblishPluginMixin): - """Collect Files For Cleaning Up. - - This collector collects output files and adds them to file remove list. - - CAUTION: - This collector registers exported files and - the parent folder for deletion in `ExplicitCleanUp` plug-in. - please refer to `ExplicitCleanUp`'s docstring for further info. - - Notes: - Artists are free to change the file path in the ROP node. - - Farm instances will be processed on farm by other dedicated plugins - that live in core addon e.g. `CollectRenderedFiles` plugin. - These dedicated plugins don't support tracking and removing - intermediate render files. - - Local Render instances don't track intermediate render files, - Therefore, this plugin doesn't support removing - intermediate render files. - - HDA is not added to this plugin's options in server settings. - Cleaning up HDA products will break the scene as Houdini will no longer - be able to find the HDA file. - In addition,HDA plugins always save HDAs to external files. - Therefore, Cleaning up HDA products will break the ability to go back - to the workfile and continue on the HDA. - """ - - # It should run after CollectFrames and Collect Render plugins, - # and before CollectLocalRenderInstances. 
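-    # Note: 0.115 sits between CollectFrames and the render product
-    # collectors (+0.1 / +0.11) and CollectLocalRenderInstances (+0.12),
-    # which is what the ordering comment above relies on.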
- order = pyblish.api.CollectorOrder + 0.115 - - hosts = ["houdini"] - families = ["*"] - label = "Collect Files For Cleaning Up" - - def process(self, instance): - - if instance.data.get("farm"): - self.log.debug("Should be processed on farm, skipping.") - return - - files: List[str] = [] - staging_dirs: List[str] = [] - expected_files = instance.data.get("expectedFiles", []) - - # Prefer 'expectedFiles' over 'frames' because it usually contains more - # output files than just a single file or single sequence of files. - if expected_files: - # Products with expected files - # This can be Render products or submitted cache to farm. - for expected in expected_files: - # expected.values() is a list of lists - for output_files in expected.values(): - staging_dir, _ = os.path.split(output_files[0]) - if staging_dir not in staging_dirs: - staging_dirs.append(staging_dir) - files.extend(output_files) - else: - # Products with frames or single file. - - frames = instance.data.get("frames") - if frames is None: - self.log.warning( - f"No frames data found on instance {instance}" - ". Skipping collection for caching on farm..." - ) - return - - staging_dir = instance.data.get("stagingDir") - staging_dirs.append(staging_dir) - - if isinstance(frames, str): - # single file. - files.append(f"{staging_dir}/{frames}") - else: - # list of frame. - files.extend( - [f"{staging_dir}/{frame}" for frame in frames] - ) - - self.log.debug( - f"Add directories to 'cleanupEmptyDir': {staging_dirs}") - instance.context.data["cleanupEmptyDirs"].extend(staging_dirs) - - self.log.debug("Add files to 'cleanupFullPaths': {}".format(files)) - instance.context.data["cleanupFullPaths"].extend(files) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_frames.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_frames.py deleted file mode 100644 index a442e74835..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_frames.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collector plugin for frames data on ROP instances.""" -import os -import hou # noqa -import clique -import pyblish.api -from ayon_houdini.api import lib, plugin - - -class CollectFrames(plugin.HoudiniInstancePlugin): - """Collect all frames which would be saved from the ROP nodes""" - - # This specific order value is used so that - # this plugin runs after CollectRopFrameRange - order = pyblish.api.CollectorOrder + 0.1 - label = "Collect Frames" - families = ["camera", "vdbcache", "imagesequence", "ass", - "redshiftproxy", "review", "pointcache", "fbx", - "model"] - - def process(self, instance): - - # CollectRopFrameRange computes `start_frame` and `end_frame` - # depending on the trange value. - start_frame = instance.data["frameStartHandle"] - end_frame = instance.data["frameEndHandle"] - - # Evaluate the file name at the first frame. - ropnode = hou.node(instance.data["instance_node"]) - output_parm = lib.get_output_parameter(ropnode) - output = output_parm.evalAtFrame(start_frame) - file_name = os.path.basename(output) - - # todo: `frames` currently conflicts with "explicit frames" for a - # for a custom frame list. So this should be refactored. - - instance.data.update({ - "frames": file_name, # Set frames to the file name by default. - "stagingDir": os.path.dirname(output) - }) - - # Skip unnecessary logic if start and end frames are equal. - if start_frame == end_frame: - return - - # Create collection using frame pattern. - # e.g. 
'pointcacheBgeoCache_AB010.1001.bgeo' - # will be - frame_collection, _ = clique.assemble( - [file_name], - patterns=[clique.PATTERNS["frames"]], - minimum_items=1 - ) - - # Return as no frame pattern detected. - if not frame_collection: - return - - # It's always expected to be one collection. - frame_collection = frame_collection[0] - frame_collection.indexes.clear() - frame_collection.indexes.update(list(range(start_frame, (end_frame + 1)))) - instance.data["frames"] = list(frame_collection) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_inputs.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_inputs.py deleted file mode 100644 index f2904a68f6..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_inputs.py +++ /dev/null @@ -1,137 +0,0 @@ -from collections import deque - -import pyblish.api -from ayon_core.pipeline import registered_host -from ayon_houdini.api import plugin - - -def get_container_members(container): - node = container["node"] - # Usually the loaded containers don't have any complex references - # and the contained children should be all we need. So we disregard - # checking for .references() on the nodes. - members = set(node.allSubChildren()) - members.add(node) # include the node itself - return members - - -def collect_input_containers(containers, nodes): - """Collect containers that contain any of the node in `nodes`. - - This will return any loaded Avalon container that contains at least one of - the nodes. As such, the Avalon container is an input for it. Or in short, - there are member nodes of that container. - - Returns: - list: Loaded containers that contain the `nodes` - - """ - # Assume the containers have collected their cached '_members' data - # in the collector. - return [container for container in containers - if any(node in container["_members"] for node in nodes)] - - -def iter_upstream(node): - """Yields all upstream inputs for the current node. - - This includes all `node.inputAncestors()` but also traverses through all - `node.references()` for the node itself and for any of the upstream nodes. - This method has no max-depth and will collect all upstream inputs. - - Yields: - hou.Node: The upstream nodes, including references. - - """ - - upstream = node.inputAncestors( - include_ref_inputs=True, follow_subnets=True - ) - - # Initialize process queue with the node's ancestors itself - queue = deque(upstream) - collected = set(upstream) - - # Traverse upstream references for all nodes and yield them as we - # process the queue. - while queue: - upstream_node = queue.pop() - yield upstream_node - - # Find its references that are not collected yet. - references = upstream_node.references() - references = [n for n in references if n not in collected] - - queue.extend(references) - collected.update(references) - - # Include the references' ancestors that have not been collected yet. - for reference in references: - if reference in collected: - # Might have been collected in previous iteration - continue - - ancestors = reference.inputAncestors( - include_ref_inputs=True, follow_subnets=True - ) - ancestors = [n for n in ancestors if n not in collected] - - queue.extend(ancestors) - collected.update(ancestors) - - -class CollectUpstreamInputs(plugin.HoudiniInstancePlugin): - """Collect source input containers used for this publish. - - This will include `inputs` data of which loaded publishes were used in the - generation of this publish. 
This leaves an upstream trace to what was used - as input. - - """ - - label = "Collect Inputs" - order = pyblish.api.CollectorOrder + 0.4 - - def process(self, instance): - # We can't get the "inputAncestors" directly from the ROP - # node, so we find the related output node (set in SOP/COP path) - # and include that together with its ancestors - output = instance.data.get("output_node") - - if output is None: - # If no valid output node is set then ignore it as validation - # will be checking those cases. - self.log.debug( - "No output node found, skipping collecting of inputs.." - ) - return - - # For large scenes the querying of "host.ls()" can be relatively slow - # e.g. up to a second. Many instances calling it easily slows this - # down. As such, we cache it so we trigger it only once. - # todo: Instead of hidden cache make "CollectContainers" plug-in - cache_key = "__cache_containers" - scene_containers = instance.context.data.get(cache_key, None) - if scene_containers is None: - # Query the scenes' containers if there's no cache yet - host = registered_host() - scene_containers = list(host.ls()) - for container in scene_containers: - # Embed the members into the container dictionary - container_members = set(get_container_members(container)) - container["_members"] = container_members - instance.context.data[cache_key] = scene_containers - - inputs = [] - if scene_containers: - # Collect all upstream parents - nodes = list(iter_upstream(output)) - nodes.append(output) - - # Collect containers for the given set of nodes - containers = collect_input_containers(scene_containers, nodes) - - inputs = [c["representation"] for c in containers] - - instance.data["inputRepresentations"] = inputs - self.log.debug("Collected inputs: %s" % inputs) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_instances_type.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_instances_type.py deleted file mode 100644 index 75a394a1f9..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_instances_type.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Collector for different types. - -This will add additional families to different instance based on -the creator_identifier parameter. 
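-
-For example: an instance made by the bgeo creator identifier gains the
-"bgeo" family, while the pointcache and model creator identifiers gain
-"abc", as implemented below.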
-""" -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectPointcacheType(plugin.HoudiniInstancePlugin): - """Collect data type for different instances.""" - - order = pyblish.api.CollectorOrder - families = ["pointcache", "model"] - label = "Collect instances types" - - def process(self, instance): - if instance.data["creator_identifier"] == "io.openpype.creators.houdini.bgeo": # noqa: E501 - instance.data["families"] += ["bgeo"] - elif instance.data["creator_identifier"] in { - "io.openpype.creators.houdini.pointcache", - "io.openpype.creators.houdini.model" - }: - instance.data["families"] += ["abc"] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_karma_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_karma_rop.py deleted file mode 100644 index 60fec9d2e0..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_karma_rop.py +++ /dev/null @@ -1,113 +0,0 @@ -import re -import os - -import hou -import pyblish.api - -from ayon_houdini.api.lib import ( - evalParmNoFrame, - get_color_management_preferences -) -from ayon_houdini.api import ( - colorspace, - plugin -) - - -class CollectKarmaROPRenderProducts(plugin.HoudiniInstancePlugin): - """Collect Karma Render Products - - Collects the instance.data["files"] for the multipart render product. - - Provides: - instance -> files - - """ - - label = "Karma ROP Render Products" - # This specific order value is used so that - # this plugin runs after CollectFrames - order = pyblish.api.CollectorOrder + 0.11 - families = ["karma_rop"] - - def process(self, instance): - - rop = hou.node(instance.data.get("instance_node")) - - # Collect chunkSize - chunk_size_parm = rop.parm("chunkSize") - if chunk_size_parm: - chunk_size = int(chunk_size_parm.eval()) - instance.data["chunkSize"] = chunk_size - self.log.debug("Chunk Size: %s" % chunk_size) - - default_prefix = evalParmNoFrame(rop, "picture") - render_products = [] - - # Default beauty AOV - beauty_product = self.get_render_product_name( - prefix=default_prefix, suffix=None - ) - render_products.append(beauty_product) - - files_by_aov = { - "beauty": self.generate_expected_files(instance, - beauty_product) - } - - # Review Logic expects this key to exist and be True - # if render is a multipart Exr. - # As long as we have one AOV then multipartExr should be True. - # By default karma render is a multipart Exr. 
- instance.data["multipartExr"] = True - - filenames = list(render_products) - instance.data["files"] = filenames - instance.data["renderProducts"] = colorspace.ARenderProduct() - - for product in render_products: - self.log.debug("Found render product: %s" % product) - - if "expectedFiles" not in instance.data: - instance.data["expectedFiles"] = list() - instance.data["expectedFiles"].append(files_by_aov) - - # update the colorspace data - colorspace_data = get_color_management_preferences() - instance.data["colorspaceConfig"] = colorspace_data["config"] - instance.data["colorspaceDisplay"] = colorspace_data["display"] - instance.data["colorspaceView"] = colorspace_data["view"] - - def get_render_product_name(self, prefix, suffix): - product_name = prefix - if suffix: - # Add ".{suffix}" before the extension - prefix_base, ext = os.path.splitext(prefix) - product_name = "{}.{}{}".format(prefix_base, suffix, ext) - - return product_name - - def generate_expected_files(self, instance, path): - """Create expected files in instance data""" - - dir = os.path.dirname(path) - file = os.path.basename(path) - - if "#" in file: - def replace(match): - return "%0{}d".format(len(match.group())) - - file = re.sub("#+", replace, file) - - if "%" not in file: - return path - - expected_files = [] - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - for i in range(int(start), (int(end) + 1)): - expected_files.append( - os.path.join(dir, (file % i)).replace("\\", "/")) - - return expected_files diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_local_render_instances.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_local_render_instances.py deleted file mode 100644 index 931a79535b..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_local_render_instances.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline.create import get_product_name -from ayon_core.pipeline.farm.patterning import match_aov_pattern -from ayon_core.pipeline.publish import ( - get_plugin_settings, - apply_plugin_settings_automatically -) -from ayon_houdini.api import plugin - - -class CollectLocalRenderInstances(plugin.HoudiniInstancePlugin): - """Collect instances for local render. - - Agnostic Local Render Collector. - """ - - # this plugin runs after Collect Render Products - order = pyblish.api.CollectorOrder + 0.12 - families = ["mantra_rop", - "karma_rop", - "redshift_rop", - "arnold_rop", - "vray_rop", - "usdrender"] - - label = "Collect local render instances" - - use_deadline_aov_filter = False - aov_filter = {"host_name": "houdini", - "value": [".*([Bb]eauty).*"]} - - @classmethod - def apply_settings(cls, project_settings): - # Preserve automatic settings applying logic - settings = get_plugin_settings(plugin=cls, - project_settings=project_settings, - log=cls.log, - category="houdini") - apply_plugin_settings_automatically(cls, settings, logger=cls.log) - - if not cls.use_deadline_aov_filter: - # get aov_filter from collector settings - # and restructure it as match_aov_pattern requires. 
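-            # i.e. {"host_name": "houdini", "value": [".*([Bb]eauty).*"]}
-            # becomes {"houdini": [".*([Bb]eauty).*"]}, the shape that
-            # match_aov_pattern expects.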
- cls.aov_filter = { - cls.aov_filter["host_name"]: cls.aov_filter["value"] - } - else: - # get aov_filter from deadline settings - cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"] - cls.aov_filter = { - item["name"]: item["value"] - for item in cls.aov_filter - } - - def process(self, instance): - - if instance.data["farm"]: - self.log.debug("Render on farm is enabled. " - "Skipping local render collecting.") - return - - # Create Instance for each AOV. - context = instance.context - expectedFiles = next(iter(instance.data["expectedFiles"]), {}) - - product_type = "render" # is always render - product_group = get_product_name( - context.data["projectName"], - context.data["taskEntity"]["name"], - context.data["taskEntity"]["taskType"], - context.data["hostName"], - product_type, - instance.data["productName"] - ) - - for aov_name, aov_filepaths in expectedFiles.items(): - product_name = product_group - - if aov_name: - product_name = "{}_{}".format(product_name, aov_name) - - # Create instance for each AOV - aov_instance = context.create_instance(product_name) - - # Prepare Representation for each AOV - aov_filenames = [os.path.basename(path) for path in aov_filepaths] - staging_dir = os.path.dirname(aov_filepaths[0]) - ext = aov_filepaths[0].split(".")[-1] - - # Decide if instance is reviewable - preview = False - if instance.data.get("multipartExr", False): - # Add preview tag because its multipartExr. - preview = True - else: - # Add Preview tag if the AOV matches the filter. - preview = match_aov_pattern( - "houdini", self.aov_filter, aov_filenames[0] - ) - - preview = preview and instance.data.get("review", False) - - # Support Single frame. - # The integrator wants single files to be a single - # filename instead of a list. - # More info: https://github.com/ynput/ayon-core/issues/238 - if len(aov_filenames) == 1: - aov_filenames = aov_filenames[0] - - aov_instance.data.update({ - # 'label': label, - "task": instance.data["task"], - "folderPath": instance.data["folderPath"], - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"], - "productType": product_type, - "family": product_type, - "productName": product_name, - "productGroup": product_group, - "families": ["render.local.hou", "review"], - "instance_node": instance.data["instance_node"], - "representations": [ - { - "stagingDir": staging_dir, - "ext": ext, - "name": ext, - "tags": ["review"] if preview else [], - "files": aov_filenames, - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"] - } - ] - }) - - # Skip integrating original render instance. - # We are not removing it because it's used to trigger the render. - instance.data["integrate"] = False diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_mantra_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_mantra_rop.py deleted file mode 100644 index f7feeee63b..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_mantra_rop.py +++ /dev/null @@ -1,159 +0,0 @@ -import re -import os - -import hou -import pyblish.api - -from ayon_houdini.api.lib import ( - evalParmNoFrame, - get_color_management_preferences -) -from ayon_houdini.api import ( - colorspace, - plugin -) - - -class CollectMantraROPRenderProducts(plugin.HoudiniInstancePlugin): - """Collect Mantra Render Products - - Collects the instance.data["files"] for the render products. 
- - Provides: - instance -> files - - """ - - label = "Mantra ROP Render Products" - # This specific order value is used so that - # this plugin runs after CollectFrames - order = pyblish.api.CollectorOrder + 0.11 - families = ["mantra_rop"] - - def process(self, instance): - - rop = hou.node(instance.data.get("instance_node")) - - # Collect chunkSize - chunk_size_parm = rop.parm("chunkSize") - if chunk_size_parm: - chunk_size = int(chunk_size_parm.eval()) - instance.data["chunkSize"] = chunk_size - self.log.debug("Chunk Size: %s" % chunk_size) - - default_prefix = evalParmNoFrame(rop, "vm_picture") - render_products = [] - - export_prefix = None - export_products = [] - if instance.data["splitRender"]: - export_prefix = evalParmNoFrame( - rop, "soho_diskfile", pad_character="0" - ) - beauty_export_product = self.get_render_product_name( - prefix=export_prefix, - suffix=None) - export_products.append(beauty_export_product) - self.log.debug( - "Found export product: {}".format(beauty_export_product) - ) - instance.data["ifdFile"] = beauty_export_product - instance.data["exportFiles"] = list(export_products) - - # Default beauty AOV - beauty_product = self.get_render_product_name( - prefix=default_prefix, suffix=None - ) - render_products.append(beauty_product) - - files_by_aov = { - "beauty": self.generate_expected_files(instance, - beauty_product) - } - - # Assume it's a multipartExr Render. - multipartExr = True - - # TODO: This logic doesn't take into considerations - # cryptomatte defined in 'Images > Cryptomatte' - aov_numbers = rop.evalParm("vm_numaux") - if aov_numbers > 0: - # get the filenames of the AOVs - for i in range(1, aov_numbers + 1): - var = rop.evalParm("vm_variable_plane%d" % i) - if var: - aov_name = "vm_filename_plane%d" % i - aov_boolean = "vm_usefile_plane%d" % i - aov_enabled = rop.evalParm(aov_boolean) - has_aov_path = rop.evalParm(aov_name) - if has_aov_path and aov_enabled == 1: - aov_prefix = evalParmNoFrame(rop, aov_name) - aov_product = self.get_render_product_name( - prefix=aov_prefix, suffix=None - ) - render_products.append(aov_product) - - files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa - - # Set to False as soon as we have a separated aov. - multipartExr = False - - # Review Logic expects this key to exist and be True - # if render is a multipart Exr. - # As long as we have one AOV then multipartExr should be True. - instance.data["multipartExr"] = multipartExr - - for product in render_products: - self.log.debug("Found render product: %s" % product) - - filenames = list(render_products) - instance.data["files"] = filenames - instance.data["renderProducts"] = colorspace.ARenderProduct() - - # For now by default do NOT try to publish the rendered output - instance.data["publishJobState"] = "Suspended" - instance.data["attachTo"] = [] # stub required data - - if "expectedFiles" not in instance.data: - instance.data["expectedFiles"] = list() - instance.data["expectedFiles"].append(files_by_aov) - - # update the colorspace data - colorspace_data = get_color_management_preferences() - instance.data["colorspaceConfig"] = colorspace_data["config"] - instance.data["colorspaceDisplay"] = colorspace_data["display"] - instance.data["colorspaceView"] = colorspace_data["view"] - - def get_render_product_name(self, prefix, suffix): - product_name = prefix - if suffix: - # Add ".{suffix}" before the extension - prefix_base, ext = os.path.splitext(prefix) - product_name = prefix_base + "." 
+ suffix + ext - - return product_name - - def generate_expected_files(self, instance, path): - """Create expected files in instance data""" - - dir = os.path.dirname(path) - file = os.path.basename(path) - - if "#" in file: - def replace(match): - return "%0{}d".format(len(match.group())) - - file = re.sub("#+", replace, file) - - if "%" not in file: - return path - - expected_files = [] - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - for i in range(int(start), (int(end) + 1)): - expected_files.append( - os.path.join(dir, (file % i)).replace("\\", "/")) - - return expected_files diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_output_node.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_output_node.py deleted file mode 100644 index ff51669376..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_output_node.py +++ /dev/null @@ -1,77 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import KnownPublishError -from ayon_houdini.api import plugin - - -class CollectOutputSOPPath(plugin.HoudiniInstancePlugin): - """Collect the out node's SOP/COP Path value.""" - - order = pyblish.api.CollectorOrder - families = [ - "pointcache", - "camera", - "vdbcache", - "imagesequence", - "usd", - "usdrender", - "redshiftproxy", - "staticMesh", - "model" - ] - - label = "Collect Output Node Path" - - def process(self, instance): - - import hou - - node = hou.node(instance.data["instance_node"]) - - # Get sop path - node_type = node.type().name() - if node_type == "geometry": - out_node = node.parm("soppath").evalAsNode() - - elif node_type == "alembic": - - # Alembic can switch between using SOP Path or object - if node.parm("use_sop_path").eval(): - out_node = node.parm("sop_path").evalAsNode() - else: - root = node.parm("root").eval() - objects = node.parm("objects").eval() - path = root + "/" + objects - out_node = hou.node(path) - - elif node_type == "comp": - out_node = node.parm("coppath").evalAsNode() - - elif node_type == "usd" or node_type == "usdrender": - out_node = node.parm("loppath").evalAsNode() - - elif node_type == "usd_rop" or node_type == "usdrender_rop": - # Inside Solaris e.g. 
/stage (not in ROP context) - # When incoming connection is present it takes it directly - inputs = node.inputs() - if inputs: - out_node = inputs[0] - else: - out_node = node.parm("loppath").evalAsNode() - - elif node_type == "Redshift_Proxy_Output": - out_node = node.parm("RS_archive_sopPath").evalAsNode() - - elif node_type == "filmboxfbx": - out_node = node.parm("startnode").evalAsNode() - - else: - raise KnownPublishError( - "ROP node type '{}' is not supported.".format(node_type) - ) - - if not out_node: - self.log.warning("No output node collected.") - return - - self.log.debug("Output node: %s" % out_node.path()) - instance.data["output_node"] = out_node diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_redshift_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_redshift_rop.py deleted file mode 100644 index 96cb6ebeaf..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_redshift_rop.py +++ /dev/null @@ -1,185 +0,0 @@ -import re -import os - -import hou -import pyblish.api - -from ayon_houdini.api.lib import ( - evalParmNoFrame, - get_color_management_preferences -) -from ayon_houdini.api import ( - colorspace, - plugin -) - - -class CollectRedshiftROPRenderProducts(plugin.HoudiniInstancePlugin): - """Collect USD Render Products - - Collects the instance.data["files"] for the render products. - - Provides: - instance -> files - - """ - - label = "Redshift ROP Render Products" - # This specific order value is used so that - # this plugin runs after CollectFrames - order = pyblish.api.CollectorOrder + 0.11 - families = ["redshift_rop"] - - def process(self, instance): - rop = hou.node(instance.data.get("instance_node")) - - # Collect chunkSize - chunk_size_parm = rop.parm("chunkSize") - if chunk_size_parm: - chunk_size = int(chunk_size_parm.eval()) - instance.data["chunkSize"] = chunk_size - self.log.debug("Chunk Size: %s" % chunk_size) - - default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix") - beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix") - - export_products = [] - if instance.data["splitRender"]: - export_prefix = evalParmNoFrame( - rop, "RS_archive_file", pad_character="0" - ) - beauty_export_product = self.get_render_product_name( - prefix=export_prefix, - suffix=None) - export_products.append(beauty_export_product) - self.log.debug( - "Found export product: {}".format(beauty_export_product) - ) - instance.data["ifdFile"] = beauty_export_product - instance.data["exportFiles"] = list(export_products) - - full_exr_mode = (rop.evalParm("RS_outputMultilayerMode") == "2") - if full_exr_mode: - # Ignore beauty suffix if full mode is enabled - # As this is what the rop does. - beauty_suffix = "" - - # Assume it's a multipartExr Render. 
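-        # As in the Arnold and Mantra collectors, this is flipped to
-        # False below once any AOV is written to its own file
-        # (cryptomatte, or when full multilayer EXR mode is off).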
- multipartExr = True - - # Default beauty/main layer AOV - beauty_product = self.get_render_product_name( - prefix=default_prefix, suffix=beauty_suffix - ) - render_products = [beauty_product] - files_by_aov = { - beauty_suffix: self.generate_expected_files(instance, - beauty_product) - } - - aovs_rop = rop.parm("RS_aovGetFromNode").evalAsNode() - if aovs_rop: - rop = aovs_rop - - num_aovs = 0 - if not rop.evalParm('RS_aovAllAOVsDisabled'): - num_aovs = rop.evalParm("RS_aov") - - for index in range(num_aovs): - i = index + 1 - - # Skip disabled AOVs - if not rop.evalParm(f"RS_aovEnable_{i}"): - continue - - aov_suffix = rop.evalParm(f"RS_aovSuffix_{i}") - aov_prefix = evalParmNoFrame(rop, f"RS_aovCustomPrefix_{i}") - if not aov_prefix: - aov_prefix = default_prefix - - if rop.parm(f"RS_aovID_{i}").evalAsString() == "CRYPTOMATTE" or \ - not full_exr_mode: - - aov_product = self.get_render_product_name(aov_prefix, aov_suffix) - render_products.append(aov_product) - - files_by_aov[aov_suffix] = self.generate_expected_files(instance, - aov_product) # noqa - - # Set to False as soon as we have a separated aov. - multipartExr = False - - # Review Logic expects this key to exist and be True - # if render is a multipart Exr. - # As long as we have one AOV then multipartExr should be True. - instance.data["multipartExr"] = multipartExr - - for product in render_products: - self.log.debug("Found render product: %s" % product) - - filenames = list(render_products) - instance.data["files"] = filenames - instance.data["renderProducts"] = colorspace.ARenderProduct() - - # For now by default do NOT try to publish the rendered output - instance.data["publishJobState"] = "Suspended" - instance.data["attachTo"] = [] # stub required data - - if "expectedFiles" not in instance.data: - instance.data["expectedFiles"] = [] - instance.data["expectedFiles"].append(files_by_aov) - - # update the colorspace data - colorspace_data = get_color_management_preferences() - instance.data["colorspaceConfig"] = colorspace_data["config"] - instance.data["colorspaceDisplay"] = colorspace_data["display"] - instance.data["colorspaceView"] = colorspace_data["view"] - - def get_render_product_name(self, prefix, suffix): - """Return the output filename using the AOV prefix and suffix""" - - # When AOV is explicitly defined in prefix we just swap it out - # directly with the AOV suffix to embed it. - # Note: '$AOV' seems to be evaluated in the parameter as '%AOV%' - has_aov_in_prefix = "%AOV%" in prefix - if has_aov_in_prefix: - # It seems that when some special separator characters are present - # before the %AOV% token that Redshift will secretly remove it if - # there is no suffix for the current product, for example: - # foo_%AOV% -> foo.exr - pattern = "%AOV%" if suffix else "[._-]?%AOV%" - product_name = re.sub(pattern, suffix, prefix, flags=re.IGNORECASE) - else: - if suffix: - # Add ".{suffix}" before the extension - prefix_base, ext = os.path.splitext(prefix) - product_name = prefix_base + "." 
+ suffix + ext - else: - product_name = prefix - - return product_name - - def generate_expected_files(self, instance, path): - """Create expected files in instance data""" - - dir = os.path.dirname(path) - file = os.path.basename(path) - - if "#" in file: - def replace(match): - return "%0{}d".format(len(match.group())) - - file = re.sub("#+", replace, file) - - if "%" not in file: - return path - - expected_files = [] - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - for i in range(int(start), (int(end) + 1)): - expected_files.append( - os.path.join(dir, (file % i)).replace("\\", "/")) - - return expected_files diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_render_products.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_render_products.py deleted file mode 100644 index 9dea2364f8..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_render_products.py +++ /dev/null @@ -1,248 +0,0 @@ -import re -import os - -import hou -import pxr.UsdRender - -import pyblish.api - -from ayon_houdini.api import plugin -from ayon_houdini.api.usd import ( - get_usd_render_rop_rendersettings -) - - -class CollectRenderProducts(plugin.HoudiniInstancePlugin): - """Collect USD Render Products. - - The render products are collected from the USD Render ROP node by detecting - what the selected Render Settings prim path is, then finding those - Render Settings in the USD Stage and collecting the targeted Render - Products and their expected filenames. - - Note: Product refers USD Render Product, not to an AYON Product - - """ - - label = "Collect Render Products" - # This plugin should run after CollectUsdRender - # and, before CollectLocalRenderInstances - order = pyblish.api.CollectorOrder + 0.04 - families = ["usdrender"] - - def process(self, instance): - - rop_node = hou.node(instance.data["instance_node"]) - node = instance.data.get("output_node") - if not node: - rop_path = rop_node.path() - self.log.error( - "No output node found. Make sure to connect a valid " - "input to the USD ROP: %s" % rop_path - ) - return - - override_output_image = rop_node.evalParm("outputimage") - - filenames = [] - files_by_product = {} - stage = node.stage() - for prim_path in self.get_render_products(rop_node, stage): - prim = stage.GetPrimAtPath(prim_path) - if not prim or not prim.IsA(pxr.UsdRender.Product): - self.log.warning("Found invalid render product path " - "configured in render settings that is not a " - "Render Product prim: %s", prim_path) - continue - - render_product = pxr.UsdRender.Product(prim) - # Get Render Product Name - if override_output_image: - name = override_output_image - else: - # We force taking it from any random time sample as opposed to - # "default" that the USD Api falls back to since that won't - # return time sampled values if they were set per time sample. 
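-                # Untested editor's sketch of a per-frame alternative
-                # (see also the TODO further below), where `frame` would
-                # be a frame number within the render range:
-                #     attr = render_product.GetProductNameAttr()
-                #     if attr.ValueMightBeTimeVarying():
-                #         name = attr.Get(time=frame)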
- name = render_product.GetProductNameAttr().Get(time=0) - - dirname = os.path.dirname(name) - basename = os.path.basename(name) - - dollarf_regex = r"(\$F([0-9]?))" - if re.match(dollarf_regex, basename): - # TODO: Confirm this actually is allowed USD stages and HUSK - # Substitute $F - def replace(match): - """Replace $F4 with padded #.""" - padding = int(match.group(2)) if match.group(2) else 1 - return "#" * padding - - filename_base = re.sub(dollarf_regex, replace, basename) - filename = os.path.join(dirname, filename_base) - else: - # Last group of digits in the filename before the extension - # The frame number must always be prefixed by underscore or dot - # Allow product names like: - # - filename.1001.exr - # - filename.1001.aov.exr - # - filename.aov.1001.exr - # - filename_1001.exr - frame_regex = r"(.*[._])(\d+)(?!.*\d)(.*\.[A-Za-z0-9]+$)" - - # It may be the case that the current USD stage has stored - # product name samples (e.g. when loading a USD file with - # time samples) where it does not refer to e.g. $F4. And thus - # it refers to the actual path like /path/to/frame.1001.exr - # TODO: It would be better to maybe sample product name - # attribute `ValueMightBeTimeVarying` and if so get it per - # frame using `attr.Get(time=frame)` to ensure we get the - # actual product name set at that point in time? - # Substitute basename.0001.ext - def replace(match): - head, frame, tail = match.groups() - padding = "#" * len(frame) - return head + padding + tail - - filename_base = re.sub(frame_regex, replace, basename) - filename = os.path.join(dirname, filename_base) - filename = filename.replace("\\", "/") - - assert "#" in filename, ( - "Couldn't resolve render product name " - "with frame number: %s" % name - ) - - filenames.append(filename) - - # TODO: Improve AOV name detection logic - aov_identifier = self.get_aov_identifier(render_product) - if aov_identifier in files_by_product: - self.log.error( - "Multiple render products are identified as the same AOV " - "which means one of the two will not be ingested during" - "publishing. AOV: '%s'", aov_identifier - ) - self.log.warning("Skipping Render Product: %s", render_product) - - files_by_product[aov_identifier] = self.generate_expected_files( - instance, - filename - ) - - aov_label = f"'{aov_identifier}' aov in " if aov_identifier else "" - self.log.debug("Render Product %s%s", aov_label, prim_path) - self.log.debug("Product name: %s", filename) - - # Filenames for Deadline - instance.data["files"] = filenames - instance.data.setdefault("expectedFiles", []).append(files_by_product) - - # Farm Publishing add review logic expects this key to exist and - # be True if render is a multipart Exr. - # otherwise it will most probably fail the AOV filter as multipartExr - # files mostly don't include aov name in the file path. - # Assume multipartExr is 'True' as long as we have one AOV. - instance.data["multipartExr"] = len(files_by_product) <= 1 - - def get_aov_identifier(self, render_product): - """Return the AOV identifier for a Render Product - - A Render Product does not really define what 'AOV' it is, it - defines the product name (output path) and the render vars to - include. - - So we need to define what in particular of a `UsdRenderProduct` - we use to separate the AOV (and thus apply sub-grouping with). 
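-
-        As an illustration of the rule described next: a product
-        targeting only the rendervar "diffuse" yields the identifier
-        "diffuse", while combined cryptomatte vars (crypto_asset,
-        crypto_asset01, ...) are grouped as one cryptomatte AOV by
-        their shared prefix.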
- - For now we'll consider any Render Product that only refers - to a single rendervar that the rendervars prim name is the AOV - otherwise we'll assume renderproduct to be a combined multilayer - 'main' layer - - Args: - render_product (pxr.UsdRender.Product): The Render Product - - Returns: - str: The AOV identifier - - """ - targets = render_product.GetOrderedVarsRel().GetTargets() - if len(targets) > 1: - # Cryptomattes usually are combined render vars, for example: - # - crypto_asset, crypto_asset01, crypto_asset02, crypto_asset03 - # - crypto_object, crypto_object01, etc. - # These still refer to the same AOV so we take the common prefix - # e.g. `crypto_asset` or `crypto` (if multiple are combined) - if all(target.name.startswith("crypto") for target in targets): - start = os.path.commonpath([target.name for target in targets]) - return start.rstrip("_") # remove any trailing _ - - # Main layer - return "" - elif len(targets) == 1: - # AOV for a single var - return targets[0].name - else: - self.log.warning( - f"Render product has no rendervars set: {render_product}") - return "" - - def get_render_products(self, usdrender_rop, stage): - """"The render products in the defined render settings - - Args: - usdrender_rop (hou.Node): The Houdini USD Render ROP node. - stage (pxr.Usd.Stage): The USD stage to find the render settings - in. This is usually the stage from the LOP path the USD Render - ROP node refers to. - - Returns: - List[Sdf.Path]: Render Product paths enabled in the render settings - - """ - render_settings = get_usd_render_rop_rendersettings(usdrender_rop, - stage, - logger=self.log) - if not render_settings: - return [] - - return render_settings.GetProductsRel().GetTargets() - - def generate_expected_files(self, instance, path): - """Generate full sequence of expected files from a filepath. - - The filepath should have '#' token as placeholder for frame numbers or - should have %04d or %d placeholders. The `#` characters indicate frame - number and padding, e.g. #### becomes 0001 for frame 1. - - Args: - instance (pyblish.api.Instance): The publish instance. - path (str): The filepath to generate the list of output files for. - - Returns: - list: Filepath per frame. 
- - """ - - folder = os.path.dirname(path) - filename = os.path.basename(path) - - if "#" in filename: - def replace(match): - return "%0{}d".format(len(match.group())) - - filename = re.sub("#+", replace, filename) - - if "%" not in filename: - # Not a sequence, single file - return path - - expected_files = [] - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - for frame in range(int(start), (int(end) + 1)): - expected_files.append( - os.path.join(folder, (filename % frame)).replace("\\", "/")) - - return expected_files diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_review_data.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_review_data.py deleted file mode 100644 index cca55463e6..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_review_data.py +++ /dev/null @@ -1,85 +0,0 @@ -import hou -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectHoudiniReviewData(plugin.HoudiniInstancePlugin): - """Collect Review Data.""" - - label = "Collect Review Data" - # This specific order value is used so that - # this plugin runs after CollectRopFrameRange - # Also after CollectLocalRenderInstances - order = pyblish.api.CollectorOrder + 0.13 - families = ["review"] - - def process(self, instance): - - # This fixes the burnin having the incorrect start/end timestamps - # because without this it would take it from the context instead - # which isn't the actual frame range that this instance renders. - instance.data["handleStart"] = 0 - instance.data["handleEnd"] = 0 - instance.data["fps"] = instance.context.data["fps"] - - # Enable ftrack functionality - instance.data.setdefault("families", []).append('ftrack') - - # Get the camera from the rop node to collect the focal length - ropnode_path = instance.data["instance_node"] - ropnode = hou.node(ropnode_path) - - # Get camera based on the instance_node type. - camera_path = self._get_camera_path(ropnode) - camera_node = hou.node(camera_path) - if not camera_node: - self.log.warning("No valid camera node found on review node: " - "{}".format(camera_path)) - return - - # Collect focal length. - focal_length_parm = camera_node.parm("focal") - if not focal_length_parm: - self.log.warning("No 'focal' (focal length) parameter found on " - "camera: {}".format(camera_path)) - return - - if focal_length_parm.isTimeDependent(): - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] + 1 - focal_length = [ - focal_length_parm.evalAsFloatAtFrame(t) - for t in range(int(start), int(end)) - ] - else: - focal_length = focal_length_parm.evalAsFloat() - - # Store focal length in `burninDataMembers` - burnin_members = instance.data.setdefault("burninDataMembers", {}) - burnin_members["focalLength"] = focal_length - - def _get_camera_path(self, ropnode): - """Get the camera path associated with the given rop node. - - This function evaluates the camera parameter according to the - type of the given rop node. - - Returns: - Union[str, None]: Camera path or None. - - This function can return empty string if the camera - path is empty i.e. no camera path. 
- """ - - if ropnode.type().name() in { - "opengl", "karma", "ifd", "arnold" - }: - return ropnode.parm("camera").eval() - - elif ropnode.type().name() == "Redshift_ROP": - return ropnode.parm("RS_renderCamera").eval() - - elif ropnode.type().name() == "vray_renderer": - return ropnode.parm("render_camera").eval() - - return None diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_reviewable_instances.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_reviewable_instances.py deleted file mode 100644 index 1bc797a1c1..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_reviewable_instances.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectReviewableInstances(plugin.HoudiniInstancePlugin): - """Collect Reviewable Instances. - - Basically, all instances of the specified families - with creator_attribure["review"] - """ - - order = pyblish.api.CollectorOrder - label = "Collect Reviewable Instances" - families = ["mantra_rop", - "karma_rop", - "redshift_rop", - "arnold_rop", - "vray_rop", - "usdrender"] - - def process(self, instance): - creator_attribute = instance.data["creator_attributes"] - - instance.data["review"] = creator_attribute.get("review", False) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_rop_frame_range.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_rop_frame_range.py deleted file mode 100644 index c0f8d7aef9..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_rop_frame_range.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collector plugin for frames data on ROP instances.""" -import hou # noqa -import pyblish.api -from ayon_houdini.api import lib, plugin - - -class CollectRopFrameRange(plugin.HoudiniInstancePlugin): - """Collect all frames which would be saved from the ROP nodes""" - - order = pyblish.api.CollectorOrder - label = "Collect RopNode Frame Range" - - def process(self, instance): - - node_path = instance.data.get("instance_node") - if node_path is None: - # Instance without instance node like a workfile instance - self.log.debug( - "No instance node found for instance: {}".format(instance) - ) - return - - ropnode = hou.node(node_path) - frame_data = lib.get_frame_data( - ropnode, self.log - ) - - if not frame_data: - return - - # Log debug message about the collected frame range - self.log.debug( - "Collected frame_data: {}".format(frame_data) - ) - - instance.data.update(frame_data) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_staticmesh_type.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_staticmesh_type.py deleted file mode 100644 index 1aab655532..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_staticmesh_type.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collector for staticMesh types. """ - -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectStaticMeshType(plugin.HoudiniInstancePlugin): - """Collect data type for fbx instance.""" - - families = ["staticMesh"] - label = "Collect type of staticMesh" - - order = pyblish.api.CollectorOrder - - def process(self, instance): - - if instance.data["creator_identifier"] == "io.openpype.creators.houdini.staticmesh.fbx": # noqa: E501 - # Marking this instance as FBX triggers the FBX extractor. 
- instance.data["families"] += ["fbx"] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_layers.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_layers.py deleted file mode 100644 index 5fa787fb39..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_layers.py +++ /dev/null @@ -1,158 +0,0 @@ -import copy -import os -import re - -import pyblish.api - -from ayon_core.pipeline.create import get_product_name -from ayon_houdini.api import plugin -import ayon_houdini.api.usd as usdlib - -import hou - - -def copy_instance_data(instance_src, instance_dest, attr): - """Copy instance data from `src` instance to `dest` instance. - - Examples: - >>> copy_instance_data(instance_src, instance_dest, - >>> attr="publish_attributes.CollectRopFrameRange") - - Arguments: - instance_src (pyblish.api.Instance): Source instance to copy from - instance_dest (pyblish.api.Instance): Target instance to copy to - attr (str): Attribute on the source instance to copy. This can be - a nested key joined by `.` to only copy sub entries of dictionaries - in the source instance's data. - - Raises: - KeyError: If the key does not exist on the source instance. - AssertionError: If a parent key already exists on the destination - instance but is not of the correct type (= is not a dict) - - """ - - src_data = instance_src.data - dest_data = instance_dest.data - keys = attr.split(".") - for i, key in enumerate(keys): - if key not in src_data: - break - - src_value = src_data[key] - if i != len(key): - dest_data = dest_data.setdefault(key, {}) - assert isinstance(dest_data, dict), "Destination must be a dict" - src_data = src_value - else: - # Last iteration - assign the value - dest_data[key] = copy.deepcopy(src_value) - - -class CollectUsdLayers(plugin.HoudiniInstancePlugin): - """Collect the USD Layers that have configured save paths.""" - - order = pyblish.api.CollectorOrder + 0.25 - label = "Collect USD Layers" - families = ["usdrop"] - - def process(self, instance): - # TODO: Replace this with a Hidden Creator so we collect these BEFORE - # starting the publish so the user sees them before publishing - # - however user should not be able to individually enable/disable - # this from the main ROP its created from? - - output = instance.data.get("output_node") - if not output: - self.log.debug("No output node found..") - return - - rop_node = hou.node(instance.data["instance_node"]) - - save_layers = [] - for layer in usdlib.get_configured_save_layers(rop_node): - - info = layer.rootPrims.get("HoudiniLayerInfo") - save_path = info.customData.get("HoudiniSavePath") - creator = info.customData.get("HoudiniCreatorNode") - - self.log.debug("Found configured save path: " - "%s -> %s", layer, save_path) - - # Log node that configured this save path - creator_node = hou.nodeBySessionId(creator) if creator else None - if creator_node: - self.log.debug( - "Created by: %s", creator_node.path() - ) - - save_layers.append((layer, save_path, creator_node)) - - # Store on the instance - instance.data["usdConfiguredSavePaths"] = save_layers - - # Create configured layer instances so User can disable updating - # specific configured layers for publishing. 
- context = instance.context - for layer, save_path, creator_node in save_layers: - name = os.path.basename(save_path) - layer_inst = context.create_instance(name) - - # include same USD ROP - layer_inst.append(rop_node) - - staging_dir, fname = os.path.split(save_path) - fname_no_ext, ext = os.path.splitext(fname) - - variant = fname_no_ext - - # Strip off any trailing version number in the form of _v[0-9]+ - variant = re.sub("_v[0-9]+$", "", variant) - - layer_inst.data["usd_layer"] = layer - layer_inst.data["usd_layer_save_path"] = save_path - - project_name = context.data["projectName"] - variant_base = instance.data["variant"] - subset = get_product_name( - project_name=project_name, - # TODO: This should use task from `instance` - task_name=context.data["anatomyData"]["task"]["name"], - task_type=context.data["anatomyData"]["task"]["type"], - host_name=context.data["hostName"], - product_type="usd", - variant=variant_base + "_" + variant, - project_settings=context.data["project_settings"] - ) - - label = "{0} -> {1}".format(instance.data["name"], subset) - family = "usd" - layer_inst.data["family"] = family - layer_inst.data["families"] = [family] - layer_inst.data["subset"] = subset - layer_inst.data["label"] = label - layer_inst.data["asset"] = instance.data["asset"] - layer_inst.data["task"] = instance.data.get("task") - layer_inst.data["instance_node"] = instance.data["instance_node"] - layer_inst.data["render"] = False - layer_inst.data["output_node"] = creator_node - - # Inherit "use handles" from the source instance - # TODO: Do we want to maybe copy full `publish_attributes` instead? - copy_instance_data( - instance, layer_inst, - attr="publish_attributes.CollectRopFrameRange.use_handles" - ) - - # Allow this subset to be grouped into a USD Layer on creation - layer_inst.data["subsetGroup"] = "USD Layer" - - # For now just assume the representation will get published - representation = { - "name": "usd", - "ext": ext.lstrip("."), - "stagingDir": staging_dir, - "files": fname - } - layer_inst.data.setdefault("representations", []).append( - representation) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_look_assets.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_look_assets.py deleted file mode 100644 index 0874cef0b6..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_look_assets.py +++ /dev/null @@ -1,243 +0,0 @@ -import re - -import os -import glob -from typing import List, Optional -import dataclasses - -import pyblish.api -import hou -from pxr import Sdf - -from ayon_houdini.api import plugin - - -# Colorspace attributes differ per renderer implementation in the USD data -# Some have dedicated input names like Arnold and Redshift, whereas others like -# MaterialX store `colorSpace` metadata on the asset property itself. -# See `get_colorspace` method on the plug-in for more details -COLORSPACE_ATTRS = [ - "inputs:color_space", # Image Vop (arnold::image) - "inputs:tex0_colorSpace", # RS Texture Vop (redshift::TextureSampler) - # TODO: USD UV Texture VOP doesn't seem to use colorspaces from the actual - # OCIO configuration so we skip these for now. Especially since the - # texture is usually used for 'preview' purposes anyway. - # "inputs:sourceColorSpace", # USD UV Texture Vop (usduvtexture::2.0) -] - - -@dataclasses.dataclass -class Resource: - attribute: str # property path - source: str # unresolved source path - files: List[str] # resolve list of files, e.g. 
multiple for `<UDIM>`
- color_space: str = None # colorspace of the resource
-
-
- def get_layer_property_paths(layer: Sdf.Layer) -> List[Sdf.Path]:
- """Return all property paths from a layer"""
- paths = []
-
- def collect_paths(path):
- if not path.IsPropertyPath():
- return
- paths.append(path)
-
- layer.Traverse("/", collect_paths)
-
- return paths
-
-
- class CollectUsdLookAssets(plugin.HoudiniInstancePlugin):
- """Collect all assets introduced by the look.
-
- We are looking to collect e.g. all texture resources so we can transfer
- them with the publish and write them to the publish location.
-
- If possible, we'll also try to identify the colorspace of the asset.
-
- """
- # TODO: Implement $F frame support (per frame values)
- # TODO: If input image is already a published texture or resource then
- # preferably we'd keep the link intact and NOT update it. We can just
- # start ignoring AYON URIs
-
- label = "Collect USD Look Assets"
- order = pyblish.api.CollectorOrder
- hosts = ["houdini"]
- families = ["look"]
-
- exclude_suffixes = [".usd", ".usda", ".usdc", ".usdz", ".abc", ".vdb"]
-
- def process(self, instance):
-
- rop: hou.RopNode = hou.node(instance.data.get("instance_node"))
- if not rop:
- return
-
- lop_node: hou.LopNode = instance.data.get("output_node")
- if not lop_node:
- return
-
- above_break_layers = set(lop_node.layersAboveLayerBreak())
-
- stage = lop_node.stage()
- layers = [
- layer for layer
- in stage.GetLayerStack(includeSessionLayers=False)
- if layer.identifier not in above_break_layers
- ]
-
- instance_resources = self.get_layer_assets(layers)
-
- # Define a relative asset remapping for the USD Extractor so that
- # any textures are remapped to their 'relative' publish path.
- # All textures will be in a relative `./resources/` folder
- remap = {}
- for resource in instance_resources:
- source = resource.source
- name = os.path.basename(source)
- remap[os.path.normpath(source)] = f"./resources/{name}"
- instance.data["assetRemap"] = remap
-
- # Store resources on instance
- resources = instance.data.setdefault("resources", [])
- for resource in instance_resources:
- resources.append(dataclasses.asdict(resource))
-
- # Log all collected textures
- # Note: It is fine for a single texture to be included more than once
- # where even one of them does not have a color space set, but the other
- # does. For example, there may be a USD UV Texture just for a GL
- # preview material which does not specify an OCIO color
- # space.
- all_files = []
- for resource in instance_resources:
- all_files.append(f"{resource.attribute}:")
-
- for filepath in resource.files:
- if resource.color_space:
- file_label = f"- {filepath} ({resource.color_space})"
- else:
- file_label = f"- {filepath}"
- all_files.append(file_label)
-
- self.log.info(
- "Collected assets:\n{}".format(
- "\n".join(all_files)
- )
- )
-
- def get_layer_assets(self, layers: List[Sdf.Layer]) -> List[Resource]:
- # TODO: Correctly resolve paths using Asset Resolver.
- # Preferably this would use one cached
- # resolver context to optimize the path resolving.
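# (Editor's sketch) The cached resolve suggested above could look roughly
# like this with the USD Ar API - an untested illustration, not part of
# the original plugin; `asset_path` stands for the unresolved string:
#
#     from pxr import Ar
#     resolver = Ar.GetResolver()
#     context = resolver.CreateDefaultContextForAsset(layer.identifier)
#     with Ar.ResolverContextBinder(context):
#         resolved = resolver.Resolve(asset_path)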
- # TODO: Fix for time samples - if the attribute has time samples,
- # then `.default` might not be authored on the spec
-
- resources: List[Resource] = list()
- for layer in layers:
- for path in get_layer_property_paths(layer):
-
- spec = layer.GetAttributeAtPath(path)
- if not spec:
- continue
-
- if spec.typeName != "asset":
- continue
-
- asset: Sdf.AssetPath = spec.default
- base, ext = os.path.splitext(asset.path)
- if ext in self.exclude_suffixes:
- continue
-
- filepath = asset.path.replace("\\", "/")
-
- # Expand to all matching files available on disk
- # TODO: Add support for ``
- # TODO: Add support for ``
- if "<UDIM>" in filepath.upper():
- pattern = re.sub(
- r"<UDIM>",
- # UDIM is always four digits
- "[0-9]" * 4,
- filepath,
- flags=re.IGNORECASE
- )
- files = glob.glob(pattern)
- else:
- # Single file
- files = [filepath]
-
- # Detect the colorspace of the input asset property
- colorspace = self.get_colorspace(spec)
-
- resource = Resource(
- attribute=path.pathString,
- source=asset.path,
- files=files,
- color_space=colorspace
- )
- resources.append(resource)
-
- # Sort by filepath
- resources.sort(key=lambda r: r.source)
-
- return resources
-
- def get_colorspace(self, spec: Sdf.AttributeSpec) -> Optional[str]:
- """Return colorspace for an Asset attribute spec.
-
- There is currently no USD standard on how colorspaces should be
- represented for shaders or asset properties - each renderer's material
- implementations seem to currently use their own way of specifying the
- colorspace on the shader. As such, this comes with some guesswork.
-
- Args:
- spec (Sdf.AttributeSpec): The asset type attribute to retrieve
- the colorspace for.
-
- Returns:
- Optional[str]: The colorspace for the given attribute, if any.
-
- """
- # TODO: Support Karma, V-Ray, Renderman texture colorspaces
- # MaterialX image defines colorspace as custom info on the attribute
- if spec.HasInfo("colorSpace"):
- return spec.GetInfo("colorSpace")
-
- # Arnold materials define the colorspace as a separate primvar
- # TODO: Fix for time samples - if the attribute has time samples,
- # then `.default` might not be authored on the spec
- prim_path = spec.path.GetPrimPath()
- layer = spec.layer
- for name in COLORSPACE_ATTRS:
- colorspace_property_path = prim_path.AppendProperty(name)
- colorspace_spec = layer.GetAttributeAtPath(
- colorspace_property_path
- )
- if colorspace_spec and colorspace_spec.default:
- return colorspace_spec.default
-
-
- class CollectUsdLookResourceTransfers(plugin.HoudiniInstancePlugin):
- """Define the publish direct file transfers for any found resources.
-
- This ensures that any source texture will end up in the published look
- in the `resourcesDir`.
- - """ - label = "Collect USD Look Transfers" - order = pyblish.api.CollectorOrder + 0.496 - hosts = ["houdini"] - families = ["look"] - - def process(self, instance): - - resources_dir = instance.data["resourcesDir"] - transfers = instance.data.setdefault("transfers", []) - for resource in instance.data.get("resources", []): - for src in resource["files"]: - dest = os.path.join(resources_dir, os.path.basename(src)) - transfers.append((src, dest)) - self.log.debug("Registering transfer: %s -> %s", src, dest) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_render.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_render.py deleted file mode 100644 index a6e7572a18..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_usd_render.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import re - -import hou -import pyblish.api - -from ayon_houdini.api import ( - colorspace, - plugin -) -from ayon_houdini.api.lib import ( - evalParmNoFrame, - get_color_management_preferences -) - - -class CollectUsdRender(plugin.HoudiniInstancePlugin): - """Collect publishing data for USD Render ROP. - - If `rendercommand` parm is disabled (and thus no rendering triggers by the - usd render rop) it is assumed to be a "Split Render" job where the farm - will get an additional render job after the USD file is extracted. - - Provides: - instance -> ifdFile - instance -> colorspaceConfig - instance -> colorspaceDisplay - instance -> colorspaceView - - """ - - label = "Collect USD Render Rop" - order = pyblish.api.CollectorOrder - hosts = ["houdini"] - families = ["usdrender"] - - def process(self, instance): - - rop = hou.node(instance.data.get("instance_node")) - - if instance.data["splitRender"]: - # USD file output - lop_output = evalParmNoFrame( - rop, "lopoutput", pad_character="#" - ) - - # The file is usually relative to the Output Processor's 'Save to - # Directory' which forces all USD files to end up in that directory - # TODO: It is possible for a user to disable this - # TODO: When enabled I think only the basename of the `lopoutput` - # parm is preserved, any parent folders defined are likely ignored - folder = evalParmNoFrame( - rop, "savetodirectory_directory", pad_character="#" - ) - - export_file = os.path.join(folder, lop_output) - - # Substitute any # characters in the name back to their $F4 - # equivalent - def replace_to_f(match): - number = len(match.group(0)) - if number <= 1: - number = "" # make it just $F not $F1 or $F0 - return "$F{}".format(number) - - export_file = re.sub("#+", replace_to_f, export_file) - self.log.debug( - "Found export file: {}".format(export_file) - ) - instance.data["ifdFile"] = export_file - - # The render job is not frame dependent but fully dependent on - # the job having been completed, since the extracted file is a - # single file. 
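# (Editor's note) I.e. an export like "shot.usd" without a $F token is a
# single file for the whole range, so the render job depends on the entire
# export job finishing rather than on matching per-frame tasks.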
- if "$F" not in export_file: - instance.data["splitRenderFrameDependent"] = False - - # update the colorspace data - colorspace_data = get_color_management_preferences() - instance.data["colorspaceConfig"] = colorspace_data["config"] - instance.data["colorspaceDisplay"] = colorspace_data["display"] - instance.data["colorspaceView"] = colorspace_data["view"] - - # stub required data for Submit Publish Job publish plug-in - instance.data["attachTo"] = [] - instance.data["renderProducts"] = colorspace.ARenderProduct() diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_vray_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_vray_rop.py deleted file mode 100644 index 2f9c2bb18e..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_vray_rop.py +++ /dev/null @@ -1,154 +0,0 @@ -import re -import os - -import hou -import pyblish.api - -from ayon_houdini.api.lib import ( - evalParmNoFrame, - get_color_management_preferences -) -from ayon_houdini.api import ( - colorspace, - plugin -) - - -class CollectVrayROPRenderProducts(plugin.HoudiniInstancePlugin): - """Collect Vray Render Products - - Collects the instance.data["files"] for the render products. - - Provides: - instance -> files - - """ - - label = "VRay ROP Render Products" - # This specific order value is used so that - # this plugin runs after CollectFrames - order = pyblish.api.CollectorOrder + 0.11 - families = ["vray_rop"] - - def process(self, instance): - - rop = hou.node(instance.data.get("instance_node")) - - # Collect chunkSize - chunk_size_parm = rop.parm("chunkSize") - if chunk_size_parm: - chunk_size = int(chunk_size_parm.eval()) - instance.data["chunkSize"] = chunk_size - self.log.debug("Chunk Size: %s" % chunk_size) - - default_prefix = evalParmNoFrame(rop, "SettingsOutput_img_file_path") - render_products = [] - # TODO: add render elements if render element - - export_prefix = None - export_products = [] - if instance.data["splitRender"]: - export_prefix = evalParmNoFrame( - rop, "render_export_filepath", pad_character="0" - ) - beauty_export_product = self.get_render_product_name( - prefix=export_prefix, - suffix=None) - export_products.append(beauty_export_product) - self.log.debug( - "Found export product: {}".format(beauty_export_product) - ) - instance.data["ifdFile"] = beauty_export_product - instance.data["exportFiles"] = list(export_products) - - beauty_product = self.get_render_product_name(default_prefix) - render_products.append(beauty_product) - files_by_aov = { - "": self.generate_expected_files(instance, - beauty_product)} - - # Assume it's a multipartExr Render. - multipartExr = True - - if instance.data.get("RenderElement", True): - render_element = self.get_render_element_name(rop, default_prefix) - if render_element: - for aov, renderpass in render_element.items(): - render_products.append(renderpass) - files_by_aov[aov] = self.generate_expected_files( - instance, renderpass) - # Set to False as soon as we have a separated aov. - multipartExr = False - - # Review Logic expects this key to exist and be True - # if render is a multipart Exr. - # As long as we have one AOV then multipartExr should be True. 
- instance.data["multipartExr"] = multipartExr - - for product in render_products: - self.log.debug("Found render product: %s" % product) - filenames = list(render_products) - instance.data["files"] = filenames - instance.data["renderProducts"] = colorspace.ARenderProduct() - - # For now by default do NOT try to publish the rendered output - instance.data["publishJobState"] = "Suspended" - instance.data["attachTo"] = [] # stub required data - - if "expectedFiles" not in instance.data: - instance.data["expectedFiles"] = list() - instance.data["expectedFiles"].append(files_by_aov) - self.log.debug("expectedFiles:{}".format(files_by_aov)) - - # update the colorspace data - colorspace_data = get_color_management_preferences() - instance.data["colorspaceConfig"] = colorspace_data["config"] - instance.data["colorspaceDisplay"] = colorspace_data["display"] - instance.data["colorspaceView"] = colorspace_data["view"] - - def get_render_product_name(self, prefix, suffix=""): - """Return the beauty output filename if render element enabled - """ - # Remove aov suffix from the product: `prefix.aov_suffix` -> `prefix` - aov_parm = ".{}".format(suffix) - return prefix.replace(aov_parm, "") - - def get_render_element_name(self, node, prefix, suffix=""): - """Return the output filename using the AOV prefix and suffix - """ - render_element_dict = {} - # need a rewrite - re_path = node.evalParm("render_network_render_channels") - if re_path: - node_children = hou.node(re_path).children() - for element in node_children: - if element.shaderName() != "vray:SettingsRenderChannels": - aov = str(element) - render_product = prefix.replace(suffix, aov) - render_element_dict[aov] = render_product - return render_element_dict - - def generate_expected_files(self, instance, path): - """Create expected files in instance data""" - - dir = os.path.dirname(path) - file = os.path.basename(path) - - if "#" in file: - def replace(match): - return "%0{}d".format(len(match.group())) - - file = re.sub("#+", replace, file) - - if "%" not in file: - return path - - expected_files = [] - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - for i in range(int(start), (int(end) + 1)): - expected_files.append( - os.path.join(dir, (file % i)).replace("\\", "/")) - - return expected_files diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workfile.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workfile.py deleted file mode 100644 index 8d0939a803..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,34 +0,0 @@ -import os - -import pyblish.api -from ayon_houdini.api import plugin - -class CollectWorkfile(plugin.HoudiniInstancePlugin): - """Inject workfile representation into instance""" - - order = pyblish.api.CollectorOrder - 0.01 - label = "Houdini Workfile Data" - families = ["workfile"] - - def process(self, instance): - - current_file = instance.context.data["currentFile"] - folder, file = os.path.split(current_file) - filename, ext = os.path.splitext(file) - - instance.data.update({ - "setMembers": [current_file], - "frameStart": instance.context.data['frameStart'], - "frameEnd": instance.context.data['frameEnd'], - "handleStart": instance.context.data['handleStart'], - "handleEnd": instance.context.data['handleEnd'] - }) - - instance.data['representations'] = [{ - 'name': ext.lstrip("."), - 'ext': ext.lstrip("."), - 'files': file, - "stagingDir": folder, - }] - - 
self.log.debug('Collected workfile instance: {}'.format(file)) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workscene_fps.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workscene_fps.py deleted file mode 100644 index 0091eb0abb..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/collect_workscene_fps.py +++ /dev/null @@ -1,15 +0,0 @@ -import hou -import pyblish.api -from ayon_houdini.api import plugin - - -class CollectWorksceneFPS(plugin.HoudiniContextPlugin): - """Get the FPS of the work scene.""" - - label = "Workscene FPS" - order = pyblish.api.CollectorOrder - - def process(self, context): - fps = hou.fps() - self.log.info("Workscene FPS: %s" % fps) - context.data.update({"fps": fps}) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py deleted file mode 100644 index e85df4ee81..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py +++ /dev/null @@ -1,59 +0,0 @@ -import tempfile -import pyblish.api - -from ayon_core.pipeline import OptionalPyblishPluginMixin -from ayon_houdini.api import lib, plugin -from ayon_houdini.api.pipeline import IS_HEADLESS - - -class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin, - OptionalPyblishPluginMixin): - """Set instance thumbnail to a screengrab of current active viewport. - - This makes it so that if an instance does not have a thumbnail set yet that - it will get a thumbnail of the currently active view at the time of - publishing as a fallback. - - """ - order = pyblish.api.ExtractorOrder + 0.49 - label = "Extract Active View Thumbnail" - families = ["workfile"] - - def process(self, instance): - if not self.is_active(instance.data): - return - - if IS_HEADLESS: - self.log.debug( - "Skip extraction of active view thumbnail, due to being in" - "headless mode." 
- ) - return - - thumbnail = instance.data.get("thumbnailPath") - if thumbnail: - # A thumbnail was already set for this instance - return - - view_thumbnail = self.get_view_thumbnail(instance) - if not view_thumbnail: - return - self.log.debug("Setting instance thumbnail path to: {}" - .format(view_thumbnail) - ) - instance.data["thumbnailPath"] = view_thumbnail - - def get_view_thumbnail(self, instance): - - sceneview = lib.get_scene_viewer() - if sceneview is None: - self.log.debug("Skipping Extract Active View Thumbnail" - " because no scene view was detected.") - return - - with tempfile.NamedTemporaryFile("w", suffix=".jpg", delete=False) as tmp: - lib.sceneview_snapshot(sceneview, tmp.name) - thumbnail_path = tmp.name - - instance.context.data["cleanupFullPaths"].append(thumbnail_path) - return thumbnail_path diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_hda.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_hda.py deleted file mode 100644 index e4449d11f8..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_hda.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -import os -from pprint import pformat -import hou -import pyblish.api -from ayon_houdini.api import plugin - - -class ExtractHDA(plugin.HoudiniExtractorPlugin): - - order = pyblish.api.ExtractorOrder - label = "Extract HDA" - families = ["hda"] - - def process(self, instance): - self.log.info(pformat(instance.data)) - hda_node = hou.node(instance.data.get("instance_node")) - hda_def = hda_node.type().definition() - hda_options = hda_def.options() - hda_options.setSaveInitialParmsAndContents(True) - - next_version = instance.data["anatomyData"]["version"] - self.log.info("setting version: {}".format(next_version)) - hda_def.setVersion(str(next_version)) - hda_def.setOptions(hda_options) - hda_def.save(hda_def.libraryFilePath(), hda_node, hda_options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - file = os.path.basename(hda_def.libraryFilePath()) - staging_dir = os.path.dirname(hda_def.libraryFilePath()) - self.log.info("Using HDA from {}".format(hda_def.libraryFilePath())) - - representation = { - 'name': 'hda', - 'ext': 'hda', - 'files': file, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_render.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_render.py deleted file mode 100644 index c7ec7603f4..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_render.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -import hou - -import pyblish.api - -from ayon_houdini.api import plugin -from ayon_houdini.api.lib import render_rop - - -class ExtractRender(plugin.HoudiniExtractorPlugin): - - order = pyblish.api.ExtractorOrder - label = "Extract Render" - families = ["mantra_rop", - "karma_rop", - "redshift_rop", - "arnold_rop", - "vray_rop", - "usdrender"] - - def process(self, instance): - creator_attribute = instance.data["creator_attributes"] - product_type = instance.data["productType"] - rop_node = hou.node(instance.data.get("instance_node")) - - # TODO: This section goes against pyblish concepts where - # pyblish plugins should change the state of the scene. - # However, in ayon publisher tool users can have options and - # these options should some how synced with the houdini nodes. 
- # More info: https://github.com/ynput/ayon-core/issues/417 - - # Align split parameter value on rop node to the render target. - if instance.data["splitRender"]: - if product_type == "arnold_rop": - rop_node.setParms({"ar_ass_export_enable": 1}) - elif product_type == "mantra_rop": - rop_node.setParms({"soho_outputmode": 1}) - elif product_type == "redshift_rop": - rop_node.setParms({"RS_archive_enable": 1}) - elif product_type == "vray_rop": - rop_node.setParms({"render_export_mode": "2"}) - elif product_type == "usdrender": - rop_node.setParms({"runcommand": 0}) - else: - if product_type == "arnold_rop": - rop_node.setParms({"ar_ass_export_enable": 0}) - elif product_type == "mantra_rop": - rop_node.setParms({"soho_outputmode": 0}) - elif product_type == "redshift_rop": - rop_node.setParms({"RS_archive_enable": 0}) - elif product_type == "vray_rop": - rop_node.setParms({"render_export_mode": "1"}) - elif product_type == "usdrender": - rop_node.setParms({"runcommand": 1}) - - if instance.data.get("farm"): - self.log.debug("Render should be processed on farm, skipping local render.") - return - - if creator_attribute.get("render_target") == "local": - ropnode = hou.node(instance.data.get("instance_node")) - render_rop(ropnode) - - # `ExpectedFiles` is a list that includes one dict. - expected_files = instance.data["expectedFiles"][0] - # Each key in that dict is a list of files. - # Combine lists of files into one big list. - all_frames = [] - for value in expected_files.values(): - if isinstance(value, str): - all_frames.append(value) - elif isinstance(value, list): - all_frames.extend(value) - # Check missing frames. - # Frames won't exist if user cancels the render. - missing_frames = [ - frame - for frame in all_frames - if not os.path.exists(frame) - ] - if missing_frames: - # TODO: Use user friendly error reporting. - raise RuntimeError("Failed to complete render extraction. 
" - "Missing output files: {}".format( - missing_frames)) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_rop.py deleted file mode 100644 index 62a38c0b93..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_rop.py +++ /dev/null @@ -1,150 +0,0 @@ -import os -import hou - -import pyblish.api - -from ayon_core.pipeline import publish -from ayon_houdini.api import plugin -from ayon_houdini.api.lib import render_rop, splitext - - -class ExtractROP(plugin.HoudiniExtractorPlugin): - """Generic Extractor for any ROP node.""" - label = "Extract ROP" - order = pyblish.api.ExtractorOrder - - families = ["abc", "camera", "bgeo", "pointcache", "fbx", - "vdbcache", "ass", "redshiftproxy", "mantraifd"] - targets = ["local", "remote"] - - def process(self, instance: pyblish.api.Instance): - if instance.data.get("farm"): - self.log.debug("Should be processed on farm, skipping.") - return - - rop_node = hou.node(instance.data["instance_node"]) - - files = instance.data["frames"] - first_file = files[0] if isinstance(files, (list, tuple)) else files - _, ext = splitext( - first_file, allowed_multidot_extensions=[ - ".ass.gz", ".bgeo.sc", ".bgeo.gz", - ".bgeo.lzma", ".bgeo.bz2"] - ) - ext = ext.lstrip(".") - - self.log.debug(f"Rendering {rop_node.path()} to {first_file}..") - - render_rop(rop_node) - self.validate_expected_frames(instance) - - # In some cases representation name is not the the extension - # TODO: Preferably we remove this very specific naming - product_type = instance.data["productType"] - name = { - "bgeo": "bgeo", - "rs": "rs", - "ass": "ass" - }.get(product_type, ext) - - representation = { - "name": name, - "ext": ext, - "files": instance.data["frames"], - "stagingDir": instance.data["stagingDir"], - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"], - } - self.update_representation_data(instance, representation) - instance.data.setdefault("representations", []).append(representation) - - def validate_expected_frames(self, instance: pyblish.api.Instance): - """ - Validate all expected files in `instance.data["frames"]` exist in - the staging directory. - """ - filenames = instance.data["frames"] - staging_dir = instance.data["stagingDir"] - if isinstance(filenames, str): - # Single frame - filenames = [filenames] - - missing_filenames = [ - filename for filename in filenames - if not os.path.isfile(os.path.join(staging_dir, filename)) - ] - if missing_filenames: - raise RuntimeError(f"Missing frames: {missing_filenames}") - - def update_representation_data(self, - instance: pyblish.api.Instance, - representation: dict): - """Allow subclass to override the representation data in-place""" - pass - - -class ExtractOpenGL(ExtractROP, - publish.ColormanagedPyblishPluginMixin): - - order = pyblish.api.ExtractorOrder - 0.01 - label = "Extract OpenGL" - families = ["review"] - - def process(self, instance): - # This plugin is triggered when marking render as reviewable. - # Therefore, this plugin will run over wrong instances. - # TODO: Don't run this plugin on wrong instances. - # This plugin should run only on review product type - # with instance node of opengl type. 
- instance_node = instance.data.get("instance_node") - if not instance_node: - self.log.debug("Skipping instance without instance node.") - return - - rop_node = hou.node(instance_node) - if rop_node.type().name() != "opengl": - self.log.debug("Skipping OpenGl extraction. Rop node {} " - "is not an OpenGl node.".format(rop_node.path())) - return - - super(ExtractOpenGL, self).process(instance) - - def update_representation_data(self, - instance: pyblish.api.Instance, - representation: dict): - tags = ["review"] - if not instance.data.get("keepImages"): - tags.append("delete") - - representation.update({ - # TODO: Avoid this override? - "name": instance.data["imageFormat"], - "ext": instance.data["imageFormat"], - - "tags": tags, - "preview": True, - "camera_name": instance.data.get("review_camera") - }) - - -class ExtractComposite(ExtractROP, - publish.ColormanagedPyblishPluginMixin): - - label = "Extract Composite (Image Sequence)" - families = ["imagesequence"] - - def update_representation_data(self, - instance: pyblish.api.Instance, - representation: dict): - - if representation["ext"].lower() != "exr": - return - - # Inject colorspace with 'scene_linear' as that's the - # default Houdini working colorspace and all extracted - # OpenEXR images should be in that colorspace. - # https://www.sidefx.com/docs/houdini/render/linear.html#image-formats - self.set_representation_colorspace( - representation, instance.context, - colorspace="scene_linear" - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_usd.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_usd.py deleted file mode 100644 index e8e7d6a583..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_usd.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -from typing import List, AnyStr - -import pyblish.api - -from ayon_core.pipeline.publish.lib import get_instance_expected_output_path -from ayon_houdini.api import plugin -from ayon_houdini.api.lib import render_rop -from ayon_houdini.api.usd import remap_paths - -import hou - - -class ExtractUSD(plugin.HoudiniExtractorPlugin): - - order = pyblish.api.ExtractorOrder - label = "Extract USD" - families = ["usdrop"] - - def process(self, instance): - - ropnode = hou.node(instance.data.get("instance_node")) - - # Get the filename from the filename parameter - output = ropnode.evalParm("lopoutput") - staging_dir = os.path.dirname(output) - instance.data["stagingDir"] = staging_dir - file_name = os.path.basename(output) - - self.log.info("Writing USD '%s' to '%s'" % (file_name, staging_dir)) - - mapping = self.get_source_to_publish_paths(instance.context) - - # Allow instance-specific path remapping overrides, e.g. 
changing - # paths on used resources/textures for looks - instance_mapping = instance.data.get("assetRemap", {}) - if instance_mapping: - self.log.debug("Instance-specific asset path remapping:\n" - f"{instance_mapping}") - mapping.update(instance_mapping) - - with remap_paths(ropnode, mapping): - render_rop(ropnode) - - assert os.path.exists(output), "Output does not exist: %s" % output - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'usd', - 'ext': 'usd', - 'files': file_name, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - def get_source_to_publish_paths(self, context): - """Define a mapping of all current instances in context from source - file to publish file so this can be used on the USD save to remap - asset layer paths on publish via AyonRemapPaths output processor""" - - mapping = {} - for instance in context: - if not instance.data.get("active", True): - continue - - if not instance.data.get("publish", True): - continue - - for repre in instance.data.get("representations", []): - name = repre.get("name") - ext = repre.get("ext") - - # TODO: The remapping might need to get more involved if the - # asset paths that are set use e.g. $F - # TODO: If the representation has multiple files we might need - # to define the path remapping per file of the sequence - path = get_instance_expected_output_path( - instance, representation_name=name, ext=ext - ) - for source_path in get_source_paths(instance, repre): - source_path = os.path.normpath(source_path) - mapping[source_path] = path - - return mapping - - -def get_source_paths( - instance: pyblish.api.Instance, - repre: dict -) -> List[AnyStr]: - """Return the full source filepaths for an instance's representations""" - - staging = repre.get("stagingDir", instance.data.get("stagingDir")) - files = repre.get("files", []) - if isinstance(files, list): - return [os.path.join(staging, fname) for fname in files] - elif isinstance(files, str): - # Single file - return [os.path.join(staging, files)] - - raise TypeError(f"Unsupported type for representation files: {files} " - "(supports list or str)") diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/help/validate_vdb_output_node.xml b/server_addon/houdini/client/ayon_houdini/plugins/publish/help/validate_vdb_output_node.xml deleted file mode 100644 index 8aac9a6a07..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/help/validate_vdb_output_node.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - -Invalid VDB - -## Invalid VDB output - -All primitives of the output geometry must be VDBs, no other primitive -types are allowed. That means that regardless of the amount of VDBs in the -geometry it will have an equal amount of VDBs, points, primitives and -vertices since each VDB primitive is one point, one vertex and one VDB. - -This validation only checks the geometry on the first frame of the export -frame range. - - - - - -### Detailed Info - -ROP node `{rop_path}` is set to export SOP path `{sop_path}`. 
- -{message} - - - - diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/increment_current_file.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/increment_current_file.py deleted file mode 100644 index 878500f605..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/increment_current_file.py +++ /dev/null @@ -1,54 +0,0 @@ -import pyblish.api - -from ayon_core.lib import version_up -from ayon_core.pipeline import registered_host -from ayon_core.pipeline.publish import ( - get_errored_plugins_from_context, - KnownPublishError -) - -from ayon_houdini.api import plugin - - -class IncrementCurrentFile(plugin.HoudiniContextPlugin): - """Increment the current file. - - Saves the current scene with an increased version number. - - """ - - label = "Increment current file" - order = pyblish.api.IntegratorOrder + 9.0 - families = ["workfile", - "usdrender", - "mantra_rop", - "karma_rop", - "redshift_rop", - "arnold_rop", - "vray_rop", - "render.local.hou", - "publish.hou"] - optional = True - - def process(self, context): - - errored_plugins = get_errored_plugins_from_context(context) - if any( - plugin.__name__ == "HoudiniSubmitPublishDeadline" - for plugin in errored_plugins - ): - raise KnownPublishError( - "Skipping incrementing current file because " - "submission to deadline failed." - ) - - # Filename must not have changed since collecting - host = registered_host() - current_file = host.current_file() - if context.data["currentFile"] != current_file: - raise KnownPublishError( - "Collected filename mismatches from current scene name." - ) - - new_filepath = version_up(current_file) - host.save_workfile(new_filepath) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/save_scene.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/save_scene.py deleted file mode 100644 index e0734da5d1..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/save_scene.py +++ /dev/null @@ -1,27 +0,0 @@ -import pyblish.api - -from ayon_core.pipeline import registered_host - -from ayon_houdini.api import plugin - - -class SaveCurrentScene(plugin.HoudiniContextPlugin): - """Save current scene""" - - label = "Save current file" - order = pyblish.api.ExtractorOrder - 0.49 - - def process(self, context): - - # Filename must not have changed since collecting - host = registered_host() - current_file = host.get_current_workfile() - assert context.data['currentFile'] == current_file, ( - "Collected filename from current scene name." - ) - - if host.workfile_has_unsaved_changes(): - self.log.info("Saving current file: {}".format(current_file)) - host.save_workfile(current_file) - else: - self.log.debug("No unsaved changes, skipping file save..") diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_abc_primitive_to_detail.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_abc_primitive_to_detail.py deleted file mode 100644 index 51885a963e..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_abc_primitive_to_detail.py +++ /dev/null @@ -1,150 +0,0 @@ -# -*- coding: utf-8 -*- -from collections import defaultdict - -import pyblish.api -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api import plugin - - -class ValidateAbcPrimitiveToDetail(plugin.HoudiniInstancePlugin): - """Validate Alembic ROP Primitive to Detail attribute is consistent. 
- - The Alembic ROP crashes Houdini whenever an attribute in the "Primitive to - Detail" parameter exists on only a part of the primitives that belong to - the same hierarchy path. Whenever it encounters inconsistent values, - specifically where some are empty as opposed to others then Houdini - crashes. (Tested in Houdini 17.5.229) - - """ - - order = pyblish.api.ValidatorOrder + 0.1 - families = ["abc"] - label = "Validate Primitive to Detail (Abc)" - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Primitives found with inconsistent primitive " - "to detail attributes. See log."), - title=self.label - ) - - @classmethod - def get_invalid(cls, instance): - import hou # noqa - output_node = instance.data.get("output_node") - rop_node = hou.node(instance.data["instance_node"]) - if output_node is None: - cls.log.error( - "SOP Output node in '%s' does not exist. " - "Ensure a valid SOP output path is set." % rop_node.path() - ) - - return [rop_node.path()] - - pattern = rop_node.parm("prim_to_detail_pattern").eval().strip() - if not pattern: - cls.log.debug( - "Alembic ROP has no 'Primitive to Detail' pattern. " - "Validation is ignored.." - ) - return - - build_from_path = rop_node.parm("build_from_path").eval() - if not build_from_path: - cls.log.debug( - "Alembic ROP has 'Build from Path' disabled. " - "Validation is ignored.." - ) - return - - path_attr = rop_node.parm("path_attrib").eval() - if not path_attr: - cls.log.error( - "The Alembic ROP node has no Path Attribute" - "value set, but 'Build Hierarchy from Attribute'" - "is enabled." - ) - return [rop_node.path()] - - # Let's assume each attribute is explicitly named for now and has no - # wildcards for Primitive to Detail. This simplifies the check. - cls.log.debug("Checking Primitive to Detail pattern: %s" % pattern) - cls.log.debug("Checking with path attribute: %s" % path_attr) - - if not hasattr(output_node, "geometry"): - # In the case someone has explicitly set an Object - # node instead of a SOP node in Geometry context - # then for now we ignore - this allows us to also - # export object transforms. - cls.log.warning("No geometry output node found, skipping check..") - return - - # Check if the primitive attribute exists - frame = instance.data.get("frameStart", 0) - geo = output_node.geometryAtFrame(frame) - - # If there are no primitives on the start frame then it might be - # something that is emitted over time. As such we can't actually - # validate whether the attributes exist, because they won't exist - # yet. In that case, just warn the user and allow it. - if len(geo.iterPrims()) == 0: - cls.log.warning( - "No primitives found on current frame. Validation" - " for Primitive to Detail will be skipped." - ) - return - - attrib = geo.findPrimAttrib(path_attr) - if not attrib: - cls.log.info( - "Geometry Primitives are missing " - "path attribute: `%s`" % path_attr - ) - return [output_node.path()] - - # Ensure at least a single string value is present - if not attrib.strings(): - cls.log.info( - "Primitive path attribute has no " - "string values: %s" % path_attr - ) - return [output_node.path()] - - paths = None - for attr in pattern.split(" "): - if not attr.strip(): - # Ignore empty values - continue - - # Check if the primitive attribute exists - attrib = geo.findPrimAttrib(attr) - if not attrib: - # It is allowed to not have the attribute at all - continue - - # The issue can only happen if at least one string attribute is - # present. 
So we ignore cases with no values whatsoever. - if not attrib.strings(): - continue - - check = defaultdict(set) - values = geo.primStringAttribValues(attr) - if paths is None: - paths = geo.primStringAttribValues(path_attr) - - for path, value in zip(paths, values): - check[path].add(value) - - for path, values in check.items(): - # Whenever a single path has multiple values for the - # Primitive to Detail attribute then we consider it - # inconsistent and invalidate the ROP node's content. - if len(values) > 1: - cls.log.warning( - "Path has multiple values: %s (path: %s)" - % (list(values), path) - ) - return [output_node.path()] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_face_sets.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_face_sets.py deleted file mode 100644 index 00ce554ff1..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_face_sets.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -import hou -import pyblish.api -from ayon_houdini.api import plugin - - -class ValidateAlembicROPFaceSets(plugin.HoudiniInstancePlugin): - """Validate Face Sets are disabled for extraction to pointcache. - - When groups are saved as Face Sets with the Alembic these show up - as shadingEngine connections in Maya - however, with animated groups - these connections in Maya won't work as expected, it won't update per - frame. Additionally, it can break shader assignments in some cases - where it requires to first break this connection to allow a shader to - be assigned. - - It is allowed to include Face Sets, so only an issue is logged to - identify that it could introduce issues down the pipeline. - - """ - - order = pyblish.api.ValidatorOrder + 0.1 - families = ["abc"] - label = "Validate Alembic ROP Face Sets" - - def process(self, instance): - - rop = hou.node(instance.data["instance_node"]) - facesets = rop.parm("facesets").eval() - - # 0 = No Face Sets - # 1 = Save Non-Empty Groups as Face Sets - # 2 = Save All Groups As Face Sets - if facesets != 0: - self.log.warning( - "Alembic ROP saves 'Face Sets' for Geometry. " - "Are you sure you want this?" - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_input_node.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_input_node.py deleted file mode 100644 index aab3068171..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_alembic_input_node.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -import hou -import pyblish.api -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api import plugin - - -class ValidateAlembicInputNode(plugin.HoudiniInstancePlugin): - """Validate that the node connected to the output is correct. 
- - The connected node cannot be of the following types for Alembic: - - VDB - - Volume - - """ - - order = pyblish.api.ValidatorOrder + 0.1 - families = ["abc"] - label = "Validate Input Node (Abc)" - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - ("Primitive types found that are not supported " - "for Alembic output."), - title=self.label - ) - - @classmethod - def get_invalid(cls, instance): - - invalid_prim_types = ["VDB", "Volume"] - output_node = instance.data.get("output_node") - - if output_node is None: - node = hou.node(instance.data["instance_node"]) - cls.log.error( - "SOP Output node in '%s' does not exist. " - "Ensure a valid SOP output path is set." % node.path() - ) - - return [node.path()] - - if not hasattr(output_node, "geometry"): - # In the case someone has explicitly set an Object - # node instead of a SOP node in Geometry context - # then for now we ignore - this allows us to also - # export object transforms. - cls.log.warning("No geometry output node found, skipping check..") - return - - frame = instance.data.get("frameStart", 0) - geo = output_node.geometryAtFrame(frame) - - invalid = False - for prim_type in invalid_prim_types: - if geo.countPrimType(prim_type) > 0: - cls.log.error( - "Found a primitive which is of type '%s' !" % prim_type - ) - invalid = True - - if invalid: - return [instance] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_animation_settings.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_animation_settings.py deleted file mode 100644 index 1cc9e24dc9..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_animation_settings.py +++ /dev/null @@ -1,53 +0,0 @@ -import hou - -import pyblish.api -from ayon_core.pipeline.publish import PublishValidationError - -from ayon_houdini.api import lib, plugin - - -class ValidateAnimationSettings(plugin.HoudiniInstancePlugin): - """Validate if the unexpanded string contains the frame ('$F') token - - This validator will only check the output parameter of the node if - the Valid Frame Range is not set to 'Render Current Frame' - - Rules: - If you render out a frame range it is mandatory to have the - frame token - '$F4' or similar - to ensure that each frame gets - written. If this is not the case you will override the same file - every time a frame is written out. 
-
-    Examples:
-        Good: 'my_vbd_cache.$F4.vdb'
-        Bad: 'my_vbd_cache.vdb'
-
-    """
-
-    order = pyblish.api.ValidatorOrder
-    label = "Validate Frame Settings"
-    families = ["vdbcache"]
-
-    def process(self, instance):
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(
-                "Output settings do not match for '%s'" % instance
-            )
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        node = hou.node(instance.data["instance_node"])
-        # Check trange parm, 0 means Render Current Frame
-        frame_range = node.evalParm("trange")
-        if frame_range == 0:
-            return []
-
-        output_parm = lib.get_output_parameter(node)
-        unexpanded_str = output_parm.unexpandedString()
-
-        if "$F" not in unexpanded_str:
-            cls.log.error("No frame token found in '%s'" % node.path())
-            return [instance]
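The frame-token rule that this validator (and the later "Validate Frame Token" plugin) enforces can be illustrated outside Houdini with a plain string check. A small sketch, where `renders_range` stands in for the ROP's `trange` parameter and the file names are the docstring's own examples:

```python
def has_frame_token(unexpanded_output, renders_range):
    """Return whether an output path is safe for a frame range render."""
    if not renders_range:
        # 'Render Current Frame' writes a single file, so no token needed.
        return True
    # Rendering a range requires '$F' (e.g. '$F4') so each frame gets
    # its own file instead of overwriting the same one.
    return "$F" in unexpanded_output


assert has_frame_token("my_vbd_cache.$F4.vdb", renders_range=True)
assert not has_frame_token("my_vbd_cache.vdb", renders_range=True)
assert has_frame_token("my_vbd_cache.vdb", renders_range=False)
```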
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_bypass.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_bypass.py
deleted file mode 100644
index d984c63756..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_bypass.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-import hou
-
-import pyblish.api
-from ayon_core.pipeline import PublishValidationError
-
-from ayon_houdini.api import plugin
-
-
-class ValidateBypassed(plugin.HoudiniInstancePlugin):
-    """Validate the instance ROP node is not set to bypass.
-
-    A bypassed ROP node does not execute, so nothing would be extracted
-    and publishing cannot continue.
-
-    """
-
-    order = pyblish.api.ValidatorOrder - 0.1
-    families = ["*"]
-    label = "Validate ROP Bypass"
-
-    def process(self, instance):
-
-        if not instance.data.get("instance_node"):
-            # Ignore instances without an instance node
-            # e.g. in memory bootstrap instances
-            self.log.debug(
-                "Skipping instance without instance node: {}".format(instance)
-            )
-            return
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            rop = invalid[0]
-            raise PublishValidationError(
-                ("ROP node {} is set to bypass, publishing cannot "
-                 "continue.".format(rop.path())),
-                title=self.label
-            )
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        rop = hou.node(instance.data["instance_node"])
-        if hasattr(rop, "isBypassed") and rop.isBypassed():
-            return [rop]
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_camera_rop.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_camera_rop.py
deleted file mode 100644
index f21addb11d..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_camera_rop.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Validator plugin for Houdini Camera ROP settings."""
-import pyblish.api
-from ayon_core.pipeline import PublishValidationError
-
-from ayon_houdini.api import plugin
-
-
-class ValidateCameraROP(plugin.HoudiniInstancePlugin):
-    """Validate Camera ROP settings."""
-
-    order = pyblish.api.ValidatorOrder
-    families = ["camera"]
-    label = "Camera ROP"
-
-    def process(self, instance):
-
-        import hou
-
-        node = hou.node(instance.data.get("instance_node"))
-        if node.parm("use_sop_path").eval():
-            raise PublishValidationError(
-                ("Alembic ROP for Camera export should not be "
-                 "set to 'Use Sop Path'. Please disable."),
-                title=self.label
-            )
-
-        # Get the root and objects parameters of the Alembic ROP node
-        root = node.parm("root").eval()
-        objects = node.parm("objects").eval()
-        errors = []
-        if not root:
-            errors.append("Root parameter must be set on Alembic ROP")
-        if not root.startswith("/"):
-            errors.append("Root parameter must start with slash /")
-        if not objects:
-            errors.append("Objects parameter must be set on Alembic ROP")
-        if len(objects.split(" ")) != 1:
-            errors.append("Must have only a single object.")
-
-        if errors:
-            for error in errors:
-                self.log.error(error)
-            raise PublishValidationError(
-                "Some checks failed, see validator log.",
-                title=self.label)
-
-        # Check if the object exists and is a camera
-        path = root + "/" + objects
-        camera = hou.node(path)
-
-        if not camera:
-            raise PublishValidationError(
-                "Camera path does not exist: %s" % path,
-                title=self.label)
-
-        if camera.type().name() != "cam":
-            raise PublishValidationError(
-                ("Object set in Alembic ROP is not a camera: "
                 "{} (type: {})").format(camera, camera.type().name()),
-                title=self.label)
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_cop_output_node.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_cop_output_node.py
deleted file mode 100644
index 1d63e15d90..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_cop_output_node.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-import hou
-
-import pyblish.api
-from ayon_core.pipeline import PublishValidationError
-
-from ayon_houdini.api import plugin
-
-
-class ValidateCopOutputNode(plugin.HoudiniInstancePlugin):
-    """Validate the instance COP Output Node.
-
-    This will ensure:
-        - The COP Path is set.
-        - The COP Path refers to an existing object.
-        - The COP Path node is a COP node.
-
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ["imagesequence"]
-    label = "Validate COP Output Node"
-
-    def process(self, instance):
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(
-                "Output node '{}' is incorrect. "
-                "See plug-in log for details.".format(invalid),
-                title=self.label,
-                description=(
-                    "### Invalid COP output node\n\n"
-                    "The output node path for the instance must be set to a "
-                    "valid COP node path.\n\nSee the log for more details."
-                )
-            )
-
-    @classmethod
-    def get_invalid(cls, instance):
-        output_node = instance.data.get("output_node")
-
-        if not output_node:
-            node = hou.node(instance.data.get("instance_node"))
-            cls.log.error(
-                "COP Output node in '%s' does not exist. "
-                "Ensure a valid COP output path is set." % node.path()
-            )
-
-            return [node.path()]
-
-        # Output node must be a COP node.
-        if not isinstance(output_node, hou.CopNode):
-            cls.log.error(
-                "Output node %s is not a COP node. 
" - "COP Path must point to a COP node, " - "instead found category type: %s", - output_node.path(), output_node.type().category().name() - ) - return [output_node.path()] - - # For the sake of completeness also assert the category type - # is Cop2 to avoid potential edge case scenarios even though - # the isinstance check above should be stricter than this category - if output_node.type().category().name() != "Cop2": - cls.log.error( - "Output node %s is not of category Cop2.", output_node.path() - ) - return [output_node.path()] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_export_is_a_single_frame.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_export_is_a_single_frame.py deleted file mode 100644 index 62bc5e3b44..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_export_is_a_single_frame.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for checking that export is a single frame.""" -from ayon_core.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import ValidateContentsOrder -from ayon_houdini.api.action import SelectInvalidAction -from ayon_houdini.api import plugin - - -class ValidateSingleFrame(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate Export is a Single Frame. - - It checks if rop node is exporting one frame. - This is mainly for Model product type. - """ - - families = ["model"] - label = "Validate Single Frame" - order = ValidateContentsOrder + 0.1 - actions = [SelectInvalidAction] - - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - nodes = [n.path() for n in invalid] - raise PublishValidationError( - "See log for details. " - "Invalid nodes: {0}".format(nodes) - ) - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - frame_start = instance.data.get("frameStartHandle") - frame_end = instance.data.get("frameEndHandle") - - # This happens if instance node has no 'trange' parameter. - if frame_start is None or frame_end is None: - cls.log.debug( - "No frame data, skipping check.." - ) - return - - if frame_start != frame_end: - invalid.append(instance.data["instance_node"]) - cls.log.error( - "Invalid frame range on '%s'." - "You should use the same frame number for 'f1' " - "and 'f2' parameters.", - instance.data["instance_node"].path() - ) - - return invalid diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_fbx_output_node.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_fbx_output_node.py deleted file mode 100644 index 1c236bb8f7..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_fbx_output_node.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_core.pipeline import PublishValidationError -from ayon_houdini.api.action import ( - SelectInvalidAction, - SelectROPAction, -) -from ayon_houdini.api import plugin -from ayon_houdini.api.lib import get_obj_node_output -import hou - - -class ValidateFBXOutputNode(plugin.HoudiniInstancePlugin): - """Validate the instance Output Node. - - This will ensure: - - The Output Node Path is set. - - The Output Node Path refers to an existing object. - - The Output Node is a Sop or Obj node. - - The Output Node has geometry data. - - The Output Node doesn't include invalid primitive types. 
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ["fbx"]
-    label = "Validate FBX Output Node"
-    actions = [SelectROPAction, SelectInvalidAction]
-
-    def process(self, instance):
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            nodes = [n.path() for n in invalid]
-            raise PublishValidationError(
-                "See log for details. "
-                "Invalid nodes: {0}".format(nodes),
-                title="Invalid output node(s)"
-            )
-
-    @classmethod
-    def get_invalid(cls, instance):
-        output_node = instance.data.get("output_node")
-
-        # Check if the Output Node Path is set and
-        # refers to an existing object.
-        if output_node is None:
-            rop_node = hou.node(instance.data["instance_node"])
-            cls.log.error(
-                "Output node in '%s' does not exist. "
-                "Ensure a valid output path is set.", rop_node.path()
-            )
-
-            return [rop_node]
-
-        # Check if the Output Node is a Sop or an Obj node;
-        # also, list all SOP output nodes inside as well as
-        # invalid empty nodes.
-        all_out_sops = []
-        invalid = []
-
-        # if output_node is an ObjSubnet or an ObjNetwork
-        if output_node.childTypeCategory() == hou.objNodeTypeCategory():
-            for node in output_node.allSubChildren():
-                if node.type().name() == "geo":
-                    out = get_obj_node_output(node)
-                    if out:
-                        all_out_sops.append(out)
-                    else:
-                        invalid.append(node)  # empty_objs
-                        cls.log.error(
-                            "Geo Obj Node '%s' is empty!",
-                            node.path()
-                        )
-            if not all_out_sops:
-                invalid.append(output_node)  # empty_objs
-                cls.log.error(
-                    "Output Node '%s' is empty!",
-                    output_node.path()
-                )
-
-        # elif output_node is an ObjNode
-        elif output_node.type().name() == "geo":
-            out = get_obj_node_output(output_node)
-            if out:
-                all_out_sops.append(out)
-            else:
-                invalid.append(output_node)  # empty_objs
-                cls.log.error(
-                    "Output Node '%s' is empty!",
-                    output_node.path()
-                )
-
-        # elif output_node is a SopNode
-        elif output_node.type().category().name() == "Sop":
-            all_out_sops.append(output_node)
-
-        # Then it's a wrong node type
-        else:
-            cls.log.error(
-                "Output node %s is not a SOP or OBJ Geo or OBJ SubNet node. 
" - "Instead found category type: %s %s", - output_node.path(), output_node.type().category().name(), - output_node.type().name() - ) - return [output_node] - - # Check if all output sop nodes have geometry - # and don't contain invalid prims - invalid_prim_types = ["VDB", "Volume"] - for sop_node in all_out_sops: - # Empty Geometry test - if not hasattr(sop_node, "geometry"): - invalid.append(sop_node) # empty_geometry - cls.log.error( - "Sop node '%s' doesn't include any prims.", - sop_node.path() - ) - continue - - frame = instance.data.get("frameStart", 0) - geo = sop_node.geometryAtFrame(frame) - if len(geo.iterPrims()) == 0: - invalid.append(sop_node) # empty_geometry - cls.log.error( - "Sop node '%s' doesn't include any prims.", - sop_node.path() - ) - continue - - # Invalid Prims test - for prim_type in invalid_prim_types: - if geo.countPrimType(prim_type) > 0: - invalid.append(sop_node) # invalid_prims - cls.log.error( - "Sop node '%s' includes invalid prims of type '%s'.", - sop_node.path(), prim_type - ) - - if invalid: - return invalid diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_file_extension.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_file_extension.py deleted file mode 100644 index 1b3a58f4b3..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_file_extension.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import hou - -import pyblish.api -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api import lib, plugin - - -class ValidateFileExtension(plugin.HoudiniInstancePlugin): - """Validate the output file extension fits the output family. - - File extensions: - - Pointcache must be .abc - - Camera must be .abc - - VDB must be .vdb - - """ - - order = pyblish.api.ValidatorOrder - families = ["camera", "vdbcache"] - label = "Output File Extension" - - family_extensions = { - "camera": ".abc", - "vdbcache": ".vdb", - } - - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "ROP node has incorrect file extension: {}".format(invalid), - title=self.label - ) - - @classmethod - def get_invalid(cls, instance): - - # Get ROP node from instance - node = hou.node(instance.data["instance_node"]) - - # Create lookup for current family in instance - families = [] - product_type = instance.data.get("productType") - if product_type: - families.append(product_type) - families = set(families) - - # Perform extension check - output = lib.get_output_parameter(node).eval() - _, output_extension = os.path.splitext(output) - - for family in families: - extension = cls.family_extensions.get(family, None) - if extension is None: - raise PublishValidationError( - "Unsupported family: {}".format(family), - title=cls.label) - - if output_extension != extension: - return [node.path()] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_range.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_range.py deleted file mode 100644 index 9435fa033a..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_range.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -import hou - -import pyblish.api -from ayon_core.pipeline import PublishValidationError -from ayon_core.pipeline.publish import RepairAction - -from ayon_houdini.api.action import SelectInvalidAction -from ayon_houdini.api import plugin - - - -class 
DisableUseFolderHandlesAction(RepairAction): - label = "Disable use folder handles" - icon = "mdi.toggle-switch-off" - - -class ValidateFrameRange(plugin.HoudiniInstancePlugin): - """Validate Frame Range. - - Due to the usage of start and end handles, - then Frame Range must be >= (start handle + end handle) - which results that frameEnd be smaller than frameStart - """ - - order = pyblish.api.ValidatorOrder - 0.1 - label = "Validate Frame Range" - actions = [DisableUseFolderHandlesAction, SelectInvalidAction] - - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - title="Invalid Frame Range", - message=( - "Invalid frame range because the instance " - "start frame ({0[frameStart]}) is higher than " - "the end frame ({0[frameEnd]})" - .format(instance.data) - ), - description=( - "## Invalid Frame Range\n" - "The frame range for the instance is invalid because " - "the start frame is higher than the end frame.\n\nThis " - "is likely due to folder handles being applied to your " - "instance or the ROP node's start frame " - "is set higher than the end frame.\n\nIf your ROP frame " - "range is correct and you do not want to apply folder " - "handles make sure to disable Use folder handles on the " - "publish instance." - ) - ) - - @classmethod - def get_invalid(cls, instance): - - if not instance.data.get("instance_node"): - return - - rop_node = hou.node(instance.data["instance_node"]) - frame_start = instance.data.get("frameStart") - frame_end = instance.data.get("frameEnd") - - if frame_start is None or frame_end is None: - cls.log.debug( - "Skipping frame range validation for " - "instance without frame data: {}".format(rop_node.path()) - ) - return - - if frame_start > frame_end: - cls.log.info( - "The ROP node render range is set to " - "{0[frameStartHandle]} - {0[frameEndHandle]} " - "The folder handles applied to the instance are start handle " - "{0[handleStart]} and end handle {0[handleEnd]}" - .format(instance.data) - ) - return [rop_node] - - @classmethod - def repair(cls, instance): - - if not cls.get_invalid(instance): - # Already fixed - return - - # Disable use folder handles - context = instance.context - create_context = context.data["create_context"] - instance_id = instance.data.get("instance_id") - if not instance_id: - cls.log.debug("'{}' must have instance id" - .format(instance)) - return - - created_instance = create_context.get_instance_by_id(instance_id) - if not instance_id: - cls.log.debug("Unable to find instance '{}' by id" - .format(instance)) - return - - created_instance.publish_attributes["CollectAssetHandles"]["use_handles"] = False # noqa - - create_context.save_changes() - cls.log.debug("use folder handles is turned off for '{}'" - .format(instance)) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_token.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_token.py deleted file mode 100644 index 46c02ba6f2..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_frame_token.py +++ /dev/null @@ -1,52 +0,0 @@ -import hou - -import pyblish.api - -from ayon_houdini.api import lib, plugin - - -class ValidateFrameToken(plugin.HoudiniInstancePlugin): - """Validate if the unexpanded string contains the frame ('$F') token. 
-
-    This validator will *only* check the output parameter of the node if
-    the Valid Frame Range is not set to 'Render Current Frame'.
-
-    Rules:
-        If you render out a frame range it is mandatory to have the
-        frame token - '$F4' or similar - to ensure that each frame gets
-        written. If this is not the case you will override the same file
-        every time a frame is written out.
-
-    Examples:
-        Good: 'my_vbd_cache.$F4.vdb'
-        Bad: 'my_vbd_cache.vdb'
-
-    """
-
-    order = pyblish.api.ValidatorOrder
-    label = "Validate Frame Token"
-    families = ["vdbcache"]
-
-    def process(self, instance):
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise RuntimeError(
-                "Output settings do not match for '%s'" % instance
-            )
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        node = hou.node(instance.data["instance_node"])
-        # Check trange parm, 0 means Render Current Frame
-        frame_range = node.evalParm("trange")
-        if frame_range == 0:
-            return []
-
-        output_parm = lib.get_output_parameter(node)
-        unexpanded_str = output_parm.unexpandedString()
-
-        if "$F" not in unexpanded_str:
-            cls.log.error("No frame token found in '%s'" % node.path())
-            return [instance]
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_houdini_license_category.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_houdini_license_category.py
deleted file mode 100644
index 1639a28790..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_houdini_license_category.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-import hou
-
-import pyblish.api
-from ayon_core.pipeline import PublishValidationError
-
-from ayon_houdini.api import plugin
-
-
-class ValidateHoudiniNotApprenticeLicense(plugin.HoudiniInstancePlugin):
-    """Validate the Houdini instance does not run an Apprentice license.
-
-    USD ROPs:
-        When extracting USD files from an Apprentice Houdini license,
-        the resulting files will get "scrambled" with a license protection
-        and get a special .usdnc suffix.
-
-        This currently breaks the Subset/representation pipeline, so we
-        disallow any publish with an Apprentice license.
-
-    Alembic ROPs:
-        Houdini Apprentice does not export Alembic.
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ["usdrop", "abc", "fbx", "camera"]
-    label = "Houdini Apprentice License"
-
-    def process(self, instance):
-
-        if hou.isApprentice():
-            # Find which family was matched with the plug-in
-            families = {instance.data["productType"]}
-            families.update(instance.data.get("families", []))
-            disallowed_families = families.intersection(self.families)
-            families = " ".join(sorted(disallowed_families)).title()
-
-            raise PublishValidationError(
-                "{} publishing requires a non-Apprentice license."
- .format(families), - title=self.label) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_instance_in_context.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_instance_in_context.py deleted file mode 100644 index 092a1199b9..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_instance_in_context.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validate if instance asset is the same as context asset.""" -from ayon_core.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) - -from ayon_houdini.api import plugin -from ayon_houdini.api.action import SelectROPAction - - -class ValidateInstanceInContextHoudini(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validator to check if instance asset match context asset. - - When working in per-shot style you always publish data in context of - current asset (shot). This validator checks if this is so. It is optional - so it can be disabled when needed. - """ - # Similar to maya-equivalent `ValidateInstanceInContext` - - order = ValidateContentsOrder - label = "Instance in same Context" - optional = True - actions = [SelectROPAction, RepairAction] - - def process(self, instance): - if not self.is_active(instance.data): - return - - attr_values = self.get_attr_values_from_data(instance.data) - if not attr_values and not instance.data.get("instance_node"): - # Skip instances that do not have the attr values because that - # hints these are runtime-instances, like e.g. USD layer - # contributions. We will confirm that by checking these do not - # have an instance node. We do not need to check these because they - # 'spawn off' from an original instance that has the check itself. - return - - folder_path = instance.data.get("folderPath") - task = instance.data.get("task") - context = self.get_context(instance) - if (folder_path, task) != context: - context_label = "{} > {}".format(*context) - instance_label = "{} > {}".format(folder_path, task) - - raise PublishValidationError( - message=( - "Instance '{}' publishes to different asset than current " - "context: {}. Current context: {}".format( - instance.name, instance_label, context_label - ) - ), - description=( - "## Publishing to a different asset\n" - "There are publish instances present which are publishing " - "into a different asset than your current context.\n\n" - "Usually this is not what you want but there can be cases " - "where you might want to publish into another asset or " - "shot. If that's the case you can disable the validation " - "on the instance to ignore it." 
- ) - ) - - @classmethod - def repair(cls, instance): - context_folder, context_task = cls.get_context(instance) - - create_context = instance.context.data["create_context"] - instance_id = instance.data["instance_id"] - created_instance = create_context.get_instance_by_id( - instance_id - ) - created_instance["folderPath"] = context_folder - created_instance["task"] = context_task - create_context.save_changes() - - @staticmethod - def get_context(instance): - """Return folderPath, task from publishing context data""" - context = instance.context - return context.data["folderPath"], context.data["task"] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mesh_is_static.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mesh_is_static.py deleted file mode 100644 index b6725bc36c..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mesh_is_static.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for correct naming of Static Meshes.""" -from ayon_core.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import ValidateContentsOrder - -from ayon_houdini.api import plugin -from ayon_houdini.api.action import SelectInvalidAction -from ayon_houdini.api.lib import get_output_children - - -class ValidateMeshIsStatic(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate mesh is static. - - It checks if output node is time dependent. - this avoids getting different output from ROP node when extracted - from a different frame than the first frame. - (Might be overly restrictive though) - """ - - families = ["staticMesh", - "model"] - label = "Validate Mesh is Static" - order = ValidateContentsOrder + 0.1 - actions = [SelectInvalidAction] - - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - nodes = [n.path() for n in invalid] - raise PublishValidationError( - "See log for details. " - "Invalid nodes: {0}".format(nodes) - ) - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - output_node = instance.data.get("output_node") - if output_node is None: - cls.log.debug( - "No Output Node, skipping check.." 
-            )
-            return
-
-        all_outputs = get_output_children(output_node)
-
-        for output in all_outputs:
-            if output.isTimeDependent():
-                invalid.append(output)
-                cls.log.error(
-                    "Output node '%s' is time dependent.",
-                    output.path()
-                )
-
-        return invalid
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mkpaths_toggled.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mkpaths_toggled.py
deleted file mode 100644
index 4573d4ba0b..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_mkpaths_toggled.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-import pyblish.api
-
-from ayon_core.pipeline import PublishValidationError
-
-from ayon_houdini.api import plugin
-
-
-class ValidateIntermediateDirectoriesChecked(plugin.HoudiniInstancePlugin):
-    """Validate Create Intermediate Directories is enabled on ROP node."""
-
-    order = pyblish.api.ValidatorOrder
-    families = ["pointcache", "camera", "vdbcache", "model"]
-    label = "Create Intermediate Directories Checked"
-
-    def process(self, instance):
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(
-                ("Found ROP node with Create Intermediate "
-                 "Directories turned off: {}".format(invalid)),
-                title=self.label)
-
-    @classmethod
-    def get_invalid(cls, instance):
-
-        result = []
-
-        for node in instance[:]:
-            if node.parm("mkpath").eval() != 1:
-                cls.log.error("Invalid settings found on `%s`" % node.path())
-                result.append(node.path())
-
-        return result
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_no_errors.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_no_errors.py
deleted file mode 100644
index 2afb6e5d78..0000000000
--- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_no_errors.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-import hou
-
-import pyblish.api
-from ayon_core.pipeline import PublishValidationError
-
-from ayon_houdini.api import plugin
-
-
-def cook_in_range(node, start, end):
-    current = hou.intFrame()
-    if start <= current <= end:
-        # Allow cooking current frame since we're in frame range
-        node.cook(force=False)
-    else:
-        node.cook(force=False, frame_range=(start, start))
-
-
-def get_errors(node):
-    """Get cooking errors.
-
-    If the node already has errors, check whether it needs to recook.
-    If so, recook first to see if that solves it.
-
-    """
-    if node.errors() and node.needsToCook():
-        node.cook()
-
-    return node.errors()
-
-
-class ValidateNoErrors(plugin.HoudiniInstancePlugin):
-    """Validate the Instance has no current cooking errors."""
-
-    order = pyblish.api.ValidatorOrder
-    label = "Validate no errors"
-
-    def process(self, instance):
-
-        if not instance.data.get("instance_node"):
-            self.log.debug(
-                "Skipping 'Validate no errors' because instance "
-                "has no instance node: {}".format(instance)
-            )
-            return
-
-        validate_nodes = []
-
-        if len(instance) > 0:
-            validate_nodes.append(hou.node(instance.data.get("instance_node")))
-        output_node = instance.data.get("output_node")
-        if output_node:
-            validate_nodes.append(output_node)
-
-        for node in validate_nodes:
-            self.log.debug("Validating for errors: %s" % node.path())
-            errors = get_errors(node)
-
-            if errors:
-                # If there are current errors, then try an unforced cook
-                # to see whether the error will disappear.
- self.log.debug( - "Recooking to revalidate error " - "is up to date for: %s" % node.path() - ) - current_frame = hou.intFrame() - start = instance.data.get("frameStart", current_frame) - end = instance.data.get("frameEnd", current_frame) - cook_in_range(node, start=start, end=end) - - # Check for errors again after the forced recook - errors = get_errors(node) - if errors: - self.log.error(errors) - raise PublishValidationError( - "Node has errors: {}".format(node.path()), - title=self.label) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_primitive_hierarchy_paths.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_primitive_hierarchy_paths.py deleted file mode 100644 index 9daab2a1a3..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_primitive_hierarchy_paths.py +++ /dev/null @@ -1,183 +0,0 @@ -# -*- coding: utf-8 -*- -import hou - -from ayon_houdini.api import plugin -from ayon_core.pipeline import PublishValidationError -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - RepairAction, -) - - -class AddDefaultPathAction(RepairAction): - label = "Add a default path attribute" - icon = "mdi.pencil-plus-outline" - - -class ValidatePrimitiveHierarchyPaths(plugin.HoudiniInstancePlugin): - """Validate all primitives build hierarchy from attribute when enabled. - - The name of the attribute must exist on the prims and have the same name - as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic - ROP node whenever Build Hierarchy from Attribute is enabled. - - """ - - order = ValidateContentsOrder + 0.1 - families = ["abc"] - label = "Validate Prims Hierarchy Path" - actions = [AddDefaultPathAction] - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - nodes = [n.path() for n in invalid] - raise PublishValidationError( - "See log for details. " "Invalid nodes: {0}".format(nodes), - title=self.label - ) - - @classmethod - def get_invalid(cls, instance): - - output_node = instance.data.get("output_node") - rop_node = hou.node(instance.data["instance_node"]) - - if output_node is None: - cls.log.error( - "SOP Output node in '%s' does not exist. " - "Ensure a valid SOP output path is set.", rop_node.path() - ) - - return [rop_node] - - build_from_path = rop_node.parm("build_from_path").eval() - if not build_from_path: - cls.log.debug( - "Alembic ROP has 'Build from Path' disabled. " - "Validation is ignored.." - ) - return - - path_attr = rop_node.parm("path_attrib").eval() - if not path_attr: - cls.log.error( - "The Alembic ROP node has no Path Attribute" - "value set, but 'Build Hierarchy from Attribute'" - "is enabled." - ) - return [rop_node] - - cls.log.debug("Checking for attribute: %s", path_attr) - - if not hasattr(output_node, "geometry"): - # In the case someone has explicitly set an Object - # node instead of a SOP node in Geometry context - # then for now we ignore - this allows us to also - # export object transforms. - cls.log.warning("No geometry output node found, skipping check..") - return - - # Check if the primitive attribute exists - frame = instance.data.get("frameStart", 0) - geo = output_node.geometryAtFrame(frame) - - # If there are no primitives on the current frame then we can't - # check whether the path names are correct. So we'll just issue a - # warning that the check can't be done consistently and skip - # validation. - if len(geo.iterPrims()) == 0: - cls.log.warning( - "No primitives found on current frame. 
Validation" - " for primitive hierarchy paths will be skipped," - " thus can't be validated." - ) - return - - # Check if there are any values for the primitives - attrib = geo.findPrimAttrib(path_attr) - if not attrib: - cls.log.info( - "Geometry Primitives are missing " - "path attribute: `%s`", path_attr - ) - return [output_node] - - # Ensure at least a single string value is present - if not attrib.strings(): - cls.log.info( - "Primitive path attribute has no " - "string values: %s", path_attr - ) - return [output_node] - - paths = geo.primStringAttribValues(path_attr) - # Ensure all primitives are set to a valid path - # Collect all invalid primitive numbers - invalid_prims = [i for i, path in enumerate(paths) if not path] - if invalid_prims: - num_prims = len(geo.iterPrims()) # faster than len(geo.prims()) - cls.log.info( - "Prims have no value for attribute `%s` " - "(%s of %s prims)", path_attr, len(invalid_prims), num_prims - ) - return [output_node] - - @classmethod - def repair(cls, instance): - """Add a default path attribute Action. - - It is a helper action more than a repair action, - used to add a default single value for the path. - """ - - rop_node = hou.node(instance.data["instance_node"]) - output_node = rop_node.parm("sop_path").evalAsNode() - - if not output_node: - cls.log.debug( - "Action isn't performed, invalid SOP Path on %s", - rop_node - ) - return - - # This check to prevent the action from running multiple times. - # git_invalid only returns [output_node] when - # path attribute is the problem - if cls.get_invalid(instance) != [output_node]: - return - - path_attr = rop_node.parm("path_attrib").eval() - - path_node = output_node.parent().createNode("name", "AUTO_PATH") - path_node.parm("attribname").set(path_attr) - path_node.parm("name1").set('`opname("..")`/`opname("..")`Shape') - - cls.log.debug( - "'%s' was created. It adds '%s' with a default single value", - path_node, path_attr - ) - - path_node.setGenericFlag(hou.nodeFlag.DisplayComment, True) - path_node.setComment( - 'Auto path node was created automatically by ' - '"Add a default path attribute"' - '\nFeel free to modify or replace it.' 
- ) - - if output_node.type().name() in ["null", "output"]: - # Connect before - path_node.setFirstInput(output_node.input(0)) - path_node.moveToGoodPosition() - output_node.setFirstInput(path_node) - output_node.moveToGoodPosition() - else: - # Connect after - path_node.setFirstInput(output_node) - rop_node.parm("sop_path").set(path_node.path()) - path_node.moveToGoodPosition() - - cls.log.debug( - "SOP path on '%s' updated to new output node '%s'", - rop_node, path_node - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_render_products.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_render_products.py deleted file mode 100644 index 774d517bfb..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_render_products.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import hou -import pyblish.api - -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api import plugin - - -class ValidateUsdRenderProducts(plugin.HoudiniInstancePlugin): - """Validate at least one render product is present""" - - order = pyblish.api.ValidatorOrder - families = ["usdrender"] - hosts = ["houdini"] - label = "Validate Render Products" - actions = [SelectROPAction] - - def get_description(self): - return inspect.cleandoc( - """### No Render Products - - The render submission specified no Render Product outputs and - as such would not generate any rendered files. - - This is usually the case if no Render Settings or Render - Products were created. - - Make sure to create the Render Settings - relevant to the renderer you want to use. - - """ - ) - - def process(self, instance): - - if not instance.data.get("output_node"): - self.log.warning("No valid LOP node to render found.") - return - - if not instance.data.get("files", []): - node_path = instance.data["instance_node"] - node = hou.node(node_path) - rendersettings_path = ( - node.evalParm("rendersettings") or "/Render/rendersettings" - ) - raise PublishValidationError( - message=( - "No Render Products found in Render Settings " - "for '{}' at '{}'".format(node_path, rendersettings_path) - ), - description=self.get_description(), - title=self.label - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_review_colorspace.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_review_colorspace.py deleted file mode 100644 index e96b222446..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_review_colorspace.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import hou - -import pyblish.api -from ayon_core.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import ( - RepairAction, - get_plugin_settings, - apply_plugin_settings_automatically -) - -from ayon_houdini.api import plugin -from ayon_houdini.api.action import SelectROPAction - - -class ResetViewSpaceAction(RepairAction): - label = "Reset OCIO colorspace parm" - icon = "mdi.monitor" - - -class ValidateReviewColorspace(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate Review Colorspace parameters. - - It checks if 'OCIO Colorspace' parameter was set to valid value. 
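Both colorspace checks in this validator reduce to a membership test and an equality test. A standalone sketch with hypothetical colorspace names; in Houdini the known names come from `hou.Color.ocio_spaces()` and the current value from the ROP's `ociocolorspace` parm:

```python
def validate_colorspace(current, known_spaces, review_space=""):
    """Return a list of error messages; an empty list means valid."""
    errors = []
    if current not in known_spaces:
        errors.append("Colorspace name doesn't exist: %s" % current)
    # Only enforce the configured review colorspace when one is set.
    if review_space and current != review_space:
        errors.append(
            "Colorspace %r doesn't match settings value %r"
            % (current, review_space)
        )
    return errors


# Hypothetical OCIO config contents.
known = {"ACEScg", "sRGB - Display", "Raw"}
print(validate_colorspace("sRGB - Display", known, review_space="ACEScg"))
```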
- """ - - order = pyblish.api.ValidatorOrder + 0.1 - families = ["review"] - label = "Validate Review Colorspace" - actions = [ResetViewSpaceAction, SelectROPAction] - - optional = True - review_color_space = "" - - @classmethod - def apply_settings(cls, project_settings): - # Preserve automatic settings applying logic - settings = get_plugin_settings(plugin=cls, - project_settings=project_settings, - log=cls.log, - category="houdini") - apply_plugin_settings_automatically(cls, settings, logger=cls.log) - - # workfile settings added in '0.2.13' - color_settings = project_settings["houdini"]["imageio"].get( - "workfile", {} - ) - # Add review color settings - if color_settings.get("enabled"): - cls.review_color_space = color_settings.get("review_color_space") - - - def process(self, instance): - - rop_node = hou.node(instance.data["instance_node"]) - - # This plugin is triggered when marking render as reviewable. - # Therefore, this plugin will run on over wrong instances. - # TODO: Don't run this plugin on wrong instances. - # This plugin should run only on review product type - # with instance node of opengl type. - if rop_node.type().name() != "opengl": - self.log.debug("Skipping Validation. Rop node {} " - "is not an OpenGl node.".format(rop_node.path())) - return - - if not self.is_active(instance.data): - return - - if os.getenv("OCIO") is None: - self.log.debug( - "Using Houdini's Default Color Management, " - " skipping check.." - ) - return - - if rop_node.evalParm("colorcorrect") != 2: - # any colorspace settings other than default requires - # 'Color Correct' parm to be set to 'OpenColorIO' - raise PublishValidationError( - "'Color Correction' parm on '{}' ROP must be set to" - " 'OpenColorIO'".format(rop_node.path()) - ) - - current_color_space = rop_node.evalParm("ociocolorspace") - if current_color_space not in hou.Color.ocio_spaces(): - raise PublishValidationError( - "Invalid value: Colorspace name doesn't exist.\n" - "Check 'OCIO Colorspace' parameter on '{}' ROP" - .format(rop_node.path()) - ) - - # if houdini/imageio/workfile is enabled and - # Review colorspace setting is empty then this check should - # actually check if the current_color_space setting equals - # the default colorspace value. - # However, it will make the black cmd screen show up more often - # which is very annoying. - if self.review_color_space and \ - self.review_color_space != current_color_space: - - raise PublishValidationError( - "Invalid value: Colorspace name doesn't match" - "the Colorspace specified in settings." - ) - - @classmethod - def repair(cls, instance): - """Reset view colorspace. - - It is used to set colorspace on opengl node. - - It uses the colorspace value specified in the Houdini addon settings. - If the value in the Houdini addon settings is empty, - it will fall to the default colorspace. - - Note: - This repair action assumes that OCIO is enabled. - As if OCIO is disabled the whole validation is skipped - and this repair action won't show up. - """ - from ayon_houdini.api.lib import set_review_color_space - - # Fall to the default value if cls.review_color_space is empty. - if not cls.review_color_space: - # cls.review_color_space is an empty string - # when the imageio/workfile setting is disabled or - # when the Review colorspace setting is empty. 
- from ayon_houdini.api.colorspace import get_default_display_view_colorspace # noqa - cls.review_color_space = get_default_display_view_colorspace() - - rop_node = hou.node(instance.data["instance_node"]) - set_review_color_space(rop_node, - cls.review_color_space, - cls.log) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_scene_review.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_scene_review.py deleted file mode 100644 index f45cd1c97d..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_scene_review.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding: utf-8 -*- -import hou - -import pyblish.api -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api import plugin - - -class ValidateSceneReview(plugin.HoudiniInstancePlugin): - """Validator Some Scene Settings before publishing the review - 1. Scene Path - 2. Resolution - """ - - order = pyblish.api.ValidatorOrder - families = ["review"] - label = "Scene Setting for review" - - def process(self, instance): - - report = [] - instance_node = hou.node(instance.data.get("instance_node")) - - # This plugin is triggered when marking render as reviewable. - # Therefore, this plugin will run on over wrong instances. - # TODO: Don't run this plugin on wrong instances. - # This plugin should run only on review product type - # with instance node of opengl type. - if instance_node.type().name() != "opengl": - self.log.debug("Skipping Validation. Rop node {} " - "is not an OpenGl node.".format(instance_node.path())) - return - - invalid = self.get_invalid_scene_path(instance_node) - if invalid: - report.append(invalid) - - invalid = self.get_invalid_camera_path(instance_node) - if invalid: - report.append(invalid) - - invalid = self.get_invalid_resolution(instance_node) - if invalid: - report.extend(invalid) - - if report: - raise PublishValidationError( - "\n\n".join(report), - title=self.label) - - def get_invalid_scene_path(self, rop_node): - scene_path_parm = rop_node.parm("scenepath") - scene_path_node = scene_path_parm.evalAsNode() - if not scene_path_node: - path = scene_path_parm.evalAsString() - return "Scene path does not exist: '{}'".format(path) - - def get_invalid_camera_path(self, rop_node): - camera_path_parm = rop_node.parm("camera") - camera_node = camera_path_parm.evalAsNode() - path = camera_path_parm.evalAsString() - if not camera_node: - return "Camera path does not exist: '{}'".format(path) - type_name = camera_node.type().name() - if type_name != "cam": - return "Camera path is not a camera: '{}' (type: {})".format( - path, type_name - ) - - def get_invalid_resolution(self, rop_node): - - # The resolution setting is only used when Override Camera Resolution - # is enabled. So we skip validation if it is disabled. 
- override = rop_node.parm("tres").eval() - if not override: - return - - invalid = [] - res_width = rop_node.parm("res1").eval() - res_height = rop_node.parm("res2").eval() - if res_width == 0: - invalid.append("Override Resolution width is set to zero.") - if res_height == 0: - invalid.append("Override Resolution height is set to zero") - - return invalid diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_sop_output_node.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_sop_output_node.py deleted file mode 100644 index 7d37927058..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_sop_output_node.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -import hou - -import pyblish.api -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api import plugin -from ayon_houdini.api.action import ( - SelectInvalidAction, - SelectROPAction, -) - - -class ValidateSopOutputNode(plugin.HoudiniInstancePlugin): - """Validate the instance SOP Output Node. - - This will ensure: - - The SOP Path is set. - - The SOP Path refers to an existing object. - - The SOP Path node is a SOP node. - - The SOP Path node has at least one input connection (has an input) - - The SOP Path has geometry data. - - """ - - order = pyblish.api.ValidatorOrder - families = ["pointcache", "vdbcache", "model"] - label = "Validate Output Node (SOP)" - actions = [SelectROPAction, SelectInvalidAction] - - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Output node(s) are incorrect", - title="Invalid output node(s)" - ) - - @classmethod - def get_invalid(cls, instance): - output_node = instance.data.get("output_node") - - if output_node is None: - node = hou.node(instance.data["instance_node"]) - cls.log.error( - "SOP Output node in '%s' does not exist. " - "Ensure a valid SOP output path is set." % node.path() - ) - - return [node] - - # Output node must be a Sop node. - if not isinstance(output_node, hou.SopNode): - cls.log.error( - "Output node %s is not a SOP node. " - "SOP Path must point to a SOP node, " - "instead found category type: %s" - % (output_node.path(), output_node.type().category().name()) - ) - return [output_node] - - # For the sake of completeness also assert the category type - # is Sop to avoid potential edge case scenarios even though - # the isinstance check above should be stricter than this category - if output_node.type().category().name() != "Sop": - raise PublishValidationError( - ("Output node {} is not of category Sop. " - "This is a bug.").format(output_node.path()), - title=cls.label) - - # Ensure the node is cooked and succeeds to cook so we can correctly - # check for its geometry data. - if output_node.needsToCook(): - cls.log.debug("Cooking node: %s" % output_node.path()) - try: - output_node.cook() - except hou.Error as exc: - cls.log.error("Cook failed: %s" % exc) - cls.log.error(output_node.errors()[0]) - return [output_node] - - # Ensure the output node has at least Geometry data - if not output_node.geometry(): - cls.log.error( - "Output node `%s` has no geometry data." 
% output_node.path() - ) - return [output_node] diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_subset_name.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_subset_name.py deleted file mode 100644 index a63a4f16c7..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_subset_name.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for correct naming of Static Meshes.""" -import hou - -from ayon_core.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - RepairAction, -) -from ayon_core.pipeline.create import get_product_name -from ayon_houdini.api import plugin -from ayon_houdini.api.action import SelectInvalidAction - - -class FixProductNameAction(RepairAction): - label = "Fix Product Name" - - -class ValidateSubsetName(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate Product name. - - """ - - families = ["staticMesh", "hda"] - label = "Validate Product Name" - order = ValidateContentsOrder + 0.1 - actions = [FixProductNameAction, SelectInvalidAction] - - optional = True - - def process(self, instance): - - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - nodes = [n.path() for n in invalid] - raise PublishValidationError( - "See log for details. " - "Invalid nodes: {0}".format(nodes) - ) - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - rop_node = hou.node(instance.data["instance_node"]) - - # Check product name - folder_entity = instance.data["folderEntity"] - task_entity = instance.data["taskEntity"] - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - product_name = get_product_name( - instance.context.data["projectName"], - task_name, - task_type, - instance.context.data["hostName"], - instance.data["productType"], - variant=instance.data["variant"], - dynamic_data={ - "asset": folder_entity["name"], - "folder": { - "label": folder_entity["label"], - "name": folder_entity["name"] - } - } - ) - - if instance.data.get("productName") != product_name: - invalid.append(rop_node) - cls.log.error( - "Invalid product name on rop node '%s' should be '%s'.", - rop_node.path(), product_name - ) - - return invalid - - @classmethod - def repair(cls, instance): - rop_node = hou.node(instance.data["instance_node"]) - - # Check product name - folder_entity = instance.data["folderEntity"] - task_entity = instance.data["taskEntity"] - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - product_name = get_product_name( - instance.context.data["projectName"], - task_name, - task_type, - instance.context.data["hostName"], - instance.data["productType"], - variant=instance.data["variant"], - dynamic_data={ - "asset": folder_entity["name"], - "folder": { - "label": folder_entity["label"], - "name": folder_entity["name"] - } - } - ) - - instance.data["productName"] = product_name - rop_node.parm("AYON_productName").set(product_name) - - cls.log.debug( - "Product name on rop node '%s' has been set to '%s'.", - rop_node.path(), product_name - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_unreal_staticmesh_naming.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_unreal_staticmesh_naming.py deleted file mode 100644 index 
a3d971695d..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_unreal_staticmesh_naming.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for correct naming of Static Meshes.""" -import hou - -from ayon_core.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import ValidateContentsOrder - -from ayon_houdini.api import plugin -from ayon_houdini.api.action import SelectInvalidAction -from ayon_houdini.api.lib import get_output_children - - -class ValidateUnrealStaticMeshName(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate name of Unreal Static Mesh. - - This validator checks if output node name has a collision prefix: - - UBX - - UCP - - USP - - UCX - - This validator also checks if product name is correct - - {static mesh prefix}_{FolderName}{Variant}. - - """ - - families = ["staticMesh"] - label = "Unreal Static Mesh Name (FBX)" - order = ValidateContentsOrder + 0.1 - actions = [SelectInvalidAction] - - optional = True - collision_prefixes = [] - static_mesh_prefix = "" - - @classmethod - def apply_settings(cls, project_settings): - - settings = ( - project_settings["houdini"]["create"]["CreateStaticMesh"] - ) - cls.collision_prefixes = settings["collision_prefixes"] - cls.static_mesh_prefix = settings["static_mesh_prefix"] - - def process(self, instance): - - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - nodes = [n.path() for n in invalid] - raise PublishValidationError( - "See log for details. " - "Invalid nodes: {0}".format(nodes) - ) - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - rop_node = hou.node(instance.data["instance_node"]) - output_node = instance.data.get("output_node") - if output_node is None: - cls.log.debug( - "No Output Node, skipping check.." - ) - return - - if rop_node.evalParm("buildfrompath"): - # This validator doesn't support naming check if - # building hierarchy from path' is used - cls.log.info( - "Using 'Build Hierarchy from Path Attribute', skipping check.." - ) - return - - # Check nodes names - all_outputs = get_output_children(output_node, include_sops=False) - for output in all_outputs: - for prefix in cls.collision_prefixes: - if output.name().startswith(prefix): - invalid.append(output) - cls.log.error( - "Invalid node name: Node '%s' " - "includes a collision prefix '%s'", - output.path(), prefix - ) - break - - return invalid diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_asset_contribution_default_prim.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_asset_contribution_default_prim.py deleted file mode 100644 index 03836021dc..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_asset_contribution_default_prim.py +++ /dev/null @@ -1,102 +0,0 @@ -import inspect - -import hou -import pyblish.api - -from ayon_core.pipeline import PublishValidationError -from ayon_core.pipeline.publish import RepairAction, OptionalPyblishPluginMixin - -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api import plugin - - -class ValidateUSDAssetContributionDefaultPrim(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate the default prim is set when USD contribution is set to asset. 
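The rule this validator enforces reduces to a string comparison: the default prim authored on the ROP must equal the last segment of the instance's folder path. A sketch with a hypothetical folder path and parm value:

```python
folder_path = "/asset/char_hero"  # hypothetical instance folderPath
folder_name = folder_path.rsplit("/", 1)[-1]  # "char_hero"

default_prim = "/char_hero"  # hypothetical ROP 'defaultprim' parm value
assert default_prim.lstrip("/") == folder_name  # passes validation
```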
- - If the USD asset contributions is enabled and the user has it set to - initialize asset as "asset" then most likely they are looking to publish - into an asset structure - which should have a default prim that matches - the folder's name. To ensure that's the case we force require the - value to be set on the ROP node. - - Note that another validator "Validate USD Rop Default Prim" enforces the - primitive actually exists (or has modifications) if the ROP specifies - a default prim - so that does not have to be validated with this validator. - - """ - - order = pyblish.api.ValidatorOrder - families = ["usdrop"] - hosts = ["houdini"] - label = "Validate USD Asset Contribution Default Prim" - actions = [SelectROPAction, RepairAction] - - # TODO: Unfortunately currently this does not show as optional toggle - # because the product type is `usd` and not `usdrop` - however we do - # not want to run this for ALL `usd` product types? - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Check if instance is set to be an asset contribution - settings = self.get_attr_values_from_data_for_plugin_name( - "CollectUSDLayerContributions", instance.data - ) - if ( - not settings.get("contribution_enabled", False) - or settings.get("contribution_target_product_init") != "asset" - ): - return - - rop_node = hou.node(instance.data["instance_node"]) - default_prim = rop_node.evalParm("defaultprim") - if not default_prim: - raise PublishValidationError( - f"No default prim specified on ROP node: {rop_node.path()}", - description=self.get_description() - ) - - folder_name = instance.data["folderPath"].rsplit("/", 1)[-1] - if not default_prim.lstrip("/") == folder_name: - raise PublishValidationError( - f"Default prim specified on ROP node does not match the " - f"asset's folder name: '{default_prim}' " - f"(should be: '/{folder_name}')", - description=self.get_description() - ) - - @classmethod - def repair(cls, instance): - rop_node = hou.node(instance.data["instance_node"]) - rop_node.parm("defaultprim").set( - "/`strsplit(chs(\"folderPath\"), \"/\", -1)`" - ) - - @staticmethod - def get_attr_values_from_data_for_plugin_name( - plugin_name: str, data: dict) -> dict: - return ( - data - .get("publish_attributes", {}) - .get(plugin_name, {}) - ) - - def get_description(self): - return inspect.cleandoc( - """### Default primitive not set to current asset - - The USD instance has **USD Contribution** enabled and is set to - initialize as **asset**. The asset requires a default root - primitive with the name of the folder it's related to. - - For example, you're working in `/asset/char_hero` then the - folder's name is `char_hero`. For the asset hence all prims should - live under `/char_hero` root primitive. - - This validation solely ensures the **default primitive** on the ROP - node is set to match the folder name. 
- """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_assignments.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_assignments.py deleted file mode 100644 index e5037454dd..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_assignments.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import hou -from pxr import Usd, UsdShade, UsdGeom - -import pyblish.api - -from ayon_core.pipeline.publish import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api import plugin - - -def has_material(prim: Usd.Prim, - include_subsets: bool=True, - purpose=UsdShade.Tokens.allPurpose) -> bool: - """Return whether primitive has any material binding.""" - search_from = [prim] - if include_subsets: - subsets = UsdShade.MaterialBindingAPI(prim).GetMaterialBindSubsets() - for subset in subsets: - search_from.append(subset.GetPrim()) - - bounds = UsdShade.MaterialBindingAPI.ComputeBoundMaterials(search_from, - purpose) - for (material, relationship) in zip(*bounds): - material_prim = material.GetPrim() - if material_prim.IsValid(): - # Has a material binding - return True - - return False - - -class ValidateUsdLookAssignments(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate all geometry prims have a material binding. - - Note: This does not necessarily validate the material binding is authored - by the current layers if the input already had material bindings. - - """ - - order = pyblish.api.ValidatorOrder - families = ["look"] - hosts = ["houdini"] - label = "Validate All Geometry Has Material Assignment" - actions = [SelectROPAction] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - lop_node: hou.LopNode = instance.data.get("output_node") - if not lop_node: - return - - # We iterate the composed stage for code simplicity; however this - # means that it does not validate across e.g. multiple model variants - # but only checks against the current composed stage. Likely this is - # also what you actually want to validate, because your look might not - # apply to *all* model variants. - stage = lop_node.stage() - invalid = [] - for prim in stage.Traverse(): - if not prim.IsA(UsdGeom.Gprim): - continue - - if not has_material(prim): - invalid.append(prim.GetPath()) - - for path in sorted(invalid): - self.log.warning("No material binding on: %s", path.pathString) - - if invalid: - raise PublishValidationError( - "Found geometry without material bindings.", - title="No assigned materials", - description=self.get_description() - ) - - @staticmethod - def get_description(): - return inspect.cleandoc( - """### Geometry has no material assignments. - - A look publish should usually define a material assignment for all - geometry of a model. As such, this validates whether all geometry - currently has at least one material binding applied. 
- - """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_contents.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_contents.py deleted file mode 100644 index 43357cdb35..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_contents.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -from typing import List, Union -from functools import partial - -import hou -from pxr import Sdf -import pyblish.api - -from ayon_core.pipeline.publish import PublishValidationError -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api.usd import get_schema_type_names -from ayon_houdini.api import plugin - - -def get_applied_items(list_proxy) -> List[Union[Sdf.Reference, Sdf.Payload]]: - """Backwards compatible equivalent of `GetAppliedItems()`""" - return list_proxy.ApplyEditsToList([]) - - -class ValidateUsdLookContents(plugin.HoudiniInstancePlugin): - """Validate no meshes are defined in the look. - - Usually, a published look should not contain generated meshes in the output - but only the materials, material bindings and render geometry settings. - - To avoid accidentally including a Mesh definition we ensure none of the - generated output layers for the instance is defining any Mesh type. - - """ - - order = pyblish.api.ValidatorOrder - families = ["look"] - hosts = ["houdini"] - label = "Validate Look No Meshes/Lights" - actions = [SelectROPAction] - - disallowed_types = [ - "UsdGeomBoundable", # Meshes/Lights/Procedurals - "UsdRenderSettingsBase", # Render Settings - "UsdRenderVar", # Render Var - "UsdGeomCamera" # Cameras - ] - - def process(self, instance): - - lop_node: hou.LopNode = instance.data.get("output_node") - if not lop_node: - return - - # Get layers below layer break - above_break_layers = set(layer for layer in lop_node.layersAboveLayerBreak()) - stage = lop_node.stage() - layers = [ - layer for layer - in stage.GetLayerStack(includeSessionLayers=False) - if layer.identifier not in above_break_layers - ] - if not layers: - return - - # The Sdf.PrimSpec type name will not have knowledge about inherited - # types for the type, name. So we pre-collect all invalid types - # and their child types to ensure we match inherited types as well. - disallowed_type_names = set() - for type_name in self.disallowed_types: - disallowed_type_names.update(get_schema_type_names(type_name)) - - # Find invalid prims - invalid = [] - - def collect_invalid(layer: Sdf.Layer, path: Sdf.Path): - """Collect invalid paths into the `invalid` list""" - if not path.IsPrimPath(): - return - - prim = layer.GetPrimAtPath(path) - if prim.typeName in disallowed_type_names: - self.log.warning( - "Disallowed prim type '%s' at %s", - prim.typeName, prim.path.pathString - ) - invalid.append(path) - return - - # TODO: We should allow referencing or payloads, but if so - we - # should still check whether the loaded reference or payload - # introduces any geometry. 
If so, disallow it because that - # opinion would 'define' geometry in the output - references= get_applied_items(prim.referenceList) - if references: - self.log.warning( - "Disallowed references are added at %s: %s", - prim.path.pathString, - ", ".join(ref.assetPath for ref in references) - ) - invalid.append(path) - - payloads = get_applied_items(prim.payloadList) - if payloads: - self.log.warning( - "Disallowed payloads are added at %s: %s", - prim.path.pathString, - ", ".join(payload.assetPath for payload in payloads) - ) - invalid.append(path) - - for layer in layers: - layer.Traverse("/", partial(collect_invalid, layer)) - - if invalid: - raise PublishValidationError( - "Invalid look members found.", - title="Look Invalid Members", - description=self.get_description() - ) - - @staticmethod - def get_description(): - return inspect.cleandoc( - """### Look contains invalid members - - A look publish should usually only contain materials, material - bindings and render geometry settings. - - This validation invalidates any creation of: - - Render Settings, - - Lights, - - Cameras, - - Geometry (Meshes, Curves and other geometry types) - - To avoid writing out loaded geometry into the output make sure to - add a Layer Break after loading all the content you do **not** want - to save into the output file. Then your materials, material - bindings and render geometry settings are overrides applied to the - loaded content after the **Layer Break LOP** node. - - If you happen to write out additional data for the meshes via - e.g. a SOP Modify make sure to import to LOPs only the relevant - attributes, mark them as static attributes, static topology and - set the Primitive Definitions to be Overlay instead of Defines. - - Currently, to avoid issues with referencing/payloading geometry - from external files any references or payloads are also disallowed - for looks. 
- - """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_material_defs.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_material_defs.py deleted file mode 100644 index 273bf46b18..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_look_material_defs.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import hou -from pxr import Sdf, UsdShade -import pyblish.api - -from ayon_core.pipeline.publish import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api.usd import get_schema_type_names -from ayon_houdini.api import plugin - - -class ValidateLookShaderDefs(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate Material primitives are defined types instead of overs""" - - order = pyblish.api.ValidatorOrder - families = ["look"] - hosts = ["houdini"] - label = "Validate Look Shaders Are Defined" - actions = [SelectROPAction] - optional = True - - # Types to validate at the low-level Sdf API - # For Usd API we validate directly against `UsdShade.Material` - validate_types = [ - "UsdShadeMaterial" - ] - - def process(self, instance): - if not self.is_active(instance.data): - return - - lop_node: hou.LopNode = instance.data.get("output_node") - if not lop_node: - return - - # Get layers below layer break - above_break_layers = set( - layer for layer in lop_node.layersAboveLayerBreak()) - stage = lop_node.stage() - layers = [ - layer for layer - in stage.GetLayerStack(includeSessionLayers=False) - if layer.identifier not in above_break_layers - ] - if not layers: - return - - # The Sdf.PrimSpec type name will not have knowledge about inherited - # types for the type, name. So we pre-collect all invalid types - # and their child types to ensure we match inherited types as well. - validate_type_names = set() - for type_name in self.validate_types: - validate_type_names.update(get_schema_type_names(type_name)) - - invalid = [] - for layer in layers: - def log_overs(path: Sdf.Path): - if not path.IsPrimPath(): - return - prim_spec = layer.GetPrimAtPath(path) - - if not prim_spec.typeName: - # Typeless may mean Houdini generated the material or - # shader as override because upstream the nodes already - # existed. So we check the stage instead to identify - # the composed type of the prim - prim = stage.GetPrimAtPath(path) - if not prim: - return - - if not prim.IsA(UsdShade.Material): - return - - self.log.debug("Material Prim has no type defined: %s", - path) - - elif prim_spec.typeName not in validate_type_names: - return - - if prim_spec.specifier != Sdf.SpecifierDef: - specifier = { - Sdf.SpecifierDef: "Def", - Sdf.SpecifierOver: "Over", - Sdf.SpecifierClass: "Class" - }[prim_spec.specifier] - - self.log.warning( - "Material is not defined but specified as " - "'%s': %s", specifier, path - ) - invalid.append(path) - - layer.Traverse("/", log_overs) - - if invalid: - raise PublishValidationError( - "Found Materials not specifying an authored definition.", - title="Materials not defined", - description=self.get_description() - ) - - @staticmethod - def get_description(): - return inspect.cleandoc( - """### Materials are not defined types - - There are materials in your current look that do not **define** the - material primitives, but rather **override** or specify a - **class**. 
This is most likely not what you want since you want - most looks to define new materials instead of overriding existing - materials. - - Usually this happens if your current scene loads an input asset - that already has the materials you're creating in your current - scene as well. For example, if you are loading the Asset that - contains the previously publish of your look without muting the - look layer. As such, Houdini sees the materials already exist and - will not make new definitions, but only write "override changes". - However, once your look publish would replace the previous one then - suddenly the materials would be missing and only specified as - overrides. - - So, in most cases this is solved by Layer Muting upstream the - look layers of the loaded asset. - - If for a specific case the materials already existing in the input - is correct then you can either specify new material names for what - you're creating in the current scene or disable this validation - if you are sure you want to write overrides in your look publish - instead of definitions. - """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_output_node.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_output_node.py deleted file mode 100644 index 7ef9a80394..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_output_node.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect - -import pyblish.api - -from ayon_core.pipeline import PublishValidationError -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api import plugin - - -class ValidateUSDOutputNode(plugin.HoudiniInstancePlugin): - """Validate the instance USD LOPs Output Node. - - This will ensure: - - The LOP Path is set. - - The LOP Path refers to an existing object. - - The LOP Path node is a LOP node. - - """ - - # Validate early so that this error reports higher than others to the user - # so that if another invalidation is due to the output node being invalid - # the user will likely first focus on this first issue - order = pyblish.api.ValidatorOrder - 0.4 - families = ["usdrop"] - label = "Validate Output Node (USD)" - actions = [SelectROPAction] - - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - path = invalid[0] - raise PublishValidationError( - "Output node '{}' has no valid LOP path set.".format(path), - title=self.label, - description=self.get_description() - ) - - @classmethod - def get_invalid(cls, instance): - - import hou - - output_node = instance.data.get("output_node") - - if output_node is None: - node = hou.node(instance.data.get("instance_node")) - cls.log.error( - "USD node '%s' configured LOP path does not exist. " - "Ensure a valid LOP path is set." % node.path() - ) - - return [node.path()] - - # Output node must be a Sop node. - if not isinstance(output_node, hou.LopNode): - cls.log.error( - "Output node %s is not a LOP node. " - "LOP Path must point to a LOP node, " - "instead found category type: %s" - % (output_node.path(), output_node.type().category().name()) - ) - return [output_node.path()] - - def get_description(self): - return inspect.cleandoc( - """### USD ROP has invalid LOP path - - The USD ROP node has no or an invalid LOP path set to be exported. - Make sure to correctly configure what you want to export for the - publish. 
- """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_arnold.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_arnold.py deleted file mode 100644 index 67d1aa605a..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_arnold.py +++ /dev/null @@ -1,311 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import hou -import pxr -from pxr import UsdRender -import pyblish.api - -from ayon_core.pipeline.publish import PublishValidationError, RepairAction - -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api.usd import get_usd_render_rop_rendersettings -from ayon_houdini.api import plugin - - -class ValidateUSDRenderSingleFile(plugin.HoudiniInstancePlugin): - """Validate the writing of a single USD Render Output file. - - When writing to single file with USD Render ROP make sure to write the - output USD file from a single process to avoid overwriting it with - different processes. - """ - - order = pyblish.api.ValidatorOrder - families = ["usdrender"] - hosts = ["houdini"] - label = "Validate USD Render ROP Settings" - actions = [SelectROPAction, RepairAction] - - def process(self, instance): - - if instance.data.get("creator_attributes", - {}).get("render_target") != "farm_split": - # Validation is only relevant when submitting a farm job where the - # export and render are separate jobs. - return - - # Get configured settings for this instance - submission_data = ( - instance.data - .get("publish_attributes", {}) - .get("HoudiniSubmitDeadlineUsdRender", {}) - ) - render_chunk_size = submission_data.get("chunk", 1) - export_chunk_size = submission_data.get("export_chunk", 1) - usd_file_per_frame = "$F" in instance.data["ifdFile"] - frame_start_handle = instance.data["frameStartHandle"] - frame_end_handle = instance.data["frameEndHandle"] - num_frames = frame_end_handle - frame_start_handle + 1 - rop_node = hou.node(instance.data["instance_node"]) - - # Whether ROP node is set to render all Frames within a single process - # When this is disabled then Husk will restart completely per frame - # no matter the chunk size. - all_frames_at_once = rop_node.evalParm("allframesatonce") - - invalid = False - if usd_file_per_frame: - # USD file per frame - # If rendering multiple frames per task and USD file has $F then - # log a warning that the optimization will be less efficient - # since husk will still restart per frame. - if render_chunk_size > 1: - self.log.debug( - "Render chunk size is bigger than one but export file is " - "a USD file per frame. Husk does not allow rendering " - "separate USD files in one process. As such, Husk will " - "restart per frame even within the chunk to render the " - "correct file per frame." - ) - else: - # Single export USD file - # Export chunk size must be higher than the amount of frames to - # ensure the file is written in one go on one machine and thus - # ends up containing all frames correctly - if export_chunk_size < num_frames: - self.log.error( - "The export chunk size %s is smaller than the amount of " - "frames %s, so multiple tasks will try to export to " - "the same file. 
Make sure to increase chunk " - "size to higher than the amount of frames to render, " - "more than >%s", - export_chunk_size, num_frames, num_frames - ) - invalid = True - - if not all_frames_at_once: - self.log.error( - "Please enable 'Render All Frames With A Single Process' " - "on the USD Render ROP node or add $F to the USD filename", - ) - invalid = True - - if invalid: - raise PublishValidationError( - "Render USD file being overwritten during export.", - title="Render USD file overwritten", - description=self.get_description()) - - @classmethod - def repair(cls, instance): - # Enable all frames at once and make the frames per task - # very large - rop_node = hou.node(instance.data["instance_node"]) - rop_node.parm("allframesatonce").set(True) - - # Override instance setting for export chunk size - create_context = instance.context.data["create_context"] - created_instance = create_context.get_instance_by_id( - instance.data["instance_id"] - ) - created_instance.publish_attributes["HoudiniSubmitDeadlineUsdRender"]["export_chunk"] = 1000 # noqa - create_context.save_changes() - - def get_description(self): - return inspect.cleandoc( - """### Render USD file configured incorrectly - - The USD render ROP is currently configured to write a single - USD file to render instead of a file per frame. - - When that is the case, a single machine must produce that file in - one process to avoid the file being overwritten by the other - processes. - - We resolve that by enabling _Render All Frames With A Single - Process_ on the ROP node and ensure the export job task size - is larger than the amount of frames of the sequence, so the file - gets written in one go. - - Run **Repair** to resolve this for you. - - If instead you want to write separate render USD files, please - include $F in the USD output filename on the `ROP node > Output > - USD Export > Output File` - """ - ) - - -class ValidateUSDRenderArnoldSettings(plugin.HoudiniInstancePlugin): - """Validate USD Render Product names are correctly set absolute paths.""" - - order = pyblish.api.ValidatorOrder - families = ["usdrender"] - hosts = ["houdini"] - label = "Validate USD Render Arnold Settings" - actions = [SelectROPAction] - - def process(self, instance): - - rop_node = hou.node(instance.data["instance_node"]) - node = instance.data.get("output_node") - if not node: - # No valid output node was set. We ignore it since it will - # be validated by another plug-in. - return - - # Check only for Arnold renderer - renderer = rop_node.evalParm("renderer") - if renderer != "HdArnoldRendererPlugin": - self.log.debug("Skipping Arnold Settings validation because " - "renderer is set to: %s", renderer) - return - - # Validate Arnold Product Type is enabled on the Arnold Render Settings - # This is confirmed by the `includeAovs` attribute on the RenderProduct - stage: pxr.Usd.Stage = node.stage() - invalid = False - for prim_path in instance.data.get("usdRenderProducts", []): - prim = stage.GetPrimAtPath(prim_path) - include_aovs = prim.GetAttribute("includeAovs") - if not include_aovs.IsValid() or not include_aovs.Get(0): - self.log.error( - "All Render Products must be set to 'Arnold Product " - "Type' on the Arnold Render Settings node to ensure " - "correct output of metadata and AOVs." - ) - invalid = True - break - - # Ensure 'Delegate Products' is enabled for Husk - if not rop_node.evalParm("husk_delegateprod"): - invalid = True - self.log.error("USD Render ROP has `Husk > Rendering > Delegate " - "Products` disabled. 
Please enable to ensure " - "correct output files") - - # TODO: Detect bug of invalid Cryptomatte state? - # Detect if any Render Products were set that do not actually exist - # (e.g. invalid rendervar targets for a renderproduct) because that - # is what originated the Cryptomatte enable->disable bug. - - if invalid: - raise PublishValidationError( - "Invalid Render Settings for Arnold render." - ) - - -class ValidateUSDRenderCamera(plugin.HoudiniInstancePlugin): - """Validate USD Render Settings refer to a valid render camera. - - The render camera is defined in priority by this order: - 1. ROP Node Override Camera Parm (if set) - 2. Render Product Camera (if set - this may differ PER render product!) - 3. Render Settings Camera (if set) - - If None of these are set *or* a currently set entry resolves to an invalid - camera prim path then we'll report it as an error. - - """ - - order = pyblish.api.ValidatorOrder - families = ["usdrender"] - hosts = ["houdini"] - label = "Validate USD Render Camera" - actions = [SelectROPAction] - - def process(self, instance): - - rop_node = hou.node(instance.data["instance_node"]) - lop_node = instance.data.get("output_node") - if not lop_node: - # No valid output node was set. We ignore it since it will - # be validated by another plug-in. - return - - stage = lop_node.stage() - - render_settings = get_usd_render_rop_rendersettings(rop_node, stage, - logger=self.log) - if not render_settings: - # Without render settings we basically have no defined - self.log.error("No render settings found for %s.", rop_node.path()) - return - - render_settings_camera = self._get_camera(render_settings) - rop_camera = rop_node.evalParm("override_camera") - - invalid = False - camera_paths = set() - for render_product in self.iter_render_products(render_settings, - stage): - render_product_camera = self._get_camera(render_product) - - # Get first camera path as per order in in this plug-in docstring - camera_path = next( - (cam_path for cam_path in [rop_camera, - render_product_camera, - render_settings_camera] - if cam_path), - None - ) - if not camera_path: - self.log.error( - "No render camera defined for render product: '%s'", - render_product.GetPath() - ) - invalid = True - continue - - camera_paths.add(camera_path) - - # For the camera paths used across the render products detect - # whether the path is a valid camera in the stage - for camera_path in sorted(camera_paths): - camera_prim = stage.GetPrimAtPath(camera_path) - if not camera_prim or not camera_prim.IsValid(): - self.log.error( - "Render camera path '%s' does not exist in stage.", - camera_path - ) - invalid = True - continue - - if not camera_prim.IsA(pxr.UsdGeom.Camera): - self.log.error( - "Render camera path '%s' is not a camera.", - camera_path - ) - invalid = True - - if invalid: - raise PublishValidationError( - f"No render camera found for {instance.name}.", - title="Invalid Render Camera", - description=self.get_description() - ) - - def iter_render_products(self, render_settings, stage): - for product_path in render_settings.GetProductsRel().GetTargets(): - prim = stage.GetPrimAtPath(product_path) - if prim.IsA(UsdRender.Product): - yield UsdRender.Product(prim) - - def _get_camera(self, settings: UsdRender.SettingsBase): - """Return primary camera target from RenderSettings or RenderProduct""" - camera_targets = settings.GetCameraRel().GetForwardedTargets() - if camera_targets: - return camera_targets[0] - - def get_description(self): - return inspect.cleandoc( - """### Missing render 
camera - - No valid render camera was set for the USD Render Settings. - - The configured render camera path must be a valid camera in the - stage. Make sure it refers to an existing path and that it is - a camera. - - """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_names.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_names.py deleted file mode 100644 index 2da9d009ab..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_names.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import pyblish.api -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api import plugin - - -class ValidateUSDRenderProductNames(plugin.HoudiniInstancePlugin): - """Validate USD Render Product names are correctly set absolute paths.""" - - order = pyblish.api.ValidatorOrder - families = ["usdrender"] - label = "Validate USD Render Product Names" - optional = True - - def process(self, instance): - - invalid = [] - for filepath in instance.data.get("files", []): - - if not filepath: - invalid.append("Detected empty output filepath.") - - if not os.path.isabs(filepath): - invalid.append( - "Output file path is not absolute path: %s" % filepath - ) - - if invalid: - for message in invalid: - self.log.error(message) - raise PublishValidationError( - "USD Render Paths are invalid.", title=self.label) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_paths.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_paths.py deleted file mode 100644 index 369ec082ce..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_render_product_paths.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import hou -import inspect -import pyblish.api - -from ayon_core.pipeline import ( - OptionalPyblishPluginMixin, - PublishValidationError -) - -from ayon_houdini.api import plugin - - -class ValidateUSDRenderProductPaths(plugin.HoudiniInstancePlugin, - OptionalPyblishPluginMixin): - """Validate USD Render Settings refer to a valid render camera. - - The publishing logic uses a metadata `.json` in the render output images' - folder to identify how the files should be published. To ensure multiple - subsequent submitted versions of a scene do not override the same metadata - json file we want to ensure the user has the render paths set up to - contain the $HIPNAME in a parent folder. - - """ - # NOTE(colorbleed): This workflow might be relatively Colorbleed-specific - # TODO: Preferably we find ways to make what this tries to avoid no issue - # itself by e.g. changing how AYON deals with these metadata json files. 
- - order = pyblish.api.ValidatorOrder - families = ["usdrender"] - hosts = ["houdini"] - label = "Validate USD Render Product Paths" - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - current_file = instance.context.data["currentFile"] - - # mimic `$HIPNAME:r` because `hou.text.collapseCommonVars can not - # collapse it - hipname_r = os.path.splitext(os.path.basename(current_file))[0] - - invalid = False - for filepath in instance.data.get("files", []): - folder = os.path.dirname(filepath) - - if hipname_r not in folder: - filepath_raw = hou.text.collapseCommonVars(filepath, vars=[ - "$HIP", "$JOB", "$HIPNAME" - ]) - filepath_raw = filepath_raw.replace(hipname_r, "$HIPNAME:r") - self.log.error("Invalid render output path:\n%s", filepath_raw) - invalid = True - - if invalid: - raise PublishValidationError( - "Render path is invalid. Please make sure to include a " - "folder with '$HIPNAME:r'.", - title=self.label, - description=self.get_description() - ) - - def get_description(self): - return inspect.cleandoc( - """### Invalid render output path - - The render output path must include the current scene name in - a parent folder to ensure uniqueness across multiple workfile - versions. Otherwise subsequent farm publishes could fail because - newer versions will overwrite the metadata files of older versions. - - The easiest way to do so is to include **`$HIPNAME:r`** somewhere - in the render product names. - - A recommended output path is for example: - ``` - $HIP/renders/$HIPNAME:r/$OS/$HIPNAME:r.$OS.$F4.exr - ``` - """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_rop_default_prim.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_rop_default_prim.py deleted file mode 100644 index ee4746f73f..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_usd_rop_default_prim.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import hou -from pxr import Sdf -import pyblish.api - -from ayon_core.pipeline import PublishValidationError - -from ayon_houdini.api.action import SelectROPAction -from ayon_houdini.api import plugin - - -class ValidateUSDRopDefaultPrim(plugin.HoudiniInstancePlugin): - """Validate the default prim exists if default prim value is set on ROP""" - - order = pyblish.api.ValidatorOrder - families = ["usdrop"] - hosts = ["houdini"] - label = "Validate USD ROP Default Prim" - actions = [SelectROPAction] - - def process(self, instance): - - rop_node = hou.node(instance.data["instance_node"]) - - default_prim = rop_node.evalParm("defaultprim") - if not default_prim: - self.log.debug( - "No default prim specified on ROP node: %s", rop_node.path() - ) - return - - lop_node: hou.LopNode = instance.data.get("output_node") - if not lop_node: - return - - above_break_layers = set(layer for layer in lop_node.layersAboveLayerBreak()) - stage = lop_node.stage() - layers = [ - layer for layer - in stage.GetLayerStack(includeSessionLayers=False) - if layer.identifier not in above_break_layers - ] - if not layers: - self.log.error("No USD layers found. This is likely a bug.") - return - - # TODO: This only would detect any local opinions on that prim and thus - # would fail to detect if a sublayer added on the stage root layer - # being exported would actually be generating the prim path. We - # should maybe consider that if this fails that we still check - # whether a sublayer doesn't create the default prim path. 
- for layer in layers: - if layer.GetPrimAtPath(default_prim): - break - else: - # No prim found at the given path on any of the generated layers - raise PublishValidationError( - "Default prim specified by USD ROP does not exist in " - f"stage: '{default_prim}'", - title="Default Prim", - description=self.get_description() - ) - - # Warn about any paths that are authored that are not a child - # of the default prim - outside_paths = set() - default_prim_path = f"/{default_prim.strip('/')}" - for layer in layers: - - def collect_outside_paths(path: Sdf.Path): - """Collect all paths that are no child of the default prim""" - - if not path.IsPrimPath(): - # Collect only prim paths - return - - # Ignore the HoudiniLayerInfo prim - if path.pathString == "/HoudiniLayerInfo": - return - - if not path.pathString.startswith(default_prim_path): - outside_paths.add(path) - - layer.Traverse("/", collect_outside_paths) - - if outside_paths: - self.log.warning( - "Found paths that are not within default primitive path '%s'. " - "When referencing the following paths by default will not be " - "loaded:", - default_prim - ) - for outside_path in sorted(outside_paths): - self.log.warning("Outside default prim: %s", outside_path) - - def get_description(self): - return inspect.cleandoc( - """### Default Prim not found - - The USD render ROP is currently configured to write the output - USD file with a default prim. However, the default prim is not - found in the USD stage. - - Make sure to double check the Default Prim setting on the USD - Render ROP for typos or make sure the hierarchy and opinions you - are creating exist in the default prim path. - - """ - ) diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_vdb_output_node.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_vdb_output_node.py deleted file mode 100644 index c4ed9d2fb8..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_vdb_output_node.py +++ /dev/null @@ -1,177 +0,0 @@ -# -*- coding: utf-8 -*- -import contextlib -import hou - -import pyblish.api -from ayon_core.pipeline import PublishXmlValidationError - -from ayon_houdini.api import plugin -from ayon_houdini.api.action import SelectInvalidAction - - -def group_consecutive_numbers(nums): - """ - Args: - nums (list): List of sorted integer numbers. - - Yields: - str: Group ranges as {start}-{end} if more than one number in the range - else it yields {end} - - """ - start = None - end = None - - def _result(a, b): - if a == b: - return "{}".format(a) - else: - return "{}-{}".format(a, b) - - for num in nums: - if start is None: - start = num - end = num - elif num == end + 1: - end = num - else: - yield _result(start, end) - start = num - end = num - if start is not None: - yield _result(start, end) - - -@contextlib.contextmanager -def update_mode_context(mode): - original = hou.updateModeSetting() - try: - hou.setUpdateMode(mode) - yield - finally: - hou.setUpdateMode(original) - - -def get_geometry_at_frame(sop_node, frame, force=True): - """Return geometry at frame but force a cooked value.""" - if not hasattr(sop_node, "geometry"): - return - with update_mode_context(hou.updateMode.AutoUpdate): - sop_node.cook(force=force, frame_range=(frame, frame)) - return sop_node.geometryAtFrame(frame) - - -class ValidateVDBOutputNode(plugin.HoudiniInstancePlugin): - """Validate that the node connected to the output node is of type VDB. 
- - All primitives of the output geometry must be VDBs, no other primitive - types are allowed. That means that regardless of the amount of VDBs in the - geometry it will have an equal amount of VDBs, points, primitives and - vertices since each VDB primitive is one point, one vertex and one VDB. - - This validation only checks the geometry on the first frame of the export - frame range for optimization purposes. - - A VDB is an inherited type of Prim, holds the following data: - - Primitives: 1 - - Points: 1 - - Vertices: 1 - - VDBs: 1 - - """ - - order = pyblish.api.ValidatorOrder + 0.1 - families = ["vdbcache"] - label = "Validate Output Node (VDB)" - actions = [SelectInvalidAction] - - def process(self, instance): - invalid_nodes, message = self.get_invalid_with_message(instance) - if invalid_nodes: - - # instance_node is str, but output_node is hou.Node so we convert - output = instance.data.get("output_node") - output_path = output.path() if output else None - - raise PublishXmlValidationError( - self, - "Invalid VDB content: {}".format(message), - formatting_data={ - "message": message, - "rop_path": instance.data.get("instance_node"), - "sop_path": output_path - } - ) - - @classmethod - def get_invalid_with_message(cls, instance): - - node = instance.data.get("output_node") - if node is None: - instance_node = instance.data.get("instance_node") - error = ( - "SOP path is not correctly set on " - "ROP node `{}`.".format(instance_node) - ) - return [hou.node(instance_node), error] - - frame = instance.data.get("frameStart", 0) - geometry = get_geometry_at_frame(node, frame) - if geometry is None: - # No geometry data on this node, maybe the node hasn't cooked? - error = ( - "SOP node `{}` has no geometry data. " - "Was it unable to cook?".format(node.path()) - ) - return [node, error] - - num_prims = geometry.intrinsicValue("primitivecount") - num_points = geometry.intrinsicValue("pointcount") - if num_prims == 0 and num_points == 0: - # Since we are only checking the first frame it doesn't mean there - # won't be VDB prims in a few frames. As such we'll assume for now - # the user knows what he or she is doing - cls.log.warning( - "SOP node `{}` has no primitives on start frame {}. " - "Validation is skipped and it is assumed elsewhere in the " - "frame range VDB prims and only VDB prims will exist." - "".format(node.path(), int(frame)) - ) - return [None, None] - - num_vdb_prims = geometry.countPrimType(hou.primType.VDB) - cls.log.debug("Detected {} VDB primitives".format(num_vdb_prims)) - if num_prims != num_vdb_prims: - # There's at least one primitive that is not a VDB. - # Search them and report them to the artist. - prims = geometry.prims() - invalid_prims = [prim for prim in prims - if not isinstance(prim, hou.VDB)] - if invalid_prims: - # Log prim numbers as consecutive ranges so logging isn't very - # slow for large number of primitives - error = ( - "Found non-VDB primitives for `{}`. " - "Primitive indices {} are not VDB primitives.".format( - node.path(), - ", ".join(group_consecutive_numbers( - prim.number() for prim in invalid_prims - )) - ) - ) - return [node, error] - - if num_points != num_vdb_prims: - # We have points unrelated to the VDB primitives. - error = ( - "The number of primitives and points do not match in '{}'. 
" - "This likely means you have unconnected points, which we do " - "not allow in the VDB output.".format(node.path())) - return [node, error] - - return [None, None] - - @classmethod - def get_invalid(cls, instance): - nodes, _ = cls.get_invalid_with_message(instance) - return nodes diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_workfile_paths.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_workfile_paths.py deleted file mode 100644 index a5a742069a..0000000000 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/validate_workfile_paths.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -import hou - -import pyblish.api -from ayon_core.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import RepairAction - -from ayon_houdini.api import plugin - - -class ValidateWorkfilePaths( - plugin.HoudiniInstancePlugin, OptionalPyblishPluginMixin): - """Validate workfile paths so they are absolute.""" - - order = pyblish.api.ValidatorOrder - families = ["workfile"] - label = "Validate Workfile Paths" - actions = [RepairAction] - optional = True - - node_types = ["file", "alembic"] - prohibited_vars = ["$HIP", "$JOB"] - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid() - self.log.debug( - "Checking node types: {}".format(", ".join(self.node_types))) - self.log.debug( - "Searching prohibited vars: {}".format( - ", ".join(self.prohibited_vars) - ) - ) - - if invalid: - all_container_vars = set() - for param in invalid: - value = param.unexpandedString() - contained_vars = [ - var for var in self.prohibited_vars - if var in value - ] - all_container_vars.update(contained_vars) - - self.log.error( - "Parm {} contains prohibited vars {}: {}".format( - param.path(), - ", ".join(contained_vars), - value) - ) - - message = ( - "Prohibited vars {} found in parameter values".format( - ", ".join(all_container_vars) - ) - ) - raise PublishValidationError(message, title=self.label) - - @classmethod - def get_invalid(cls): - invalid = [] - for param, _ in hou.fileReferences(): - # it might return None for some reason - if not param: - continue - # skip nodes we are not interested in - if param.node().type().name() not in cls.node_types: - continue - - if param.keyframes(): - # Calling `.unexpandedString()` below fails if param has - # keyframes - so for now we will skip those params. These are - # e.g. present in `filecache` nodes. 
- continue - - if any( - v for v in cls.prohibited_vars - if v in param.unexpandedString()): - invalid.append(param) - - return invalid - - @classmethod - def repair(cls, instance): - invalid = cls.get_invalid() - for param in invalid: - cls.log.info("Processing: {}".format(param.path())) - cls.log.info("Replacing {} for {}".format( - param.unexpandedString(), - hou.text.expandString(param.unexpandedString()))) - param.set(hou.text.expandString(param.unexpandedString())) diff --git a/server_addon/houdini/client/ayon_houdini/startup/MainMenuCommon.xml b/server_addon/houdini/client/ayon_houdini/startup/MainMenuCommon.xml deleted file mode 100644 index 5b383f0085..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/MainMenuCommon.xml +++ /dev/null @@ -1,109 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/server_addon/houdini/client/ayon_houdini/startup/OPmenu.xml b/server_addon/houdini/client/ayon_houdini/startup/OPmenu.xml deleted file mode 100644 index 5637d2cf6a..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/OPmenu.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - -
- - - opmenu.unsynchronize - - opmenu.vhda_create - - - - - - - - - - diff --git a/server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/ayon_uri_processor.py b/server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/ayon_uri_processor.py deleted file mode 100644 index 5148435ff0..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/ayon_uri_processor.py +++ /dev/null @@ -1,135 +0,0 @@ -import logging - -from husd.outputprocessor import OutputProcessor - -from ayon_core.pipeline import entity_uri -from ayon_core.pipeline.load.utils import get_representation_path_by_names - - -class AYONURIOutputProcessor(OutputProcessor): - """Process AYON Entity URIs into their full path equivalents.""" - - def __init__(self): - """ There is only one object of each output processor class that is - ever created in a Houdini session. Therefore, be very careful - about what data gets put in this object. - """ - self._save_cache = dict() - self._ref_cache = dict() - self._publish_context = None - self.log = logging.getLogger(__name__) - - @staticmethod - def name(): - return "ayon_uri_processor" - - @staticmethod - def displayName(): - return "AYON URI Output Processor" - - def processReferencePath(self, - asset_path, - referencing_layer_path, - asset_is_layer): - """ - Args: - asset_path (str): The path to the asset, as specified in Houdini. - If this asset is being written to disk, this will be the final - output of the `processSavePath()` calls on all output - processors. - referencing_layer_path (str): The absolute file path of the file - containing the reference to the asset. You can use this to make - the path pointer relative. - asset_is_layer (bool): A boolean value indicating whether this - asset is a USD layer file. If this is `False`, the asset is - something else (for example, a texture or volume file). - - Returns: - The refactored reference path. - - """ - - cache = self._ref_cache - - # Retrieve from cache if this query occurred before (optimization) - if asset_path in cache: - return cache[asset_path] - - uri_data = entity_uri.parse_ayon_entity_uri(asset_path) - if not uri_data: - cache[asset_path] = asset_path - return asset_path - - # Try and find it as an existing publish - query = { - "project_name": uri_data["project"], - "folder_path": uri_data["folder"], - "product_name": uri_data["product"], - "version_name": uri_data["version"], - "representation_name": uri_data["representation"], - } - path = get_representation_path_by_names( - **query - ) - if path: - self.log.debug( - "AYON URI Resolver - ref: %s -> %s", asset_path, path - ) - cache[asset_path] = path - return path - - elif self._publish_context: - # Query doesn't resolve to an existing version - likely - # points to a version defined in the current publish session - # as such we should resolve it using the current publish - # context if that was set prior to this publish - raise NotImplementedError("TODO") - - self.log.warning(f"Unable to resolve AYON URI: {asset_path}") - cache[asset_path] = asset_path - return asset_path - - def processSavePath(self, - asset_path, - referencing_layer_path, - asset_is_layer): - """ - Args: - asset_path (str): The path to the asset, as specified in Houdini. - If this asset is being written to disk, this will be the final - output of the `processSavePath()` calls on all output - processors. - referencing_layer_path (str): The absolute file path of the file - containing the reference to the asset. 
You can use this to make - the path pointer relative. - asset_is_layer (bool): A boolean value indicating whether this - asset is a USD layer file. If this is `False`, the asset is - something else (for example, a texture or volume file). - - Returns: - The refactored save path. - - """ - cache = self._save_cache - - # Retrieve from cache if this query occurred before (optimization) - if asset_path in cache: - return cache[asset_path] - - uri_data = entity_uri.parse_ayon_entity_uri(asset_path) - if not uri_data: - cache[asset_path] = asset_path - return asset_path - - relative_template = "{asset}_{product}_{version}_{representation}.usd" - # Set save output path to a relative path so other - # processors can potentially manage it easily? - path = relative_template.format(**uri_data) - - self.log.debug("AYON URI Resolver - save: %s -> %s", asset_path, path) - cache[asset_path] = path - return path - - -def usdOutputProcessor(): - return AYONURIOutputProcessor diff --git a/server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/remap_to_publish.py b/server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/remap_to_publish.py deleted file mode 100644 index 52e02f4160..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/husdplugins/outputprocessors/remap_to_publish.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import json - -import hou -from husd.outputprocessor import OutputProcessor - - -class AYONRemapPaths(OutputProcessor): - """Remap paths based on a mapping dict on rop node.""" - - def __init__(self): - self._mapping = dict() - - @staticmethod - def name(): - return "ayon_remap_paths" - - @staticmethod - def displayName(): - return "AYON Remap Paths" - - @staticmethod - def hidden(): - return True - - @staticmethod - def parameters(): - group = hou.ParmTemplateGroup() - - parm_template = hou.StringParmTemplate( - "ayon_remap_paths_remap_json", - "Remapping dict (json)", - default_value="{}", - num_components=1, - string_type=hou.stringParmType.Regular, - ) - group.append(parm_template) - - return group.asDialogScript() - - def beginSave(self, config_node, config_overrides, lop_node, t): - super(AYONRemapPaths, self).beginSave(config_node, - config_overrides, - lop_node, - t) - - value = config_node.evalParm("ayon_remap_paths_remap_json") - mapping = json.loads(value) - assert isinstance(self._mapping, dict) - - # Ensure all keys are normalized paths so the lookup can be done - # correctly - mapping = { - os.path.normpath(key): value for key, value in mapping.items() - } - self._mapping = mapping - - def processReferencePath(self, - asset_path, - referencing_layer_path, - asset_is_layer): - return self._mapping.get(os.path.normpath(asset_path), asset_path) - - -def usdOutputProcessor(): - return AYONRemapPaths diff --git a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/INDEX__SECTION b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/INDEX__SECTION deleted file mode 100644 index 5b5d5a1340..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/INDEX__SECTION +++ /dev/null @@ -1,13 +0,0 @@ -Operator: ayon::lop_import::1.0 -Label: AYON Load Asset -Path: oplib:/ayon::Lop/lop_import::1.0?ayon::Lop/lop_import::1.0 -Icon: opdef:/ayon::Lop/lop_import::1.0?IconImage -Table: Lop -License: -Extra: -User: -Inputs: 0 to 1 -Subnet: true -Python: false -Empty: false -Modified: Thu Jun 10 16:44:00 2024 diff --git 
a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/Sections.list b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/Sections.list deleted file mode 100644 index 0a1bfe4e69..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/Sections.list +++ /dev/null @@ -1,4 +0,0 @@ -"" -INDEX__SECTION INDEX_SECTION -houdini.hdalibrary houdini.hdalibrary -ayon_8_8Lop_1lop__import_8_81.0 ayon::Lop/lop_import::1.0 diff --git a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/AYON__icon.png b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/AYON__icon.png deleted file mode 100644 index ed13aeea527299092c400f18045b11b28c4d23ac..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16907 zcmcJ%cU03~vp4!pC;X?AbH3XHSf}tgpqyaFPLnASRr) zh7kn8!ACemM+^S>rxgO1R-G;p!=H zNa7zF8g`zx9u9bdgPSYwAAd>#Iix~u2E#0Nk~9rj3_mXwzI zufqurxBm~r4}1Q{aQq!Nf}7_ZH~hZ^_-{-6yZe7k1n~XuQ+#jW|F^+cq#{l@~H#(sD^DI+^iH!lxcJ56spSAxJ_Bo1-GUUu-cbH1kG;9}?M z3D8yImpOgr|J7^uf9h3tb9VE%0@TV*iU0K9Nr%{BaV|IP?MxkR5$^mawg2q6Wan!C zKY9+ibM`viUK3XbAkRN6{GD{|Kl_yU<<3a`mxav!*RFr>1@duRW$_n%!j zb@j_0ZnqtrL7%6QmKrZkQ(gA7f~>5>8OeWE02_w|a~*E`Y1|-yQD>yjoRyG1D{)%Z z`1Cofj4bx7f|&GathDrB%6~_L^Kk11;l}^p_(QGcJr8E+>0z}UJPB?det(<(QS~c! z?*DxI=c}{BUmfS={kzlH8@7iaDe>by+-`Z<+THrQI3V{=uczB>g3k>Py9@SUla=@{ z+`jDq;Pm4K$H&3m)y{+WwB%`NN!kBC$>)w8Aozb4PwHRHNgW=%|KRSt)c@fB{NDlp z<)ngn|Fi)M08EM0zikQl@^5Qo=L$@a2e37o3HM|m2o~x9cIJQV2m}eodd1f0Qe9w) zY`{E7J3=ENZbWUED_l=EcugZP)EpzU=}0>JoX(HShh4*&W{7zA&d=P=233njbCzS8 zEEf64U5wim4DR25Y3E(Px6odsh_n)XbGz+<2qX{%VbD){BglVPW;E|A5nI?KE78fae6K6;ZMTVBd%ZG z9Q<}vQbi%rW2@eSD@NF4+1u2Ip z33F zKX3r2L66rq^MoMe(Zjzm=v5+UlEDI#fIUmcsD8>87EqhvvM zYa1b(qg@w5&Pf-+?Hy^Y1SM#bskvzv1w|#Y$W<7VP#WS>Iox@Ue)fLmezrS|Pk#kb zc*tvH35|@ZP!DZK1QHTqCi!}G!9qf8kgt`UU)z19bv-3P>-xt#^l1=Sh9SV=`f+2}2aP}kdRt)HiU2n}GZL`?s zwRD&L@1c5BZ$;jIARoY{j>ad`p)VS>!c|-EVn*+ly3_|x3m;z(A+Km;;8Mf1%i7_p z3?fkh@F}V>Ou*3{Q9D}v8N->1!>+HOOEAiZCYUCV8>_?X18ZT8x1vXDXmu)FuI}JT-PT{2uXC^x@q^E(+5fzE5fuF|XDHy*l z>O^hhYJLJs_9t;DT?aQFN@gNYroQ>r-9e1Iuk%nH5&Pg1Sot2IrG6*$2k00sBlEGr zjJ6c$FN;=S7HNj>g@q_^(LF+<)kOJ`P7mD2R+ zDvoOcg!?B6)VNyvX%?eoKcuQ-Awg}>Wi#MdE2hf=wghmRh($x$N?jj6!5-0ZFQ!T4HZI}3FMXHK>(Wy`LXnTc3^_b8atw#aO78rMS z%JWp)=_|Bw_6QW;#A6y_x9k{AFC$v*i*Rgn!eLt!Xfwy1rOX(ygpGZS2y=f0uSBt!y zfWE})b{vRfH~RglvMy^O{doj|39Y%XDlZ+8RVoiqemDa@u5D(A zP&H4+jIvcK5*WSuYslJZT?nAVX11W+oK+5b9vPil%t0Nf0@!~D_Q(!R^@stjX6FgV zo#zK_PN2;|XIz>5M6KnrnHR0(A)hCa(9HLC+U-bGYVlAQX8$JGp=a{+m$$W93z6rU z2n=Y=<11X!53)*y0aHl8l^h4xO7?FII#DQy`mvHTCg%h!7SK_oYwGU4B##WsyiIw zV(@pbkCEaalNyrmxn&Nv_hqvtHcDw-yMQ>$F6D{@XJi9anX?2%5g?P=54-H5Gfoxi z1O&sEX!{@&k74f{yIAdItz+CxDdU}|pVBLZ{#K8SQ7eKjOJHd@*s%A`{G=oKUIOBl z0fj6Rj54LNf~q5<(=Kf>7tt7ID}a+|J`ZK}UcVpK@T^P$?+b>?48SfgXpeVL9iY(a zFV|#?P=*fP;H1bLa-cnq?P}Ri?F@h%PVfmBq$ zk2hA1Nd;sBz)nshT(yiMZiAm@X?;wkJ?^3w86(<;BZ`(|tdPE_X;$n5j|rU3m9(yL zAh^yus@G{X)QFI>kvo*>PQG-VVyf5UI^$@fA`skF4|b6yev}y)FKR_dG!x?rNiw6= zt7IX0MFL&6kvL>*Y>kkmL#tK!aN%n80gHlwMPHJy9c8%lv@u;0Q5Ucz3Ru!4NOnpQ z=#GDM#jF$nfF~v~s>gwrBF@vCi-=BB^W*NnpH(Ui5V{T!x+B@tXgyIOiN6j=DgcDs zF)LPNrc~RkqT7%_l+G13L~Ls#iuiypN-<7yTfjtzhoW^z^(O3fn*5P#fV<R7tT3=Pa;^(8V&y~B~3)KU2(S5`ldTNaeq{Djw{WL4V@gNg9u=l&UtrSF3 zNR^y;L(uG?L_`9?2fx5o{k%>}gVWzr+k_i$J8WyRfLc>9+zho40g2Y+Pr6sF@Q-Rp z%tOLYB;Pk$53DFqz5r=l!13H7HCwSj!x&zZKIg22-I21VSY%1u{H(r{G;fzZ(4%>s@r#t=&eQ4M)69uSvG+>(*`U^9H)sxciL&ob>Y^pg#3LFRpMeSEIb=wC 
[the diff header and opening lines of the DialogScript section were lost in extraction; the deleted hunk resumes mid-parameter]
= 0, 0, -1)" } - parmtag { "sidefx::usdpathtype" "prim" } - } - parm { - name "timeoffset1" - label "Time Offset (in Frames)" - type float - default { "0" } - range { -100 100 } - parmtag { "autoscope" "0000000000000000" } - parmtag { "script_callback_language" "python" } - } - parm { - name "timescale1" - label "Time Scale" - type float - default { "1" } - range { 0 5 } - parmtag { "autoscope" "0000000000000000" } - parmtag { "script_callback_language" "python" } - } - } - - } - - groupcollapsible { - name "info_display2" - label "Info Display" - - parm { - name "show_thumbnail" - label "Show Entity Thumbnail" - type toggle - joinnext - default { "0" } - parmtag { "script_callback" "hou.phm().on_thumbnail_show_changed(kwargs['node'])" } - parmtag { "script_callback_language" "python" } - } - parm { - name "thumbnail_size" - label "Size" - type float - joinnext - default { "2" } - hidewhen "{ show_thumbnail == 0 }" - range { 0 10 } - parmtag { "script_callback" "hou.phm().on_thumbnail_size_changed(kwargs['node'])" } - parmtag { "script_callback_language" "python" } - } - parm { - name "thumbnail_cache_dir" - label "Thumbnail Cache Dir" - type directory - invisible - default { "$JOB/.houdini_loader_thumbnails" } - parmtag { "script_callback_language" "python" } - } - parm { - name "thumbnail_padding" - label "Padding" - type float - invisible - default { "1" } - range { 0 10 } - parmtag { "script_callback_language" "python" } - } - parm { - name "thumbnail_offset" - label "Offset" - type vector2 - size 2 - default { "0" "0.35" } - hidewhen "{ show_thumbnail == 0 }" - range { -1 1 } - parmtag { "script_callback" "hou.phm().on_thumbnail_size_changed(kwargs['node'])" } - parmtag { "script_callback_language" "python" } - } - parm { - name "show_pipeline_parms" - label "Show Pipeline Parms" - type toggle - default { "0" } - parmtag { "script_callback" "hou.phm().on_thumbnail_show_changed(kwargs['node'])" } - parmtag { "script_callback_language" "python" } - } - } - - group { - name "ayon_folder0" - label "Ayon" - hidewhen "{ show_pipeline_parms == 0 }" - - parm { - name "name" - label "Name" - type label - default { "$OS" } - } - parm { - name "namespace" - label "Namespace" - type label - default { "`opfullpath(\".\")`" } - } - parm { - name "loader" - label "Loader" - type label - default { "LOPLoadAssetLoader" } - } - parm { - name "id" - label "ID" - type label - default { "pyblish.avalon.container" } - } - parm { - name "representation" - label "Representation ID" - type string - default { "" } - parmtag { "script_callback" "hou.phm().on_representation_id_changed(kwargs['node'])" } -
parmtag { "script_callback_language" "python" } - } - parm { - name "version_name" - label "Current Version Label" - type label - invisible - default { "" } - } - parm { - name "subset_name" - label "Subset (backwards compatibility)" - type label - invisible - default { "`chs(\"product_name\")`" } - } - } - -} diff --git a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/ExtraFileOptions b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/ExtraFileOptions deleted file mode 100644 index fb58b6889f..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/ExtraFileOptions +++ /dev/null @@ -1,122 +0,0 @@ -{ - "AYON_icon.png/Cursor":{ - "type":"intarray", - "value":[0,0] - }, - "AYON_icon.png/IsExpr":{ - "type":"bool", - "value":false - }, - "AYON_icon.png/IsPython":{ - "type":"bool", - "value":false - }, - "AYON_icon.png/IsScript":{ - "type":"bool", - "value":false - }, - "AYON_icon.png/Source":{ - "type":"string", - "value":"C:/Users/Maqina-05/Desktop/AYON_icon.png" - }, - "OnCreated/Cursor":{ - "type":"intarray", - "value":[5,29] - }, - "OnCreated/IsExpr":{ - "type":"bool", - "value":false - }, - "OnCreated/IsPython":{ - "type":"bool", - "value":true - }, - "OnCreated/IsScript":{ - "type":"bool", - "value":true - }, - "OnCreated/Source":{ - "type":"string", - "value":"" - }, - "OnDeleted/Cursor":{ - "type":"intarray", - "value":[1,15] - }, - "OnDeleted/IsExpr":{ - "type":"bool", - "value":false - }, - "OnDeleted/IsPython":{ - "type":"bool", - "value":true - }, - "OnDeleted/IsScript":{ - "type":"bool", - "value":true - }, - "OnDeleted/Source":{ - "type":"string", - "value":"" - }, - "OnLoaded/Cursor":{ - "type":"intarray", - "value":[9,76] - }, - "OnLoaded/IsExpr":{ - "type":"bool", - "value":false - }, - "OnLoaded/IsPython":{ - "type":"bool", - "value":true - }, - "OnLoaded/IsScript":{ - "type":"bool", - "value":true - }, - "OnLoaded/Source":{ - "type":"string", - "value":"" - }, - "OnNameChanged/Cursor":{ - "type":"intarray", - "value":[1,15] - }, - "OnNameChanged/IsExpr":{ - "type":"bool", - "value":false - }, - "OnNameChanged/IsPython":{ - "type":"bool", - "value":true - }, - "OnNameChanged/IsScript":{ - "type":"bool", - "value":true - }, - "OnNameChanged/Source":{ - "type":"string", - "value":"" - }, - "PythonModule/Cursor":{ - "type":"intarray", - "value":[10,1] - }, - "PythonModule/IsExpr":{ - "type":"bool", - "value":false - }, - "PythonModule/IsPython":{ - "type":"bool", - "value":true - }, - "PythonModule/IsScript":{ - "type":"bool", - "value":true - }, - "PythonModule/Source":{ - "type":"string", - "value":"" - } -} diff --git a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Help b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/Help deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/IconImage b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/IconImage deleted file mode 100644 index aa0d42021d18d060470b471f05f06031ac2a056d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6939 zcmbtZ33Qaz6@Ih-Gn1LgWHOmdCV?m*vSuSD5Eda6QH~lJ76Z}(5|k>DC5mV%sMw-K 
[rest of the 6939 bytes of base85-encoded PNG data omitted]
[shelf tool definition XML stripped in extraction (likely the HDA's Tools.shelf section); surviving values: LOP, $HDA_TABLE_AND_NAME, AYON]
diff --git a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/TypePropertiesOptions b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/TypePropertiesOptions deleted file mode 100644 index a6d52acf2a..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/ayon_8_8Lop_1lop__import_8_81.0/TypePropertiesOptions +++ /dev/null @@ -1,14 +0,0 @@ -CheckExternal := 1; -ContentsCompressionType := 1; -ForbidOutsideParms := 1; -GzipContents := 1; -LockContents := 1; -MakeDefault := 1; -ParmsFromVfl := 0; -PrefixDroppedParmLabel := 0; -PrefixDroppedParmName := 0; -SaveCachedCode := 0; -SaveIcon := 1; -SaveSpareParms := 0; -UnlockOnCreate := 0; -UseDSParms := 1;
diff --git a/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/houdini.hdalibrary b/server_addon/houdini/client/ayon_houdini/startup/otls/ayon_lop_import.hda/houdini.hdalibrary deleted file mode 100644 index e69de29bb2..0000000000
diff --git a/server_addon/houdini/client/ayon_houdini/startup/python2.7libs/pythonrc.py
b/server_addon/houdini/client/ayon_houdini/startup/python2.7libs/pythonrc.py deleted file mode 100644 index 40ff7fb758..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/python2.7libs/pythonrc.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from ayon_core.pipeline import install_host -from ayon_houdini.api import HoudiniHost - - -def main(): - print("Installing AYON ...") - install_host(HoudiniHost()) - - -main() diff --git a/server_addon/houdini/client/ayon_houdini/startup/python3.10libs/pythonrc.py b/server_addon/houdini/client/ayon_houdini/startup/python3.10libs/pythonrc.py deleted file mode 100644 index 40ff7fb758..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/python3.10libs/pythonrc.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from ayon_core.pipeline import install_host -from ayon_houdini.api import HoudiniHost - - -def main(): - print("Installing AYON ...") - install_host(HoudiniHost()) - - -main() diff --git a/server_addon/houdini/client/ayon_houdini/startup/python3.7libs/pythonrc.py b/server_addon/houdini/client/ayon_houdini/startup/python3.7libs/pythonrc.py deleted file mode 100644 index 40ff7fb758..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/python3.7libs/pythonrc.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from ayon_core.pipeline import install_host -from ayon_houdini.api import HoudiniHost - - -def main(): - print("Installing AYON ...") - install_host(HoudiniHost()) - - -main() diff --git a/server_addon/houdini/client/ayon_houdini/startup/python3.9libs/pythonrc.py b/server_addon/houdini/client/ayon_houdini/startup/python3.9libs/pythonrc.py deleted file mode 100644 index 40ff7fb758..0000000000 --- a/server_addon/houdini/client/ayon_houdini/startup/python3.9libs/pythonrc.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from ayon_core.pipeline import install_host -from ayon_houdini.api import HoudiniHost - - -def main(): - print("Installing AYON ...") - install_host(HoudiniHost()) - - -main() diff --git a/server_addon/houdini/client/ayon_houdini/version.py b/server_addon/houdini/client/ayon_houdini/version.py deleted file mode 100644 index 9a101eeacc..0000000000 --- a/server_addon/houdini/client/ayon_houdini/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'houdini' version.""" -__version__ = "0.3.9" diff --git a/server_addon/houdini/package.py b/server_addon/houdini/package.py deleted file mode 100644 index ce10ad33bb..0000000000 --- a/server_addon/houdini/package.py +++ /dev/null @@ -1,10 +0,0 @@ -name = "houdini" -title = "Houdini" -version = "0.3.9" - -client_dir = "ayon_houdini" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} diff --git a/server_addon/houdini/server/__init__.py b/server_addon/houdini/server/__init__.py deleted file mode 100644 index 8c1ffcb0b3..0000000000 --- a/server_addon/houdini/server/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Type - -from ayon_server.addons import BaseServerAddon - -from .settings import HoudiniSettings, DEFAULT_VALUES - - -class Houdini(BaseServerAddon): - settings_model: Type[HoudiniSettings] = HoudiniSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_VALUES) diff --git 
a/server_addon/houdini/server/settings/__init__.py b/server_addon/houdini/server/settings/__init__.py deleted file mode 100644 index 9fd2678925..0000000000 --- a/server_addon/houdini/server/settings/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .main import ( - HoudiniSettings, - DEFAULT_VALUES, -) - - -__all__ = ( - "HoudiniSettings", - "DEFAULT_VALUES", -) diff --git a/server_addon/houdini/server/settings/create.py b/server_addon/houdini/server/settings/create.py deleted file mode 100644 index ae01ab6642..0000000000 --- a/server_addon/houdini/server/settings/create.py +++ /dev/null @@ -1,187 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -# Creator Plugins -class CreatorModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - default_variants: list[str] = SettingsField( - title="Default Products", - default_factory=list, - ) - - -class CreateArnoldAssModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - default_variants: list[str] = SettingsField( - title="Default Products", - default_factory=list, - ) - ext: str = SettingsField(Title="Extension") - - -class CreateStaticMeshModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Products" - ) - static_mesh_prefix: str = SettingsField("S", title="Static Mesh Prefix") - collision_prefixes: list[str] = SettingsField( - default_factory=list, - title="Collision Prefixes" - ) - - -class CreateUSDRenderModel(CreatorModel): - default_renderer: str = SettingsField( - "Karma CPU", - title="Default Renderer", - description=( - "Specify either the Hydra renderer plug-in nice name, like " - "'Karma CPU', or the plug-in name, e.g. 
'BRAY_HdKarma'" - )) - - -class CreatePluginsModel(BaseSettingsModel): - CreateAlembicCamera: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Alembic Camera") - CreateArnoldAss: CreateArnoldAssModel = SettingsField( - default_factory=CreateArnoldAssModel, - title="Create Arnold Ass") - CreateArnoldRop: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Arnold ROP") - CreateCompositeSequence: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Composite (Image Sequence)") - CreateHDA: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Houdini Digital Asset") - CreateKarmaROP: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Karma ROP") - CreateMantraROP: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Mantra ROP") - CreateModel: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Model") - CreatePointCache: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create PointCache (Abc)") - CreateBGEO: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create PointCache (Bgeo)") - CreateRedshiftProxy: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Redshift Proxy") - CreateRedshiftROP: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Redshift ROP") - CreateReview: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create Review") - # "-" is not compatible in the new model - CreateStaticMesh: CreateStaticMeshModel = SettingsField( - default_factory=CreateStaticMeshModel, - title="Create Static Mesh") - CreateUSD: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create USD") - CreateUSDRender: CreateUSDRenderModel = SettingsField( - default_factory=CreateUSDRenderModel, - title="Create USD render") - CreateVDBCache: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create VDB Cache") - CreateVrayROP: CreatorModel = SettingsField( - default_factory=CreatorModel, - title="Create VRay ROP") - - -DEFAULT_HOUDINI_CREATE_SETTINGS = { - "CreateAlembicCamera": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateArnoldAss": { - "enabled": True, - "default_variants": ["Main"], - "ext": ".ass" - }, - "CreateArnoldRop": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateCompositeSequence": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateHDA": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateKarmaROP": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateMantraROP": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateModel": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreatePointCache": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateBGEO": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateRedshiftProxy": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateRedshiftROP": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateReview": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateStaticMesh": { - "enabled": True, - "default_variants": [ - "Main" - ], - "static_mesh_prefix": "S", - "collision_prefixes": [ - "UBX", - "UCP", - "USP", - "UCX" - ] - }, - "CreateUSD": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateUSDRender": { - "enabled": True, - "default_variants": 
["Main"], - "default_renderer": "Karma CPU" - }, - "CreateVDBCache": { - "enabled": True, - "default_variants": ["Main"] - }, - "CreateVrayROP": { - "enabled": True, - "default_variants": ["Main"] - }, -} diff --git a/server_addon/houdini/server/settings/general.py b/server_addon/houdini/server/settings/general.py deleted file mode 100644 index b71feae554..0000000000 --- a/server_addon/houdini/server/settings/general.py +++ /dev/null @@ -1,49 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class HoudiniVarModel(BaseSettingsModel): - _layout = "expanded" - var: str = SettingsField("", title="Var") - value: str = SettingsField("", title="Value") - is_directory: bool = SettingsField(False, title="Treat as directory") - - -class UpdateHoudiniVarcontextModel(BaseSettingsModel): - """Sync vars with context changes. - - If a value is treated as a directory on update - it will be ensured the folder exists. - """ - - enabled: bool = SettingsField(title="Enabled") - # TODO this was dynamic dictionary '{var: path}' - houdini_vars: list[HoudiniVarModel] = SettingsField( - default_factory=list, - title="Houdini Vars" - ) - - -class GeneralSettingsModel(BaseSettingsModel): - add_self_publish_button: bool = SettingsField( - False, - title="Add Self Publish Button" - ) - update_houdini_var_context: UpdateHoudiniVarcontextModel = SettingsField( - default_factory=UpdateHoudiniVarcontextModel, - title="Update Houdini Vars on context change" - ) - - -DEFAULT_GENERAL_SETTINGS = { - "add_self_publish_button": False, - "update_houdini_var_context": { - "enabled": True, - "houdini_vars": [ - { - "var": "JOB", - "value": "{root[work]}/{project[name]}/{hierarchy}/{asset}/work/{task[name]}", # noqa - "is_directory": True - } - ] - } -} diff --git a/server_addon/houdini/server/settings/imageio.py b/server_addon/houdini/server/settings/imageio.py deleted file mode 100644 index d77ff0751c..0000000000 --- a/server_addon/houdini/server/settings/imageio.py +++ /dev/null @@ -1,114 +0,0 @@ -from pydantic import validator -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.settings.validators import ensure_unique_names - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." 
- ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class WorkfileImageIOModel(BaseSettingsModel): - """Workfile settings help. - - Empty values will be skipped, allowing any existing env vars to - pass through as defined. - - Note: The render space in Houdini is - always set to the 'scene_linear' role.""" - - enabled: bool = SettingsField(False, title="Enabled") - default_display: str = SettingsField( - title="Default active displays", - description="It behaves like the 'OCIO_ACTIVE_DISPLAYS' env var," - " Colon-separated list of displays, e.g ACES:P3" - ) - default_view: str = SettingsField( - title="Default active views", - description="It behaves like the 'OCIO_ACTIVE_VIEWS' env var," - " Colon-separated list of views, e.g sRGB:DCDM" - ) - review_color_space: str = SettingsField( - title="Review colorspace", - description="It exposes OCIO Colorspace parameter in opengl nodes." - "if left empty, Ayon will figure out the default " - "colorspace using your default display and default view." - ) - - -class HoudiniImageIOModel(BaseSettingsModel): - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) - workfile: WorkfileImageIOModel = SettingsField( - default_factory=WorkfileImageIOModel, - title="Workfile" - ) - - -DEFAULT_IMAGEIO_SETTINGS = { - "activate_host_color_management": False, - "ocio_config": { - "override_global_config": False, - "filepath": [] - }, - "file_rules": { - "activate_host_rules": False, - "rules": [] - }, - "workfile": { - "enabled": False, - "default_display": "ACES", - "default_view": "sRGB", - "review_color_space": "" - } -} diff --git a/server_addon/houdini/server/settings/main.py b/server_addon/houdini/server/settings/main.py deleted file mode 100644 index 3acab0ce74..0000000000 --- a/server_addon/houdini/server/settings/main.py +++ /dev/null @@ -1,50 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField -from .general import ( - GeneralSettingsModel, - DEFAULT_GENERAL_SETTINGS -) -from .imageio import ( - HoudiniImageIOModel, - DEFAULT_IMAGEIO_SETTINGS -) -from .shelves import ShelvesModel -from .create import ( - CreatePluginsModel, - DEFAULT_HOUDINI_CREATE_SETTINGS -) -from .publish import ( - PublishPluginsModel, - DEFAULT_HOUDINI_PUBLISH_SETTINGS, -) - - -class HoudiniSettings(BaseSettingsModel): - general: GeneralSettingsModel = SettingsField( - default_factory=GeneralSettingsModel, - title="General" - ) - imageio: HoudiniImageIOModel = SettingsField( - default_factory=HoudiniImageIOModel, - title="Color Management (ImageIO)" - ) - shelves: list[ShelvesModel] = SettingsField( - default_factory=list, - title="Shelves Manager", - ) - create: CreatePluginsModel = SettingsField( - default_factory=CreatePluginsModel, - 
title="Creator Plugins", - ) - publish: PublishPluginsModel = SettingsField( - default_factory=PublishPluginsModel, - title="Publish Plugins", - ) - - -DEFAULT_VALUES = { - "general": DEFAULT_GENERAL_SETTINGS, - "imageio": DEFAULT_IMAGEIO_SETTINGS, - "shelves": [], - "create": DEFAULT_HOUDINI_CREATE_SETTINGS, - "publish": DEFAULT_HOUDINI_PUBLISH_SETTINGS -} diff --git a/server_addon/houdini/server/settings/publish.py b/server_addon/houdini/server/settings/publish.py deleted file mode 100644 index b21de39e93..0000000000 --- a/server_addon/houdini/server/settings/publish.py +++ /dev/null @@ -1,218 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField -) - - -# Publish Plugins -class CollectAssetHandlesModel(BaseSettingsModel): - """Collect Frame Range - Disable this if you want the publisher to - ignore start and end handles specified in the - asset data for publish instances - """ - use_asset_handles: bool = SettingsField( - title="Use asset handles") - - -class CollectChunkSizeModel(BaseSettingsModel): - """Collect Chunk Size.""" - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - chunk_size: int = SettingsField( - title="Frames Per Task") - - -class AOVFilterSubmodel(BaseSettingsModel): - """You should use the same host name you are using for Houdini.""" - host_name: str = SettingsField("", title="Houdini Host name") - value: list[str] = SettingsField( - default_factory=list, - title="AOV regex" - ) - - -class CollectLocalRenderInstancesModel(BaseSettingsModel): - - use_deadline_aov_filter: bool = SettingsField( - False, - title="Use Deadline AOV Filter" - ) - - aov_filter: AOVFilterSubmodel = SettingsField( - default_factory=AOVFilterSubmodel, - title="Reviewable products filter" - ) - - -def product_types_enum(): - return [ - {"value": "camera", "label": "Camera (Abc)"}, - {"value": "pointcache", "label": "PointCache (Abc)/PointCache (Bgeo)"}, - {"value": "review", "label": "Review"}, - {"value": "staticMesh", "label": "Static Mesh (FBX)"}, - {"value": "usd", "label": "USD (experimental)"}, - {"value": "vdbcache", "label": "VDB Cache"}, - {"value": "imagesequence", "label": "Composite (Image Sequence)"}, - {"value": "ass", "label": "Arnold ASS"}, - {"value": "arnold_rop", "label": "Arnold ROP"}, - {"value": "mantra_rop", "label": "Mantra ROP"}, - {"value": "redshiftproxy", "label": "Redshift Proxy"}, - {"value": "redshift_rop", "label": "Redshift ROP"}, - {"value": "karma_rop", "label": "Karma ROP"}, - {"value": "vray_rop", "label": "VRay ROP"}, - {"value": "model", "label": "Model"}, - ] - - -class CollectFilesForCleaningUpModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - - families: list[str] = SettingsField( - default_factory=list, - enum_resolver=product_types_enum, - conditionalEnum=True, - title="Product Types" - ) - - -class ValidateWorkfilePathsModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - node_types: list[str] = SettingsField( - default_factory=list, - title="Node Types" - ) - prohibited_vars: list[str] = SettingsField( - default_factory=list, - title="Prohibited Variables" - ) - - -class BasicEnabledStatesModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - - -class 
PublishPluginsModel(BaseSettingsModel): - CollectAssetHandles: CollectAssetHandlesModel = SettingsField( - default_factory=CollectAssetHandlesModel, - title="Collect Asset Handles", - section="Collectors" - ) - CollectChunkSize: CollectChunkSizeModel = SettingsField( - default_factory=CollectChunkSizeModel, - title="Collect Chunk Size" - ) - CollectFilesForCleaningUp: CollectFilesForCleaningUpModel = SettingsField( - default_factory=CollectFilesForCleaningUpModel, - title="Collect Files For Cleaning Up." - ) - CollectLocalRenderInstances: CollectLocalRenderInstancesModel = SettingsField( - default_factory=CollectLocalRenderInstancesModel, - title="Collect Local Render Instances" - ) - ValidateInstanceInContextHoudini: BasicEnabledStatesModel = SettingsField( - default_factory=BasicEnabledStatesModel, - title="Validate Instance is in same Context", - section="Validators") - ValidateMeshIsStatic: BasicEnabledStatesModel = SettingsField( - default_factory=BasicEnabledStatesModel, - title="Validate Mesh is Static") - ValidateReviewColorspace: BasicEnabledStatesModel = SettingsField( - default_factory=BasicEnabledStatesModel, - title="Validate Review Colorspace") - ValidateSubsetName: BasicEnabledStatesModel = SettingsField( - default_factory=BasicEnabledStatesModel, - title="Validate Subset Name") - ValidateUnrealStaticMeshName: BasicEnabledStatesModel = SettingsField( - default_factory=BasicEnabledStatesModel, - title="Validate Unreal Static Mesh Name") - ValidateWorkfilePaths: ValidateWorkfilePathsModel = SettingsField( - default_factory=ValidateWorkfilePathsModel, - title="Validate workfile paths settings") - ValidateUSDRenderProductPaths: BasicEnabledStatesModel = SettingsField( - default_factory=BasicEnabledStatesModel, - title="Validate USD Render Product Paths") - ExtractActiveViewThumbnail: BasicEnabledStatesModel = SettingsField( - default_factory=BasicEnabledStatesModel, - title="Extract Active View Thumbnail", - section="Extractors" - ) - - -DEFAULT_HOUDINI_PUBLISH_SETTINGS = { - "CollectAssetHandles": { - "use_asset_handles": True - }, - "CollectChunkSize": { - "enabled": True, - "optional": True, - "chunk_size": 999999 - }, - "CollectFilesForCleaningUp": { - "enabled": False, - "optional": True, - "active": True, - "families" : [] - }, - "CollectLocalRenderInstances": { - "use_deadline_aov_filter": False, - "aov_filter": { - "host_name": "houdini", - "value": [ - ".*([Bb]eauty).*" - ] - } - }, - "ValidateInstanceInContextHoudini": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateMeshIsStatic": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateReviewColorspace": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateSubsetName": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateUnrealStaticMeshName": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateWorkfilePaths": { - "enabled": True, - "optional": True, - "node_types": [ - "file", - "alembic" - ], - "prohibited_vars": [ - "$HIP", - "$JOB" - ] - }, - "ValidateUSDRenderProductPaths": { - "enabled": False, - "optional": True, - "active": True - }, - "ExtractActiveViewThumbnail": { - "enabled": True, - "optional": False, - "active": True - } -} diff --git a/server_addon/houdini/server/settings/shelves.py b/server_addon/houdini/server/settings/shelves.py deleted file mode 100644 index f6d7f1d06c..0000000000 --- a/server_addon/houdini/server/settings/shelves.py +++ /dev/null @@ -1,67 +0,0 @@ -from ayon_server.settings 
import ( - BaseSettingsModel, - SettingsField, - MultiplatformPathModel -) - - -class ShelfToolsModel(BaseSettingsModel): - """Name and Script Path are mandatory.""" - label: str = SettingsField(title="Name") - script: str = SettingsField(title="Script Path") - icon: str = SettingsField("", title="Icon Path") - help: str = SettingsField("", title="Help text") - - -class ShelfDefinitionModel(BaseSettingsModel): - _layout = "expanded" - shelf_name: str = SettingsField(title="Shelf name") - tools_list: list[ShelfToolsModel] = SettingsField( - default_factory=list, - title="Shelf Tools" - ) - - -class AddShelfFileModel(BaseSettingsModel): - shelf_set_source_path: MultiplatformPathModel = SettingsField( - default_factory=MultiplatformPathModel, - title="Shelf Set Path" - ) - - -class AddSetAndDefinitionsModel(BaseSettingsModel): - shelf_set_name: str = SettingsField("", title="Shelf Set Name") - shelf_definition: list[ShelfDefinitionModel] = SettingsField( - default_factory=list, - title="Shelves Definitions" - ) - - -def shelves_enum_options(): - return [ - { - "value": "add_shelf_file", - "label": "Add a .shelf file" - }, - { - "value": "add_set_and_definitions", - "label": "Add Shelf Set Name and Shelves Definitions" - } - ] - - -class ShelvesModel(BaseSettingsModel): - options: str = SettingsField( - title="Options", - description="Switch between shelves manager options", - enum_resolver=shelves_enum_options, - conditionalEnum=True - ) - add_shelf_file: AddShelfFileModel = SettingsField( - title="Add a .shelf file", - default_factory=AddShelfFileModel - ) - add_set_and_definitions: AddSetAndDefinitionsModel = SettingsField( - title="Add Shelf Set Name and Shelves Definitions", - default_factory=AddSetAndDefinitionsModel - ) From 58693b4fa5c3f118312f9ecd81780ab33f38422f Mon Sep 17 00:00:00 2001 From: Jakub Trllo <43494761+iLLiCiTiT@users.noreply.github.com> Date: Wed, 3 Jul 2024 12:45:30 +0200 Subject: [PATCH 07/10] removed blender addon --- .../blender/client/ayon_blender/__init__.py | 13 - .../blender/client/ayon_blender/addon.py | 71 --- .../client/ayon_blender/api/__init__.py | 72 --- .../blender/client/ayon_blender/api/action.py | 47 -- .../client/ayon_blender/api/capture.py | 282 --------- .../client/ayon_blender/api/colorspace.py | 51 -- .../ayon_blender/api/icons/pyblish-32x32.png | Bin 632 -> 0 bytes .../blender/client/ayon_blender/api/lib.py | 426 ------------- .../blender/client/ayon_blender/api/ops.py | 456 -------------- .../client/ayon_blender/api/pipeline.py | 574 ------------------ .../blender/client/ayon_blender/api/plugin.py | 542 ----------------- .../client/ayon_blender/api/render_lib.py | 364 ----------- .../blender/client/ayon_blender/api/workio.py | 89 --- .../blender_addon/startup/init.py | 10 - .../hooks/pre_add_run_python_script_arg.py | 54 -- .../ayon_blender/hooks/pre_pyside_install.py | 295 --------- .../ayon_blender/hooks/pre_windows_console.py | 29 - .../plugins/create/convert_legacy.py | 78 --- .../plugins/create/create_action.py | 41 -- .../plugins/create/create_animation.py | 32 - .../plugins/create/create_blendScene.py | 34 -- .../plugins/create/create_camera.py | 42 -- .../plugins/create/create_layout.py | 32 - .../plugins/create/create_model.py | 31 - .../plugins/create/create_pointcache.py | 29 - .../plugins/create/create_render.py | 45 -- .../plugins/create/create_review.py | 27 - .../ayon_blender/plugins/create/create_rig.py | 31 - .../ayon_blender/plugins/create/create_usd.py | 30 - .../plugins/create/create_workfile.py | 132 ---- 
.../plugins/load/import_workfile.py | 84 --- .../ayon_blender/plugins/load/load_action.py | 293 --------- .../plugins/load/load_animation.py | 70 --- .../ayon_blender/plugins/load/load_audio.py | 227 ------- .../ayon_blender/plugins/load/load_blend.py | 286 --------- .../plugins/load/load_blendscene.py | 235 ------- .../ayon_blender/plugins/load/load_cache.py | 284 --------- .../plugins/load/load_camera_abc.py | 238 -------- .../plugins/load/load_camera_fbx.py | 224 ------- .../ayon_blender/plugins/load/load_fbx.py | 279 --------- .../plugins/load/load_layout_json.py | 297 --------- .../ayon_blender/plugins/load/load_look.py | 223 ------- .../plugins/publish/collect_current_file.py | 15 - .../publish/collect_file_dependencies.py | 36 -- .../plugins/publish/collect_instance.py | 44 -- .../plugins/publish/collect_render.py | 120 ---- .../plugins/publish/collect_review.py | 68 --- .../plugins/publish/collect_workfile.py | 38 -- .../plugins/publish/extract_abc.py | 94 --- .../plugins/publish/extract_abc_animation.py | 80 --- .../plugins/publish/extract_blend.py | 76 --- .../publish/extract_blend_animation.py | 67 -- .../plugins/publish/extract_camera_abc.py | 70 --- .../plugins/publish/extract_camera_fbx.py | 85 --- .../plugins/publish/extract_fbx.py | 93 --- .../plugins/publish/extract_fbx_animation.py | 227 ------- .../plugins/publish/extract_layout.py | 279 --------- .../plugins/publish/extract_playblast.py | 129 ---- .../plugins/publish/extract_thumbnail.py | 107 ---- .../plugins/publish/extract_usd.py | 90 --- .../publish/increment_workfile_version.py | 33 - .../plugins/publish/integrate_animation.py | 54 -- .../publish/validate_camera_zero_keyframe.py | 57 -- .../publish/validate_deadline_publish.py | 61 -- .../plugins/publish/validate_file_saved.py | 66 -- .../publish/validate_instance_empty.py | 20 - .../plugins/publish/validate_mesh_has_uv.py | 65 -- .../validate_mesh_no_negative_scale.py | 44 -- .../plugins/publish/validate_model_uv_map1.py | 93 --- .../publish/validate_no_colons_in_name.py | 53 -- .../plugins/publish/validate_object_mode.py | 44 -- .../publish/validate_render_camera_is_set.py | 29 - .../publish/validate_transform_zero.py | 94 --- .../blender/client/ayon_blender/version.py | 3 - server_addon/blender/package.py | 11 - server_addon/blender/server/__init__.py | 13 - .../blender/server/settings/__init__.py | 10 - .../blender/server/settings/imageio.py | 63 -- server_addon/blender/server/settings/main.py | 70 --- .../server/settings/publish_plugins.py | 361 ----------- .../server/settings/render_settings.py | 158 ----- 81 files changed, 9819 deletions(-) delete mode 100644 server_addon/blender/client/ayon_blender/__init__.py delete mode 100644 server_addon/blender/client/ayon_blender/addon.py delete mode 100644 server_addon/blender/client/ayon_blender/api/__init__.py delete mode 100644 server_addon/blender/client/ayon_blender/api/action.py delete mode 100644 server_addon/blender/client/ayon_blender/api/capture.py delete mode 100644 server_addon/blender/client/ayon_blender/api/colorspace.py delete mode 100644 server_addon/blender/client/ayon_blender/api/icons/pyblish-32x32.png delete mode 100644 server_addon/blender/client/ayon_blender/api/lib.py delete mode 100644 server_addon/blender/client/ayon_blender/api/ops.py delete mode 100644 server_addon/blender/client/ayon_blender/api/pipeline.py delete mode 100644 server_addon/blender/client/ayon_blender/api/plugin.py delete mode 100644 server_addon/blender/client/ayon_blender/api/render_lib.py delete mode 100644 
server_addon/blender/client/ayon_blender/api/workio.py delete mode 100644 server_addon/blender/client/ayon_blender/blender_addon/startup/init.py delete mode 100644 server_addon/blender/client/ayon_blender/hooks/pre_add_run_python_script_arg.py delete mode 100644 server_addon/blender/client/ayon_blender/hooks/pre_pyside_install.py delete mode 100644 server_addon/blender/client/ayon_blender/hooks/pre_windows_console.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/convert_legacy.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_action.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_animation.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_blendScene.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_camera.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_layout.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_model.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_pointcache.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_render.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_review.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_rig.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_usd.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/create/create_workfile.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/import_workfile.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_action.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_animation.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_audio.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_blend.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_blendscene.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_cache.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_camera_abc.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_camera_fbx.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_fbx.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_layout_json.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/load/load_look.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/collect_current_file.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/collect_file_dependencies.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/collect_instance.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/collect_render.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/collect_review.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/collect_workfile.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_abc.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_abc_animation.py delete mode 100644 
server_addon/blender/client/ayon_blender/plugins/publish/extract_blend.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_blend_animation.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_abc.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_fbx.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx_animation.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_layout.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_playblast.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_thumbnail.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/extract_usd.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/increment_workfile_version.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/integrate_animation.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_camera_zero_keyframe.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_deadline_publish.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_file_saved.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_instance_empty.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_has_uv.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_no_negative_scale.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_model_uv_map1.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_no_colons_in_name.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_object_mode.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_render_camera_is_set.py delete mode 100644 server_addon/blender/client/ayon_blender/plugins/publish/validate_transform_zero.py delete mode 100644 server_addon/blender/client/ayon_blender/version.py delete mode 100644 server_addon/blender/package.py delete mode 100644 server_addon/blender/server/__init__.py delete mode 100644 server_addon/blender/server/settings/__init__.py delete mode 100644 server_addon/blender/server/settings/imageio.py delete mode 100644 server_addon/blender/server/settings/main.py delete mode 100644 server_addon/blender/server/settings/publish_plugins.py delete mode 100644 server_addon/blender/server/settings/render_settings.py diff --git a/server_addon/blender/client/ayon_blender/__init__.py b/server_addon/blender/client/ayon_blender/__init__.py deleted file mode 100644 index 221dcd4138..0000000000 --- a/server_addon/blender/client/ayon_blender/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - BlenderAddon, - BLENDER_ADDON_ROOT, -) - - -__all__ = ( - "__version__", - - "BlenderAddon", - "BLENDER_ADDON_ROOT", -) diff --git a/server_addon/blender/client/ayon_blender/addon.py b/server_addon/blender/client/ayon_blender/addon.py deleted file mode 100644 index 9711580369..0000000000 --- a/server_addon/blender/client/ayon_blender/addon.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -from 
ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ - -BLENDER_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__)) - - -class BlenderAddon(AYONAddon, IHostAddon): - name = "blender" - version = __version__ - host_name = "blender" - - def add_implementation_envs(self, env, _app): - """Modify environments to contain all required for implementation.""" - # Prepare path to implementation script - implementation_user_script_path = os.path.join( - BLENDER_ADDON_ROOT, - "blender_addon" - ) - - # Add blender implementation script path to PYTHONPATH - python_path = env.get("PYTHONPATH") or "" - python_path_parts = [ - path - for path in python_path.split(os.pathsep) - if path - ] - python_path_parts.insert(0, implementation_user_script_path) - env["PYTHONPATH"] = os.pathsep.join(python_path_parts) - - # Modify Blender user scripts path - previous_user_scripts = set() - # Implementation path is added to set for easier paths check inside - # loops - will be removed at the end - previous_user_scripts.add(implementation_user_script_path) - - ayon_blender_user_scripts = ( - env.get("AYON_BLENDER_USER_SCRIPTS") or "" - ) - for path in ayon_blender_user_scripts.split(os.pathsep): - if path: - previous_user_scripts.add(os.path.normpath(path)) - - blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or "" - for path in blender_user_scripts.split(os.pathsep): - if path: - previous_user_scripts.add(os.path.normpath(path)) - - # Remove implementation path from user script paths as is set to - # `BLENDER_USER_SCRIPTS` - previous_user_scripts.remove(implementation_user_script_path) - env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path - - # Set custom user scripts env - env["AYON_BLENDER_USER_SCRIPTS"] = os.pathsep.join( - previous_user_scripts - ) - - # Define Qt binding if not defined - env.pop("QT_PREFERRED_BINDING", None) - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(BLENDER_ADDON_ROOT, "hooks") - ] - - def get_workfile_extensions(self): - return [".blend"] diff --git a/server_addon/blender/client/ayon_blender/api/__init__.py b/server_addon/blender/client/ayon_blender/api/__init__.py deleted file mode 100644 index da2a6fbbbb..0000000000 --- a/server_addon/blender/client/ayon_blender/api/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Public API - -Anything that isn't defined here is INTERNAL and unreliable for external use. 
- -""" - -from .pipeline import ( - install, - uninstall, - ls, - publish, - containerise, - BlenderHost, -) - -from .plugin import ( - Creator, -) - -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root, -) - -from .lib import ( - lsattr, - lsattrs, - read, - maintained_selection, - maintained_time, - get_selection, - # unique_name, -) - -from .capture import capture - -from .render_lib import prepare_rendering - - -__all__ = [ - "install", - "uninstall", - "ls", - "publish", - "containerise", - "BlenderHost", - - "Creator", - - # Workfiles API - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - - # Utility functions - "maintained_selection", - "maintained_time", - "lsattr", - "lsattrs", - "read", - "get_selection", - "capture", - # "unique_name", - "prepare_rendering", -] diff --git a/server_addon/blender/client/ayon_blender/api/action.py b/server_addon/blender/client/ayon_blender/api/action.py deleted file mode 100644 index 865c2443e0..0000000000 --- a/server_addon/blender/client/ayon_blender/api/action.py +++ /dev/null @@ -1,47 +0,0 @@ -import bpy - -import pyblish.api - -from ayon_core.pipeline.publish import get_errored_instances_from_context - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid objects in Blender when a publish plug-in failed.""" - label = "Select Invalid" - on = "failed" - icon = "search" - - def process(self, context, plugin): - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes...") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning( - "Failed plug-in doesn't have any selectable objects." - ) - - bpy.ops.object.select_all(action='DESELECT') - - # Make sure every node is only processed once - invalid = list(set(invalid)) - if not invalid: - self.log.info("No invalid nodes found.") - return - - invalid_names = [obj.name for obj in invalid] - self.log.info( - "Selecting invalid objects: %s", ", ".join(invalid_names) - ) - # Select the objects and also make the last one the active object. - for obj in invalid: - obj.select_set(True) - - bpy.context.view_layer.objects.active = invalid[-1] diff --git a/server_addon/blender/client/ayon_blender/api/capture.py b/server_addon/blender/client/ayon_blender/api/capture.py deleted file mode 100644 index e5e6041563..0000000000 --- a/server_addon/blender/client/ayon_blender/api/capture.py +++ /dev/null @@ -1,282 +0,0 @@ - -"""Blender Capture -Playblasting with independent viewport, camera and display options -""" -import contextlib -import bpy - -from .lib import maintained_time -from .plugin import deselect_all, create_blender_context - - -def capture( - camera=None, - width=None, - height=None, - filename=None, - start_frame=None, - end_frame=None, - step_frame=None, - sound=None, - isolate=None, - maintain_aspect_ratio=True, - overwrite=False, - image_settings=None, - display_options=None -): - """Playblast in an independent windows - Arguments: - camera (str, optional): Name of camera, defaults to "Camera" - width (int, optional): Width of output in pixels - height (int, optional): Height of output in pixels - filename (str, optional): Name of output file path. 
Defaults to current - render output path. - start_frame (int, optional): Defaults to current start frame. - end_frame (int, optional): Defaults to current end frame. - step_frame (int, optional): Defaults to 1. - sound (str, optional): Specify the sound node to be used during - playblast. When None (default) no sound will be used. - isolate (list): List of nodes to isolate upon capturing - maintain_aspect_ratio (bool, optional): Modify height in order to - maintain aspect ratio. - overwrite (bool, optional): Whether or not to overwrite if file - already exists. If disabled and file exists and error will be - raised. - image_settings (dict, optional): Supplied image settings for render, - using `ImageSettings` - display_options (dict, optional): Supplied display options for render - """ - - scene = bpy.context.scene - camera = camera or "Camera" - - # Ensure camera exists. - if camera not in scene.objects and camera != "AUTO": - raise RuntimeError("Camera does not exist: {0}".format(camera)) - - # Ensure resolution. - if width and height: - maintain_aspect_ratio = False - width = width or scene.render.resolution_x - height = height or scene.render.resolution_y - if maintain_aspect_ratio: - ratio = scene.render.resolution_x / scene.render.resolution_y - height = round(width / ratio) - - # Get frame range. - if start_frame is None: - start_frame = scene.frame_start - if end_frame is None: - end_frame = scene.frame_end - if step_frame is None: - step_frame = 1 - frame_range = (start_frame, end_frame, step_frame) - - if filename is None: - filename = scene.render.filepath - - render_options = { - "filepath": "{}.".format(filename.rstrip(".")), - "resolution_x": width, - "resolution_y": height, - "use_overwrite": overwrite, - } - - with _independent_window() as window: - - applied_view(window, camera, isolate, options=display_options) - - with contextlib.ExitStack() as stack: - stack.enter_context(maintain_camera(window, camera)) - stack.enter_context(applied_frame_range(window, *frame_range)) - stack.enter_context(applied_render_options(window, render_options)) - stack.enter_context(applied_image_settings(window, image_settings)) - stack.enter_context(maintained_time()) - - bpy.ops.render.opengl( - animation=True, - render_keyed_only=False, - sequencer=False, - write_still=False, - view_context=True - ) - - return filename - - -ImageSettings = { - "file_format": "FFMPEG", - "color_mode": "RGB", - "ffmpeg": { - "format": "QUICKTIME", - "use_autosplit": False, - "codec": "H264", - "constant_rate_factor": "MEDIUM", - "gopsize": 18, - "use_max_b_frames": False, - }, -} - - -def isolate_objects(window, objects): - """Isolate selection""" - deselect_all() - - for obj in objects: - obj.select_set(True) - - context = create_blender_context(selected=objects, window=window) - - with bpy.context.temp_override(**context): - bpy.ops.view3d.view_axis(type="FRONT") - bpy.ops.view3d.localview() - - deselect_all() - - -def _apply_options(entity, options): - for option, value in options.items(): - if isinstance(value, dict): - _apply_options(getattr(entity, option), value) - else: - setattr(entity, option, value) - - -def applied_view(window, camera, isolate=None, options=None): - """Apply view options to window.""" - area = window.screen.areas[0] - space = area.spaces[0] - - area.ui_type = "VIEW_3D" - - types = {"MESH", "GPENCIL"} - objects = [obj for obj in window.scene.objects if obj.type in types] - - if camera == "AUTO": - space.region_3d.view_perspective = "ORTHO" - isolate_objects(window, isolate or 
objects) - else: - isolate_objects(window, isolate or objects) - space.camera = window.scene.objects.get(camera) - space.region_3d.view_perspective = "CAMERA" - - if isinstance(options, dict): - _apply_options(space, options) - else: - space.shading.type = "SOLID" - space.shading.color_type = "MATERIAL" - space.show_gizmo = False - space.overlay.show_overlays = False - - -@contextlib.contextmanager -def applied_frame_range(window, start, end, step): - """Context manager for setting frame range.""" - # Store current frame range - current_frame_start = window.scene.frame_start - current_frame_end = window.scene.frame_end - current_frame_step = window.scene.frame_step - # Apply frame range - window.scene.frame_start = start - window.scene.frame_end = end - window.scene.frame_step = step - try: - yield - finally: - # Restore frame range - window.scene.frame_start = current_frame_start - window.scene.frame_end = current_frame_end - window.scene.frame_step = current_frame_step - - -@contextlib.contextmanager -def applied_render_options(window, options): - """Context manager for setting render options.""" - render = window.scene.render - - # Store current settings - original = {} - for opt in options.copy(): - try: - original[opt] = getattr(render, opt) - except ValueError: - options.pop(opt) - - # Apply settings - _apply_options(render, options) - - try: - yield - finally: - # Restore previous settings - _apply_options(render, original) - - -@contextlib.contextmanager -def applied_image_settings(window, options): - """Context manager to override image settings.""" - - options = options or ImageSettings.copy() - ffmpeg = options.pop("ffmpeg", {}) - render = window.scene.render - - # Store current image settings - original = {} - for opt in options.copy(): - try: - original[opt] = getattr(render.image_settings, opt) - except ValueError: - options.pop(opt) - - # Store current ffmpeg settings - original_ffmpeg = {} - for opt in ffmpeg.copy(): - try: - original_ffmpeg[opt] = getattr(render.ffmpeg, opt) - except ValueError: - ffmpeg.pop(opt) - - # Apply image settings - for opt, value in options.items(): - setattr(render.image_settings, opt, value) - - # Apply ffmpeg settings - for opt, value in ffmpeg.items(): - setattr(render.ffmpeg, opt, value) - - try: - yield - finally: - # Restore previous settings - for opt, value in original.items(): - setattr(render.image_settings, opt, value) - for opt, value in original_ffmpeg.items(): - setattr(render.ffmpeg, opt, value) - - -@contextlib.contextmanager -def maintain_camera(window, camera): - """Context manager to override camera.""" - current_camera = window.scene.camera - if camera in window.scene.objects: - window.scene.camera = window.scene.objects.get(camera) - try: - yield - finally: - window.scene.camera = current_camera - - -@contextlib.contextmanager -def _independent_window(): - """Create capture-window context.""" - context = create_blender_context() - current_windows = set(bpy.context.window_manager.windows) - with bpy.context.temp_override(**context): - bpy.ops.wm.window_new() - window = list( - set(bpy.context.window_manager.windows) - current_windows)[0] - context["window"] = window - try: - yield window - finally: - bpy.ops.wm.window_close() diff --git a/server_addon/blender/client/ayon_blender/api/colorspace.py b/server_addon/blender/client/ayon_blender/api/colorspace.py deleted file mode 100644 index 4521612b7d..0000000000 --- a/server_addon/blender/client/ayon_blender/api/colorspace.py +++ /dev/null @@ -1,51 +0,0 @@ -import attr - -import 
bpy
-
-
-@attr.s
-class LayerMetadata(object):
-    """Data class for Render Layer metadata."""
-    frameStart = attr.ib()
-    frameEnd = attr.ib()
-
-
-@attr.s
-class RenderProduct(object):
-    """
-    Getting Colorspace as Specific Render Product Parameter for submitting
-    publish job.
-    """
-    colorspace = attr.ib()  # colorspace
-    view = attr.ib()  # OCIO view transform
-    productName = attr.ib(default=None)
-
-
-class ARenderProduct(object):
-    def __init__(self):
-        """Constructor."""
-        # Initialize
-        self.layer_data = self._get_layer_data()
-        self.layer_data.products = self.get_render_products()
-
-    def _get_layer_data(self):
-        scene = bpy.context.scene
-
-        return LayerMetadata(
-            frameStart=int(scene.frame_start),
-            frameEnd=int(scene.frame_end),
-        )
-
-    def get_render_products(self):
-        """To be implemented by renderer class.
-        This should return a list of RenderProducts.
-        Returns:
-            list: List of RenderProduct
-        """
-        return [
-            RenderProduct(
-                colorspace="sRGB",
-                view="ACES 1.0",
-                productName=""
-            )
-        ]
diff --git a/server_addon/blender/client/ayon_blender/api/icons/pyblish-32x32.png b/server_addon/blender/client/ayon_blender/api/icons/pyblish-32x32.png
deleted file mode 100644
index b34e397e0bd502eb336f994f014a518198d93599..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/server_addon/blender/client/ayon_blender/api/lib.py b/server_addon/blender/client/ayon_blender/api/lib.py
deleted file mode 100644
--- a/server_addon/blender/client/ayon_blender/api/lib.py
+++ /dev/null
-import contextlib
-from typing import Dict, List, Union
-
-import bpy
-
-from ayon_core.lib import Logger
-
-from . import pipeline
-
-log = Logger.get_logger(__name__)
-
-
-def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
-    r"""Write `data` to `node` as userDefined attributes
-
-    Arguments:
-        node: Node to write the data to
-        data: Dictionary of key/value pairs
-
-    Example:
-        >>> import bpy
-        >>> def compute():
-        ...     return 6
-        ...
-        >>> bpy.ops.mesh.primitive_cube_add()
-        >>> cube = bpy.context.view_layer.objects.active
-        >>> imprint(cube, {
-        ...     "regularString": "myFamily",
-        ...     "computedValue": lambda: compute()
-        ... })
-        ...
-        >>> cube['avalon']['computedValue']
-        6
-    """
-
-    imprint_data = dict()
-
-    for key, value in data.items():
-        if value is None:
-            continue
-
-        if callable(value):
-            # Support values evaluated at imprint
-            value = value()
-
-        if not isinstance(value, (int, float, bool, str, list, dict)):
-            raise TypeError(f"Unsupported type: {type(value)}")
-
-        imprint_data[key] = value
-
-    pipeline.metadata_update(node, imprint_data)
-
-
-def lsattr(attr: str,
-           value: Union[str, int, bool, List, Dict, None] = None) -> List:
-    r"""Return nodes matching `attr` and `value`
-
-    Arguments:
-        attr: Name of Blender property
-        value: Value of attribute. If none
-            is provided, return all nodes with this attribute.
-
-    Example:
-        >>> lsattr("id", "myId")
-        ... [bpy.data.objects["myNode"]]
-        >>> lsattr("id")
-        ... [bpy.data.objects["myNode"], bpy.data.objects["myOtherNode"]]
-
-    Returns:
-        list
-    """
-
-    return lsattrs({attr: value})
-
-
-def lsattrs(attrs: Dict) -> List:
-    r"""Return nodes with the given attribute(s).
-
-    Arguments:
-        attrs: Name and value pairs of expected matches
-
-    Example:
-        >>> lsattrs({"age": 5})  # Return nodes with an `age` of 5
-        # Return nodes with both `age` and `color` of 5 and blue
-        >>> lsattrs({"age": 5, "color": "blue"})
-
-    Returns:
-        list
-    """
-
-    # For now return all objects, not filtered by scene/collection/view_layer.
- matches = set() - for coll in dir(bpy.data): - if not isinstance( - getattr(bpy.data, coll), - bpy.types.bpy_prop_collection, - ): - continue - for node in getattr(bpy.data, coll): - for attr, value in attrs.items(): - avalon_prop = node.get(pipeline.AVALON_PROPERTY) - if not avalon_prop: - continue - if (avalon_prop.get(attr) - and (value is None or avalon_prop.get(attr) == value)): - matches.add(node) - return list(matches) - - -def read(node: bpy.types.bpy_struct_meta_idprop): - """Return user-defined attributes from `node`""" - - data = dict(node.get(pipeline.AVALON_PROPERTY, {})) - - # Ignore hidden/internal data - data = { - key: value - for key, value in data.items() if not key.startswith("_") - } - - return data - - -def get_selected_collections(): - """ - Returns a list of the currently selected collections in the outliner. - - Raises: - RuntimeError: If the outliner cannot be found in the main Blender - window. - - Returns: - list: A list of `bpy.types.Collection` objects that are currently - selected in the outliner. - """ - window = bpy.context.window or bpy.context.window_manager.windows[0] - - try: - area = next( - area for area in window.screen.areas - if area.type == 'OUTLINER') - region = next( - region for region in area.regions - if region.type == 'WINDOW') - except StopIteration as e: - raise RuntimeError("Could not find outliner. An outliner space " - "must be in the main Blender window.") from e - - with bpy.context.temp_override( - window=window, - area=area, - region=region, - screen=window.screen - ): - ids = bpy.context.selected_ids - - return [id for id in ids if isinstance(id, bpy.types.Collection)] - - -def get_selection(include_collections: bool = False) -> List[bpy.types.Object]: - """ - Returns a list of selected objects in the current Blender scene. - - Args: - include_collections (bool, optional): Whether to include selected - collections in the result. Defaults to False. - - Returns: - List[bpy.types.Object]: A list of selected objects. - """ - selection = [obj for obj in bpy.context.scene.objects if obj.select_get()] - - if include_collections: - selection.extend(get_selected_collections()) - - return selection - - -@contextlib.contextmanager -def maintained_selection(): - r"""Maintain selection during context - - Example: - >>> with maintained_selection(): - ... # Modify selection - ... bpy.ops.object.select_all(action='DESELECT') - >>> # Selection restored - """ - - previous_selection = get_selection() - previous_active = bpy.context.view_layer.objects.active - try: - yield - finally: - # Clear the selection - for node in get_selection(): - node.select_set(state=False) - if previous_selection: - for node in previous_selection: - try: - node.select_set(state=True) - except ReferenceError: - # This could happen if a selected node was deleted during - # the context. - log.exception("Failed to reselect") - continue - try: - bpy.context.view_layer.objects.active = previous_active - except ReferenceError: - # This could happen if the active node was deleted during the - # context. - log.exception("Failed to set active object.") - - -@contextlib.contextmanager -def maintained_time(): - """Maintain current frame during context.""" - current_time = bpy.context.scene.frame_current - try: - yield - finally: - bpy.context.scene.frame_current = current_time - - -def get_all_parents(obj): - """Get all recursive parents of object. - - Arguments: - obj (bpy.types.Object): Object to get all parents for. 
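Example (hypothetical scene where 'hand' is parented under 'arm', 'arm'
        under 'torso', and 'torso' under 'root'; the nearest parent comes
        first):

        >>> child = bpy.data.objects["hand"]
        >>> [parent.name for parent in get_all_parents(child)]
        ['arm', 'torso', 'root']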
-
-    Returns:
-        List[bpy.types.Object]: All parents of object
-
-    """
-    result = []
-    while True:
-        obj = obj.parent
-        if not obj:
-            break
-        result.append(obj)
-    return result
-
-
-def get_highest_root(objects):
-    """Get the highest object (the least parents) among the objects.
-
-    If multiple objects have the same amount of parents (or no parents) the
-    first object found in the input iterable will be returned.
-
-    Note that this will *not* return objects outside of the input list; as
-    such it will not return the root node of a child node. It is purely
-    intended to find the highest object among a list of objects. To instead
-    get the root from one object use, e.g. `get_all_parents(obj)[-1]`.
-
-    Arguments:
-        objects (List[bpy.types.Object]): Objects to find the highest root in.
-
-    Returns:
-        Optional[bpy.types.Object]: First highest root found or None if no
-            `bpy.types.Object` found in input list.
-
-    """
-    included_objects = {obj.name_full for obj in objects}
-    num_parents_to_obj = {}
-    for obj in objects:
-        if isinstance(obj, bpy.types.Object):
-            parents = get_all_parents(obj)
-            # included parents
-            parents = [parent for parent in parents if
-                       parent.name_full in included_objects]
-            if not parents:
-                # A node without parents must be a highest root
-                return obj
-
-            num_parents_to_obj.setdefault(len(parents), obj)
-
-    if not num_parents_to_obj:
-        return None
-
-    minimum_parent = min(num_parents_to_obj)
-    return num_parents_to_obj[minimum_parent]
diff --git a/server_addon/blender/client/ayon_blender/api/ops.py b/server_addon/blender/client/ayon_blender/api/ops.py
deleted file mode 100644
index 7cf9600067..0000000000
--- a/server_addon/blender/client/ayon_blender/api/ops.py
+++ /dev/null
@@ -1,456 +0,0 @@
-"""Blender operators and menus for use with Avalon."""
-
-import os
-import sys
-import platform
-import time
-import traceback
-import collections
-from pathlib import Path
-from types import ModuleType
-from typing import Dict, List, Optional, Union
-
-from qtpy import QtWidgets, QtCore
-
-import bpy
-import bpy.utils.previews
-
-from ayon_core import style
-from ayon_core.pipeline import get_current_folder_path, get_current_task_name
-from ayon_core.tools.utils import host_tools
-
-from .workio import OpenFileCacher
-from . import pipeline
-
-PREVIEW_COLLECTIONS: Dict = dict()
-
-# This seems like a good value to keep the Qt app responsive without slowing
-# down Blender. At least on macOS, the interface of Blender gets very laggy if
-# you make it smaller.
-TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1 - - -def execute_function_in_main_thread(f): - """Decorator to move a function call into main thread items""" - def wrapper(*args, **kwargs): - mti = MainThreadItem(f, *args, **kwargs) - execute_in_main_thread(mti) - return wrapper - - -class BlenderApplication(QtWidgets.QApplication): - _instance = None - blender_windows = {} - - def __init__(self, *args, **kwargs): - super(BlenderApplication, self).__init__(*args, **kwargs) - self.setQuitOnLastWindowClosed(False) - - self.setStyleSheet(style.load_stylesheet()) - self.lastWindowClosed.connect(self.__class__.reset) - - @classmethod - def get_app(cls): - if cls._instance is None: - cls._instance = cls(sys.argv) - return cls._instance - - @classmethod - def reset(cls): - cls._instance = None - - @classmethod - def store_window(cls, identifier, window): - current_window = cls.get_window(identifier) - cls.blender_windows[identifier] = window - if current_window: - current_window.close() - # current_window.deleteLater() - - @classmethod - def get_window(cls, identifier): - return cls.blender_windows.get(identifier) - - -class MainThreadItem: - """Structure to store information about callback in main thread. - - Item should be used to execute callback in main thread which may be needed - for execution of Qt objects. - - Item store callback (callable variable), arguments and keyword arguments - for the callback. Item hold information about it's process. - """ - not_set = object() - sleep_time = 0.1 - - def __init__(self, callback, *args, **kwargs): - self.done = False - self.exception = self.not_set - self.result = self.not_set - self.callback = callback - self.args = args - self.kwargs = kwargs - - def execute(self): - """Execute callback and store its result. - - Method must be called from main thread. Item is marked as `done` - when callback execution finished. Store output of callback of exception - information when callback raises one. - """ - print("Executing process in main thread") - if self.done: - print("- item is already processed") - return - - callback = self.callback - args = self.args - kwargs = self.kwargs - print("Running callback: {}".format(str(callback))) - try: - result = callback(*args, **kwargs) - self.result = result - - except Exception: - self.exception = sys.exc_info() - - finally: - print("Done") - self.done = True - - def wait(self): - """Wait for result from main thread. - - This method stops current thread until callback is executed. - - Returns: - object: Output of callback. May be any type or object. - - Raises: - Exception: Reraise any exception that happened during callback - execution. - """ - while not self.done: - print(self.done) - time.sleep(self.sleep_time) - - if self.exception is self.not_set: - return self.result - raise self.exception - - -class GlobalClass: - app = None - main_thread_callbacks = collections.deque() - is_windows = platform.system().lower() == "windows" - - -def execute_in_main_thread(main_thead_item): - print("execute_in_main_thread") - GlobalClass.main_thread_callbacks.append(main_thead_item) - - -def _process_app_events() -> Optional[float]: - """Process the events of the Qt app if the window is still visible. - - If the app has any top level windows and at least one of them is visible - return the time after which this function should be run again. Else return - None, so the function is not run again and will be unregistered. 
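For illustration, this is how the module registers the callback further
    below, and how the return value drives rescheduling (a float re-runs it
    after that many seconds, None unregisters it):

        >>> bpy.app.timers.register(_process_app_events, persistent=True)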
- """ - while GlobalClass.main_thread_callbacks: - main_thread_item = GlobalClass.main_thread_callbacks.popleft() - main_thread_item.execute() - if main_thread_item.exception is not MainThreadItem.not_set: - _clc, val, tb = main_thread_item.exception - msg = str(val) - detail = "\n".join(traceback.format_exception(_clc, val, tb)) - dialog = QtWidgets.QMessageBox( - QtWidgets.QMessageBox.Warning, - "Error", - msg) - dialog.setMinimumWidth(500) - dialog.setDetailedText(detail) - dialog.exec_() - - # Refresh Manager - if GlobalClass.app: - manager = GlobalClass.app.get_window("WM_OT_avalon_manager") - if manager: - manager.refresh() - - if not GlobalClass.is_windows: - if OpenFileCacher.opening_file: - return TIMER_INTERVAL - - app = GlobalClass.app - if app._instance: - app.processEvents() - return TIMER_INTERVAL - return TIMER_INTERVAL - - -class LaunchQtApp(bpy.types.Operator): - """A Base class for operators to launch a Qt app.""" - - _app: QtWidgets.QApplication - _window = Union[QtWidgets.QDialog, ModuleType] - _tool_name: str = None - _init_args: Optional[List] = list() - _init_kwargs: Optional[Dict] = dict() - bl_idname: str = None - - def __init__(self): - if self.bl_idname is None: - raise NotImplementedError("Attribute `bl_idname` must be set!") - print(f"Initialising {self.bl_idname}...") - self._app = BlenderApplication.get_app() - GlobalClass.app = self._app - - if not bpy.app.timers.is_registered(_process_app_events): - bpy.app.timers.register( - _process_app_events, - persistent=True - ) - - def execute(self, context): - """Execute the operator. - - The child class must implement `execute()` where it only has to set - `self._window` to the desired Qt window and then simply run - `return super().execute(context)`. - `self._window` is expected to have a `show` method. - If the `show` method requires arguments, you can set `self._show_args` - and `self._show_kwargs`. `args` should be a list, `kwargs` a - dictionary. - """ - - if self._tool_name is None: - if self._window is None: - raise AttributeError("`self._window` is not set.") - - else: - window = self._app.get_window(self.bl_idname) - if window is None: - window = host_tools.get_tool_by_name(self._tool_name) - self._app.store_window(self.bl_idname, window) - self._window = window - - if not isinstance(self._window, (QtWidgets.QWidget, ModuleType)): - raise AttributeError( - "`window` should be a `QWidget or module`. Got: {}".format( - str(type(window)) - ) - ) - - self.before_window_show() - - def pull_to_front(window): - """Pull window forward to screen. - - If Window is minimized this will un-minimize, then it can be raised - and activated to the front. 
- """ - window.setWindowState( - (window.windowState() & ~QtCore.Qt.WindowMinimized) | - QtCore.Qt.WindowActive - ) - window.raise_() - window.activateWindow() - - if isinstance(self._window, ModuleType): - self._window.show() - pull_to_front(self._window) - - # Pull window to the front - window = None - if hasattr(self._window, "window"): - window = self._window.window - elif hasattr(self._window, "_window"): - window = self._window.window - - if window: - self._app.store_window(self.bl_idname, window) - - else: - origin_flags = self._window.windowFlags() - on_top_flags = origin_flags | QtCore.Qt.WindowStaysOnTopHint - self._window.setWindowFlags(on_top_flags) - self._window.show() - pull_to_front(self._window) - - # if on_top_flags != origin_flags: - # self._window.setWindowFlags(origin_flags) - # self._window.show() - - return {'FINISHED'} - - def before_window_show(self): - return - - -class LaunchCreator(LaunchQtApp): - """Launch Avalon Creator.""" - - bl_idname = "wm.avalon_creator" - bl_label = "Create..." - _tool_name = "creator" - - def before_window_show(self): - self._window.refresh() - - def execute(self, context): - host_tools.show_publisher(tab="create") - return {"FINISHED"} - - -class LaunchLoader(LaunchQtApp): - """Launch AYON Loader.""" - - bl_idname = "wm.avalon_loader" - bl_label = "Load..." - _tool_name = "loader" - - -class LaunchPublisher(LaunchQtApp): - """Launch Avalon Publisher.""" - - bl_idname = "wm.avalon_publisher" - bl_label = "Publish..." - - def execute(self, context): - host_tools.show_publisher(tab="publish") - return {"FINISHED"} - - -class LaunchManager(LaunchQtApp): - """Launch Avalon Manager.""" - - bl_idname = "wm.avalon_manager" - bl_label = "Manage..." - _tool_name = "sceneinventory" - - -class LaunchLibrary(LaunchQtApp): - """Launch Library Loader.""" - - bl_idname = "wm.library_loader" - bl_label = "Library..." - _tool_name = "libraryloader" - - -class LaunchWorkFiles(LaunchQtApp): - """Launch Avalon Work Files.""" - - bl_idname = "wm.avalon_workfiles" - bl_label = "Work Files..." 
- _tool_name = "workfiles" - - def execute(self, context): - return super().execute(context) - - -class SetFrameRange(bpy.types.Operator): - bl_idname = "wm.ayon_set_frame_range" - bl_label = "Set Frame Range" - - def execute(self, context): - data = pipeline.get_folder_attributes() - pipeline.set_frame_range(data) - return {"FINISHED"} - - -class SetResolution(bpy.types.Operator): - bl_idname = "wm.ayon_set_resolution" - bl_label = "Set Resolution" - - def execute(self, context): - data = pipeline.get_folder_attributes() - pipeline.set_resolution(data) - return {"FINISHED"} - - -class TOPBAR_MT_avalon(bpy.types.Menu): - """Avalon menu.""" - - bl_idname = "TOPBAR_MT_avalon" - bl_label = os.environ.get("AYON_MENU_LABEL") - - def draw(self, context): - """Draw the menu in the UI.""" - - layout = self.layout - - pcoll = PREVIEW_COLLECTIONS.get("avalon") - if pcoll: - pyblish_menu_icon = pcoll["pyblish_menu_icon"] - pyblish_menu_icon_id = pyblish_menu_icon.icon_id - else: - pyblish_menu_icon_id = 0 - - folder_path = get_current_folder_path() - task_name = get_current_task_name() - context_label = f"{folder_path}, {task_name}" - context_label_item = layout.row() - context_label_item.operator( - LaunchWorkFiles.bl_idname, text=context_label - ) - context_label_item.enabled = False - layout.separator() - layout.operator(LaunchCreator.bl_idname, text="Create...") - layout.operator(LaunchLoader.bl_idname, text="Load...") - layout.operator( - LaunchPublisher.bl_idname, - text="Publish...", - icon_value=pyblish_menu_icon_id, - ) - layout.operator(LaunchManager.bl_idname, text="Manage...") - layout.operator(LaunchLibrary.bl_idname, text="Library...") - layout.separator() - layout.operator(SetFrameRange.bl_idname, text="Set Frame Range") - layout.operator(SetResolution.bl_idname, text="Set Resolution") - layout.separator() - layout.operator(LaunchWorkFiles.bl_idname, text="Work Files...") - - -def draw_avalon_menu(self, context): - """Draw the Avalon menu in the top bar.""" - - self.layout.menu(TOPBAR_MT_avalon.bl_idname) - - -classes = [ - LaunchCreator, - LaunchLoader, - LaunchPublisher, - LaunchManager, - LaunchLibrary, - LaunchWorkFiles, - SetFrameRange, - SetResolution, - TOPBAR_MT_avalon, -] - - -def register(): - "Register the operators and menu." 
- - pcoll = bpy.utils.previews.new() - pyblish_icon_file = Path(__file__).parent / "icons" / "pyblish-32x32.png" - pcoll.load("pyblish_menu_icon", str(pyblish_icon_file.absolute()), 'IMAGE') - PREVIEW_COLLECTIONS["avalon"] = pcoll - - BlenderApplication.get_app() - for cls in classes: - bpy.utils.register_class(cls) - bpy.types.TOPBAR_MT_editor_menus.append(draw_avalon_menu) - - -def unregister(): - """Unregister the operators and menu.""" - - pcoll = PREVIEW_COLLECTIONS.pop("avalon") - bpy.utils.previews.remove(pcoll) - bpy.types.TOPBAR_MT_editor_menus.remove(draw_avalon_menu) - for cls in reversed(classes): - bpy.utils.unregister_class(cls) diff --git a/server_addon/blender/client/ayon_blender/api/pipeline.py b/server_addon/blender/client/ayon_blender/api/pipeline.py deleted file mode 100644 index d2ff129a48..0000000000 --- a/server_addon/blender/client/ayon_blender/api/pipeline.py +++ /dev/null @@ -1,574 +0,0 @@ -import os -import sys -import traceback -from typing import Callable, Dict, Iterator, List, Optional - -import bpy - -import pyblish.api -import ayon_api - -from ayon_core.host import ( - HostBase, - IWorkfileHost, - IPublishHost, - ILoadHost -) -from ayon_core.pipeline import ( - schema, - get_current_project_name, - get_current_folder_path, - register_loader_plugin_path, - register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, - AVALON_CONTAINER_ID, - AYON_CONTAINER_ID, -) -from ayon_core.lib import ( - Logger, - register_event_callback, - emit_event -) -from ayon_core.settings import get_project_settings -from ayon_blender import BLENDER_ADDON_ROOT - -from . import lib -from . import ops - -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root, -) - -PLUGINS_DIR = os.path.join(BLENDER_ADDON_ROOT, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - -ORIGINAL_EXCEPTHOOK = sys.excepthook - -AVALON_INSTANCES = "AVALON_INSTANCES" -AVALON_CONTAINERS = "AVALON_CONTAINERS" -AVALON_PROPERTY = 'avalon' -IS_HEADLESS = bpy.app.background - -log = Logger.get_logger(__name__) - - -class BlenderHost(HostBase, IWorkfileHost, IPublishHost, ILoadHost): - name = "blender" - - def install(self): - """Override install method from HostBase. - Install Blender host functionality.""" - install() - - def get_containers(self) -> Iterator: - """List containers from active Blender scene.""" - return ls() - - def get_workfile_extensions(self) -> List[str]: - """Override get_workfile_extensions method from IWorkfileHost. - Get workfile possible extensions. - - Returns: - List[str]: Workfile extensions. - """ - return file_extensions() - - def save_workfile(self, dst_path: str = None): - """Override save_workfile method from IWorkfileHost. - Save currently opened workfile. - - Args: - dst_path (str): Where the current scene should be saved. Or use - current path if `None` is passed. - """ - save_file(dst_path if dst_path else bpy.data.filepath) - - def open_workfile(self, filepath: str): - """Override open_workfile method from IWorkfileHost. - Open workfile at specified filepath in the host. - - Args: - filepath (str): Path to workfile. - """ - open_file(filepath) - - def get_current_workfile(self) -> str: - """Override get_current_workfile method from IWorkfileHost. - Retrieve currently opened workfile path. - - Returns: - str: Path to currently opened workfile. 
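Example (the returned workfile path is hypothetical):

            >>> host = BlenderHost()
            >>> host.get_current_workfile()
            '/projects/demo/work/shot010/shot010_anim_v001.blend'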
- """ - return current_file() - - def workfile_has_unsaved_changes(self) -> bool: - """Override wokfile_has_unsaved_changes method from IWorkfileHost. - Returns True if opened workfile has no unsaved changes. - - Returns: - bool: True if scene is saved and False if it has unsaved - modifications. - """ - return has_unsaved_changes() - - def work_root(self, session) -> str: - """Override work_root method from IWorkfileHost. - Modify workdir per host. - - Args: - session (dict): Session context data. - - Returns: - str: Path to new workdir. - """ - return work_root(session) - - def get_context_data(self) -> dict: - """Override abstract method from IPublishHost. - Get global data related to creation-publishing from workfile. - - Returns: - dict: Context data stored using 'update_context_data'. - """ - property = bpy.context.scene.get(AVALON_PROPERTY) - if property: - return property.to_dict() - return {} - - def update_context_data(self, data: dict, changes: dict): - """Override abstract method from IPublishHost. - Store global context data to workfile. - - Args: - data (dict): New data as are. - changes (dict): Only data that has been changed. Each value has - tuple with '(, )' value. - """ - bpy.context.scene[AVALON_PROPERTY] = data - - -def pype_excepthook_handler(*args): - traceback.print_exception(*args) - - -def install(): - """Install Blender configuration for Avalon.""" - sys.excepthook = pype_excepthook_handler - - pyblish.api.register_host("blender") - pyblish.api.register_plugin_path(str(PUBLISH_PATH)) - - register_loader_plugin_path(str(LOAD_PATH)) - register_creator_plugin_path(str(CREATE_PATH)) - - lib.append_user_scripts() - lib.set_app_templates_path() - - register_event_callback("new", on_new) - register_event_callback("open", on_open) - - _register_callbacks() - _register_events() - - if not IS_HEADLESS: - ops.register() - - -def uninstall(): - """Uninstall Blender configuration for Avalon.""" - sys.excepthook = ORIGINAL_EXCEPTHOOK - - pyblish.api.deregister_host("blender") - pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) - - deregister_loader_plugin_path(str(LOAD_PATH)) - deregister_creator_plugin_path(str(CREATE_PATH)) - - if not IS_HEADLESS: - ops.unregister() - - -def show_message(title, message): - from ayon_core.tools.utils import show_message_dialog - from .ops import BlenderApplication - - BlenderApplication.get_app() - - show_message_dialog( - title=title, - message=message, - level="warning") - - -def message_window(title, message): - from .ops import ( - MainThreadItem, - execute_in_main_thread, - _process_app_events - ) - - mti = MainThreadItem(show_message, title, message) - execute_in_main_thread(mti) - _process_app_events() - - -def get_folder_attributes(): - project_name = get_current_project_name() - folder_path = get_current_folder_path() - folder_entity = ayon_api.get_folder_by_path(project_name, folder_path) - - return folder_entity["attrib"] - - -def set_frame_range(data): - scene = bpy.context.scene - - # Default scene settings - frameStart = scene.frame_start - frameEnd = scene.frame_end - fps = scene.render.fps / scene.render.fps_base - - if not data: - return - - if data.get("frameStart"): - frameStart = data.get("frameStart") - if data.get("frameEnd"): - frameEnd = data.get("frameEnd") - if data.get("fps"): - fps = data.get("fps") - - scene.frame_start = frameStart - scene.frame_end = frameEnd - scene.render.fps = round(fps) - scene.render.fps_base = round(fps) / fps - - -def set_resolution(data): - scene = bpy.context.scene - - # Default 
scene settings - resolution_x = scene.render.resolution_x - resolution_y = scene.render.resolution_y - - if not data: - return - - if data.get("resolutionWidth"): - resolution_x = data.get("resolutionWidth") - if data.get("resolutionHeight"): - resolution_y = data.get("resolutionHeight") - - scene.render.resolution_x = resolution_x - scene.render.resolution_y = resolution_y - - -def on_new(): - project = os.environ.get("AYON_PROJECT_NAME") - settings = get_project_settings(project).get("blender") - - set_resolution_startup = settings.get("set_resolution_startup") - set_frames_startup = settings.get("set_frames_startup") - - data = get_folder_attributes() - - if set_resolution_startup: - set_resolution(data) - if set_frames_startup: - set_frame_range(data) - - unit_scale_settings = settings.get("unit_scale_settings") - unit_scale_enabled = unit_scale_settings.get("enabled") - if unit_scale_enabled: - unit_scale = unit_scale_settings.get("base_file_unit_scale") - bpy.context.scene.unit_settings.scale_length = unit_scale - - -def on_open(): - project = os.environ.get("AYON_PROJECT_NAME") - settings = get_project_settings(project).get("blender") - - set_resolution_startup = settings.get("set_resolution_startup") - set_frames_startup = settings.get("set_frames_startup") - - data = get_folder_attributes() - - if set_resolution_startup: - set_resolution(data) - if set_frames_startup: - set_frame_range(data) - - unit_scale_settings = settings.get("unit_scale_settings") - unit_scale_enabled = unit_scale_settings.get("enabled") - apply_on_opening = unit_scale_settings.get("apply_on_opening") - if unit_scale_enabled and apply_on_opening: - unit_scale = unit_scale_settings.get("base_file_unit_scale") - prev_unit_scale = bpy.context.scene.unit_settings.scale_length - - if unit_scale != prev_unit_scale: - bpy.context.scene.unit_settings.scale_length = unit_scale - - message_window( - "Base file unit scale changed", - "Base file unit scale changed to match the project settings.") - - -@bpy.app.handlers.persistent -def _on_save_pre(*args): - emit_event("before.save") - - -@bpy.app.handlers.persistent -def _on_save_post(*args): - emit_event("save") - - -@bpy.app.handlers.persistent -def _on_load_post(*args): - # Detect new file or opening an existing file - if bpy.data.filepath: - # Likely this was an open operation since it has a filepath - emit_event("open") - else: - emit_event("new") - - ops.OpenFileCacher.post_load() - - -def _register_callbacks(): - """Register callbacks for certain events.""" - def _remove_handler(handlers: List, callback: Callable): - """Remove the callback from the given handler list.""" - - try: - handlers.remove(callback) - except ValueError: - pass - - # TODO (jasper): implement on_init callback? - - # Be sure to remove existig ones first. - _remove_handler(bpy.app.handlers.save_pre, _on_save_pre) - _remove_handler(bpy.app.handlers.save_post, _on_save_post) - _remove_handler(bpy.app.handlers.load_post, _on_load_post) - - bpy.app.handlers.save_pre.append(_on_save_pre) - bpy.app.handlers.save_post.append(_on_save_post) - bpy.app.handlers.load_post.append(_on_load_post) - - log.info("Installed event handler _on_save_pre...") - log.info("Installed event handler _on_save_post...") - log.info("Installed event handler _on_load_post...") - - -def _on_task_changed(): - """Callback for when the task in the context is changed.""" - - # TODO (jasper): Blender has no concept of projects or workspace. 
- # It would be nice to override 'bpy.ops.wm.open_mainfile' so it takes the - # workdir as starting directory. But I don't know if that is possible. - # Another option would be to create a custom 'File Selector' and add the - # `directory` attribute, so it opens in that directory (does it?). - # https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector - # https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add - workdir = os.getenv("AYON_WORKDIR") - log.debug("New working directory: %s", workdir) - - -def _register_events(): - """Install callbacks for specific events.""" - - register_event_callback("taskChanged", _on_task_changed) - log.info("Installed event callback for 'taskChanged'...") - - -def _discover_gui() -> Optional[Callable]: - """Return the most desirable of the currently registered GUIs""" - - # Prefer last registered - guis = reversed(pyblish.api.registered_guis()) - - for gui in guis: - try: - gui = __import__(gui).show - except (ImportError, AttributeError): - continue - else: - return gui - - return None - - -def add_to_avalon_container(container: bpy.types.Collection): - """Add the container to the Avalon container.""" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - - # Link the container to the scene so it's easily visible to the artist - # and can be managed easily. Otherwise it's only found in "Blender - # File" view and it will be removed by Blenders garbage collection, - # unless you set a 'fake user'. - bpy.context.scene.collection.children.link(avalon_container) - - avalon_container.children.link(container) - - # Disable Avalon containers for the view layers. - for view_layer in bpy.context.scene.view_layers: - for child in view_layer.layer_collection.children: - if child.collection == avalon_container: - child.exclude = True - - -def metadata_update(node: bpy.types.bpy_struct_meta_idprop, data: Dict): - """Imprint the node with metadata. - - Existing metadata will be updated. - """ - - if not node.get(AVALON_PROPERTY): - node[AVALON_PROPERTY] = dict() - for key, value in data.items(): - if value is None: - continue - node[AVALON_PROPERTY][key] = value - - -def containerise(name: str, - namespace: str, - nodes: List, - context: Dict, - loader: Optional[str] = None, - suffix: Optional[str] = "CON") -> bpy.types.Collection: - """Bundle `nodes` into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name: Name of resulting assembly - namespace: Namespace under which to host container - nodes: Long names of nodes to containerise - context: Asset information - loader: Name of loader used to produce this container. - suffix: Suffix of container, defaults to `_CON`. 
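Example (hypothetical values, assuming `context['folder']['name']` is
        "hero" and `nodes` holds the loaded objects):

            >>> container = containerise(
            ...     name="modelMain",
            ...     namespace="hero_01",
            ...     nodes=nodes,
            ...     context=context,
            ...     loader="BlendModelLoader",
            ... )
            >>> container.name
            'hero_01:hero_modelMain_CON'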
- - Returns: - The container assembly - - """ - - node_name = f"{context['folder']['name']}_{name}" - if namespace: - node_name = f"{namespace}:{node_name}" - if suffix: - node_name = f"{node_name}_{suffix}" - container = bpy.data.collections.new(name=node_name) - # Link the children nodes - for obj in nodes: - container.objects.link(obj) - - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(loader), - "representation": context["representation"]["id"], - } - - metadata_update(container, data) - add_to_avalon_container(container) - - return container - - -def containerise_existing( - container: bpy.types.Collection, - name: str, - namespace: str, - context: Dict, - loader: Optional[str] = None, - suffix: Optional[str] = "CON") -> bpy.types.Collection: - """Imprint or update container with metadata. - - Arguments: - name: Name of resulting assembly - namespace: Namespace under which to host container - context: Asset information - loader: Name of loader used to produce this container. - suffix: Suffix of container, defaults to `_CON`. - - Returns: - The container assembly - """ - - node_name = container.name - if suffix: - node_name = f"{node_name}_{suffix}" - container.name = node_name - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(loader), - "representation": context["representation"]["id"], - } - - metadata_update(container, data) - add_to_avalon_container(container) - - return container - - -def parse_container(container: bpy.types.Collection, - validate: bool = True) -> Dict: - """Return the container node's full container data. - - Args: - container: A container node name. - validate: turn the validation for the container on or off - - Returns: - The container schema data for this container node. - - """ - - data = lib.read(container) - - # Append transient data - data["objectName"] = container.name - - if validate: - schema.validate(data) - - return data - - -def ls() -> Iterator: - """List containers from active Blender scene. - - This is the host-equivalent of api.ls(), but instead of listing assets on - disk, it lists assets already loaded in Blender; once loaded they are - called containers. 
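Example (keys follow the container data imprinted by `containerise`):

        >>> for container in ls():
        ...     print(container["name"], container["representation"])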
- """ - - for id_type in {AYON_CONTAINER_ID, AVALON_CONTAINER_ID}: - for container in lib.lsattr("id", id_type): - yield parse_container(container) - - -def publish(): - """Shorthand to publish from within host.""" - - return pyblish.util.publish() diff --git a/server_addon/blender/client/ayon_blender/api/plugin.py b/server_addon/blender/client/ayon_blender/api/plugin.py deleted file mode 100644 index e72bf20287..0000000000 --- a/server_addon/blender/client/ayon_blender/api/plugin.py +++ /dev/null @@ -1,542 +0,0 @@ -"""Shared functionality for pipeline plugins for Blender.""" - -import itertools -from pathlib import Path -from typing import Dict, List, Optional - -import pyblish.api -import bpy - -from ayon_core.pipeline import ( - Creator, - CreatedInstance, - LoaderPlugin, - AVALON_INSTANCE_ID, - AYON_INSTANCE_ID, -) -from ayon_core.pipeline.publish import Extractor -from ayon_core.lib import BoolDef - -from .pipeline import ( - AVALON_CONTAINERS, - AVALON_INSTANCES, - AVALON_PROPERTY, -) -from .ops import ( - MainThreadItem, - execute_in_main_thread -) -from .lib import imprint - -VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx", - ".usd", ".usdc", ".usda"] - - -def prepare_scene_name( - folder_name: str, product_name: str, namespace: Optional[str] = None -) -> str: - """Return a consistent name for an asset.""" - name = f"{folder_name}" - if namespace: - name = f"{name}_{namespace}" - name = f"{name}_{product_name}" - - # Blender name for a collection or object cannot be longer than 63 - # characters. If the name is longer, it will raise an error. - if len(name) > 63: - raise ValueError(f"Scene name '{name}' would be too long.") - - return name - - -def get_unique_number( - folder_name: str, product_name: str -) -> str: - """Return a unique number based on the folder name.""" - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - return "01" - # Check the names of both object and collection containers - obj_asset_groups = avalon_container.objects - obj_group_names = { - c.name for c in obj_asset_groups - if c.type == 'EMPTY' and c.get(AVALON_PROPERTY)} - coll_asset_groups = avalon_container.children - coll_group_names = { - c.name for c in coll_asset_groups - if c.get(AVALON_PROPERTY)} - container_names = obj_group_names.union(coll_group_names) - count = 1 - name = f"{folder_name}_{count:0>2}_{product_name}" - while name in container_names: - count += 1 - name = f"{folder_name}_{count:0>2}_{product_name}" - return f"{count:0>2}" - - -def prepare_data(data, container_name=None): - name = data.name - local_data = data.make_local() - if container_name: - local_data.name = f"{container_name}:{name}" - else: - local_data.name = f"{name}" - return local_data - - -def create_blender_context(active: Optional[bpy.types.Object] = None, - selected: Optional[bpy.types.Object] = None, - window: Optional[bpy.types.Window] = None): - """Create a new Blender context. If an object is passed as - parameter, it is set as selected and active. 
- """ - - if not isinstance(selected, list): - selected = [selected] - - override_context = bpy.context.copy() - - windows = [window] if window else bpy.context.window_manager.windows - - for win in windows: - for area in win.screen.areas: - if area.type == 'VIEW_3D': - for region in area.regions: - if region.type == 'WINDOW': - override_context['window'] = win - override_context['screen'] = win.screen - override_context['area'] = area - override_context['region'] = region - override_context['scene'] = bpy.context.scene - override_context['active_object'] = active - override_context['selected_objects'] = selected - return override_context - raise Exception("Could not create a custom Blender context.") - - -def get_parent_collection(collection): - """Get the parent of the input collection""" - check_list = [bpy.context.scene.collection] - - for c in check_list: - if collection.name in c.children.keys(): - return c - check_list.extend(c.children) - - return None - - -def get_local_collection_with_name(name): - for collection in bpy.data.collections: - if collection.name == name and collection.library is None: - return collection - return None - - -def deselect_all(): - """Deselect all objects in the scene. - - Blender gives context error if trying to deselect object that it isn't - in object mode. - """ - modes = [] - active = bpy.context.view_layer.objects.active - - for obj in bpy.data.objects: - if obj.mode != 'OBJECT': - modes.append((obj, obj.mode)) - bpy.context.view_layer.objects.active = obj - context_override = create_blender_context(active=obj) - with bpy.context.temp_override(**context_override): - bpy.ops.object.mode_set(mode='OBJECT') - - context_override = create_blender_context() - with bpy.context.temp_override(**context_override): - bpy.ops.object.select_all(action='DESELECT') - - for p in modes: - bpy.context.view_layer.objects.active = p[0] - context_override = create_blender_context(active=p[0]) - with bpy.context.temp_override(**context_override): - bpy.ops.object.mode_set(mode=p[1]) - - bpy.context.view_layer.objects.active = active - - -class BlenderInstancePlugin(pyblish.api.InstancePlugin): - settings_category = "blender" - - -class BlenderContextPlugin(pyblish.api.ContextPlugin): - settings_category = "blender" - - -class BlenderExtractor(Extractor): - settings_category = "blender" - - -class BlenderCreator(Creator): - """Base class for Blender Creator plug-ins.""" - defaults = ['Main'] - - settings_category = "blender" - create_as_asset_group = False - - @staticmethod - def cache_instance_data(shared_data): - """Cache instances for Creators shared data. - - Create `blender_cached_instances` key when needed in shared data and - fill it with all collected instances from the scene under its - respective creator identifiers. - - If legacy instances are detected in the scene, create - `blender_cached_legacy_instances` key and fill it with - all legacy products from this family as a value. # key or value? - - Args: - shared_data(Dict[str, Any]): Shared data. 
- - """ - if not shared_data.get('blender_cached_instances'): - cache = {} - cache_legacy = {} - - avalon_instances = bpy.data.collections.get(AVALON_INSTANCES) - avalon_instance_objs = ( - avalon_instances.objects if avalon_instances else [] - ) - - for obj_or_col in itertools.chain( - avalon_instance_objs, - bpy.data.collections - ): - avalon_prop = obj_or_col.get(AVALON_PROPERTY, {}) - if not avalon_prop: - continue - - if avalon_prop.get('id') not in { - AYON_INSTANCE_ID, AVALON_INSTANCE_ID - }: - continue - - creator_id = avalon_prop.get('creator_identifier') - if creator_id: - # Creator instance - cache.setdefault(creator_id, []).append(obj_or_col) - else: - family = avalon_prop.get('family') - if family: - # Legacy creator instance - cache_legacy.setdefault(family, []).append(obj_or_col) - - shared_data["blender_cached_instances"] = cache - shared_data["blender_cached_legacy_instances"] = cache_legacy - - return shared_data - - def create( - self, product_name: str, instance_data: dict, pre_create_data: dict - ): - """Override abstract method from Creator. - Create new instance and store it. - - Args: - product_name (str): Product name of created instance. - instance_data (dict): Instance base data. - pre_create_data (dict): Data based on pre creation attributes. - Those may affect how creator works. - """ - # Get Instance Container or create it if it does not exist - instances = bpy.data.collections.get(AVALON_INSTANCES) - if not instances: - instances = bpy.data.collections.new(name=AVALON_INSTANCES) - bpy.context.scene.collection.children.link(instances) - - # Create asset group - folder_name = instance_data["folderPath"].split("/")[-1] - - name = prepare_scene_name(folder_name, product_name) - if self.create_as_asset_group: - # Create instance as empty - instance_node = bpy.data.objects.new(name=name, object_data=None) - instance_node.empty_display_type = 'SINGLE_ARROW' - instances.objects.link(instance_node) - else: - # Create instance collection - instance_node = bpy.data.collections.new(name=name) - instances.children.link(instance_node) - - self.set_instance_data(product_name, instance_data) - - instance = CreatedInstance( - self.product_type, product_name, instance_data, self - ) - instance.transient_data["instance_node"] = instance_node - self._add_instance_to_context(instance) - - imprint(instance_node, instance_data) - - return instance_node - - def collect_instances(self): - """Override abstract method from BlenderCreator. - Collect existing instances related to this creator plugin.""" - - # Cache instances in shared data - self.cache_instance_data(self.collection_shared_data) - - # Get cached instances - cached_instances = self.collection_shared_data.get( - "blender_cached_instances" - ) - if not cached_instances: - return - - # Process only instances that were created by this creator - for instance_node in cached_instances.get(self.identifier, []): - property = instance_node.get(AVALON_PROPERTY) - # Create instance object from existing data - instance = CreatedInstance.from_existing( - instance_data=property.to_dict(), - creator=self - ) - instance.transient_data["instance_node"] = instance_node - - # Add instance to create context - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - """Override abstract method from BlenderCreator. - Store changes of existing instances so they can be recollected. - - Args: - update_list(List[UpdateData]): Changed instances - and their changes, as a list of tuples. 
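Example of the expected shape (values are hypothetical; `changed_keys`
            is checked the same way in the body below):

            >>> for created_instance, changes in update_list:
            ...     print(changes.changed_keys)
            {'productName'}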
- """ - - for created_instance, changes in update_list: - data = created_instance.data_to_store() - node = created_instance.transient_data["instance_node"] - if not node: - # We can't update if we don't know the node - self.log.error( - f"Unable to update instance {created_instance} " - f"without instance node." - ) - return - - # Rename the instance node in the scene if product - # or folder changed. - # Do not rename the instance if the family is workfile, as the - # workfile instance is included in the AVALON_CONTAINER collection. - if ( - "productName" in changes.changed_keys - or "folderPath" in changes.changed_keys - ) and created_instance.product_type != "workfile": - folder_name = data["folderPath"].split("/")[-1] - name = prepare_scene_name( - folder_name, data["productName"] - ) - node.name = name - - imprint(node, data) - - def remove_instances(self, instances: List[CreatedInstance]): - - for instance in instances: - node = instance.transient_data["instance_node"] - - if isinstance(node, bpy.types.Collection): - for children in node.children_recursive: - if isinstance(children, bpy.types.Collection): - bpy.data.collections.remove(children) - else: - bpy.data.objects.remove(children) - - bpy.data.collections.remove(node) - elif isinstance(node, bpy.types.Object): - bpy.data.objects.remove(node) - - self._remove_instance_from_context(instance) - - def set_instance_data( - self, - product_name: str, - instance_data: dict - ): - """Fill instance data with required items. - - Args: - product_name(str): Product name of created instance. - instance_data(dict): Instance base data. - instance_node(bpy.types.ID): Instance node in blender scene. - """ - if not instance_data: - instance_data = {} - - instance_data.update( - { - "id": AVALON_INSTANCE_ID, - "creator_identifier": self.identifier, - "productName": product_name, - } - ) - - def get_pre_create_attr_defs(self): - return [ - BoolDef("use_selection", - label="Use selection", - default=True) - ] - - -class BlenderLoader(LoaderPlugin): - """A basic AssetLoader for Blender - - This will implement the basic logic for linking/appending assets - into another Blender scene. - - The `update` method should be implemented by a sub-class, because - it's different for different types (e.g. model, rig, animation, - etc.). - """ - settings_category = "blender" - - @staticmethod - def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]: - """Get the 'instance empty' that holds the collection instance.""" - for node in nodes: - if not isinstance(node, bpy.types.Object): - continue - if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION' - and node.instance_collection and node.name == instance_name): - return node - return None - - @staticmethod - def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]: - """Get the 'instance collection' (container) for this asset.""" - for node in nodes: - if not isinstance(node, bpy.types.Collection): - continue - if node.name == instance_name: - return node - return None - - @staticmethod - def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library: - """Find the library file from the container. - - It traverses the objects from this collection, checks if there is only - 1 library from which the objects come from and returns the library. - - Warning: - No nested collections are supported at the moment! - """ - assert not container.children, "Nested collections are not supported." 
-        assert container.objects, "The collection doesn't contain any objects."
-        libraries = set()
-        for obj in container.objects:
-            assert obj.library, f"'{obj.name}' is not linked."
-            libraries.add(obj.library)
-
-        assert len(libraries) == 1, (
-            f"'{container.name}' contains objects from more than one library."
-        )
-
-        return list(libraries)[0]
-
-    def process_asset(self,
-                      context: dict,
-                      name: str,
-                      namespace: Optional[str] = None,
-                      options: Optional[Dict] = None):
-        """Must be implemented by a sub-class"""
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def load(self,
-             context: dict,
-             name: Optional[str] = None,
-             namespace: Optional[str] = None,
-             options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
-        """Run the loader on Blender main thread."""
-        mti = MainThreadItem(self._load, context, name, namespace, options)
-        execute_in_main_thread(mti)
-
-    def _load(self,
-              context: dict,
-              name: Optional[str] = None,
-              namespace: Optional[str] = None,
-              options: Optional[Dict] = None
-              ) -> Optional[bpy.types.Collection]:
-        """Load asset via database
-
-        Arguments:
-            context: Full parenthood of representation to load
-            name: Use pre-defined name
-            namespace: Use pre-defined namespace
-            options: Additional settings dictionary
-        """
-        # TODO: make it possible to add the asset several times by
-        # just re-using the collection
-        filepath = self.filepath_from_context(context)
-        assert Path(filepath).exists(), f"{filepath} doesn't exist."
-
-        folder_name = context["folder"]["name"]
-        product_name = context["product"]["name"]
-        unique_number = get_unique_number(
-            folder_name, product_name
-        )
-        namespace = namespace or f"{folder_name}_{unique_number}"
-        name = name or prepare_scene_name(
-            folder_name, product_name, unique_number
-        )
-
-        nodes = self.process_asset(
-            context=context,
-            name=name,
-            namespace=namespace,
-            options=options,
-        )
-
-        # Only containerise if anything was loaded by the Loader.
-        if not nodes:
-            return None
-
-        # Only containerise if it's not already a collection from a .blend file.
- # representation = context["representation"]["name"]
- # if representation != "blend":
- # from ayon_blender.api.pipeline import containerise
- # return containerise(
- # name=name,
- # namespace=namespace,
- # nodes=nodes,
- # context=context,
- # loader=self.__class__.__name__,
- # )
-
- # folder_name = context["folder"]["name"]
- # product_name = context["product"]["name"]
- # instance_name = prepare_scene_name(
- # folder_name, product_name, unique_number
- # ) + '_CON'
-
- # return self._get_instance_collection(instance_name, nodes)
-
- def exec_update(self, container: Dict, context: Dict):
- """Must be implemented by a sub-class"""
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def update(self, container: Dict, context: Dict):
- """Run the update on the Blender main thread."""
- mti = MainThreadItem(self.exec_update, container, context)
- execute_in_main_thread(mti)
-
- def exec_remove(self, container: Dict) -> bool:
- """Must be implemented by a sub-class"""
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def remove(self, container: Dict) -> bool:
- """Run the remove on the Blender main thread."""
- mti = MainThreadItem(self.exec_remove, container)
- execute_in_main_thread(mti)
diff --git a/server_addon/blender/client/ayon_blender/api/render_lib.py b/server_addon/blender/client/ayon_blender/api/render_lib.py
deleted file mode 100644
index 91913f7913..0000000000
--- a/server_addon/blender/client/ayon_blender/api/render_lib.py
+++ /dev/null
@@ -1,364 +0,0 @@
-from pathlib import Path
-
-import bpy
-
-from ayon_core.settings import get_project_settings
-from ayon_core.pipeline import get_current_project_name
-
-
-def get_default_render_folder(settings):
- """Get default render folder from blender settings."""
-
- return (settings["blender"]
- ["RenderSettings"]
- ["default_render_image_folder"])
-
-
-def get_aov_separator(settings):
- """Get aov separator from blender settings."""
-
- aov_sep = (settings["blender"]
- ["RenderSettings"]
- ["aov_separator"])
-
- if aov_sep == "dash":
- return "-"
- elif aov_sep == "underscore":
- return "_"
- elif aov_sep == "dot":
- return "."
- else:
- raise ValueError(f"Invalid aov separator: {aov_sep}")
-
-
-def get_image_format(settings):
- """Get image format from blender settings."""
-
- return (settings["blender"]
- ["RenderSettings"]
- ["image_format"])
-
-
-def get_multilayer(settings):
- """Get multilayer from blender settings."""
-
- return (settings["blender"]
- ["RenderSettings"]
- ["multilayer_exr"])
-
-
-def get_renderer(settings):
- """Get renderer from blender settings."""
-
- return (settings["blender"]
- ["RenderSettings"]
- ["renderer"])
-
-
-def get_compositing(settings):
- """Get compositing from blender settings."""
-
- return (settings["blender"]
- ["RenderSettings"]
- ["compositing"])
-
-
-def get_render_product(output_path, name, aov_sep):
- """
- Generate the path to the render product. Blender interprets `#`
- characters as frame number padding when it renders.
-
- Args:
- output_path (Path): Folder the renders are written to.
- name (str): Name of the render instance.
- aov_sep (str): Separator between the name and the AOV suffix.
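-
- Example (illustrative values only):
- >>> get_render_product(Path("/renders/sh010"), "renderMain", "_")
- '/renders/sh010/renderMain_beauty.####'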
- """ - filepath = output_path / name.lstrip("/") - render_product = f"{filepath}{aov_sep}beauty.####" - render_product = render_product.replace("\\", "/") - - return render_product - - -def set_render_format(ext, multilayer): - # Set Blender to save the file with the right extension - bpy.context.scene.render.use_file_extension = True - - image_settings = bpy.context.scene.render.image_settings - - if ext == "exr": - image_settings.file_format = ( - "OPEN_EXR_MULTILAYER" if multilayer else "OPEN_EXR") - elif ext == "bmp": - image_settings.file_format = "BMP" - elif ext == "rgb": - image_settings.file_format = "IRIS" - elif ext == "png": - image_settings.file_format = "PNG" - elif ext == "jpeg": - image_settings.file_format = "JPEG" - elif ext == "jp2": - image_settings.file_format = "JPEG2000" - elif ext == "tga": - image_settings.file_format = "TARGA" - elif ext == "tif": - image_settings.file_format = "TIFF" - - -def set_render_passes(settings, renderer): - aov_list = set(settings["blender"]["RenderSettings"]["aov_list"]) - custom_passes = settings["blender"]["RenderSettings"]["custom_passes"] - - # Common passes for both renderers - vl = bpy.context.view_layer - - # Data Passes - vl.use_pass_combined = "combined" in aov_list - vl.use_pass_z = "z" in aov_list - vl.use_pass_mist = "mist" in aov_list - vl.use_pass_normal = "normal" in aov_list - - # Light Passes - vl.use_pass_diffuse_direct = "diffuse_light" in aov_list - vl.use_pass_diffuse_color = "diffuse_color" in aov_list - vl.use_pass_glossy_direct = "specular_light" in aov_list - vl.use_pass_glossy_color = "specular_color" in aov_list - vl.use_pass_emit = "emission" in aov_list - vl.use_pass_environment = "environment" in aov_list - vl.use_pass_ambient_occlusion = "ao" in aov_list - - # Cryptomatte Passes - vl.use_pass_cryptomatte_object = "cryptomatte_object" in aov_list - vl.use_pass_cryptomatte_material = "cryptomatte_material" in aov_list - vl.use_pass_cryptomatte_asset = "cryptomatte_asset" in aov_list - - if renderer == "BLENDER_EEVEE": - # Eevee exclusive passes - eevee = vl.eevee - - # Light Passes - vl.use_pass_shadow = "shadow" in aov_list - eevee.use_pass_volume_direct = "volume_light" in aov_list - - # Effects Passes - eevee.use_pass_bloom = "bloom" in aov_list - eevee.use_pass_transparent = "transparent" in aov_list - - # Cryptomatte Passes - vl.use_pass_cryptomatte_accurate = "cryptomatte_accurate" in aov_list - elif renderer == "CYCLES": - # Cycles exclusive passes - cycles = vl.cycles - - # Data Passes - vl.use_pass_position = "position" in aov_list - vl.use_pass_vector = "vector" in aov_list - vl.use_pass_uv = "uv" in aov_list - cycles.denoising_store_passes = "denoising" in aov_list - vl.use_pass_object_index = "object_index" in aov_list - vl.use_pass_material_index = "material_index" in aov_list - cycles.pass_debug_sample_count = "sample_count" in aov_list - - # Light Passes - vl.use_pass_diffuse_indirect = "diffuse_indirect" in aov_list - vl.use_pass_glossy_indirect = "specular_indirect" in aov_list - vl.use_pass_transmission_direct = "transmission_direct" in aov_list - vl.use_pass_transmission_indirect = "transmission_indirect" in aov_list - vl.use_pass_transmission_color = "transmission_color" in aov_list - cycles.use_pass_volume_direct = "volume_light" in aov_list - cycles.use_pass_volume_indirect = "volume_indirect" in aov_list - cycles.use_pass_shadow_catcher = "shadow" in aov_list - - aovs_names = [aov.name for aov in vl.aovs] - for cp in custom_passes: - cp_name = cp["attribute"] - if cp_name not in 
aovs_names: - aov = vl.aovs.add() - aov.name = cp_name - else: - aov = vl.aovs[cp_name] - aov.type = cp["value"] - - return list(aov_list), custom_passes - - -def _create_aov_slot(name, aov_sep, slots, rpass_name, multi_exr, output_path): - filename = f"{name}{aov_sep}{rpass_name}.####" - slot = slots.new(rpass_name if multi_exr else filename) - filepath = str(output_path / filename.lstrip("/")) - - return slot, filepath - - -def set_node_tree( - output_path, render_product, name, aov_sep, ext, multilayer, compositing -): - # Set the scene to use the compositor node tree to render - bpy.context.scene.use_nodes = True - - tree = bpy.context.scene.node_tree - - comp_layer_type = "CompositorNodeRLayers" - output_type = "CompositorNodeOutputFile" - compositor_type = "CompositorNodeComposite" - - # Get the Render Layer, Composite and the previous output nodes - render_layer_node = None - composite_node = None - old_output_node = None - for node in tree.nodes: - if node.bl_idname == comp_layer_type: - render_layer_node = node - elif node.bl_idname == compositor_type: - composite_node = node - elif node.bl_idname == output_type and "AYON" in node.name: - old_output_node = node - if render_layer_node and composite_node and old_output_node: - break - - # If there's not a Render Layers node, we create it - if not render_layer_node: - render_layer_node = tree.nodes.new(comp_layer_type) - - # Get the enabled output sockets, that are the active passes for the - # render. - # We also exclude some layers. - exclude_sockets = ["Image", "Alpha", "Noisy Image"] - passes = [ - socket - for socket in render_layer_node.outputs - if socket.enabled and socket.name not in exclude_sockets - ] - - # Create a new output node - output = tree.nodes.new(output_type) - - image_settings = bpy.context.scene.render.image_settings - output.format.file_format = image_settings.file_format - - slots = None - - # In case of a multilayer exr, we don't need to use the output node, - # because the blender render already outputs a multilayer exr. - multi_exr = ext == "exr" and multilayer - slots = output.layer_slots if multi_exr else output.file_slots - output.base_path = render_product if multi_exr else str(output_path) - - slots.clear() - - aov_file_products = [] - - old_links = { - link.from_socket.name: link for link in tree.links - if link.to_node == old_output_node} - - # Create a new socket for the beauty output - pass_name = "rgba" if multi_exr else "beauty" - slot, _ = _create_aov_slot( - name, aov_sep, slots, pass_name, multi_exr, output_path) - tree.links.new(render_layer_node.outputs["Image"], slot) - - if compositing: - # Create a new socket for the composite output - pass_name = "composite" - comp_socket, filepath = _create_aov_slot( - name, aov_sep, slots, pass_name, multi_exr, output_path) - aov_file_products.append(("Composite", filepath)) - - # For each active render pass, we add a new socket to the output node - # and link it - for rpass in passes: - slot, filepath = _create_aov_slot( - name, aov_sep, slots, rpass.name, multi_exr, output_path) - aov_file_products.append((rpass.name, filepath)) - - # If the rpass was not connected with the old output node, we connect - # it with the new one. - if not old_links.get(rpass.name): - tree.links.new(rpass, slot) - - for link in list(old_links.values()): - # Check if the socket is still available in the new output node. - socket = output.inputs.get(link.to_socket.name) - # If it is, we connect it with the new output node. 
- if socket:
- tree.links.new(link.from_socket, socket)
- # Then, we remove the old link.
- tree.links.remove(link)
-
- # If there's a composite node, we connect its input with the new output
- if compositing and composite_node:
- for link in tree.links:
- if link.to_node == composite_node:
- tree.links.new(link.from_socket, comp_socket)
- break
-
- if old_output_node:
- output.location = old_output_node.location
- tree.nodes.remove(old_output_node)
-
- output.name = "AYON File Output"
- output.label = "AYON File Output"
-
- return [] if multi_exr else aov_file_products
-
-
-def imprint_render_settings(node, data):
- RENDER_DATA = "render_data"
- if not node.get(RENDER_DATA):
- node[RENDER_DATA] = {}
- for key, value in data.items():
- if value is None:
- continue
- node[RENDER_DATA][key] = value
-
-
-def prepare_rendering(asset_group):
- name = asset_group.name
-
- # Note: 'Path("")' is truthy, so the raw filepath must be checked first.
- assert bpy.data.filepath, "Workfile not saved. Please save the file first."
- filepath = Path(bpy.data.filepath)
-
- dirpath = filepath.parent
- file_name = Path(filepath.name).stem
-
- project = get_current_project_name()
- settings = get_project_settings(project)
-
- render_folder = get_default_render_folder(settings)
- aov_sep = get_aov_separator(settings)
- ext = get_image_format(settings)
- multilayer = get_multilayer(settings)
- renderer = get_renderer(settings)
- compositing = get_compositing(settings)
-
- set_render_format(ext, multilayer)
- bpy.context.scene.render.engine = renderer
- aov_list, custom_passes = set_render_passes(settings, renderer)
-
- output_path = Path.joinpath(dirpath, render_folder, file_name)
-
- render_product = get_render_product(output_path, name, aov_sep)
- aov_file_product = set_node_tree(
- output_path, render_product, name, aov_sep,
- ext, multilayer, compositing)
-
- # Clear the render filepath, so that the output is handled only by the
- # output node in the compositor.
- bpy.context.scene.render.filepath = ""
-
- render_settings = {
- "render_folder": render_folder,
- "aov_separator": aov_sep,
- "image_format": ext,
- "multilayer_exr": multilayer,
- "aov_list": aov_list,
- "custom_passes": custom_passes,
- "render_product": render_product,
- "aov_file_product": aov_file_product,
- "review": True,
- }
-
- imprint_render_settings(asset_group, render_settings)
diff --git a/server_addon/blender/client/ayon_blender/api/workio.py b/server_addon/blender/client/ayon_blender/api/workio.py
deleted file mode 100644
index e0f333843a..0000000000
--- a/server_addon/blender/client/ayon_blender/api/workio.py
+++ /dev/null
@@ -1,89 +0,0 @@
-"""Host API required for Work Files."""
-
-from pathlib import Path
-from typing import List, Optional
-
-import bpy
-
-
-class OpenFileCacher:
- """Store information about a file being opened.
-
- While a file is opening, QApplication events should not be processed.
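-
- A rough usage sketch (the host wires this up around file loads):
-
- OpenFileCacher.set_opening()
- bpy.ops.wm.open_mainfile(filepath=path)
- OpenFileCacher.post_load()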
- """ - opening_file = False - - @classmethod - def post_load(cls): - cls.opening_file = False - - @classmethod - def set_opening(cls): - cls.opening_file = True - - -def open_file(filepath: str) -> Optional[str]: - """Open the scene file in Blender.""" - OpenFileCacher.set_opening() - - preferences = bpy.context.preferences - load_ui = preferences.filepaths.use_load_ui - use_scripts = preferences.filepaths.use_scripts_auto_execute - result = bpy.ops.wm.open_mainfile( - filepath=filepath, - load_ui=load_ui, - use_scripts=use_scripts, - ) - - if result == {'FINISHED'}: - return filepath - return None - - -def save_file(filepath: str, copy: bool = False) -> Optional[str]: - """Save the open scene file.""" - - preferences = bpy.context.preferences - compress = preferences.filepaths.use_file_compression - relative_remap = preferences.filepaths.use_relative_paths - result = bpy.ops.wm.save_as_mainfile( - filepath=filepath, - compress=compress, - relative_remap=relative_remap, - copy=copy, - ) - - if result == {'FINISHED'}: - return filepath - return None - - -def current_file() -> Optional[str]: - """Return the path of the open scene file.""" - - current_filepath = bpy.data.filepath - if Path(current_filepath).is_file(): - return current_filepath - return None - - -def has_unsaved_changes() -> bool: - """Does the open scene file have unsaved changes?""" - - return bpy.data.is_dirty - - -def file_extensions() -> List[str]: - """Return the supported file extensions for Blender scene files.""" - - return [".blend"] - - -def work_root(session: dict) -> str: - """Return the default root to browse for work files.""" - - work_dir = session["AYON_WORKDIR"] - scene_dir = session.get("AVALON_SCENEDIR") - if scene_dir: - return str(Path(work_dir, scene_dir)) - return work_dir diff --git a/server_addon/blender/client/ayon_blender/blender_addon/startup/init.py b/server_addon/blender/client/ayon_blender/blender_addon/startup/init.py deleted file mode 100644 index bd0d52627c..0000000000 --- a/server_addon/blender/client/ayon_blender/blender_addon/startup/init.py +++ /dev/null @@ -1,10 +0,0 @@ -from ayon_core.pipeline import install_host -from ayon_blender.api import BlenderHost - - -def register(): - install_host(BlenderHost()) - - -def unregister(): - pass diff --git a/server_addon/blender/client/ayon_blender/hooks/pre_add_run_python_script_arg.py b/server_addon/blender/client/ayon_blender/hooks/pre_add_run_python_script_arg.py deleted file mode 100644 index 9041ef7309..0000000000 --- a/server_addon/blender/client/ayon_blender/hooks/pre_add_run_python_script_arg.py +++ /dev/null @@ -1,54 +0,0 @@ -from pathlib import Path - -from ayon_applications import PreLaunchHook, LaunchTypes - - -class AddPythonScriptToLaunchArgs(PreLaunchHook): - """Add python script to be executed before Blender launch.""" - - # Append after file argument - order = 15 - app_groups = {"blender"} - launch_types = {LaunchTypes.local} - - def execute(self): - if not self.launch_context.data.get("python_scripts"): - return - - # Add path to workfile to arguments - for python_script_path in self.launch_context.data["python_scripts"]: - self.log.info( - f"Adding python script {python_script_path} to launch" - ) - # Test script path exists - python_script_path = Path(python_script_path) - if not python_script_path.exists(): - self.log.warning( - f"Python script {python_script_path} doesn't exist. " - "Skipped..." 
- )
- continue
-
- if "--" in self.launch_context.launch_args:
- # Insert before separator
- separator_index = self.launch_context.launch_args.index("--")
- self.launch_context.launch_args.insert(
- separator_index,
- "-P",
- )
- self.launch_context.launch_args.insert(
- separator_index + 1,
- python_script_path.as_posix(),
- )
- else:
- self.launch_context.launch_args.extend(
- ["-P", python_script_path.as_posix()]
- )
-
- # Ensure separator
- if "--" not in self.launch_context.launch_args:
- self.launch_context.launch_args.append("--")
-
- self.launch_context.launch_args.extend(
- [*self.launch_context.data.get("script_args", [])]
- )
diff --git a/server_addon/blender/client/ayon_blender/hooks/pre_pyside_install.py b/server_addon/blender/client/ayon_blender/hooks/pre_pyside_install.py
deleted file mode 100644
index 87a4f5cfad..0000000000
--- a/server_addon/blender/client/ayon_blender/hooks/pre_pyside_install.py
+++ /dev/null
@@ -1,295 +0,0 @@
-import os
-import re
-import subprocess
-from platform import system
-from ayon_applications import PreLaunchHook, LaunchTypes
-
-
-class InstallPySideToBlender(PreLaunchHook):
- """Install Qt binding to blender's python packages.
-
- The prelaunch hook does 2 things:
- 1.) Pushes blender's python packages to the beginning of PYTHONPATH.
- 2.) Checks if blender has PySide2 installed and tries to install it
- if not.
-
- The pipeline implementation requires a Qt binding to be installed in
- blender's python packages.
- """
-
- app_groups = {"blender"}
- launch_types = {LaunchTypes.local}
-
- def execute(self):
- # Prelaunch hook is not crucial
- try:
- self.inner_execute()
- except Exception:
- self.log.warning(
- "Processing of {} crashed.".format(self.__class__.__name__),
- exc_info=True
- )
-
- def inner_execute(self):
- # Get blender's python directory
- version_regex = re.compile(r"^([2-4])\.[0-9]+$")
-
- platform = system().lower()
- executable = self.launch_context.executable.executable_path
- expected_executable = "blender"
- if platform == "windows":
- expected_executable += ".exe"
-
- if os.path.basename(executable).lower() != expected_executable:
- self.log.info((
- f"Executable does not lead to {expected_executable} file."
- " Can't determine blender's python to check/install"
- " Qt binding."
- ))
- return
-
- versions_dir = os.path.dirname(executable)
- if platform == "darwin":
- versions_dir = os.path.join(
- os.path.dirname(versions_dir), "Resources"
- )
- version_subfolders = []
- for dir_entry in os.scandir(versions_dir):
- if dir_entry.is_dir() and version_regex.match(dir_entry.name):
- version_subfolders.append(dir_entry.name)
-
- if not version_subfolders:
- self.log.info(
- "Didn't find version subfolder next to Blender executable"
- )
- return
-
- if len(version_subfolders) > 1:
- self.log.info((
- "Found more than one version subfolder next"
- " to blender executable. {}"
- ).format(", ".join([
- '"./{}"'.format(name)
- for name in version_subfolders
- ])))
- return
-
- version_subfolder = version_subfolders[0]
- before_blender_4 = False
- if int(version_regex.match(version_subfolder).group(1)) < 4:
- before_blender_4 = True
- # Blender 4 has Python 3.11 which does not support 'PySide2'
- # QUESTION could we always install PySide6?
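- # (Context: Blender 3.x bundles Python 3.9/3.10, for which PySide2
- # wheels exist; Blender 4.x bundles Python 3.11+, which is only
- # supported by PySide6.)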
- qt_binding = "PySide2" if before_blender_4 else "PySide6" - # Use PySide6 6.6.3 because 6.7.0 had a bug - # - 'QTextEdit' can't be added to 'QBoxLayout' - qt_binding_version = None if before_blender_4 else "6.6.3" - - python_dir = os.path.join(versions_dir, version_subfolder, "python") - python_lib = os.path.join(python_dir, "lib") - python_version = "python" - - if platform != "windows": - for dir_entry in os.scandir(python_lib): - if dir_entry.is_dir() and dir_entry.name.startswith("python"): - python_lib = dir_entry.path - python_version = dir_entry.name - break - - # Change PYTHONPATH to contain blender's packages as first - python_paths = [ - python_lib, - os.path.join(python_lib, "site-packages"), - ] - python_path = self.launch_context.env.get("PYTHONPATH") or "" - for path in python_path.split(os.pathsep): - if path: - python_paths.append(path) - - self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths) - - # Get blender's python executable - python_bin = os.path.join(python_dir, "bin") - if platform == "windows": - python_executable = os.path.join(python_bin, "python.exe") - else: - python_executable = os.path.join(python_bin, python_version) - # Check for python with enabled 'pymalloc' - if not os.path.exists(python_executable): - python_executable += "m" - - if not os.path.exists(python_executable): - self.log.warning( - "Couldn't find python executable for blender. {}".format( - executable - ) - ) - return - - # Check if PySide2 is installed and skip if yes - if self.is_pyside_installed(python_executable, qt_binding): - self.log.debug("Blender has already installed PySide2.") - return - - # Install PySide2 in blender's python - if platform == "windows": - result = self.install_pyside_windows( - python_executable, - qt_binding, - qt_binding_version, - before_blender_4, - ) - else: - result = self.install_pyside( - python_executable, - qt_binding, - qt_binding_version, - ) - - if result: - self.log.info( - f"Successfully installed {qt_binding} module to blender." - ) - else: - self.log.warning( - f"Failed to install {qt_binding} module to blender." - ) - - def install_pyside_windows( - self, - python_executable, - qt_binding, - qt_binding_version, - before_blender_4, - ): - """Install PySide2 python module to blender's python. - - Installation requires administration rights that's why it is required - to use "pywin32" module which can execute command's and ask for - administration rights. - """ - try: - import win32con - import win32process - import win32event - import pywintypes - from win32comext.shell.shell import ShellExecuteEx - from win32comext.shell import shellcon - except Exception: - self.log.warning("Couldn't import \"pywin32\" modules") - return - - if qt_binding_version: - qt_binding = f"{qt_binding}=={qt_binding_version}" - - try: - # Parameters - # - use "-m pip" as module pip to install PySide2 and argument - # "--ignore-installed" is to force install module to blender's - # site-packages and make sure it is binary compatible - fake_exe = "fake.exe" - site_packages_prefix = os.path.dirname( - os.path.dirname(python_executable) - ) - args = [ - fake_exe, - "-m", - "pip", - "install", - "--ignore-installed", - qt_binding, - ] - if not before_blender_4: - # Define prefix for site package - # Python in blender 4.x is installing packages in AppData and - # not in blender's directory. 
- args.extend(["--prefix", site_packages_prefix])
-
- # Cut off the fake executable, keeping only the arguments
- parameters = (
- subprocess.list2cmdline(args)[len(fake_exe):]
- .lstrip(" ")
- )
-
- # Execute command and ask for administrator's rights
- process_info = ShellExecuteEx(
- nShow=win32con.SW_SHOWNORMAL,
- fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
- lpVerb="runas",
- lpFile=python_executable,
- lpParameters=parameters,
- lpDirectory=os.path.dirname(python_executable)
- )
- process_handle = process_info["hProcess"]
- win32event.WaitForSingleObject(process_handle, win32event.INFINITE)
- returncode = win32process.GetExitCodeProcess(process_handle)
- return returncode == 0
- except pywintypes.error:
- pass
-
- def install_pyside(
- self,
- python_executable,
- qt_binding,
- qt_binding_version,
- ):
- """Install Qt binding python module to blender's python."""
- if qt_binding_version:
- qt_binding = f"{qt_binding}=={qt_binding_version}"
- try:
- # Parameters
- # - use "-m pip" as module pip to install the Qt binding; the
- # "--ignore-installed" argument forces the module into blender's
- # site-packages and makes sure it is binary compatible
- # TODO find out if blender 4.x on linux/darwin does install
- # the Qt binding to the correct place.
- args = [
- python_executable,
- "-m",
- "pip",
- "install",
- "--ignore-installed",
- qt_binding,
- ]
- process = subprocess.Popen(
- args, stdout=subprocess.PIPE, universal_newlines=True
- )
- process.communicate()
- return process.returncode == 0
- except PermissionError:
- self.log.warning(
- "Permission denied with command:"
- "\"{}\".".format(" ".join(args))
- )
- except OSError as error:
- self.log.warning(f"OS error has occurred: \"{error}\".")
- except subprocess.SubprocessError:
- pass
-
- def is_pyside_installed(self, python_executable, qt_binding):
- """Check if the Qt binding module is in blender's pip list.
-
- Check that the module is installed directly in blender's
- site-packages. It is possible that it is installed in the user's
- site-packages, but that may be incompatible with blender's python.
- """
-
- qt_binding_low = qt_binding.lower()
- # Get pip list from blender's python executable
- args = [python_executable, "-m", "pip", "list"]
- process = subprocess.Popen(args, stdout=subprocess.PIPE)
- stdout, _ = process.communicate()
- lines = stdout.decode().split(os.linesep)
- # The second line contains dashes that define the maximum length of
- # the module name. The second column of dashes defines the maximum
- # length of the module version.
- package_dashes, *_ = lines[1].split(" ")
- package_len = len(package_dashes)
-
- # Go through the printed lines, starting at line 3
- for idx in range(2, len(lines)):
- line = lines[idx]
- if not line:
- continue
- package_name = line[0:package_len].strip()
- if package_name.lower() == qt_binding_low:
- return True
- return False
diff --git a/server_addon/blender/client/ayon_blender/hooks/pre_windows_console.py b/server_addon/blender/client/ayon_blender/hooks/pre_windows_console.py
deleted file mode 100644
index 47303a7af4..0000000000
--- a/server_addon/blender/client/ayon_blender/hooks/pre_windows_console.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import subprocess
-from ayon_applications import PreLaunchHook, LaunchTypes
-
-
-class BlenderConsoleWindows(PreLaunchHook):
- """Launch Blender with its own console window on Windows.
-
- Blender is executed like a python process, so the `CREATE_NEW_CONSOLE`
- flag must be passed on windows to trigger creation of a new console.
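- (`CREATE_NEW_CONSOLE` is a standard `subprocess` creation flag on
- Windows; see `execute` below.)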
- At the same time the newly created console won't create its own stdout
- and stderr handlers, so they should not be redirected to DEVNULL.
- """
-
- # Should run as the last hook because it must change launch arguments
- order = 1000
- app_groups = {"blender"}
- platforms = {"windows"}
- launch_types = {LaunchTypes.local}
-
- def execute(self):
- # Change `creationflags` to CREATE_NEW_CONSOLE
- # - on Windows blender will create a new window using its console
- # Set `stdout` and `stderr` to None so the newly created console does
- # not have its output redirected to DEVNULL in build
- self.launch_context.kwargs.update({
- "creationflags": subprocess.CREATE_NEW_CONSOLE,
- "stdout": None,
- "stderr": None
- })
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/convert_legacy.py b/server_addon/blender/client/ayon_blender/plugins/create/convert_legacy.py
deleted file mode 100644
index 095f3ab919..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/convert_legacy.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Converter for legacy Blender products."""
-from ayon_core.pipeline.create.creator_plugins import ProductConvertorPlugin
-from ayon_blender.api.lib import imprint
-
-
-class BlenderLegacyConvertor(ProductConvertorPlugin):
- """Find and convert any legacy products in the scene.
-
- This converter will find all legacy products in the scene and will
- transform them to the current system. Since the old products don't
- retain any information about their original creators, the only mapping
- we can do is based on their product types.
-
- Its limitation is that you can have multiple creators creating products
- of the same product type, and there is no way to tell them apart. This
- code should nevertheless cover all creators that came with OpenPype.
-
- """
- identifier = "io.openpype.creators.blender.legacy"
- product_type_to_id = {
- "action": "io.openpype.creators.blender.action",
- "camera": "io.openpype.creators.blender.camera",
- "animation": "io.openpype.creators.blender.animation",
- "blendScene": "io.openpype.creators.blender.blendscene",
- "layout": "io.openpype.creators.blender.layout",
- "model": "io.openpype.creators.blender.model",
- "pointcache": "io.openpype.creators.blender.pointcache",
- "render": "io.openpype.creators.blender.render",
- "review": "io.openpype.creators.blender.review",
- "rig": "io.openpype.creators.blender.rig",
- }
-
- def __init__(self, *args, **kwargs):
- super(BlenderLegacyConvertor, self).__init__(*args, **kwargs)
- self.legacy_instances = {}
-
- def find_instances(self):
- """Find legacy products in the scene.
-
- Legacy products are the ones that don't have a `creator_identifier`
- parameter on them.
-
- This is using cached entries done in
- :py:meth:`~BlenderCreator.cache_instance_data()`
-
- """
- self.legacy_instances = self.collection_shared_data.get(
- "blender_cached_legacy_instances")
- if not self.legacy_instances:
- return
- self.add_convertor_item(
- "Found {} incompatible product{}".format(
- len(self.legacy_instances),
- "s" if len(self.legacy_instances) > 1 else ""
- )
- )
-
- def convert(self):
- """Convert all legacy products to current.
-
- It is enough to add `creator_identifier` and `instance_node`.
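-
- For example, a legacy 'model' instance ends up imprinted with
- creator_identifier "io.openpype.creators.blender.model", per the
- mapping above.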
-
- """
- if not self.legacy_instances:
- return
-
- for product_type, instance_nodes in self.legacy_instances.items():
- if product_type in self.product_type_to_id:
- for instance_node in instance_nodes:
- creator_identifier = self.product_type_to_id[product_type]
- self.log.info(
- "Converting {} to {}".format(instance_node.name,
- creator_identifier)
- )
- imprint(instance_node, data={
- "creator_identifier": creator_identifier
- })
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_action.py b/server_addon/blender/client/ayon_blender/plugins/create/create_action.py
deleted file mode 100644
index 123a2e0df1..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_action.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Create an action asset."""
-
-import bpy
-
-from ayon_blender.api import lib, plugin
-
-
-class CreateAction(plugin.BlenderCreator):
- """Action output for character rigs."""
-
- identifier = "io.openpype.creators.blender.action"
- label = "Action"
- product_type = "action"
- icon = "male"
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- # Run parent create method
- collection = super().create(
- product_name, instance_data, pre_create_data
- )
-
- # Get instance name
- name = plugin.prepare_scene_name(
- instance_data["folderPath"], product_name
- )
-
- if pre_create_data.get("use_selection"):
- for obj in lib.get_selection():
- if (obj.animation_data is not None
- and obj.animation_data.action is not None):
-
- empty_obj = bpy.data.objects.new(name=name,
- object_data=None)
- empty_obj.animation_data_create()
- empty_obj.animation_data.action = obj.animation_data.action
- empty_obj.animation_data.action.name = name
- collection.objects.link(empty_obj)
-
- return collection
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_animation.py b/server_addon/blender/client/ayon_blender/plugins/create/create_animation.py
deleted file mode 100644
index cfb2c254ef..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_animation.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""Create an animation asset."""
-
-from ayon_blender.api import plugin, lib
-
-
-class CreateAnimation(plugin.BlenderCreator):
- """Animation output for character rigs."""
-
- identifier = "io.openpype.creators.blender.animation"
- label = "Animation"
- product_type = "animation"
- icon = "male"
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- # Run parent create method
- collection = super().create(
- product_name, instance_data, pre_create_data
- )
-
- if pre_create_data.get("use_selection"):
- selected = lib.get_selection()
- for obj in selected:
- collection.objects.link(obj)
- elif pre_create_data.get("asset_group"):
- # Use for Load Blend automated creation of animation instances
- # upon loading rig files
- obj = pre_create_data.get("asset_group")
- collection.objects.link(obj)
-
- return collection
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_blendScene.py b/server_addon/blender/client/ayon_blender/plugins/create/create_blendScene.py
deleted file mode 100644
index 363a35883b..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_blendScene.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""Create a Blender scene asset."""
-
-import bpy
-
-from ayon_blender.api import plugin, lib
-
-
-class CreateBlendScene(plugin.BlenderCreator):
- """Generic group of assets."""
-
- identifier = 
"io.openpype.creators.blender.blendscene" - label = "Blender Scene" - product_type = "blendScene" - icon = "cubes" - - maintain_selection = False - - def create( - self, product_name: str, instance_data: dict, pre_create_data: dict - ): - - instance_node = super().create(product_name, - instance_data, - pre_create_data) - - if pre_create_data.get("use_selection"): - selection = lib.get_selection(include_collections=True) - for data in selection: - if isinstance(data, bpy.types.Collection): - instance_node.children.link(data) - elif isinstance(data, bpy.types.Object): - instance_node.objects.link(data) - - return instance_node diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_camera.py b/server_addon/blender/client/ayon_blender/plugins/create/create_camera.py deleted file mode 100644 index 8cfe8f989b..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/create/create_camera.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Create a camera asset.""" - -import bpy - -from ayon_blender.api import plugin, lib -from ayon_blender.api.pipeline import AVALON_INSTANCES - - -class CreateCamera(plugin.BlenderCreator): - """Polygonal static geometry.""" - - identifier = "io.openpype.creators.blender.camera" - label = "Camera" - product_type = "camera" - icon = "video-camera" - - create_as_asset_group = True - - def create( - self, product_name: str, instance_data: dict, pre_create_data: dict - ): - - asset_group = super().create(product_name, - instance_data, - pre_create_data) - - bpy.context.view_layer.objects.active = asset_group - if pre_create_data.get("use_selection"): - for obj in lib.get_selection(): - obj.parent = asset_group - else: - plugin.deselect_all() - camera = bpy.data.cameras.new(product_name) - camera_obj = bpy.data.objects.new(product_name, camera) - - instances = bpy.data.collections.get(AVALON_INSTANCES) - instances.objects.link(camera_obj) - - bpy.context.view_layer.objects.active = asset_group - camera_obj.parent = asset_group - - return asset_group diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_layout.py b/server_addon/blender/client/ayon_blender/plugins/create/create_layout.py deleted file mode 100644 index 1e0f8effdd..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/create/create_layout.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Create a layout asset.""" - -import bpy - -from ayon_blender.api import plugin, lib - - -class CreateLayout(plugin.BlenderCreator): - """Layout output for character rigs.""" - - identifier = "io.openpype.creators.blender.layout" - label = "Layout" - product_type = "layout" - icon = "cubes" - - create_as_asset_group = True - - def create( - self, product_name: str, instance_data: dict, pre_create_data: dict - ): - - asset_group = super().create(product_name, - instance_data, - pre_create_data) - - # Add selected objects to instance - if pre_create_data.get("use_selection"): - bpy.context.view_layer.objects.active = asset_group - for obj in lib.get_selection(): - obj.parent = asset_group - - return asset_group diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_model.py b/server_addon/blender/client/ayon_blender/plugins/create/create_model.py deleted file mode 100644 index 7e8bf566ea..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/create/create_model.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Create a model asset.""" - -import bpy - -from ayon_blender.api import plugin, lib - - -class CreateModel(plugin.BlenderCreator): - """Polygonal static 
geometry."""
-
- identifier = "io.openpype.creators.blender.model"
- label = "Model"
- product_type = "model"
- icon = "cube"
-
- create_as_asset_group = True
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- asset_group = super().create(product_name,
- instance_data,
- pre_create_data)
-
- # Add selected objects to instance
- if pre_create_data.get("use_selection"):
- bpy.context.view_layer.objects.active = asset_group
- for obj in lib.get_selection():
- obj.parent = asset_group
-
- return asset_group
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_pointcache.py b/server_addon/blender/client/ayon_blender/plugins/create/create_pointcache.py
deleted file mode 100644
index 9730ddb89d..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_pointcache.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""Create a pointcache asset."""
-
-from ayon_blender.api import plugin, lib
-
-
-class CreatePointcache(plugin.BlenderCreator):
- """Point cache of animated geometry."""
-
- identifier = "io.openpype.creators.blender.pointcache"
- label = "Point Cache"
- product_type = "pointcache"
- icon = "gears"
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- # Run parent create method
- collection = super().create(
- product_name, instance_data, pre_create_data
- )
-
- if pre_create_data.get("use_selection"):
- objects = lib.get_selection()
- for obj in objects:
- collection.objects.link(obj)
- if obj.type == 'EMPTY':
- objects.extend(obj.children)
-
- return collection
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_render.py b/server_addon/blender/client/ayon_blender/plugins/create/create_render.py
deleted file mode 100644
index 6bbedb957f..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_render.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Create render."""
-import bpy
-
-from ayon_core.lib import version_up
-from ayon_blender.api import plugin
-from ayon_blender.api.render_lib import prepare_rendering
-from ayon_blender.api.workio import save_file
-
-
-class CreateRenderlayer(plugin.BlenderCreator):
- """Render output of the scene."""
-
- identifier = "io.openpype.creators.blender.render"
- label = "Render"
- product_type = "render"
- icon = "eye"
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- try:
- # Run parent create method
- collection = super().create(
- product_name, instance_data, pre_create_data
- )
-
- prepare_rendering(collection)
- except Exception:
- # Remove the instance if there was an error
- bpy.data.collections.remove(collection)
- raise
-
- # TODO: this is undesirable, but it's the only way to be sure that
- # the file is saved before the render starts.
- # Blender, by design, doesn't set the file as dirty if modifications
- # happen by script. So, when creating the instance and setting the
- # render settings, the file is not marked as dirty. This means that
- # there is a risk of sending a file without the right settings to
- # Deadline. Even the validator that checks that the file is saved
- # will detect the file as saved, even if it isn't. The only solution
- # for now is to force the file to be saved.
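- # 'version_up' computes the next available version of the current
- # workfile path, so the explicit save below persists the freshly
- # imprinted render settings into a new workfile version.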
- filepath = version_up(bpy.data.filepath)
- save_file(filepath, copy=False)
-
- return collection
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_review.py b/server_addon/blender/client/ayon_blender/plugins/create/create_review.py
deleted file mode 100644
index dbef9e371f..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_review.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Create review."""
-
-from ayon_blender.api import plugin, lib
-
-
-class CreateReview(plugin.BlenderCreator):
- """Review output of the scene."""
-
- identifier = "io.openpype.creators.blender.review"
- label = "Review"
- product_type = "review"
- icon = "video-camera"
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- # Run parent create method
- collection = super().create(
- product_name, instance_data, pre_create_data
- )
-
- if pre_create_data.get("use_selection"):
- selected = lib.get_selection()
- for obj in selected:
- collection.objects.link(obj)
-
- return collection
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_rig.py b/server_addon/blender/client/ayon_blender/plugins/create/create_rig.py
deleted file mode 100644
index aad24bda69..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_rig.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Create a rig asset."""
-
-import bpy
-
-from ayon_blender.api import plugin, lib
-
-
-class CreateRig(plugin.BlenderCreator):
- """Artist-friendly rig with controls to direct motion."""
-
- identifier = "io.openpype.creators.blender.rig"
- label = "Rig"
- product_type = "rig"
- icon = "wheelchair"
-
- create_as_asset_group = True
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- asset_group = super().create(product_name,
- instance_data,
- pre_create_data)
-
- # Add selected objects to instance
- if pre_create_data.get("use_selection"):
- bpy.context.view_layer.objects.active = asset_group
- for obj in lib.get_selection():
- obj.parent = asset_group
-
- return asset_group
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_usd.py b/server_addon/blender/client/ayon_blender/plugins/create/create_usd.py
deleted file mode 100644
index d7770b15f7..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_usd.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""Create a USD Export."""
-
-from ayon_blender.api import plugin, lib
-
-
-class CreateUSD(plugin.BlenderCreator):
- """Create USD Export"""
-
- identifier = "io.openpype.creators.blender.usd"
- name = "usdMain"
- label = "USD"
- product_type = "usd"
- icon = "gears"
-
- def create(
- self, product_name: str, instance_data: dict, pre_create_data: dict
- ):
- # Run parent create method
- collection = super().create(
- product_name, instance_data, pre_create_data
- )
-
- if pre_create_data.get("use_selection"):
- objects = lib.get_selection()
- for obj in objects:
- collection.objects.link(obj)
- if obj.type == 'EMPTY':
- objects.extend(obj.children)
-
- return collection
diff --git a/server_addon/blender/client/ayon_blender/plugins/create/create_workfile.py b/server_addon/blender/client/ayon_blender/plugins/create/create_workfile.py
deleted file mode 100644
index 03cfc322a9..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/create/create_workfile.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import bpy
-import ayon_api
-
-from ayon_core.pipeline import CreatedInstance, AutoCreator
-from ayon_blender.api.plugin import 
BlenderCreator -from ayon_blender.api.pipeline import ( - AVALON_PROPERTY, - AVALON_CONTAINERS -) - - -class CreateWorkfile(BlenderCreator, AutoCreator): - """Workfile auto-creator. - - The workfile instance stores its data on the `AVALON_CONTAINERS` collection - as custom attributes, because unlike other instances it doesn't have an - instance node of its own. - - """ - identifier = "io.openpype.creators.blender.workfile" - label = "Workfile" - product_type = "workfile" - icon = "fa5.file" - - def create(self): - """Create workfile instances.""" - workfile_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), - None, - ) - - project_name = self.project_name - folder_path = self.create_context.get_current_folder_path() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - existing_folder_path = None - if workfile_instance is not None: - existing_folder_path = workfile_instance.get("folderPath") - - if not workfile_instance: - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - task_name, - host_name, - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": task_name, - } - data.update( - self.get_dynamic_data( - project_name, - folder_entity, - task_entity, - task_name, - host_name, - workfile_instance, - ) - ) - self.log.info("Auto-creating workfile instance...") - workfile_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - self._add_instance_to_context(workfile_instance) - - elif ( - existing_folder_path != folder_path - or workfile_instance["task"] != task_name - ): - # Update instance context if it's different - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name, - ) - - workfile_instance["folderPath"] = folder_path - workfile_instance["task"] = task_name - workfile_instance["productName"] = product_name - - instance_node = bpy.data.collections.get(AVALON_CONTAINERS) - if not instance_node: - instance_node = bpy.data.collections.new(name=AVALON_CONTAINERS) - workfile_instance.transient_data["instance_node"] = instance_node - - def collect_instances(self): - - instance_node = bpy.data.collections.get(AVALON_CONTAINERS) - if not instance_node: - return - - property = instance_node.get(AVALON_PROPERTY) - if not property: - return - - # Create instance object from existing data - instance = CreatedInstance.from_existing( - instance_data=property.to_dict(), - creator=self - ) - instance.transient_data["instance_node"] = instance_node - - # Add instance to create context - self._add_instance_to_context(instance) - - def remove_instances(self, instances): - for instance in instances: - node = instance.transient_data["instance_node"] - del node[AVALON_PROPERTY] - - self._remove_instance_from_context(instance) diff --git a/server_addon/blender/client/ayon_blender/plugins/load/import_workfile.py b/server_addon/blender/client/ayon_blender/plugins/load/import_workfile.py deleted file mode 100644 index 16cba6913d..0000000000 --- 
a/server_addon/blender/client/ayon_blender/plugins/load/import_workfile.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import bpy
-
-from ayon_blender.api import plugin
-
-
-def append_workfile(context, fname, do_import):
- folder_name = context["folder"]["name"]
- product_name = context["product"]["name"]
-
- group_name = plugin.prepare_scene_name(folder_name, product_name)
-
- # We need to preserve the original names of the scenes, otherwise,
- # if there are duplicate names in the current workfile, the imported
- # scenes will be renamed by Blender to avoid conflicts.
- original_scene_names = []
-
- with bpy.data.libraries.load(fname) as (data_from, data_to):
- for attr in dir(data_to):
- if attr == "scenes":
- for scene in data_from.scenes:
- original_scene_names.append(scene)
- setattr(data_to, attr, getattr(data_from, attr))
-
- current_scene = bpy.context.scene
-
- for scene, s_name in zip(data_to.scenes, original_scene_names):
- scene.name = f"{group_name}_{s_name}"
- if do_import:
- collection = bpy.data.collections.new(f"{group_name}_{s_name}")
- for obj in scene.objects:
- collection.objects.link(obj)
- current_scene.collection.children.link(collection)
- for coll in scene.collection.children:
- collection.children.link(coll)
-
-
-class AppendBlendLoader(plugin.BlenderLoader):
- """Append workfile in Blender (unmanaged)
-
- Warning:
- The loaded content will be unmanaged and is *not* visible in the
- scene inventory. It's purely intended to merge content into your
- scene, so you can also use it as a new base.
- """
-
- representations = {"blend"}
- product_types = {"workfile"}
-
- label = "Append Workfile"
- order = 9
- icon = "arrow-circle-down"
- color = "#775555"
-
- def load(self, context, name=None, namespace=None, data=None):
- path = self.filepath_from_context(context)
- append_workfile(context, path, False)
-
- # We do not containerise imported content, it remains unmanaged
- return
-
-
-class ImportBlendLoader(plugin.BlenderLoader):
- """Import workfile in the current Blender scene (unmanaged)
-
- Warning:
- The loaded content will be unmanaged and is *not* visible in the
- scene inventory. It's purely intended to merge content into your
- scene, so you can also use it as a new base.
- """
-
- representations = {"blend"}
- product_types = {"workfile"}
-
- label = "Import Workfile"
- order = 9
- icon = "arrow-circle-down"
- color = "#775555"
-
- def load(self, context, name=None, namespace=None, data=None):
- path = self.filepath_from_context(context)
- append_workfile(context, path, True)
-
- # We do not containerise imported content, it remains unmanaged
- return
diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_action.py b/server_addon/blender/client/ayon_blender/plugins/load/load_action.py
deleted file mode 100644
index ddfaa94044..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/load/load_action.py
+++ /dev/null
@@ -1,293 +0,0 @@
-"""Load an action in Blender."""
-
-import logging
-from pathlib import Path
-from pprint import pformat
-from typing import Dict, List, Optional
-
-import bpy
-from ayon_core.pipeline import get_representation_path
-from ayon_blender.api import plugin
-from ayon_blender.api.pipeline import (
- containerise_existing,
- AVALON_PROPERTY,
-)
-
-logger = logging.getLogger("ayon").getChild("blender").getChild("load_action")
-
-
-class BlendActionLoader(plugin.BlenderLoader):
- """Load action from a .blend file.
-
- Warning:
- Loading the same asset more than once is not properly supported at the
- moment.
- """ - - product_types = {"action"} - representations = {"blend"} - - label = "Link Action" - icon = "code-fork" - color = "orange" - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - - libpath = self.filepath_from_context(context) - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - lib_container = plugin.prepare_scene_name(folder_name, product_name) - container_name = plugin.prepare_scene_name( - folder_name, product_name, namespace - ) - - container = bpy.data.collections.new(lib_container) - container.name = container_name - containerise_existing( - container, - name, - namespace, - context, - self.__class__.__name__, - ) - - container_metadata = container.get(AVALON_PROPERTY) - - container_metadata["libpath"] = libpath - container_metadata["lib_container"] = lib_container - - relative = bpy.context.preferences.filepaths.use_relative_paths - with bpy.data.libraries.load( - libpath, link=True, relative=relative - ) as (_, data_to): - data_to.collections = [lib_container] - - collection = bpy.context.scene.collection - - collection.children.link(bpy.data.collections[lib_container]) - - animation_container = collection.children[lib_container].make_local() - - objects_list = [] - - # Link meshes first, then armatures. - # The armature is unparented for all the non-local meshes, - # when it is made local. - for obj in animation_container.objects: - - obj = obj.make_local() - - anim_data = obj.animation_data - - if anim_data is not None and anim_data.action is not None: - - anim_data.action.make_local() - - if not obj.get(AVALON_PROPERTY): - - obj[AVALON_PROPERTY] = dict() - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": container_name}) - - objects_list.append(obj) - - animation_container.pop(AVALON_PROPERTY) - - # Save the list of objects in the metadata container - container_metadata["objects"] = objects_list - - bpy.ops.object.select_all(action='DESELECT') - - nodes = list(container.objects) - nodes.append(container) - self[:] = nodes - return nodes - - def update(self, container: Dict, context: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! - """ - repre_entity = context["representation"] - collection = bpy.data.collections.get( - container["objectName"] - ) - - libpath = Path(get_representation_path(repre_entity)) - extension = libpath.suffix.lower() - - logger.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(repre_entity, indent=2), - ) - - assert collection, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert not (collection.children), ( - "Nested collections are not supported." 
- )
- assert libpath, (
- f"No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
- assert extension in plugin.VALID_EXTENSIONS, (
- f"Unsupported file: {libpath}"
- )
-
- collection_metadata = collection.get(AVALON_PROPERTY)
-
- collection_libpath = collection_metadata["libpath"]
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- logger.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
- normalized_libpath,
- )
- if normalized_collection_libpath == normalized_libpath:
- logger.info("Library already loaded, not updating...")
- return
-
- strips = []
-
- for obj in list(collection_metadata["objects"]):
- # Get all the strips that use the action
- arm_objs = [
- arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
-
- for armature_obj in arm_objs:
- if armature_obj.animation_data is not None:
- for track in armature_obj.animation_data.nla_tracks:
- for strip in track.strips:
- if strip.action == obj.animation_data.action:
- strips.append(strip)
-
- bpy.data.actions.remove(obj.animation_data.action)
- bpy.data.objects.remove(obj)
-
- lib_container = collection_metadata["lib_container"]
-
- bpy.data.collections.remove(bpy.data.collections[lib_container])
-
- relative = bpy.context.preferences.filepaths.use_relative_paths
- with bpy.data.libraries.load(
- str(libpath), link=True, relative=relative
- ) as (_, data_to):
- data_to.collections = [lib_container]
-
- scene = bpy.context.scene
-
- scene.collection.children.link(bpy.data.collections[lib_container])
-
- anim_container = scene.collection.children[lib_container].make_local()
-
- objects_list = []
-
- # Link meshes first, then armatures.
- # The armature is unparented for all the non-local meshes,
- # when it is made local.
- for obj in anim_container.objects:
-
- obj = obj.make_local()
-
- anim_data = obj.animation_data
-
- if anim_data is not None and anim_data.action is not None:
-
- anim_data.action.make_local()
-
- for strip in strips:
-
- strip.action = anim_data.action
- strip.action_frame_end = anim_data.action.frame_range[1]
-
- if not obj.get(AVALON_PROPERTY):
-
- obj[AVALON_PROPERTY] = dict()
-
- avalon_info = obj[AVALON_PROPERTY]
- avalon_info.update({"container_name": collection.name})
-
- objects_list.append(obj)
-
- anim_container.pop(AVALON_PROPERTY)
-
- # Save the list of objects in the metadata container
- collection_metadata["objects"] = objects_list
- collection_metadata["libpath"] = str(libpath)
- collection_metadata["representation"] = repre_entity["id"]
-
- bpy.ops.object.select_all(action='DESELECT')
-
- def remove(self, container: Dict) -> bool:
- """Remove an existing container from a Blender scene.
-
- Arguments:
- container (openpype:container-1.0): Container to remove,
- from `host.ls()`.
-
- Returns:
- bool: Whether the container was deleted.
-
- Warning:
- No nested collections are supported at the moment!
- """
-
- collection = bpy.data.collections.get(
- container["objectName"]
- )
- if not collection:
- return False
- assert not (collection.children), (
- "Nested collections are not supported."
-
- collection_metadata = collection.get(AVALON_PROPERTY)
- objects = collection_metadata["objects"]
- lib_container = collection_metadata["lib_container"]
-
- for obj in list(objects):
- # Get all the strips that use the action
- arm_objs = [
- arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
-
- for armature_obj in arm_objs:
- if armature_obj.animation_data is not None:
- for track in armature_obj.animation_data.nla_tracks:
- for strip in track.strips:
- if strip.action == obj.animation_data.action:
- track.strips.remove(strip)
-
- bpy.data.actions.remove(obj.animation_data.action)
- bpy.data.objects.remove(obj)
-
- bpy.data.collections.remove(bpy.data.collections[lib_container])
- bpy.data.collections.remove(collection)
-
- return True
diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_animation.py b/server_addon/blender/client/ayon_blender/plugins/load/load_animation.py
deleted file mode 100644
index 241b76b600..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/load/load_animation.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""Load an animation in Blender."""
-
-from typing import Dict, List, Optional
-
-import bpy
-
-from ayon_blender.api import plugin
-from ayon_blender.api.pipeline import AVALON_PROPERTY
-
-
-class BlendAnimationLoader(plugin.BlenderLoader):
- """Load animations from a .blend file.
-
- Warning:
- Loading the same asset more than once is not properly supported at the
- moment.
- """
-
- product_types = {"animation"}
- representations = {"blend"}
-
- label = "Link Animation"
- icon = "code-fork"
- color = "orange"
-
- def process_asset(
- self, context: dict, name: str, namespace: Optional[str] = None,
- options: Optional[Dict] = None
- ) -> Optional[List]:
- """
- Arguments:
- name: Use pre-defined name
- namespace: Use pre-defined namespace
- context: Full parenthood of representation to load
- options: Additional settings dictionary
- """
- libpath = self.filepath_from_context(context)
-
- with bpy.data.libraries.load(
- libpath, link=True, relative=False
- ) as (data_from, data_to):
- data_to.objects = data_from.objects
- data_to.actions = data_from.actions
-
- container = data_to.objects[0]
-
- assert container, "No asset group found"
-
- target_namespace = container.get(AVALON_PROPERTY).get('namespace')
-
- action = data_to.actions[0].make_local().copy()
-
- for obj in bpy.data.objects:
- if obj.get(AVALON_PROPERTY) and obj.get(AVALON_PROPERTY).get(
- 'namespace') == target_namespace:
- if obj.children:
- if not obj.children[0].animation_data:
- obj.children[0].animation_data_create()
- obj.children[0].animation_data.action = action
- break
-
- bpy.data.objects.remove(container)
-
- filename = bpy.path.basename(libpath)
- # Blender has a limit of 63 characters for any data name.
- # If the filename is longer, it will be truncated.
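
# [Editor's note] A standalone sketch of the name-truncation lookup used
# above and again in the other loaders below; illustrative only, not part
# of the patch. Blender caps every datablock name at 63 characters, so a
# library created from a long file name can only be found under its
# truncated name.
import bpy

def find_library_datablock(filepath):
    # bpy.data names are clipped to Blender's 63-character limit
    name = bpy.path.basename(filepath)
    if len(name) > 63:
        name = name[:63]
    return bpy.data.libraries.get(name)
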
- if len(filename) > 63:
- filename = filename[:63]
- library = bpy.data.libraries.get(filename)
- bpy.data.libraries.remove(library)
diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_audio.py b/server_addon/blender/client/ayon_blender/plugins/load/load_audio.py
deleted file mode 100644
index b8682e7c13..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/load/load_audio.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""Load audio in Blender."""
-
-from pathlib import Path
-from pprint import pformat
-from typing import Dict, List, Optional
-
-import bpy
-
-from ayon_core.pipeline import (
- get_representation_path,
- AVALON_CONTAINER_ID,
-)
-from ayon_blender.api import plugin
-from ayon_blender.api.pipeline import (
- AVALON_CONTAINERS,
- AVALON_PROPERTY,
-)
-
-
-class AudioLoader(plugin.BlenderLoader):
- """Load audio in Blender."""
-
- product_types = {"audio"}
- representations = {"wav"}
-
- label = "Load Audio"
- icon = "volume-up"
- color = "orange"
-
- def process_asset(
- self, context: dict, name: str, namespace: Optional[str] = None,
- options: Optional[Dict] = None
- ) -> Optional[List]:
- """
- Arguments:
- name: Use pre-defined name
- namespace: Use pre-defined namespace
- context: Full parenthood of representation to load
- options: Additional settings dictionary
- """
- libpath = self.filepath_from_context(context)
- folder_name = context["folder"]["name"]
- product_name = context["product"]["name"]
-
- asset_name = plugin.prepare_scene_name(folder_name, product_name)
- unique_number = plugin.get_unique_number(folder_name, product_name)
- group_name = plugin.prepare_scene_name(
- folder_name, product_name, unique_number
- )
- namespace = namespace or f"{folder_name}_{unique_number}"
-
- avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
- if not avalon_container:
- avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
- bpy.context.scene.collection.children.link(avalon_container)
-
- asset_group = bpy.data.objects.new(group_name, object_data=None)
- avalon_container.objects.link(asset_group)
-
- # Blender needs the Sequence Editor in the current window, to be able
- # to load the audio. We take one of the areas in the window, save its
- # type, and switch to the Sequence Editor. After loading the audio,
- # we switch back to the previous area.
- window_manager = bpy.context.window_manager
- old_type = window_manager.windows[-1].screen.areas[0].type
- window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR"
-
- # We override the context to load the audio in the sequence editor.
- oc = bpy.context.copy()
- oc["area"] = window_manager.windows[-1].screen.areas[0]
-
- with bpy.context.temp_override(**oc):
- bpy.ops.sequencer.sound_strip_add(filepath=libpath, frame_start=1)
-
- window_manager.windows[-1].screen.areas[0].type = old_type
-
- p = Path(libpath)
- audio = p.name
-
- asset_group[AVALON_PROPERTY] = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "name": name,
- "namespace": namespace or '',
- "loader": str(self.__class__.__name__),
- "representation": context["representation"]["id"],
- "libpath": libpath,
- "asset_name": asset_name,
- "parent": context["representation"]["versionId"],
- "productType": context["product"]["productType"],
- "objectName": group_name,
- "audio": audio
- }
-
- objects = []
- self[:] = objects
- return objects
-
- def exec_update(self, container: Dict, context: Dict):
- """Update an audio strip in the sequence editor.
-
- Arguments:
- container (openpype:container-1.0): Container to update,
- from `host.ls()`.
- context (dict): Full parenthood of the representation to
- update, including the new representation entity.
- """
- repre_entity = context["representation"]
- object_name = container["objectName"]
- asset_group = bpy.data.objects.get(object_name)
- libpath = Path(get_representation_path(repre_entity))
-
- self.log.info(
- "Container: %s\nRepresentation: %s",
- pformat(container, indent=2),
- pformat(repre_entity, indent=2),
- )
-
- assert asset_group, (
- f"The asset is not loaded: {container['objectName']}"
- )
- assert libpath, (
- f"No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
-
- metadata = asset_group.get(AVALON_PROPERTY)
- group_libpath = metadata["libpath"]
-
- normalized_group_libpath = (
- str(Path(bpy.path.abspath(group_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- self.log.debug(
- "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_group_libpath,
- normalized_libpath,
- )
- if normalized_group_libpath == normalized_libpath:
- self.log.info("Library already loaded, not updating...")
- return
-
- old_audio = container["audio"]
- p = Path(libpath)
- new_audio = p.name
-
- # Blender needs the Sequence Editor in the current window, to be able
- # to update the audio. We take one of the areas in the window, save its
- # type, and switch to the Sequence Editor. After updating the audio,
- # we switch back to the previous area.
- window_manager = bpy.context.window_manager
- old_type = window_manager.windows[-1].screen.areas[0].type
- window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR"
-
- # We override the context to load the audio in the sequence editor.
- oc = bpy.context.copy()
- oc["area"] = window_manager.windows[-1].screen.areas[0]
-
- with bpy.context.temp_override(**oc):
- # We deselect all sequencer strips, and then select the one we
- # need to remove.
- bpy.ops.sequencer.select_all(action='DESELECT')
- scene = bpy.context.scene
- scene.sequence_editor.sequences_all[old_audio].select = True
-
- bpy.ops.sequencer.delete()
- bpy.data.sounds.remove(bpy.data.sounds[old_audio])
-
- bpy.ops.sequencer.sound_strip_add(
- filepath=str(libpath), frame_start=1)
-
- window_manager.windows[-1].screen.areas[0].type = old_type
-
- metadata["libpath"] = str(libpath)
- metadata["representation"] = repre_entity["id"]
- metadata["parent"] = repre_entity["versionId"]
- metadata["audio"] = new_audio
-
- def exec_remove(self, container: Dict) -> bool:
- """Remove an audio strip from the sequence editor and the container.
-
- Arguments:
- container (openpype:container-1.0): Container to remove,
- from `host.ls()`.
-
- Returns:
- bool: Whether the container was deleted.
- """
- object_name = container["objectName"]
- asset_group = bpy.data.objects.get(object_name)
-
- if not asset_group:
- return False
-
- audio = container["audio"]
-
- # Blender needs the Sequence Editor in the current window, to be able
- # to remove the audio. We take one of the areas in the window, save its
- # type, and switch to the Sequence Editor. After removing the audio,
- # we switch back to the previous area.
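
# [Editor's note] The area-retyping pattern above repeats three times in
# this file; a minimal reusable sketch, illustrative only and not part of
# the patch (it relies on bpy.context.temp_override, as the code above
# already does):
import bpy

def run_in_sequence_editor(operator, **kwargs):
    # Temporarily turn an area into a Sequence Editor, run the operator
    # with an overridden context, then restore the original area type.
    area = bpy.context.window_manager.windows[-1].screen.areas[0]
    old_type = area.type
    area.type = "SEQUENCE_EDITOR"
    try:
        with bpy.context.temp_override(area=area):
            operator(**kwargs)
    finally:
        area.type = old_type

# e.g. run_in_sequence_editor(
#     bpy.ops.sequencer.sound_strip_add,
#     filepath="/path/to/audio.wav", frame_start=1)
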
- window_manager = bpy.context.window_manager - old_type = window_manager.windows[-1].screen.areas[0].type - window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR" - - # We override the context to load the audio in the sequence editor. - oc = bpy.context.copy() - oc["area"] = window_manager.windows[-1].screen.areas[0] - - with bpy.context.temp_override(**oc): - # We deselect all sequencer strips, and then select the one we - # need to remove. - bpy.ops.sequencer.select_all(action='DESELECT') - scene = bpy.context.scene - scene.sequence_editor.sequences_all[audio].select = True - bpy.ops.sequencer.delete() - - window_manager.windows[-1].screen.areas[0].type = old_type - - bpy.data.sounds.remove(bpy.data.sounds[audio]) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_blend.py b/server_addon/blender/client/ayon_blender/plugins/load/load_blend.py deleted file mode 100644 index c9f3ec0c71..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/load/load_blend.py +++ /dev/null @@ -1,286 +0,0 @@ -from typing import Dict, List, Optional -from pathlib import Path - -import bpy - -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, - registered_host -) -from ayon_core.pipeline.create import CreateContext -from ayon_blender.api import plugin -from ayon_blender.api.lib import imprint -from ayon_blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class BlendLoader(plugin.BlenderLoader): - """Load assets from a .blend file.""" - - product_types = {"model", "rig", "layout", "camera"} - representations = {"blend"} - - label = "Append Blend" - icon = "code-fork" - color = "orange" - - @staticmethod - def _get_asset_container(objects): - empties = [obj for obj in objects if obj.type == 'EMPTY'] - - for empty in empties: - if empty.get(AVALON_PROPERTY) and empty.parent is None: - return empty - - return None - - @staticmethod - def get_all_container_parents(asset_group): - parent_containers = [] - parent = asset_group.parent - while parent: - if parent.get(AVALON_PROPERTY): - parent_containers.append(parent) - parent = parent.parent - - return parent_containers - - def _post_process_layout(self, container, asset, representation): - rigs = [ - obj for obj in container.children_recursive - if ( - obj.type == 'EMPTY' and - obj.get(AVALON_PROPERTY) and - obj.get(AVALON_PROPERTY).get('family') == 'rig' - ) - ] - if not rigs: - return - - # Create animation instances for each rig - creator_identifier = "io.openpype.creators.blender.animation" - host = registered_host() - create_context = CreateContext(host) - - for rig in rigs: - create_context.create( - creator_identifier=creator_identifier, - variant=rig.name.split(':')[-1], - pre_create_data={ - "use_selection": False, - "asset_group": rig - } - ) - - def _process_data(self, libpath, group_name): - # Append all the data from the .blend file - with bpy.data.libraries.load( - libpath, link=False, relative=False - ) as (data_from, data_to): - for attr in dir(data_to): - setattr(data_to, attr, getattr(data_from, attr)) - - members = [] - - # Rename the object to add the asset name - for attr in dir(data_to): - for data in getattr(data_to, attr): - data.name = f"{group_name}:{data.name}" - members.append(data) - - container = self._get_asset_container(data_to.objects) - assert container, "No asset group found" - - container.name = group_name - container.empty_display_type = 'SINGLE_ARROW' - - # Link the 
container to the scene
- bpy.context.scene.collection.objects.link(container)
-
- # Link all the container children to the collection
- for obj in container.children_recursive:
- bpy.context.scene.collection.objects.link(obj)
-
- # Remove the library from the blend file
- filepath = bpy.path.basename(libpath)
- # Blender has a limit of 63 characters for any data name.
- # If the filepath is longer, it will be truncated.
- if len(filepath) > 63:
- filepath = filepath[:63]
- library = bpy.data.libraries.get(filepath)
- bpy.data.libraries.remove(library)
-
- return container, members
-
- def process_asset(
- self, context: dict, name: str, namespace: Optional[str] = None,
- options: Optional[Dict] = None
- ) -> Optional[List]:
- """
- Arguments:
- name: Use pre-defined name
- namespace: Use pre-defined namespace
- context: Full parenthood of representation to load
- options: Additional settings dictionary
- """
- libpath = self.filepath_from_context(context)
- folder_name = context["folder"]["name"]
- product_name = context["product"]["name"]
-
- try:
- product_type = context["product"]["productType"]
- except KeyError:
- product_type = "model"
-
- representation = context["representation"]["id"]
-
- asset_name = plugin.prepare_scene_name(folder_name, product_name)
- unique_number = plugin.get_unique_number(folder_name, product_name)
- group_name = plugin.prepare_scene_name(
- folder_name, product_name, unique_number
- )
- namespace = namespace or f"{folder_name}_{unique_number}"
-
- avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
- if not avalon_container:
- avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
- bpy.context.scene.collection.children.link(avalon_container)
-
- container, members = self._process_data(libpath, group_name)
-
- if product_type == "layout":
- self._post_process_layout(container, folder_name, representation)
-
- avalon_container.objects.link(container)
-
- data = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "name": name,
- "namespace": namespace or '',
- "loader": str(self.__class__.__name__),
- "representation": context["representation"]["id"],
- "libpath": libpath,
- "asset_name": asset_name,
- "parent": context["representation"]["versionId"],
- "productType": context["product"]["productType"],
- "objectName": group_name,
- "members": members,
- }
-
- container[AVALON_PROPERTY] = data
-
- objects = [
- obj for obj in bpy.data.objects
- if obj.name.startswith(f"{group_name}:")
- ]
-
- self[:] = objects
- return objects
-
- def exec_update(self, container: Dict, context: Dict):
- """
- Update the loaded asset.
- """
- repre_entity = context["representation"]
- group_name = container["objectName"]
- asset_group = bpy.data.objects.get(group_name)
- libpath = Path(get_representation_path(repre_entity)).as_posix()
-
- assert asset_group, (
- f"The asset is not loaded: {container['objectName']}"
- )
-
- transform = asset_group.matrix_basis.copy()
- old_data = dict(asset_group.get(AVALON_PROPERTY))
- old_members = old_data.get("members", [])
- parent = asset_group.parent
-
- actions = {}
- objects_with_anim = [
- obj for obj in asset_group.children_recursive
- if obj.animation_data]
- for obj in objects_with_anim:
- # Check if the object has an action and, if so, add it to a dict
- # so we can restore it later. Save and restore the action only
- # if it wasn't originally loaded from the current asset.
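
# [Editor's note] A condensed sketch of the save/restore pattern used by
# exec_update here; illustrative only, not part of the patch. Actions are
# keyed by object name before the old content is removed, then reassigned
# to the re-imported objects that carry the same names.
import bpy

def save_foreign_actions(asset_group, old_members):
    # Keep only actions that did not come from the asset being replaced.
    return {
        obj.name: obj.animation_data.action
        for obj in asset_group.children_recursive
        if obj.animation_data
        and obj.animation_data.action
        and obj.animation_data.action not in old_members
    }

def restore_actions(asset_group, actions):
    for obj in asset_group.children_recursive:
        action = actions.get(obj.name)
        if action is not None:
            if not obj.animation_data:
                obj.animation_data_create()
            obj.animation_data.action = action
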
- if obj.animation_data.action not in old_members: - actions[obj.name] = obj.animation_data.action - - self.exec_remove(container) - - asset_group, members = self._process_data(libpath, group_name) - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - avalon_container.objects.link(asset_group) - - asset_group.matrix_basis = transform - asset_group.parent = parent - - # Restore the actions - for obj in asset_group.children_recursive: - if obj.name in actions: - if not obj.animation_data: - obj.animation_data_create() - obj.animation_data.action = actions[obj.name] - - # Restore the old data, but reset members, as they don't exist anymore - # This avoids a crash, because the memory addresses of those members - # are not valid anymore - old_data["members"] = [] - asset_group[AVALON_PROPERTY] = old_data - - new_data = { - "libpath": libpath, - "representation": repre_entity["id"], - "parent": repre_entity["versionId"], - "members": members, - } - - imprint(asset_group, new_data) - - # We need to update all the parent container members - parent_containers = self.get_all_container_parents(asset_group) - - for parent_container in parent_containers: - parent_members = parent_container[AVALON_PROPERTY]["members"] - parent_container[AVALON_PROPERTY]["members"] = ( - parent_members + members) - - def exec_remove(self, container: Dict) -> bool: - """ - Remove an existing container from a Blender scene. - """ - group_name = container["objectName"] - asset_group = bpy.data.objects.get(group_name) - - attrs = [ - attr for attr in dir(bpy.data) - if isinstance( - getattr(bpy.data, attr), - bpy.types.bpy_prop_collection - ) - ] - - members = asset_group.get(AVALON_PROPERTY).get("members", []) - - # We need to update all the parent container members - parent_containers = self.get_all_container_parents(asset_group) - - for parent in parent_containers: - parent.get(AVALON_PROPERTY)["members"] = list(filter( - lambda i: i not in members, - parent.get(AVALON_PROPERTY).get("members", []))) - - for attr in attrs: - for data in getattr(bpy.data, attr): - if data in members: - # Skip the asset group - if data == asset_group: - continue - getattr(bpy.data, attr).remove(data) - - bpy.data.objects.remove(asset_group) diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_blendscene.py b/server_addon/blender/client/ayon_blender/plugins/load/load_blendscene.py deleted file mode 100644 index 590ab0079e..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/load/load_blendscene.py +++ /dev/null @@ -1,235 +0,0 @@ -from typing import Dict, List, Optional -from pathlib import Path - -import bpy - -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from ayon_blender.api import plugin -from ayon_blender.api.lib import imprint -from ayon_blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class BlendSceneLoader(plugin.BlenderLoader): - """Load assets from a .blend file.""" - - product_types = {"blendScene"} - representations = {"blend"} - - label = "Append Blend" - icon = "code-fork" - color = "orange" - - @staticmethod - def _get_asset_container(collections): - for coll in collections: - parents = [c for c in collections if c.user_of_id(coll)] - if coll.get(AVALON_PROPERTY) and not parents: - return coll - - return None - - def _process_data(self, libpath, group_name, product_type): - # Append all the data from the .blend file - with bpy.data.libraries.load( - libpath, link=False, relative=False - ) as (data_from, 
data_to):
- for attr in dir(data_to):
- setattr(data_to, attr, getattr(data_from, attr))
-
- members = []
-
- # Rename the object to add the asset name
- for attr in dir(data_to):
- for data in getattr(data_to, attr):
- data.name = f"{group_name}:{data.name}"
- members.append(data)
-
- container = self._get_asset_container(
- data_to.collections)
- assert container, "No asset group found"
-
- container.name = group_name
-
- # Link the group to the scene
- bpy.context.scene.collection.children.link(container)
-
- # Remove the library from the blend file
- filepath = bpy.path.basename(libpath)
- # Blender has a limit of 63 characters for any data name.
- # If the filepath is longer, it will be truncated.
- if len(filepath) > 63:
- filepath = filepath[:63]
- library = bpy.data.libraries.get(filepath)
- bpy.data.libraries.remove(library)
-
- return container, members
-
- def process_asset(
- self, context: dict, name: str, namespace: Optional[str] = None,
- options: Optional[Dict] = None
- ) -> Optional[List]:
- """
- Arguments:
- name: Use pre-defined name
- namespace: Use pre-defined namespace
- context: Full parenthood of representation to load
- options: Additional settings dictionary
- """
- libpath = self.filepath_from_context(context)
- folder_name = context["folder"]["name"]
- product_name = context["product"]["name"]
-
- try:
- product_type = context["product"]["productType"]
- except KeyError:
- product_type = "model"
-
- asset_name = plugin.prepare_scene_name(folder_name, product_name)
- unique_number = plugin.get_unique_number(folder_name, product_name)
- group_name = plugin.prepare_scene_name(
- folder_name, product_name, unique_number
- )
- namespace = namespace or f"{folder_name}_{unique_number}"
-
- avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
- if not avalon_container:
- avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
- bpy.context.scene.collection.children.link(avalon_container)
-
- container, members = self._process_data(
- libpath, group_name, product_type
- )
-
- avalon_container.children.link(container)
-
- data = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "name": name,
- "namespace": namespace or '',
- "loader": str(self.__class__.__name__),
- "representation": context["representation"]["id"],
- "libpath": libpath,
- "asset_name": asset_name,
- "parent": context["representation"]["versionId"],
- "productType": context["product"]["productType"],
- "objectName": group_name,
- "members": members,
- }
-
- container[AVALON_PROPERTY] = data
-
- objects = [
- obj for obj in bpy.data.objects
- if obj.name.startswith(f"{group_name}:")
- ]
-
- self[:] = objects
- return objects
-
- def exec_update(self, container: Dict, context: Dict):
- """
- Update the loaded asset.
- """
- repre_entity = context["representation"]
- group_name = container["objectName"]
- asset_group = bpy.data.collections.get(group_name)
- libpath = Path(get_representation_path(repre_entity)).as_posix()
-
- assert asset_group, (
- f"The asset is not loaded: {container['objectName']}"
- )
-
- # Get the parents of the members of the asset group, so we can
- # re-link them after the update.
- # Also gets the transform for each object to reapply after the update.
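
# [Editor's note] Illustrative sketch, not part of the patch, of how a
# member's parent collections can be discovered: objects report theirs
# directly via users_collection, while collections are found by scanning
# bpy.data.collections with user_of_id, exactly as the code below does.
import bpy

def direct_parents(member):
    if isinstance(member, bpy.types.Object):
        return set(member.users_collection)
    if isinstance(member, bpy.types.Collection):
        return {
            coll for coll in bpy.data.collections
            if coll.user_of_id(member)
        }
    return set()
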
- collection_parents = {}
- member_transforms = {}
- members = asset_group.get(AVALON_PROPERTY).get("members", [])
- loaded_collections = {c for c in bpy.data.collections if c in members}
- loaded_collections.add(bpy.data.collections.get(AVALON_CONTAINERS))
- for member in members:
- if isinstance(member, bpy.types.Object):
- member_parents = set(member.users_collection)
- member_transforms[member.name] = member.matrix_basis.copy()
- elif isinstance(member, bpy.types.Collection):
- member_parents = {
- c for c in bpy.data.collections if c.user_of_id(member)}
- else:
- continue
-
- member_parents = member_parents.difference(loaded_collections)
- if member_parents:
- collection_parents[member.name] = list(member_parents)
-
- old_data = dict(asset_group.get(AVALON_PROPERTY))
-
- self.exec_remove(container)
-
- product_type = container.get("productType")
- if product_type is None:
- product_type = container["family"]
- asset_group, members = self._process_data(
- libpath, group_name, product_type
- )
-
- for member in members:
- if member.name in collection_parents:
- for parent in collection_parents[member.name]:
- if isinstance(member, bpy.types.Object):
- parent.objects.link(member)
- elif isinstance(member, bpy.types.Collection):
- parent.children.link(member)
- if member.name in member_transforms and isinstance(
- member, bpy.types.Object
- ):
- member.matrix_basis = member_transforms[member.name]
-
- avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
- avalon_container.children.link(asset_group)
-
- # Restore the old data, but reset members, as they don't exist anymore
- # This avoids a crash, because the memory addresses of those members
- # are not valid anymore
- old_data["members"] = []
- asset_group[AVALON_PROPERTY] = old_data
-
- new_data = {
- "libpath": libpath,
- "representation": repre_entity["id"],
- "parent": repre_entity["versionId"],
- "members": members,
- }
-
- imprint(asset_group, new_data)
-
- def exec_remove(self, container: Dict) -> bool:
- """
- Remove an existing container from a Blender scene.
- """
- group_name = container["objectName"]
- asset_group = bpy.data.collections.get(group_name)
-
- members = set(asset_group.get(AVALON_PROPERTY).get("members", []))
-
- if members:
- for attr_name in dir(bpy.data):
- attr = getattr(bpy.data, attr_name)
- if not isinstance(attr, bpy.types.bpy_prop_collection):
- continue
-
- # ensure to make a list copy because
- # we remove members as we iterate
- for data in list(attr):
- if data not in members or data == asset_group:
- continue
-
- attr.remove(data)
-
- bpy.data.collections.remove(asset_group)
diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_cache.py b/server_addon/blender/client/ayon_blender/plugins/load/load_cache.py
deleted file mode 100644
index 599610ff39..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/load/load_cache.py
+++ /dev/null
@@ -1,284 +0,0 @@
-"""Load an asset in Blender from an Alembic file."""
-
-from pathlib import Path
-from pprint import pformat
-from typing import Dict, List, Optional
-
-import bpy
-
-from ayon_core.pipeline import (
- get_representation_path,
- AVALON_CONTAINER_ID,
-)
-
-from ayon_blender.api.pipeline import (
- AVALON_CONTAINERS,
- AVALON_PROPERTY,
-)
-from ayon_blender.api import plugin, lib
-
-
-class CacheModelLoader(plugin.BlenderLoader):
- """Load cache models.
-
- Stores the imported asset in a collection named after the asset.
-
- Note:
- At least for now it only supports Alembic and USD files.
- """ - product_types = {"model", "pointcache", "animation", "usd"} - representations = {"abc", "usd"} - - label = "Load Cache" - icon = "code-fork" - color = "orange" - - def _remove(self, asset_group): - objects = list(asset_group.children) - empties = [] - - for obj in objects: - if obj.type == 'MESH': - for material_slot in list(obj.material_slots): - bpy.data.materials.remove(material_slot.material) - bpy.data.meshes.remove(obj.data) - elif obj.type == 'EMPTY': - objects.extend(obj.children) - empties.append(obj) - - for empty in empties: - bpy.data.objects.remove(empty) - - def _process(self, libpath, asset_group, group_name): - plugin.deselect_all() - - relative = bpy.context.preferences.filepaths.use_relative_paths - - if any(libpath.lower().endswith(ext) - for ext in [".usd", ".usda", ".usdc"]): - # USD - bpy.ops.wm.usd_import( - filepath=libpath, - relative_path=relative - ) - - else: - # Alembic - bpy.ops.wm.alembic_import( - filepath=libpath, - relative_path=relative - ) - - imported = lib.get_selection() - - # Use first EMPTY without parent as container - container = next( - (obj for obj in imported - if obj.type == "EMPTY" and not obj.parent), - None - ) - - objects = [] - if container: - nodes = list(container.children) - - for obj in nodes: - obj.parent = asset_group - - bpy.data.objects.remove(container) - - objects.extend(nodes) - for obj in nodes: - objects.extend(obj.children_recursive) - else: - for obj in imported: - obj.parent = asset_group - objects = imported - - for obj in objects: - # Unlink the object from all collections - collections = obj.users_collection - for collection in collections: - collection.objects.unlink(obj) - name = obj.name - obj.name = f"{group_name}:{name}" - if obj.type != 'EMPTY': - name_data = obj.data.name - obj.data.name = f"{group_name}:{name_data}" - - for material_slot in obj.material_slots: - name_mat = material_slot.material.name - material_slot.material.name = f"{group_name}:{name_mat}" - - if not obj.get(AVALON_PROPERTY): - obj[AVALON_PROPERTY] = {} - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": group_name}) - - plugin.deselect_all() - - return objects - - def _link_objects(self, objects, collection, containers, asset_group): - # Link the imported objects to any collection where the asset group is - # linked to, except the AVALON_CONTAINERS collection - group_collections = [ - collection - for collection in asset_group.users_collection - if collection != containers] - - for obj in objects: - for collection in group_collections: - collection.objects.link(obj) - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - - libpath = self.filepath_from_context(context) - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - - asset_name = plugin.prepare_scene_name(folder_name, product_name) - unique_number = plugin.get_unique_number(folder_name, product_name) - group_name = plugin.prepare_scene_name( - folder_name, product_name, unique_number - ) - namespace = namespace or f"{folder_name}_{unique_number}" - - containers = bpy.data.collections.get(AVALON_CONTAINERS) - if not containers: - containers = bpy.data.collections.new(name=AVALON_CONTAINERS) - 
bpy.context.scene.collection.children.link(containers)
-
- asset_group = bpy.data.objects.new(group_name, object_data=None)
- asset_group.empty_display_type = 'SINGLE_ARROW'
- containers.objects.link(asset_group)
-
- objects = self._process(libpath, asset_group, group_name)
-
- # Link the asset group to the active collection
- collection = bpy.context.view_layer.active_layer_collection.collection
- collection.objects.link(asset_group)
-
- self._link_objects(objects, asset_group, containers, asset_group)
-
- product_type = context["product"]["productType"]
- asset_group[AVALON_PROPERTY] = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "name": name,
- "namespace": namespace or '',
- "loader": str(self.__class__.__name__),
- "representation": context["representation"]["id"],
- "libpath": libpath,
- "asset_name": asset_name,
- "parent": context["representation"]["versionId"],
- "productType": product_type,
- "objectName": group_name
- }
-
- self[:] = objects
- return objects
-
- def exec_update(self, container: Dict, context: Dict):
- """Update the loaded asset.
-
- This will remove all objects of the current collection, load the new
- ones and add them to the collection.
- If the objects of the collection are used in another collection they
- will not be removed, only unlinked. Normally this should not be the
- case though.
-
- Warning:
- No nested collections are supported at the moment!
- """
- repre_entity = context["representation"]
- object_name = container["objectName"]
- asset_group = bpy.data.objects.get(object_name)
- libpath = Path(get_representation_path(repre_entity))
- extension = libpath.suffix.lower()
-
- self.log.info(
- "Container: %s\nRepresentation: %s",
- pformat(container, indent=2),
- pformat(repre_entity, indent=2),
- )
-
- assert asset_group, (
- f"The asset is not loaded: {container['objectName']}"
- )
- assert libpath, (
- f"No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
- assert extension in plugin.VALID_EXTENSIONS, (
- f"Unsupported file: {libpath}"
- )
-
- metadata = asset_group.get(AVALON_PROPERTY)
- group_libpath = metadata["libpath"]
-
- normalized_group_libpath = (
- str(Path(bpy.path.abspath(group_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- self.log.debug(
- "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_group_libpath,
- normalized_libpath,
- )
- if normalized_group_libpath == normalized_libpath:
- self.log.info("Library already loaded, not updating...")
- return
-
- mat = asset_group.matrix_basis.copy()
- self._remove(asset_group)
-
- objects = self._process(str(libpath), asset_group, object_name)
-
- containers = bpy.data.collections.get(AVALON_CONTAINERS)
- self._link_objects(objects, asset_group, containers, asset_group)
-
- asset_group.matrix_basis = mat
-
- metadata["libpath"] = str(libpath)
- metadata["representation"] = repre_entity["id"]
-
- def exec_remove(self, container: Dict) -> bool:
- """Remove an existing container from a Blender scene.
-
- Arguments:
- container (openpype:container-1.0): Container to remove,
- from `host.ls()`.
-
- Returns:
- bool: Whether the container was deleted.
-
- Warning:
- No nested collections are supported at the moment!
- """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_camera_abc.py b/server_addon/blender/client/ayon_blender/plugins/load/load_camera_abc.py deleted file mode 100644 index 7305afd423..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/load/load_camera_abc.py +++ /dev/null @@ -1,238 +0,0 @@ -"""Load an asset in Blender from an Alembic file.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import bpy - -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from ayon_blender.api import plugin, lib -from ayon_blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class AbcCameraLoader(plugin.BlenderLoader): - """Load a camera from Alembic file. - - Stores the imported asset in an empty named after the asset. - """ - - product_types = {"camera"} - representations = {"abc"} - - label = "Load Camera (ABC)" - icon = "code-fork" - color = "orange" - - def _remove(self, asset_group): - objects = list(asset_group.children) - - for obj in objects: - if obj.type == "CAMERA": - bpy.data.cameras.remove(obj.data) - elif obj.type == "EMPTY": - objects.extend(obj.children) - bpy.data.objects.remove(obj) - - def _process(self, libpath, asset_group, group_name): - plugin.deselect_all() - - # Force the creation of the transform cache even if the camera - # doesn't have an animation. We use the cache to update the camera. - bpy.ops.wm.alembic_import( - filepath=libpath, always_add_cache_reader=True) - - objects = lib.get_selection() - - for obj in objects: - obj.parent = asset_group - - for obj in objects: - name = obj.name - obj.name = f"{group_name}:{name}" - if obj.type != "EMPTY": - name_data = obj.data.name - obj.data.name = f"{group_name}:{name_data}" - - if not obj.get(AVALON_PROPERTY): - obj[AVALON_PROPERTY] = dict() - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": group_name}) - - plugin.deselect_all() - - return objects - - def process_asset( - self, - context: dict, - name: str, - namespace: Optional[str] = None, - options: Optional[Dict] = None, - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - - libpath = self.filepath_from_context(context) - - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - - asset_name = plugin.prepare_scene_name(folder_name, product_name) - unique_number = plugin.get_unique_number(folder_name, product_name) - group_name = plugin.prepare_scene_name( - folder_name, product_name, unique_number - ) - namespace = namespace or f"{folder_name}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) - - self._process(libpath, asset_group, group_name) - - objects = [] - nodes = list(asset_group.children) - - for obj in nodes: - objects.append(obj) - nodes.extend(list(obj.children)) - - 
bpy.context.scene.collection.objects.link(asset_group) - - asset_group[AVALON_PROPERTY] = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or "", - "loader": str(self.__class__.__name__), - "representation": context["representation"]["id"], - "libpath": libpath, - "asset_name": asset_name, - "parent": context["representation"]["versionId"], - "productType": context["product"]["productType"], - "objectName": group_name, - } - - self[:] = objects - return objects - - def exec_update(self, container: Dict, context: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! - """ - repre_entity = context["representation"] - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - libpath = Path(get_representation_path(repre_entity)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(repre_entity, indent=2), - ) - - assert asset_group, ( - f"The asset is not loaded: {container['objectName']}") - assert libpath, ( - f"No existing library file found for {container['objectName']}") - assert libpath.is_file(), f"The file doesn't exist: {libpath}" - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}") - - metadata = asset_group.get(AVALON_PROPERTY) - group_libpath = metadata["libpath"] - - normalized_group_libpath = str( - Path(bpy.path.abspath(group_libpath)).resolve()) - normalized_libpath = str( - Path(bpy.path.abspath(str(libpath))).resolve()) - self.log.debug( - "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_group_libpath, - normalized_libpath, - ) - if normalized_group_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - for obj in asset_group.children: - found = False - for constraint in obj.constraints: - if constraint.type == "TRANSFORM_CACHE": - constraint.cache_file.filepath = libpath.as_posix() - found = True - break - if not found: - # This is to keep compatibility with cameras loaded with - # the old loader - # Create a new constraint for the cache file - constraint = obj.constraints.new("TRANSFORM_CACHE") - bpy.ops.cachefile.open(filepath=libpath.as_posix()) - constraint.cache_file = bpy.data.cache_files[-1] - constraint.cache_file.scale = 1.0 - - # This is a workaround to set the object path. Blender doesn't - # load the list of object paths until the object is evaluated. - # This is a hack to force the object to be evaluated. - # The modifier doesn't need to be removed because camera - # objects don't have modifiers. - obj.modifiers.new( - name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE') - bpy.context.evaluated_depsgraph_get() - - constraint.object_path = ( - constraint.cache_file.object_paths[0].path) - - metadata["libpath"] = str(libpath) - metadata["representation"] = repre_entity["id"] - - def exec_remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. 
- - Warning: - No nested collections are supported at the moment! - """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_camera_fbx.py b/server_addon/blender/client/ayon_blender/plugins/load/load_camera_fbx.py deleted file mode 100644 index d2900c6c3f..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/load/load_camera_fbx.py +++ /dev/null @@ -1,224 +0,0 @@ -"""Load an asset in Blender from an Alembic file.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import bpy - -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from ayon_blender.api import plugin, lib -from ayon_blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class FbxCameraLoader(plugin.BlenderLoader): - """Load a camera from FBX. - - Stores the imported asset in an empty named after the asset. - """ - - product_types = {"camera"} - representations = {"fbx"} - - label = "Load Camera (FBX)" - icon = "code-fork" - color = "orange" - - def _remove(self, asset_group): - objects = list(asset_group.children) - - for obj in objects: - if obj.type == 'CAMERA': - bpy.data.cameras.remove(obj.data) - elif obj.type == 'EMPTY': - objects.extend(obj.children) - bpy.data.objects.remove(obj) - - def _process(self, libpath, asset_group, group_name): - plugin.deselect_all() - - collection = bpy.context.view_layer.active_layer_collection.collection - - bpy.ops.import_scene.fbx(filepath=libpath) - - parent = bpy.context.scene.collection - - objects = lib.get_selection() - - for obj in objects: - obj.parent = asset_group - - for obj in objects: - parent.objects.link(obj) - collection.objects.unlink(obj) - - for obj in objects: - name = obj.name - obj.name = f"{group_name}:{name}" - if obj.type != 'EMPTY': - name_data = obj.data.name - obj.data.name = f"{group_name}:{name_data}" - - if not obj.get(AVALON_PROPERTY): - obj[AVALON_PROPERTY] = dict() - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": group_name}) - - plugin.deselect_all() - - return objects - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.filepath_from_context(context) - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - - asset_name = plugin.prepare_scene_name(folder_name, product_name) - unique_number = plugin.get_unique_number(folder_name, product_name) - group_name = plugin.prepare_scene_name( - folder_name, product_name, unique_number - ) - namespace = namespace or f"{folder_name}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) - - self._process(libpath, asset_group, group_name) - - objects = [] - nodes = list(asset_group.children) - - for obj in nodes: - 
objects.append(obj)
- nodes.extend(list(obj.children))
-
- bpy.context.scene.collection.objects.link(asset_group)
-
- asset_group[AVALON_PROPERTY] = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "name": name,
- "namespace": namespace or '',
- "loader": str(self.__class__.__name__),
- "representation": context["representation"]["id"],
- "libpath": libpath,
- "asset_name": asset_name,
- "parent": context["representation"]["versionId"],
- "productType": context["product"]["productType"],
- "objectName": group_name
- }
-
- self[:] = objects
- return objects
-
- def exec_update(self, container: Dict, context: Dict):
- """Update the loaded asset.
-
- This will remove all objects of the current collection, load the new
- ones and add them to the collection.
- If the objects of the collection are used in another collection they
- will not be removed, only unlinked. Normally this should not be the
- case though.
-
- Warning:
- No nested collections are supported at the moment!
- """
- repre_entity = context["representation"]
- object_name = container["objectName"]
- asset_group = bpy.data.objects.get(object_name)
- libpath = Path(get_representation_path(repre_entity))
- extension = libpath.suffix.lower()
-
- self.log.info(
- "Container: %s\nRepresentation: %s",
- pformat(container, indent=2),
- pformat(repre_entity, indent=2),
- )
-
- assert asset_group, (
- f"The asset is not loaded: {container['objectName']}"
- )
- assert libpath, (
- f"No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
- assert extension in plugin.VALID_EXTENSIONS, (
- f"Unsupported file: {libpath}"
- )
-
- metadata = asset_group.get(AVALON_PROPERTY)
- group_libpath = metadata["libpath"]
-
- normalized_group_libpath = (
- str(Path(bpy.path.abspath(group_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- self.log.debug(
- "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_group_libpath,
- normalized_libpath,
- )
- if normalized_group_libpath == normalized_libpath:
- self.log.info("Library already loaded, not updating...")
- return
-
- mat = asset_group.matrix_basis.copy()
-
- self._remove(asset_group)
- self._process(str(libpath), asset_group, object_name)
-
- asset_group.matrix_basis = mat
-
- metadata["libpath"] = str(libpath)
- metadata["representation"] = repre_entity["id"]
-
- def exec_remove(self, container: Dict) -> bool:
- """Remove an existing container from a Blender scene.
-
- Arguments:
- container (openpype:container-1.0): Container to remove,
- from `host.ls()`.
-
- Returns:
- bool: Whether the container was deleted.
-
- Warning:
- No nested collections are supported at the moment!
- """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_fbx.py b/server_addon/blender/client/ayon_blender/plugins/load/load_fbx.py deleted file mode 100644 index fe3d747dab..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/load/load_fbx.py +++ /dev/null @@ -1,279 +0,0 @@ -"""Load an asset in Blender from an Alembic file.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import bpy - -from ayon_core.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from ayon_blender.api import plugin, lib -from ayon_blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class FbxModelLoader(plugin.BlenderLoader): - """Load FBX models. - - Stores the imported asset in an empty named after the asset. - """ - - product_types = {"model", "rig"} - representations = {"fbx"} - - label = "Load FBX" - icon = "code-fork" - color = "orange" - - def _remove(self, asset_group): - objects = list(asset_group.children) - - for obj in objects: - if obj.type == 'MESH': - for material_slot in list(obj.material_slots): - if material_slot.material: - bpy.data.materials.remove(material_slot.material) - bpy.data.meshes.remove(obj.data) - elif obj.type == 'ARMATURE': - objects.extend(obj.children) - bpy.data.armatures.remove(obj.data) - elif obj.type == 'CURVE': - bpy.data.curves.remove(obj.data) - elif obj.type == 'EMPTY': - objects.extend(obj.children) - bpy.data.objects.remove(obj) - - def _process(self, libpath, asset_group, group_name, action): - plugin.deselect_all() - - collection = bpy.context.view_layer.active_layer_collection.collection - - bpy.ops.import_scene.fbx(filepath=libpath) - - parent = bpy.context.scene.collection - - imported = lib.get_selection() - - empties = [obj for obj in imported if obj.type == 'EMPTY'] - - container = None - - for empty in empties: - if not empty.parent: - container = empty - break - - assert container, "No asset group found" - - # Children must be linked before parents, - # otherwise the hierarchy will break - objects = [] - nodes = list(container.children) - - for obj in nodes: - obj.parent = asset_group - - bpy.data.objects.remove(container) - - for obj in nodes: - objects.append(obj) - nodes.extend(list(obj.children)) - - objects.reverse() - - for obj in objects: - parent.objects.link(obj) - collection.objects.unlink(obj) - - for obj in objects: - name = obj.name - obj.name = f"{group_name}:{name}" - if obj.type != 'EMPTY': - name_data = obj.data.name - obj.data.name = f"{group_name}:{name_data}" - - if obj.type == 'MESH': - for material_slot in obj.material_slots: - name_mat = material_slot.material.name - material_slot.material.name = f"{group_name}:{name_mat}" - elif obj.type == 'ARMATURE': - anim_data = obj.animation_data - if action is not None: - anim_data.action = action - elif anim_data.action is not None: - name_action = anim_data.action.name - anim_data.action.name = f"{group_name}:{name_action}" - - if not obj.get(AVALON_PROPERTY): - obj[AVALON_PROPERTY] = dict() - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": group_name}) - - plugin.deselect_all() - - return objects - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> 
Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.filepath_from_context(context) - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - - asset_name = plugin.prepare_scene_name(folder_name, product_name) - unique_number = plugin.get_unique_number(folder_name, product_name) - group_name = plugin.prepare_scene_name( - folder_name, product_name, unique_number - ) - namespace = namespace or f"{folder_name}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) - - objects = self._process(libpath, asset_group, group_name, None) - - objects = [] - nodes = list(asset_group.children) - - for obj in nodes: - objects.append(obj) - nodes.extend(list(obj.children)) - - bpy.context.scene.collection.objects.link(asset_group) - - asset_group[AVALON_PROPERTY] = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(self.__class__.__name__), - "representation": context["representation"]["id"], - "libpath": libpath, - "asset_name": asset_name, - "parent": context["representation"]["versionId"], - "productType": context["product"]["productType"], - "objectName": group_name - } - - self[:] = objects - return objects - - def exec_update(self, container: Dict, context: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! 
- """ - repre_entity = context["representation"] - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - libpath = Path(get_representation_path(repre_entity)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(repre_entity, indent=2), - ) - - assert asset_group, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert libpath, ( - "No existing library file found for {container['objectName']}" - ) - assert libpath.is_file(), ( - f"The file doesn't exist: {libpath}" - ) - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}" - ) - - metadata = asset_group.get(AVALON_PROPERTY) - group_libpath = metadata["libpath"] - - normalized_group_libpath = ( - str(Path(bpy.path.abspath(group_libpath)).resolve()) - ) - normalized_libpath = ( - str(Path(bpy.path.abspath(str(libpath))).resolve()) - ) - self.log.debug( - "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_group_libpath, - normalized_libpath, - ) - if normalized_group_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - # Get the armature of the rig - objects = asset_group.children - armatures = [obj for obj in objects if obj.type == 'ARMATURE'] - action = None - - if armatures: - armature = armatures[0] - - if armature.animation_data and armature.animation_data.action: - action = armature.animation_data.action - - mat = asset_group.matrix_basis.copy() - self._remove(asset_group) - - self._process(str(libpath), asset_group, object_name, action) - - asset_group.matrix_basis = mat - - metadata["libpath"] = str(libpath) - metadata["representation"] = repre_entity["id"] - - def exec_remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. - - Warning: - No nested collections are supported at the moment! 
- """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_layout_json.py b/server_addon/blender/client/ayon_blender/plugins/load/load_layout_json.py deleted file mode 100644 index 9a2d17b4fc..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/load/load_layout_json.py +++ /dev/null @@ -1,297 +0,0 @@ -"""Load a layout in Blender.""" - -import json -from pathlib import Path -from pprint import pformat -from typing import Dict, Optional - -import bpy - -from ayon_core.pipeline import ( - discover_loader_plugins, - remove_container, - load_container, - get_representation_path, - loaders_from_representation, - AVALON_CONTAINER_ID, -) -from ayon_blender.api.pipeline import ( - AVALON_INSTANCES, - AVALON_CONTAINERS, - AVALON_PROPERTY, -) -from ayon_blender.api import plugin - - -class JsonLayoutLoader(plugin.BlenderLoader): - """Load layout published from Unreal.""" - - product_types = {"layout"} - representations = {"json"} - - label = "Load Layout" - icon = "code-fork" - color = "orange" - - animation_creator_name = "CreateAnimation" - - def _remove(self, asset_group): - objects = list(asset_group.children) - - for obj in objects: - remove_container(obj.get(AVALON_PROPERTY)) - - def _remove_animation_instances(self, asset_group): - instances = bpy.data.collections.get(AVALON_INSTANCES) - if instances: - for obj in list(asset_group.children): - anim_collection = instances.children.get( - obj.name + "_animation") - if anim_collection: - bpy.data.collections.remove(anim_collection) - - def _get_loader(self, loaders, product_type): - name = "" - if product_type == 'rig': - name = "BlendRigLoader" - elif product_type == 'model': - name = "BlendModelLoader" - - if name == "": - return None - - for loader in loaders: - if loader.__name__ == name: - return loader - - return None - - def _process(self, libpath, asset, asset_group, actions): - plugin.deselect_all() - - with open(libpath, "r") as fp: - data = json.load(fp) - - all_loaders = discover_loader_plugins() - - for element in data: - reference = element.get('reference') - product_type = element.get("product_type") - if product_type is None: - product_type = element.get("family") - - loaders = loaders_from_representation(all_loaders, reference) - loader = self._get_loader(loaders, product_type) - - if not loader: - continue - - instance_name = element.get('instance_name') - - action = None - - if actions: - action = actions.get(instance_name, None) - - options = { - 'parent': asset_group, - 'transform': element.get('transform'), - 'action': action, - 'create_animation': True if product_type == 'rig' else False, - 'animation_asset': asset - } - - if element.get('animation'): - options['animation_file'] = str(Path(libpath).with_suffix( - '')) + "." + element.get('animation') - - # This should return the loaded asset, but the load call will be - # added to the queue to run in the Blender main thread, so - # at this time it will not return anything. The assets will be - # loaded in the next Blender cycle, so we use the options to - # set the transform, parent and assign the action, if there is one. 
- load_container( - loader, - reference, - namespace=instance_name, - options=options - ) - - # Camera creation when loading a layout is not necessary for now, - # but the code is worth keeping in case we need it in the future. - # # Create the camera asset and the camera instance - # creator_plugin = get_legacy_creator_by_name("CreateCamera") - # if not creator_plugin: - # raise ValueError("Creator plugin \"CreateCamera\" was " - # "not found.") - - # TODO: Refactor legacy create usage to new style creators - # legacy_create( - # creator_plugin, - # name="camera", - # # name=f"{unique_number}_{product[name]}_animation", - # asset=asset, - # options={"useSelection": False} - # # data={"dependencies": context["representation"]["id"]} - # ) - - def process_asset(self, - context: dict, - name: str, - namespace: Optional[str] = None, - options: Optional[Dict] = None): - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.filepath_from_context(context) - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - - asset_name = plugin.prepare_scene_name(folder_name, product_name) - unique_number = plugin.get_unique_number(folder_name, product_name) - group_name = plugin.prepare_scene_name( - folder_name, product_name, unique_number - ) - namespace = namespace or f"{folder_name}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - asset_group.empty_display_type = 'SINGLE_ARROW' - avalon_container.objects.link(asset_group) - - self._process(libpath, asset_name, asset_group, None) - - bpy.context.scene.collection.objects.link(asset_group) - - asset_group[AVALON_PROPERTY] = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(self.__class__.__name__), - "representation": context["representation"]["id"], - "libpath": libpath, - "asset_name": asset_name, - "parent": context["representation"]["versionId"], - "productType": context["product"]["productType"], - "objectName": group_name - } - - self[:] = asset_group.children - return asset_group.children - - def exec_update(self, container: Dict, context: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. 
- """ - repre_entity = context["representation"] - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - libpath = Path(get_representation_path(repre_entity)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(repre_entity, indent=2), - ) - - assert asset_group, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert libpath, ( - "No existing library file found for {container['objectName']}" - ) - assert libpath.is_file(), ( - f"The file doesn't exist: {libpath}" - ) - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}" - ) - - metadata = asset_group.get(AVALON_PROPERTY) - group_libpath = metadata["libpath"] - - normalized_group_libpath = ( - str(Path(bpy.path.abspath(group_libpath)).resolve()) - ) - normalized_libpath = ( - str(Path(bpy.path.abspath(str(libpath))).resolve()) - ) - self.log.debug( - "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_group_libpath, - normalized_libpath, - ) - if normalized_group_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - actions = {} - - for obj in asset_group.children: - obj_meta = obj.get(AVALON_PROPERTY) - product_type = obj_meta.get("productType") - if product_type is None: - product_type = obj_meta.get("family") - if product_type == "rig": - rig = None - for child in obj.children: - if child.type == 'ARMATURE': - rig = child - break - if not rig: - raise Exception("No armature in the rig asset group.") - if rig.animation_data and rig.animation_data.action: - namespace = obj_meta.get('namespace') - actions[namespace] = rig.animation_data.action - - mat = asset_group.matrix_basis.copy() - - self._remove_animation_instances(asset_group) - - self._remove(asset_group) - - self._process(str(libpath), asset_group, actions) - - asset_group.matrix_basis = mat - - metadata["libpath"] = str(libpath) - metadata["representation"] = repre_entity["id"] - - def exec_remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. - """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove_animation_instances(asset_group) - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/server_addon/blender/client/ayon_blender/plugins/load/load_look.py b/server_addon/blender/client/ayon_blender/plugins/load/load_look.py deleted file mode 100644 index d214917d3e..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/load/load_look.py +++ /dev/null @@ -1,223 +0,0 @@ -"""Load a model asset in Blender.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import os -import json -import bpy - -from ayon_core.pipeline import get_representation_path -from ayon_blender.api import plugin -from ayon_blender.api.pipeline import ( - containerise_existing, - AVALON_PROPERTY -) - - -class BlendLookLoader(plugin.BlenderLoader): - """Load models from a .blend file. - - Because they come from a .blend file we can simply link the collection that - contains the model. There is no further need to 'containerise' it. 
- """ - - product_types = {"look"} - representations = {"json"} - - label = "Load Look" - icon = "code-fork" - color = "orange" - - def get_all_children(self, obj): - children = list(obj.children) - - for child in children: - children.extend(child.children) - - return children - - def _process(self, libpath, container_name, objects): - with open(libpath, "r") as fp: - data = json.load(fp) - - path = os.path.dirname(libpath) - materials_path = f"{path}/resources" - - materials = [] - - for entry in data: - file = entry.get('fbx_filename') - if file is None: - continue - - bpy.ops.import_scene.fbx(filepath=f"{materials_path}/{file}") - - mesh = [o for o in bpy.context.scene.objects if o.select_get()][0] - material = mesh.data.materials[0] - material.name = f"{material.name}:{container_name}" - - texture_file = entry.get('tga_filename') - if texture_file: - node_tree = material.node_tree - pbsdf = node_tree.nodes['Principled BSDF'] - base_color = pbsdf.inputs[0] - tex_node = base_color.links[0].from_node - tex_node.image.filepath = f"{materials_path}/{texture_file}" - - materials.append(material) - - for obj in objects: - for child in self.get_all_children(obj): - mesh_name = child.name.split(':')[0] - if mesh_name == material.name.split(':')[0]: - child.data.materials.clear() - child.data.materials.append(material) - break - - bpy.data.objects.remove(mesh) - - return materials, objects - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - - libpath = self.filepath_from_context(context) - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - - lib_container = plugin.prepare_scene_name( - folder_name, product_name - ) - unique_number = plugin.get_unique_number( - folder_name, product_name - ) - namespace = namespace or f"{folder_name}_{unique_number}" - container_name = plugin.prepare_scene_name( - folder_name, product_name, unique_number - ) - - container = bpy.data.collections.new(lib_container) - container.name = container_name - containerise_existing( - container, - name, - namespace, - context, - self.__class__.__name__, - ) - - metadata = container.get(AVALON_PROPERTY) - - metadata["libpath"] = libpath - metadata["lib_container"] = lib_container - - selected = [o for o in bpy.context.scene.objects if o.select_get()] - - materials, objects = self._process(libpath, container_name, selected) - - # Save the list of imported materials in the metadata container - metadata["objects"] = objects - metadata["materials"] = materials - - metadata["parent"] = context["representation"]["versionId"] - metadata["product_type"] = context["product"]["productType"] - - nodes = list(container.objects) - nodes.append(container) - self[:] = nodes - return nodes - - def update(self, container: Dict, context: Dict): - collection = bpy.data.collections.get(container["objectName"]) - repre_entity = context["representation"] - libpath = Path(get_representation_path(repre_entity)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(repre_entity, indent=2), - ) - - assert collection, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert not (collection.children), ( - "Nested collections are not supported." 
- )
- assert libpath, (
- f"No existing library file found for {container['objectName']}"
- )
- assert libpath.is_file(), (
- f"The file doesn't exist: {libpath}"
- )
- assert extension in plugin.VALID_EXTENSIONS, (
- f"Unsupported file: {libpath}"
- )
-
- collection_metadata = collection.get(AVALON_PROPERTY)
- collection_libpath = collection_metadata["libpath"]
-
- normalized_collection_libpath = (
- str(Path(bpy.path.abspath(collection_libpath)).resolve())
- )
- normalized_libpath = (
- str(Path(bpy.path.abspath(str(libpath))).resolve())
- )
- self.log.debug(
- "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
- normalized_collection_libpath,
- normalized_libpath,
- )
- if normalized_collection_libpath == normalized_libpath:
- self.log.info("Library already loaded, not updating...")
- return
-
- for obj in collection_metadata['objects']:
- for child in self.get_all_children(obj):
- child.data.materials.clear()
-
- for material in collection_metadata['materials']:
- bpy.data.materials.remove(material)
-
- namespace = collection_metadata['namespace']
- name = collection_metadata['name']
-
- container_name = f"{namespace}_{name}"
-
- materials, objects = self._process(
- libpath, container_name, collection_metadata['objects'])
-
- collection_metadata["objects"] = objects
- collection_metadata["materials"] = materials
- collection_metadata["libpath"] = str(libpath)
- collection_metadata["representation"] = repre_entity["id"]
-
- def remove(self, container: Dict) -> bool:
- collection = bpy.data.collections.get(container["objectName"])
- if not collection:
- return False
-
- collection_metadata = collection.get(AVALON_PROPERTY)
-
- for obj in collection_metadata['objects']:
- for child in self.get_all_children(obj):
- child.data.materials.clear()
-
- for material in collection_metadata['materials']:
- bpy.data.materials.remove(material)
-
- bpy.data.collections.remove(collection)
-
- return True
diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/collect_current_file.py b/server_addon/blender/client/ayon_blender/plugins/publish/collect_current_file.py
deleted file mode 100644
index 6568372169..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/publish/collect_current_file.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import pyblish.api
-from ayon_blender.api import workio, plugin
-
-
-class CollectBlenderCurrentFile(plugin.BlenderContextPlugin):
- """Inject the current working file into context"""
-
- order = pyblish.api.CollectorOrder - 0.5
- label = "Blender Current File"
- hosts = ["blender"]
-
- def process(self, context):
- """Inject the current working file"""
- current_file = workio.current_file()
- context.data["currentFile"] = current_file
diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/collect_file_dependencies.py b/server_addon/blender/client/ayon_blender/plugins/publish/collect_file_dependencies.py
deleted file mode 100644
index ea36ab459c..0000000000
--- a/server_addon/blender/client/ayon_blender/plugins/publish/collect_file_dependencies.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from pathlib import Path
-
-import pyblish.api
-
-import bpy
-
-
-class CollectFileDependencies(pyblish.api.ContextPlugin):
- """Gather all files referenced in this scene."""
-
- label = "Collect File Dependencies"
- order = pyblish.api.CollectorOrder - 0.49
- hosts = ["blender"]
- families = ["render"]
-
- @classmethod
- def apply_settings(cls, project_settings):
- # Disable plug-in if not used for deadline submission anyway
- settings = 
project_settings["deadline"]["publish"]["BlenderSubmitDeadline"] # noqa - cls.enabled = settings.get("asset_dependencies", True) - - def process(self, context): - dependencies = set() - - # Add alembic files as dependencies - for cache in bpy.data.cache_files: - dependencies.add( - Path(bpy.path.abspath(cache.filepath)).resolve().as_posix()) - - # Add image files as dependencies - for image in bpy.data.images: - if image.filepath: - dependencies.add(Path( - bpy.path.abspath(image.filepath)).resolve().as_posix()) - - context.data["fileDependencies"] = list(dependencies) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/collect_instance.py b/server_addon/blender/client/ayon_blender/plugins/publish/collect_instance.py deleted file mode 100644 index 7d6f841ba3..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/collect_instance.py +++ /dev/null @@ -1,44 +0,0 @@ -import bpy - -import pyblish.api - -from ayon_core.pipeline.publish import KnownPublishError -from ayon_blender.api import plugin -from ayon_blender.api.pipeline import AVALON_PROPERTY - - -class CollectBlenderInstanceData(plugin.BlenderInstancePlugin): - """Validator to verify that the instance is not empty""" - - order = pyblish.api.CollectorOrder - hosts = ["blender"] - families = ["model", "pointcache", "animation", "rig", "camera", "layout", - "blendScene", "usd"] - label = "Collect Instance" - - def process(self, instance): - instance_node = instance.data["transientData"]["instance_node"] - - # Collect members of the instance - members = [instance_node] - if isinstance(instance_node, bpy.types.Collection): - members.extend(instance_node.objects) - members.extend(instance_node.children) - - # Special case for animation instances, include armatures - if instance.data["productType"] == "animation": - for obj in instance_node.objects: - if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY): - members.extend( - child for child in obj.children - if child.type == 'ARMATURE' - ) - elif isinstance(instance_node, bpy.types.Object): - members.extend(instance_node.children_recursive) - else: - raise KnownPublishError( - f"Unsupported instance node type '{type(instance_node)}' " - f"for instance '{instance}'" - ) - - instance[:] = members diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/collect_render.py b/server_addon/blender/client/ayon_blender/plugins/publish/collect_render.py deleted file mode 100644 index ac5dc5bf6f..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/collect_render.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect render data.""" - -import os -import re - -import bpy -import pyblish.api - -from ayon_blender.api import colorspace, plugin - - -class CollectBlenderRender(plugin.BlenderInstancePlugin): - """Gather all publishable render instances.""" - - order = pyblish.api.CollectorOrder + 0.01 - hosts = ["blender"] - families = ["render"] - label = "Collect Render" - sync_workfile_version = False - - @staticmethod - def generate_expected_beauty( - render_product, frame_start, frame_end, frame_step, ext - ): - """ - Generate the expected files for the render product for the beauty - render. This returns a list of files that should be rendered. It - replaces the sequence of `#` with the frame number. 
- """ - path = os.path.dirname(render_product) - file = os.path.basename(render_product) - - expected_files = [] - - for frame in range(frame_start, frame_end + 1, frame_step): - frame_str = str(frame).rjust(4, "0") - filename = re.sub("#+", frame_str, file) - expected_file = f"{os.path.join(path, filename)}.{ext}" - expected_files.append(expected_file.replace("\\", "/")) - - return { - "beauty": expected_files - } - - @staticmethod - def generate_expected_aovs( - aov_file_product, frame_start, frame_end, frame_step, ext - ): - """ - Generate the expected files for the render product for the beauty - render. This returns a list of files that should be rendered. It - replaces the sequence of `#` with the frame number. - """ - expected_files = {} - - for aov_name, aov_file in aov_file_product: - path = os.path.dirname(aov_file) - file = os.path.basename(aov_file) - - aov_files = [] - - for frame in range(frame_start, frame_end + 1, frame_step): - frame_str = str(frame).rjust(4, "0") - filename = re.sub("#+", frame_str, file) - expected_file = f"{os.path.join(path, filename)}.{ext}" - aov_files.append(expected_file.replace("\\", "/")) - - expected_files[aov_name] = aov_files - - return expected_files - - def process(self, instance): - context = instance.context - - instance_node = instance.data["transientData"]["instance_node"] - render_data = instance_node.get("render_data") - - assert render_data, "No render data found." - - render_product = render_data.get("render_product") - aov_file_product = render_data.get("aov_file_product") - ext = render_data.get("image_format") - multilayer = render_data.get("multilayer_exr") - - frame_start = context.data["frameStart"] - frame_end = context.data["frameEnd"] - frame_handle_start = context.data["frameStartHandle"] - frame_handle_end = context.data["frameEndHandle"] - - expected_beauty = self.generate_expected_beauty( - render_product, int(frame_start), int(frame_end), - int(bpy.context.scene.frame_step), ext) - - expected_aovs = self.generate_expected_aovs( - aov_file_product, int(frame_start), int(frame_end), - int(bpy.context.scene.frame_step), ext) - - expected_files = expected_beauty | expected_aovs - - instance.data.update({ - "families": ["render", "render.farm"], - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartHandle": frame_handle_start, - "frameEndHandle": frame_handle_end, - "fps": context.data["fps"], - "byFrameStep": bpy.context.scene.frame_step, - "review": render_data.get("review", False), - "multipartExr": ext == "exr" and multilayer, - "farm": True, - "expectedFiles": [expected_files], - # OCIO not currently implemented in Blender, but the following - # settings are required by the schema, so it is hardcoded. 
- # TODO: Implement OCIO in Blender - "colorspaceConfig": "", - "colorspaceDisplay": "sRGB", - "colorspaceView": "ACES 1.0 SDR-video", - "renderProducts": colorspace.ARenderProduct(), - }) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/collect_review.py b/server_addon/blender/client/ayon_blender/plugins/publish/collect_review.py deleted file mode 100644 index c013910b5a..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/collect_review.py +++ /dev/null @@ -1,68 +0,0 @@ -import bpy -import pyblish.api -from ayon_blender.api import plugin - - -class CollectReview(plugin.BlenderInstancePlugin): - """Collect Review data - - """ - - order = pyblish.api.CollectorOrder + 0.3 - label = "Collect Review Data" - families = ["review"] - - def process(self, instance): - - self.log.debug(f"instance: {instance}") - - datablock = instance.data["transientData"]["instance_node"] - - # get cameras - cameras = [ - obj - for obj in datablock.all_objects - if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA" - ] - - assert len(cameras) == 1, ( - f"Not a single camera found in extraction: {cameras}" - ) - camera = cameras[0].name - self.log.debug(f"camera: {camera}") - - focal_length = cameras[0].data.lens - - # get isolate objects list from meshes instance members. - types = {"MESH", "GPENCIL"} - isolate_objects = [ - obj - for obj in instance - if isinstance(obj, bpy.types.Object) and obj.type in types - ] - - if not instance.data.get("remove"): - # Store focal length in `burninDataMembers` - burninData = instance.data.setdefault("burninDataMembers", {}) - burninData["focalLength"] = focal_length - - instance.data.update({ - "review_camera": camera, - "frameStart": instance.context.data["frameStart"], - "frameEnd": instance.context.data["frameEnd"], - "fps": instance.context.data["fps"], - "isolate": isolate_objects, - }) - - self.log.debug(f"instance data: {instance.data}") - - # TODO : Collect audio - audio_tracks = [] - instance.data["audio"] = [] - for track in audio_tracks: - instance.data["audio"].append( - { - "offset": track.offset.get(), - "filename": track.filename.get(), - } - ) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/collect_workfile.py b/server_addon/blender/client/ayon_blender/plugins/publish/collect_workfile.py deleted file mode 100644 index 347a5caf01..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,38 +0,0 @@ -from pathlib import Path - -from pyblish.api import CollectorOrder -from ayon_blender.api import plugin - - -class CollectWorkfile(plugin.BlenderInstancePlugin): - """Inject workfile data into its instance.""" - - order = CollectorOrder - label = "Collect Workfile" - hosts = ["blender"] - families = ["workfile"] - - def process(self, instance): - """Process collector.""" - - context = instance.context - filepath = Path(context.data["currentFile"]) - ext = filepath.suffix - - instance.data.update( - { - "setMembers": [filepath.as_posix()], - "frameStart": context.data.get("frameStart", 1), - "frameEnd": context.data.get("frameEnd", 1), - "handleStart": context.data.get("handleStart", 1), - "handledEnd": context.data.get("handleEnd", 1), - "representations": [ - { - "name": ext.lstrip("."), - "ext": ext.lstrip("."), - "files": filepath.name, - "stagingDir": filepath.parent, - } - ], - } - ) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_abc.py 
b/server_addon/blender/client/ayon_blender/plugins/publish/extract_abc.py deleted file mode 100644 index 5da0258586..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_abc.py +++ /dev/null @@ -1,94 +0,0 @@ -import os - -import bpy - -from ayon_core.lib import BoolDef -from ayon_core.pipeline import publish -from ayon_blender.api import plugin - - -class ExtractABC(plugin.BlenderExtractor, publish.OptionalPyblishPluginMixin): - """Extract as ABC.""" - - label = "Extract ABC" - hosts = ["blender"] - families = ["pointcache"] - - def process(self, instance): - if not self.is_active(instance.data): - return - - attr_values = self.get_attr_values_from_data(instance.data) - - # Define extract output file path - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - filename = f"{instance_name}.abc" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - plugin.deselect_all() - - asset_group = instance.data["transientData"]["instance_node"] - - selected = [] - for obj in instance: - if isinstance(obj, bpy.types.Object): - obj.select_set(True) - selected.append(obj) - - context = plugin.create_blender_context( - active=asset_group, selected=selected) - - with bpy.context.temp_override(**context): - # We export the abc - bpy.ops.wm.alembic_export( - filepath=filepath, - selected=True, - flatten=False, - subdiv_schema=attr_values.get("subdiv_schema", False) - ) - - plugin.deselect_all() - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) - - @classmethod - def get_attribute_defs(cls): - return [ - BoolDef( - "subdiv_schema", - label="Alembic Mesh Subdiv Schema", - tooltip="Export Meshes using Alembic's subdivision schema.\n" - "Enabling this includes creases with the export but " - "excludes the mesh's normals.\n" - "Enabling this usually result in smaller file size " - "due to lack of normals.", - default=False - ) - ] - - -class ExtractModelABC(ExtractABC): - """Extract model as ABC.""" - - label = "Extract Model ABC" - hosts = ["blender"] - families = ["model"] - optional = True diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_abc_animation.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_abc_animation.py deleted file mode 100644 index 503593c8d3..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_abc_animation.py +++ /dev/null @@ -1,80 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline import publish -from ayon_blender.api import plugin - - -class ExtractAnimationABC( - plugin.BlenderExtractor, - publish.OptionalPyblishPluginMixin, -): - """Extract as ABC.""" - - label = "Extract Animation ABC" - hosts = ["blender"] - families = ["animation"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - filename = 
f"{instance_name}.abc" - - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - plugin.deselect_all() - - selected = [] - asset_group = instance.data["transientData"]["instance_node"] - - objects = [] - for obj in instance: - if isinstance(obj, bpy.types.Collection): - for child in obj.all_objects: - objects.append(child) - for obj in objects: - children = [o for o in bpy.data.objects if o.parent == obj] - for child in children: - objects.append(child) - - for obj in objects: - obj.select_set(True) - selected.append(obj) - - context = plugin.create_blender_context( - active=asset_group, selected=selected) - - with bpy.context.temp_override(**context): - # We export the abc - bpy.ops.wm.alembic_export( - filepath=filepath, - selected=True, - flatten=False - ) - - plugin.deselect_all() - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_blend.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_blend.py deleted file mode 100644 index 520bc274a1..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_blend.py +++ /dev/null @@ -1,76 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline import publish -from ayon_blender.api import plugin - - -class ExtractBlend( - plugin.BlenderExtractor, publish.OptionalPyblishPluginMixin -): - """Extract a blend file.""" - - label = "Extract Blend" - hosts = ["blender"] - families = ["model", "camera", "rig", "action", "layout", "blendScene"] - optional = True - - # From settings - compress = False - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - filename = f"{instance_name}.blend" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - data_blocks = set() - - for data in instance: - data_blocks.add(data) - # Pack used images in the blend files. - if not ( - isinstance(data, bpy.types.Object) and data.type == 'MESH' - ): - continue - for material_slot in data.material_slots: - mat = material_slot.material - if not (mat and mat.use_nodes): - continue - tree = mat.node_tree - if tree.type != 'SHADER': - continue - for node in tree.nodes: - if node.bl_idname != 'ShaderNodeTexImage': - continue - # Check if image is not packed already - # and pack it if not. 
- if node.image and node.image.packed_file is None: - node.image.pack() - - bpy.data.libraries.write(filepath, data_blocks, compress=self.compress) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'blend', - 'ext': 'blend', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_blend_animation.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_blend_animation.py deleted file mode 100644 index cca8ab2dd6..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_blend_animation.py +++ /dev/null @@ -1,67 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline import publish -from ayon_blender.api import plugin - - -class ExtractBlendAnimation( - plugin.BlenderExtractor, - publish.OptionalPyblishPluginMixin, -): - """Extract a blend file.""" - - label = "Extract Blend" - hosts = ["blender"] - families = ["animation"] - optional = True - - # From settings - compress = False - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - filename = f"{instance_name}.blend" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - data_blocks = set() - - for obj in instance: - if isinstance(obj, bpy.types.Object) and obj.type == 'EMPTY': - child = obj.children[0] - if child and child.type == 'ARMATURE': - if child.animation_data and child.animation_data.action: - if not obj.animation_data: - obj.animation_data_create() - obj.animation_data.action = child.animation_data.action - obj.animation_data_clear() - data_blocks.add(child.animation_data.action) - data_blocks.add(obj) - - bpy.data.libraries.write(filepath, data_blocks, compress=self.compress) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'blend', - 'ext': 'blend', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_abc.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_abc.py deleted file mode 100644 index 278cd293c5..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_abc.py +++ /dev/null @@ -1,70 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline import publish -from ayon_blender.api import plugin - - -class ExtractCameraABC( - plugin.BlenderExtractor, publish.OptionalPyblishPluginMixin -): - """Extract camera as ABC.""" - - label = "Extract Camera (ABC)" - hosts = ["blender"] - families = ["camera"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - 
filename = f"{instance_name}.abc" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - plugin.deselect_all() - - asset_group = instance.data["transientData"]["instance_node"] - - # Need to cast to list because children is a tuple - selected = list(asset_group.children) - active = selected[0] - - for obj in selected: - obj.select_set(True) - - context = plugin.create_blender_context( - active=active, selected=selected) - - with bpy.context.temp_override(**context): - # We export the abc - bpy.ops.wm.alembic_export( - filepath=filepath, - selected=True, - flatten=True - ) - - plugin.deselect_all() - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_fbx.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_fbx.py deleted file mode 100644 index 9094355a72..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_camera_fbx.py +++ /dev/null @@ -1,85 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline import publish -from ayon_blender.api import plugin - - -class ExtractCamera( - plugin.BlenderExtractor, publish.OptionalPyblishPluginMixin -): - """Extract as the camera as FBX.""" - - label = "Extract Camera (FBX)" - hosts = ["blender"] - families = ["camera"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - filename = f"{instance_name}.fbx" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - plugin.deselect_all() - - selected = [] - - camera = None - - for obj in instance: - if obj.type == "CAMERA": - obj.select_set(True) - selected.append(obj) - camera = obj - break - - assert camera, "No camera found" - - context = plugin.create_blender_context( - active=camera, selected=selected) - - scale_length = bpy.context.scene.unit_settings.scale_length - bpy.context.scene.unit_settings.scale_length = 0.01 - - with bpy.context.temp_override(**context): - # We export the fbx - bpy.ops.export_scene.fbx( - filepath=filepath, - use_active_collection=False, - use_selection=True, - bake_anim_use_nla_strips=False, - bake_anim_use_all_actions=False, - add_leaf_bones=False, - armature_nodetype='ROOT', - object_types={'CAMERA'}, - bake_anim_simplify_factor=0.0 - ) - - bpy.context.scene.unit_settings.scale_length = scale_length - - plugin.deselect_all() - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx.py deleted file mode 100644 index 
085f7b18c3..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx.py +++ /dev/null @@ -1,93 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline import publish -from ayon_blender.api import plugin - - -class ExtractFBX( - plugin.BlenderExtractor, publish.OptionalPyblishPluginMixin -): - """Extract as FBX.""" - - label = "Extract FBX" - hosts = ["blender"] - families = ["model", "rig"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - filename = f"{instance_name}.fbx" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - plugin.deselect_all() - - asset_group = instance.data["transientData"]["instance_node"] - - selected = [] - for obj in instance: - obj.select_set(True) - selected.append(obj) - - context = plugin.create_blender_context( - active=asset_group, selected=selected) - - new_materials = [] - new_materials_objs = [] - objects = list(asset_group.children) - - for obj in objects: - objects.extend(obj.children) - if obj.type == 'MESH' and len(obj.data.materials) == 0: - mat = bpy.data.materials.new(obj.name) - obj.data.materials.append(mat) - new_materials.append(mat) - new_materials_objs.append(obj) - - scale_length = bpy.context.scene.unit_settings.scale_length - bpy.context.scene.unit_settings.scale_length = 0.01 - - with bpy.context.temp_override(**context): - # We export the fbx - bpy.ops.export_scene.fbx( - filepath=filepath, - use_active_collection=False, - use_selection=True, - mesh_smooth_type='FACE', - add_leaf_bones=False - ) - - bpy.context.scene.unit_settings.scale_length = scale_length - - plugin.deselect_all() - - for mat in new_materials: - bpy.data.materials.remove(mat) - - for obj in new_materials_objs: - obj.data.materials.pop() - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx_animation.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx_animation.py deleted file mode 100644 index 7f49e919db..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_fbx_animation.py +++ /dev/null @@ -1,227 +0,0 @@ -import os -import json - -import bpy -import bpy_extras -import bpy_extras.anim_utils - -from ayon_core.pipeline import publish -from ayon_blender.api import plugin -from ayon_blender.api.pipeline import AVALON_PROPERTY - - -def get_all_parents(obj): - """Get all recursive parents of object""" - result = [] - while True: - obj = obj.parent - if not obj: - break - result.append(obj) - return result - - -def get_highest_root(objects): - # Get the highest object that is also in the collection - included_objects = {obj.name_full for obj in objects} - num_parents_to_obj = {} - for obj in objects: - if isinstance(obj, bpy.types.Object): - parents = get_all_parents(obj) - # included parents - parents = [parent for parent in parents if - parent.name_full in included_objects] - if not 
parents: - # A node without parents must be a highest root - return obj - - num_parents_to_obj.setdefault(len(parents), obj) - - minimum_parent = min(num_parents_to_obj) - return num_parents_to_obj[minimum_parent] - - -class ExtractAnimationFBX( - plugin.BlenderExtractor, - publish.OptionalPyblishPluginMixin, -): - """Extract as animation.""" - - label = "Extract FBX" - hosts = ["blender"] - families = ["animation"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - - # Perform extraction - self.log.debug("Performing extraction..") - - asset_group = instance.data["transientData"]["instance_node"] - - # Get objects in this collection (but not in children collections) - # and for those objects include the children hierarchy - # TODO: Would it make more sense for the Collect Instance collector - # to also always retrieve all the children? - objects = set(asset_group.objects) - - # From the direct children of the collection find the 'root' node - # that we want to export - it is the 'highest' node in a hierarchy - root = get_highest_root(objects) - - for obj in list(objects): - objects.update(obj.children_recursive) - - # Find all armatures among the objects, assume to find only one - armatures = [obj for obj in objects if obj.type == "ARMATURE"] - if not armatures: - raise RuntimeError( - f"Unable to find ARMATURE in collection: " - f"{asset_group.name}" - ) - elif len(armatures) > 1: - self.log.warning( - "Found more than one ARMATURE, using " - f"only first of: {armatures}" - ) - armature = armatures[0] - - object_action_pairs = [] - original_actions = [] - - starting_frames = [] - ending_frames = [] - - # For each armature, we make a copy of the current action - if armature.animation_data and armature.animation_data.action: - curr_action = armature.animation_data.action - copy_action = curr_action.copy() - - curr_frame_range = curr_action.frame_range - - starting_frames.append(curr_frame_range[0]) - ending_frames.append(curr_frame_range[1]) - else: - self.log.info( - f"Armature '{armature.name}' has no animation, " - f"skipping FBX animation extraction for {instance}." 
- )
- return
-
- asset_group_name = asset_group.name
- asset_name = asset_group.get(AVALON_PROPERTY).get("asset_name")
- if asset_name:
- # Rename for the export; this data is only present when loaded
- # from a JSON Layout (layout family)
- asset_group.name = asset_name
-
- # Remove : from the armature name for the export
- armature_name = armature.name
- original_name = armature_name.split(':')[1]
- armature.name = original_name
-
- object_action_pairs.append((armature, copy_action))
- original_actions.append(curr_action)
-
- # We compute the starting and ending frames
- min_frame = min(starting_frames)
- max_frame = max(ending_frames)
-
- # We bake the copy of the current action for each object
- bpy_extras.anim_utils.bake_action_objects(
- object_action_pairs,
- frames=range(int(min_frame), int(max_frame)),
- do_object=False,
- do_clean=False
- )
-
- for obj in bpy.data.objects:
- obj.select_set(False)
-
- root.select_set(True)
- armature.select_set(True)
- folder_name = instance.data["folderEntity"]["name"]
- product_name = instance.data["productName"]
- instance_name = f"{folder_name}_{product_name}"
- fbx_filename = f"{instance_name}_{armature.name}.fbx"
- filepath = os.path.join(stagingdir, fbx_filename)
-
- override = plugin.create_blender_context(
- active=root, selected=[root, armature])
-
- with bpy.context.temp_override(**override):
- # We export the fbx
- bpy.ops.export_scene.fbx(
- filepath=filepath,
- use_active_collection=False,
- use_selection=True,
- bake_anim_use_nla_strips=False,
- bake_anim_use_all_actions=False,
- add_leaf_bones=False,
- armature_nodetype='ROOT',
- object_types={'EMPTY', 'ARMATURE'}
- )
-
- armature.name = armature_name
- asset_group.name = asset_group_name
- root.select_set(True)
- armature.select_set(False)
-
- # We delete the baked action and set the original one back
- for i in range(0, len(object_action_pairs)):
- pair = object_action_pairs[i]
- action = original_actions[i]
-
- if action:
- pair[0].animation_data.action = action
-
- if pair[1]:
- pair[1].user_clear()
- bpy.data.actions.remove(pair[1])
-
- json_filename = f"{instance_name}.json"
- json_path = os.path.join(stagingdir, json_filename)
-
- json_dict = {
- "instance_name": asset_group.get(AVALON_PROPERTY).get("objectName")
- }
-
- # collection = instance.data.get("name")
- # container = None
- # for obj in bpy.data.collections[collection].objects:
- # if obj.type == "ARMATURE":
- # container_name = obj.get("avalon").get("container_name")
- # container = bpy.data.collections[container_name]
- # if container:
- # json_dict = {
- # "instance_name": container.get("avalon").get("instance_name")
- # }
-
- with open(json_path, "w+") as file:
- json.dump(json_dict, fp=file, indent=2)
-
- if "representations" not in instance.data:
- instance.data["representations"] = []
-
- fbx_representation = {
- 'name': 'fbx',
- 'ext': 'fbx',
- 'files': fbx_filename,
- "stagingDir": stagingdir,
- }
- json_representation = {
- 'name': 'json',
- 'ext': 'json',
- 'files': json_filename,
- "stagingDir": stagingdir,
- }
- instance.data["representations"].append(fbx_representation)
- instance.data["representations"].append(json_representation)
-
- self.log.debug("Extracted instance '{}' to: {}".format(
- instance.name, fbx_representation))
diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_layout.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_layout.py
deleted file mode 100644
index 0732d29c9d..0000000000
--- 
a/server_addon/blender/client/ayon_blender/plugins/publish/extract_layout.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import os
-import json
-
-import bpy
-import bpy_extras
-import bpy_extras.anim_utils
-
-from ayon_api import get_representations
-
-from ayon_core.pipeline import publish
-from ayon_blender.api import plugin
-from ayon_blender.api.pipeline import AVALON_PROPERTY
-
-
-class ExtractLayout(
- plugin.BlenderExtractor, publish.OptionalPyblishPluginMixin
-):
- """Extract a layout."""
-
- label = "Extract Layout (JSON)"
- hosts = ["blender"]
- families = ["layout"]
- optional = True
-
- def _export_animation(self, asset, instance, stagingdir, fbx_count):
- n = fbx_count
-
- for obj in asset.children:
- if obj.type != "ARMATURE":
- continue
-
- object_action_pairs = []
- original_actions = []
-
- starting_frames = []
- ending_frames = []
-
- # For each armature, we make a copy of the current action
- curr_action = None
- copy_action = None
-
- if obj.animation_data and obj.animation_data.action:
- curr_action = obj.animation_data.action
- copy_action = curr_action.copy()
-
- curr_frame_range = curr_action.frame_range
-
- starting_frames.append(curr_frame_range[0])
- ending_frames.append(curr_frame_range[1])
- else:
- self.log.info("Object has no animation.")
- continue
-
- asset_group_name = asset.name
- asset.name = asset.get(AVALON_PROPERTY).get("asset_name")
-
- armature_name = obj.name
- original_name = armature_name.split(':')[1]
- obj.name = original_name
-
- object_action_pairs.append((obj, copy_action))
- original_actions.append(curr_action)
-
- # We compute the starting and ending frames
- min_frame = min(starting_frames)
- max_frame = max(ending_frames)
-
- # We bake the copy of the current action for each object
- bpy_extras.anim_utils.bake_action_objects(
- object_action_pairs,
- frames=range(int(min_frame), int(max_frame)),
- do_object=False,
- do_clean=False
- )
-
- for o in bpy.data.objects:
- o.select_set(False)
-
- asset.select_set(True)
- obj.select_set(True)
- fbx_filename = f"{n:03d}.fbx"
- filepath = os.path.join(stagingdir, fbx_filename)
-
- override = plugin.create_blender_context(
- active=asset, selected=[asset, obj])
- with bpy.context.temp_override(**override):
- # We export the fbx
- bpy.ops.export_scene.fbx(
- filepath=filepath,
- use_active_collection=False,
- use_selection=True,
- bake_anim_use_nla_strips=False,
- bake_anim_use_all_actions=False,
- add_leaf_bones=False,
- armature_nodetype='ROOT',
- object_types={'EMPTY', 'ARMATURE'}
- )
- obj.name = armature_name
- asset.name = asset_group_name
- asset.select_set(False)
- obj.select_set(False)
-
- # We delete the baked action and set the original one back
- for i in range(0, len(object_action_pairs)):
- pair = object_action_pairs[i]
- action = original_actions[i]
-
- if action:
- pair[0].animation_data.action = action
-
- if pair[1]:
- pair[1].user_clear()
- bpy.data.actions.remove(pair[1])
-
- return fbx_filename, n + 1
-
- return None, n
-
- def process(self, instance):
- if not self.is_active(instance.data):
- return
-
- # Define extract output file path
- stagingdir = self.staging_dir(instance)
-
- # Perform extraction
- self.log.debug("Performing extraction..")
-
- if "representations" not in instance.data:
- instance.data["representations"] = []
-
- json_data = []
- fbx_files = []
-
- asset_group = instance.data["transientData"]["instance_node"]
-
- fbx_count = 0
-
- project_name = instance.context.data["projectName"]
- version_ids = set()
- filtered_assets = []
- for asset in asset_group.children:
- 
metadata = asset.get(AVALON_PROPERTY) - if not metadata: - # Avoid raising error directly if there's just invalid data - # inside the instance; better to log it to the artist - # TODO: This should actually be validated in a validator - self.log.warning( - f"Found content in layout that is not a loaded " - f"asset, skipping: {asset.name_full}" - ) - continue - - filtered_assets.append((asset, metadata)) - version_ids.add(metadata["parent"]) - - repre_entities = get_representations( - project_name, - representation_names={"blend", "fbx", "abc"}, - version_ids=version_ids, - fields={"id", "versionId", "name"} - ) - repre_mapping_by_version_id = { - version_id: {} - for version_id in version_ids - } - for repre_entity in repre_entities: - version_id = repre_entity["versionId"] - repre_mapping_by_version_id[version_id][repre_entity["name"]] = ( - repre_entity - ) - - for asset, metadata in filtered_assets: - version_id = metadata["parent"] - product_type = metadata.get("product_type") - if product_type is None: - product_type = metadata["family"] - - repres_by_name = repre_mapping_by_version_id[version_id] - - self.log.debug("Parent: {}".format(version_id)) - # Get blend, fbx and abc reference - blend_id = repres_by_name.get("blend", {}).get("id") - fbx_id = repres_by_name.get("fbx", {}).get("id") - abc_id = repres_by_name.get("abc", {}).get("id") - json_element = { - key: value - for key, value in ( - ("reference", blend_id), - ("reference_fbx", fbx_id), - ("reference_abc", abc_id), - ) - if value - } - json_element["product_type"] = product_type - json_element["instance_name"] = asset.name - json_element["asset_name"] = metadata["asset_name"] - json_element["file_path"] = metadata["libpath"] - - json_element["transform"] = { - "translation": { - "x": asset.location.x, - "y": asset.location.y, - "z": asset.location.z - }, - "rotation": { - "x": asset.rotation_euler.x, - "y": asset.rotation_euler.y, - "z": asset.rotation_euler.z - }, - "scale": { - "x": asset.scale.x, - "y": asset.scale.y, - "z": asset.scale.z - } - } - - json_element["transform_matrix"] = [] - - for row in list(asset.matrix_world.transposed()): - json_element["transform_matrix"].append(list(row)) - - json_element["basis"] = [ - [1, 0, 0, 0], - [0, -1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1] - ] - - # Extract the animation as well - if product_type == "rig": - f, n = self._export_animation( - asset, instance, stagingdir, fbx_count) - if f: - fbx_files.append(f) - json_element["animation"] = f - fbx_count = n - - json_data.append(json_element) - - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - instance_name = f"{folder_name}_{product_name}" - json_filename = f"{instance_name}.json" - - json_path = os.path.join(stagingdir, json_filename) - - with open(json_path, "w+") as file: - json.dump(json_data, fp=file, indent=2) - - json_representation = { - 'name': 'json', - 'ext': 'json', - 'files': json_filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(json_representation) - - self.log.debug(fbx_files) - - if len(fbx_files) == 1: - fbx_representation = { - 'name': 'fbx', - 'ext': '000.fbx', - 'files': fbx_files[0], - "stagingDir": stagingdir, - } - instance.data["representations"].append(fbx_representation) - elif len(fbx_files) > 1: - fbx_representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': fbx_files, - "stagingDir": stagingdir, - } - instance.data["representations"].append(fbx_representation) - - self.log.debug("Extracted instance '%s' to: 
%s", - instance.name, json_representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_playblast.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_playblast.py deleted file mode 100644 index 0f769c296d..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_playblast.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import json - -import clique -import pyblish.api - -import bpy - -from ayon_core.pipeline import publish -from ayon_blender.api import capture, plugin -from ayon_blender.api.lib import maintained_time - - -class ExtractPlayblast( - plugin.BlenderExtractor, publish.OptionalPyblishPluginMixin -): - """ - Extract viewport playblast. - - Takes review camera and creates review Quicktime video based on viewport - capture. - """ - - label = "Extract Playblast" - hosts = ["blender"] - families = ["review"] - optional = True - order = pyblish.api.ExtractorOrder + 0.01 - - presets = "{}" - - def process(self, instance): - if not self.is_active(instance.data): - return - - # get scene fps - fps = instance.data.get("fps") - if fps is None: - fps = bpy.context.scene.render.fps - instance.data["fps"] = fps - - self.log.debug(f"fps: {fps}") - - # If start and end frames cannot be determined, - # get them from Blender timeline. - start = instance.data.get("frameStart", bpy.context.scene.frame_start) - end = instance.data.get("frameEnd", bpy.context.scene.frame_end) - - self.log.debug(f"start: {start}, end: {end}") - assert end > start, "Invalid time range !" - - # get cameras - camera = instance.data("review_camera", None) - - # get isolate objects list - isolate = instance.data("isolate", None) - - # get output path - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - filename = f"{folder_name}_{product_name}" - - path = os.path.join(stagingdir, filename) - - self.log.debug(f"Outputting images to {path}") - - presets = json.loads(self.presets) - preset = presets.get("default") - preset.update({ - "camera": camera, - "start_frame": start, - "end_frame": end, - "filename": path, - "overwrite": True, - "isolate": isolate, - }) - preset.setdefault( - "image_settings", - { - "file_format": "PNG", - "color_mode": "RGB", - "color_depth": "8", - "compression": 15, - }, - ) - - with maintained_time(): - path = capture(**preset) - - self.log.debug(f"playblast path {path}") - - collected_files = os.listdir(stagingdir) - collections, remainder = clique.assemble( - collected_files, - patterns=[f"{filename}\\.{clique.DIGITS_PATTERN}\\.png$"], - ) - - if len(collections) > 1: - raise RuntimeError( - f"More than one collection found in stagingdir: {stagingdir}" - ) - elif len(collections) == 0: - raise RuntimeError( - f"No collection found in stagingdir: {stagingdir}" - ) - - frame_collection = collections[0] - - self.log.debug(f"Found collection of interest {frame_collection}") - - instance.data.setdefault("representations", []) - - tags = ["review"] - if not instance.data.get("keepImages"): - tags.append("delete") - - representation = { - "name": "png", - "ext": "png", - "files": list(frame_collection), - "stagingDir": stagingdir, - "frameStart": start, - "frameEnd": end, - "fps": fps, - "tags": tags, - "camera_name": camera - } - instance.data["representations"].append(representation) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_thumbnail.py 
b/server_addon/blender/client/ayon_blender/plugins/publish/extract_thumbnail.py deleted file mode 100644 index e3bce8bf73..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import glob -import json - -import pyblish.api -from ayon_blender.api import capture, plugin -from ayon_blender.api.lib import maintained_time - -import bpy - - -class ExtractThumbnail(plugin.BlenderExtractor): - """Extract viewport thumbnail. - - Takes review camera and creates a thumbnail based on viewport - capture. - - """ - - label = "Extract Thumbnail" - hosts = ["blender"] - families = ["review"] - order = pyblish.api.ExtractorOrder + 0.01 - presets = "{}" - - def process(self, instance): - self.log.debug("Extracting capture..") - - if instance.data.get("thumbnailSource"): - self.log.debug("Thumbnail source found, skipping...") - return - - stagingdir = self.staging_dir(instance) - folder_name = instance.data["folderEntity"]["name"] - product_name = instance.data["productName"] - filename = f"{folder_name}_{product_name}" - - path = os.path.join(stagingdir, filename) - - self.log.debug(f"Outputting images to {path}") - - camera = instance.data.get("review_camera", "AUTO") - start = instance.data.get("frameStart", bpy.context.scene.frame_start) - product_type = instance.data["productType"] - isolate = instance.data("isolate", None) - - presets = json.loads(self.presets) - preset = presets.get(product_type, {}) - - preset.update({ - "camera": camera, - "start_frame": start, - "end_frame": start, - "filename": path, - "overwrite": True, - "isolate": isolate, - }) - preset.setdefault( - "image_settings", - { - "file_format": "JPEG", - "color_mode": "RGB", - "quality": 100, - }, - ) - - with maintained_time(): - path = capture(**preset) - - thumbnail = os.path.basename(self._fix_output_path(path)) - - self.log.debug(f"thumbnail: {thumbnail}") - - instance.data.setdefault("representations", []) - - representation = { - "name": "thumbnail", - "ext": "jpg", - "files": thumbnail, - "stagingDir": stagingdir, - "thumbnail": True - } - instance.data["representations"].append(representation) - - def _fix_output_path(self, filepath): - """Workaround to return correct filepath. - - To workaround this we just glob.glob() for any file extensions and - assume the latest modified file is the correct file and return it. - - """ - # Catch cancelled playblast - if filepath is None: - self.log.warning( - "Playblast did not result in output path. " - "Playblast is probably interrupted." - ) - return None - - if not os.path.exists(filepath): - files = glob.glob(f"{filepath}.*.jpg") - - if not files: - raise RuntimeError(f"Couldn't find playblast from: {filepath}") - filepath = max(files, key=os.path.getmtime) - - return filepath diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/extract_usd.py b/server_addon/blender/client/ayon_blender/plugins/publish/extract_usd.py deleted file mode 100644 index 7ea89ae3dc..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/extract_usd.py +++ /dev/null @@ -1,90 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline import KnownPublishError -from ayon_blender.api import plugin, lib - - -class ExtractUSD(plugin.BlenderExtractor): - """Extract as USD.""" - - label = "Extract USD" - hosts = ["blender"] - families = ["usd"] - - def process(self, instance): - - # Ignore runtime instances (e.g. 
USD layers) - # TODO: This is better done via more specific `families` - if not instance.data.get("transientData", {}).get("instance_node"): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = f"{instance.name}.usd" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - # Select all members to "export selected" - plugin.deselect_all() - - selected = [] - for obj in instance: - if isinstance(obj, bpy.types.Object): - obj.select_set(True) - selected.append(obj) - - root = lib.get_highest_root(objects=instance[:]) - if not root: - instance_node = instance.data["transientData"]["instance_node"] - raise KnownPublishError( - f"No root object found in instance: {instance_node.name}" - ) - self.log.debug(f"Exporting using active root: {root.name}") - - context = plugin.create_blender_context( - active=root, selected=selected) - - # Export USD - with bpy.context.temp_override(**context): - bpy.ops.wm.usd_export( - filepath=filepath, - selected_objects_only=True, - export_textures=False, - relative_paths=False, - export_animation=False, - export_hair=False, - export_uvmaps=True, - # TODO: add for new version of Blender (4+?) - # export_mesh_colors=True, - export_normals=True, - export_materials=True, - use_instancing=True - ) - - plugin.deselect_all() - - # Add representation - representation = { - 'name': 'usd', - 'ext': 'usd', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data.setdefault("representations", []).append(representation) - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) - - -class ExtractModelUSD(ExtractUSD): - """Extract model as USD.""" - - label = "Extract USD (Model)" - hosts = ["blender"] - families = ["model"] - - # Driven by settings - optional = True diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/increment_workfile_version.py b/server_addon/blender/client/ayon_blender/plugins/publish/increment_workfile_version.py deleted file mode 100644 index 50d16ea54a..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/increment_workfile_version.py +++ /dev/null @@ -1,33 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import OptionalPyblishPluginMixin -from ayon_blender.api.workio import save_file -from ayon_blender.api import plugin - - -class IncrementWorkfileVersion( - plugin.BlenderContextPlugin, - OptionalPyblishPluginMixin -): - """Increment current workfile version.""" - - order = pyblish.api.IntegratorOrder + 0.9 - label = "Increment Workfile Version" - optional = True - hosts = ["blender"] - families = ["animation", "model", "rig", "action", "layout", "blendScene", - "pointcache", "render.farm"] - - def process(self, context): - if not self.is_active(context.data): - return - - assert all(result["success"] for result in context.data["results"]), ( - "Publishing not successful so version is not increased.") - - from ayon_core.lib import version_up - path = context.data["currentFile"] - filepath = version_up(path) - - save_file(filepath, copy=False) - - self.log.debug('Incrementing blender workfile version') diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/integrate_animation.py b/server_addon/blender/client/ayon_blender/plugins/publish/integrate_animation.py deleted file mode 100644 index b95c280ab0..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/integrate_animation.py +++ /dev/null @@ -1,54 +0,0 @@ -import json - 
-import pyblish.api -from ayon_core.pipeline.publish import OptionalPyblishPluginMixin -from ayon_blender.api import plugin - - -class IntegrateAnimation( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin, -): - """Generate a JSON file for animation.""" - - label = "Integrate Animation" - order = pyblish.api.IntegratorOrder + 0.1 - optional = True - hosts = ["blender"] - families = ["setdress"] - - def process(self, instance): - self.log.debug("Integrate Animation") - - representation = instance.data.get('representations')[0] - json_path = representation.get('publishedFiles')[0] - - with open(json_path, "r") as file: - data = json.load(file) - - # Update the json file for the setdress to add the published - # representations of the animations - for json_dict in data: - json_product_name = json_dict["productName"] - i = None - for elem in instance.context: - if elem.data["productName"] == json_product_name: - i = elem - break - if not i: - continue - rep = None - pub_repr = i.data["published_representations"] - for elem in pub_repr: - if pub_repr[elem]["representation"]["name"] == "fbx": - rep = pub_repr[elem] - break - if not rep: - continue - obj_id = rep["representation"]["id"] - - if obj_id: - json_dict["representation_id"] = str(obj_id) - - with open(json_path, "w") as file: - json.dump(data, fp=file, indent=2) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_camera_zero_keyframe.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_camera_zero_keyframe.py deleted file mode 100644 index df66f71dc5..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_camera_zero_keyframe.py +++ /dev/null @@ -1,57 +0,0 @@ -from typing import List - -import bpy - -import ayon_blender.api.action -from ayon_blender.api import plugin -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) - - -class ValidateCameraZeroKeyframe( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin -): - """Camera must have a keyframe at frame 0. - - Unreal shifts the first keyframe to frame 0. Forcing the camera to have - a keyframe at frame 0 will ensure that the animation will be the same - in Unreal and Blender. 
- """ - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["camera"] - label = "Zero Keyframe" - actions = [ayon_blender.api.action.SelectInvalidAction] - - @staticmethod - def get_invalid(instance) -> List: - invalid = [] - for obj in instance: - if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA": - if obj.animation_data and obj.animation_data.action: - action = obj.animation_data.action - frames_set = set() - for fcu in action.fcurves: - for kp in fcu.keyframe_points: - frames_set.add(kp.co[0]) - frames = list(frames_set) - frames.sort() - if frames[0] != 0.0: - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - names = ", ".join(obj.name for obj in invalid) - raise PublishValidationError( - f"Camera must have a keyframe at frame 0: {names}" - ) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_deadline_publish.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_deadline_publish.py deleted file mode 100644 index fe544ee310..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_deadline_publish.py +++ /dev/null @@ -1,61 +0,0 @@ -import os - -import bpy - -from ayon_core.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_blender.api import plugin -from ayon_blender.api.render_lib import prepare_rendering - - -class ValidateDeadlinePublish( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin -): - """Validates Render File Directory is - not the same in every submission - """ - - order = ValidateContentsOrder - families = ["render"] - hosts = ["blender"] - label = "Validate Render Output for Deadline" - optional = True - actions = [RepairAction] - - def process(self, instance): - if not self.is_active(instance.data): - return - - tree = bpy.context.scene.node_tree - output_type = "CompositorNodeOutputFile" - output_node = None - # Remove all output nodes that include "AYON" in the name. - # There should be only one. - for node in tree.nodes: - if node.bl_idname == output_type and "AYON" in node.name: - output_node = node - break - if not output_node: - raise PublishValidationError( - "No output node found in the compositor tree." - ) - filepath = bpy.data.filepath - file = os.path.basename(filepath) - filename, ext = os.path.splitext(file) - if filename not in output_node.base_path: - raise PublishValidationError( - "Render output folder doesn't match the blender scene name! " - "Use Repair action to fix the folder file path." 
- ) - - @classmethod - def repair(cls, instance): - container = instance.data["transientData"]["instance_node"] - prepare_rendering(container) - bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath) - cls.log.debug("Reset the render output folder...") diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_file_saved.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_file_saved.py deleted file mode 100644 index e6b7b01c71..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_file_saved.py +++ /dev/null @@ -1,66 +0,0 @@ -import bpy - -import pyblish.api - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError -) -from ayon_blender.api import plugin - - -class SaveWorkfileAction(pyblish.api.Action): - """Save Workfile.""" - label = "Save Workfile" - on = "failed" - icon = "save" - - def process(self, context, plugin): - bpy.ops.wm.avalon_workfiles() - - -class ValidateFileSaved( - plugin.BlenderContextPlugin, - OptionalPyblishPluginMixin -): - """Validate that the workfile has been saved.""" - - order = pyblish.api.ValidatorOrder - 0.01 - hosts = ["blender"] - label = "Validate File Saved" - optional = False - # TODO rename to 'exclude_product_types' - exclude_families = [] - actions = [SaveWorkfileAction] - - def process(self, context): - if not self.is_active(context.data): - return - - if not context.data["currentFile"]: - # File has not been saved at all and has no filename - raise PublishValidationError( - "Current workfile has not been saved yet.\n" - "Save the workfile before continuing." - ) - - # Do not validate workfile has unsaved changes if only instances - # present of families that should be excluded - product_types = { - instance.data["productType"] for instance in context - # Consider only enabled instances - if instance.data.get("publish", True) - and instance.data.get("active", True) - } - - def is_excluded(family): - return any(family in exclude_family - for exclude_family in self.exclude_families) - - if all(is_excluded(product_type) for product_type in product_types): - self.log.debug("Only excluded families found, skipping workfile " - "unsaved changes validation..") - return - - if bpy.data.is_dirty: - raise PublishValidationError("Workfile has unsaved changes.") diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_instance_empty.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_instance_empty.py deleted file mode 100644 index 9561cc7020..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_instance_empty.py +++ /dev/null @@ -1,20 +0,0 @@ -import pyblish.api -from ayon_core.pipeline.publish import PublishValidationError -from ayon_blender.api import plugin - - -class ValidateInstanceEmpty(plugin.BlenderInstancePlugin): - """Validator to verify that the instance is not empty""" - - order = pyblish.api.ValidatorOrder - 0.01 - hosts = ["blender"] - families = ["model", "pointcache", "rig", "camera" "layout", "blendScene"] - label = "Validate Instance is not Empty" - optional = False - - def process(self, instance): - # Members are collected by `collect_instance` so we only need to check - # whether any member is included. 
The instance node will be included - # as a member as well, hence we will check for at least 2 members - if len(instance) < 2: - raise PublishValidationError(f"Instance {instance.name} is empty.") diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_has_uv.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_has_uv.py deleted file mode 100644 index 3dd49e0e45..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_has_uv.py +++ /dev/null @@ -1,65 +0,0 @@ -from typing import List - -import bpy - -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) -import ayon_blender.api.action -from ayon_blender.api import plugin - - -class ValidateMeshHasUvs( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin, -): - """Validate that the current mesh has UV's.""" - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model"] - label = "Mesh Has UVs" - actions = [ayon_blender.api.action.SelectInvalidAction] - optional = True - - @staticmethod - def has_uvs(obj: bpy.types.Object) -> bool: - """Check if an object has uv's.""" - if not obj.data.uv_layers: - return False - for uv_layer in obj.data.uv_layers: - for polygon in obj.data.polygons: - for loop_index in polygon.loop_indices: - if ( - loop_index >= len(uv_layer.data) - or not uv_layer.data[loop_index].uv - ): - return False - - return True - - @classmethod - def get_invalid(cls, instance) -> List: - invalid = [] - for obj in instance: - if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': - if obj.mode != "OBJECT": - cls.log.warning( - f"Mesh object {obj.name} should be in 'OBJECT' mode" - " to be properly checked." - ) - if not cls.has_uvs(obj): - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - f"Meshes found in instance without valid UV's: {invalid}" - ) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_no_negative_scale.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_no_negative_scale.py deleted file mode 100644 index 91de310e46..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_mesh_no_negative_scale.py +++ /dev/null @@ -1,44 +0,0 @@ -from typing import List - -import bpy - -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) -import ayon_blender.api.action -from ayon_blender.api import plugin - - -class ValidateMeshNoNegativeScale( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin -): - """Ensure that meshes don't have a negative scale.""" - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model"] - label = "Mesh No Negative Scale" - actions = [ayon_blender.api.action.SelectInvalidAction] - - @staticmethod - def get_invalid(instance) -> List: - invalid = [] - for obj in instance: - if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': - if any(v < 0 for v in obj.scale): - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - names = ", ".join(obj.name for obj in invalid) - raise PublishValidationError( - f"Meshes found in instance with negative scale: {names}" - ) diff --git 
a/server_addon/blender/client/ayon_blender/plugins/publish/validate_model_uv_map1.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_model_uv_map1.py deleted file mode 100644 index 74f550b6db..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_model_uv_map1.py +++ /dev/null @@ -1,93 +0,0 @@ -import inspect -from typing import List - -import bpy - -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction -) -import ayon_blender.api.action -from ayon_blender.api import plugin - - -class ValidateModelMeshUvMap1( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin, -): - """Validate model mesh uvs are named `map1`. - - This is solely to get them to work nicely for the Maya pipeline. - """ - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model"] - label = "Mesh UVs named map1" - actions = [ayon_blender.api.action.SelectInvalidAction, - RepairAction] - optional = True - enabled = False - - @classmethod - def get_invalid(cls, instance) -> List: - - invalid = [] - for obj in instance: - if obj.mode != "OBJECT": - cls.log.warning( - f"Mesh object {obj.name} should be in 'OBJECT' mode" - " to be properly checked." - ) - - obj_data = obj.data - if isinstance(obj_data, bpy.types.Mesh): - mesh = obj_data - - # Ignore mesh without UVs - if not mesh.uv_layers: - continue - - # If mesh has map1 all is ok - if mesh.uv_layers.get("map1"): - continue - - cls.log.warning( - f"Mesh object {obj.name} should be in 'OBJECT' mode" - " to be properly checked." - ) - invalid.append(obj) - - return invalid - - @classmethod - def repair(cls, instance): - for obj in cls.get_invalid(instance): - mesh = obj.data - - # Rename the first UV set to map1 - mesh.uv_layers[0].name = "map1" - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - f"Meshes found in instance without valid UV's: {invalid}", - description=self.get_description() - ) - - def get_description(self): - return inspect.cleandoc( - """## Meshes must have map1 uv set - - To accompany a better Maya-focused pipeline with Alembics it is - expected that a Mesh has a `map1` UV set. Blender defaults to - a UV set named `UVMap` and thus needs to be renamed. - - """ - ) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_no_colons_in_name.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_no_colons_in_name.py deleted file mode 100644 index dbafb53263..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_no_colons_in_name.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import List - -import bpy - -import ayon_blender.api.action -from ayon_blender.api import plugin -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) - - -class ValidateNoColonsInName( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin -): - """There cannot be colons in names - - Object or bone names cannot include colons. Other software do not - handle colons correctly. 
- - """ - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model", "rig"] - label = "No Colons in names" - actions = [ayon_blender.api.action.SelectInvalidAction] - - @staticmethod - def get_invalid(instance) -> List: - invalid = [] - for obj in instance: - if ':' in obj.name: - invalid.append(obj) - if isinstance(obj, bpy.types.Object) and obj.type == 'ARMATURE': - for bone in obj.data.bones: - if ':' in bone.name: - invalid.append(obj) - break - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - names = ", ".join(obj.name for obj in invalid) - raise PublishValidationError( - f"Objects found with colon in name: {names}" - ) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_object_mode.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_object_mode.py deleted file mode 100644 index 4dc2c39949..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_object_mode.py +++ /dev/null @@ -1,44 +0,0 @@ -from typing import List - -import bpy - -import pyblish.api -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError -) -import ayon_blender.api.action -from ayon_blender.api import plugin - - -class ValidateObjectIsInObjectMode( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin, -): - """Validate that the objects in the instance are in Object Mode.""" - - order = pyblish.api.ValidatorOrder - 0.01 - hosts = ["blender"] - families = ["model", "rig", "layout"] - label = "Validate Object Mode" - actions = [ayon_blender.api.action.SelectInvalidAction] - optional = False - - @staticmethod - def get_invalid(instance) -> List: - invalid = [] - for obj in instance: - if isinstance(obj, bpy.types.Object) and obj.mode != "OBJECT": - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - names = ", ".join(obj.name for obj in invalid) - raise PublishValidationError( - f"Object found in instance is not in Object Mode: {names}" - ) diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_render_camera_is_set.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_render_camera_is_set.py deleted file mode 100644 index 15eb64d2ad..0000000000 --- a/server_addon/blender/client/ayon_blender/plugins/publish/validate_render_camera_is_set.py +++ /dev/null @@ -1,29 +0,0 @@ -import bpy - -import pyblish.api - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError -) -from ayon_blender.api import plugin - - -class ValidateRenderCameraIsSet( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin -): - """Validate that there is a camera set as active for rendering.""" - - order = pyblish.api.ValidatorOrder - hosts = ["blender"] - families = ["render"] - label = "Validate Render Camera Is Set" - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - - if not bpy.context.scene.camera: - raise PublishValidationError("No camera is active for rendering.") diff --git a/server_addon/blender/client/ayon_blender/plugins/publish/validate_transform_zero.py b/server_addon/blender/client/ayon_blender/plugins/publish/validate_transform_zero.py deleted file mode 100644 index c7bfc6e8a6..0000000000 --- 
a/server_addon/blender/client/ayon_blender/plugins/publish/validate_transform_zero.py +++ /dev/null @@ -1,94 +0,0 @@ -import inspect -from typing import List - -import mathutils -import bpy - -from ayon_blender.api import plugin, lib -import ayon_blender.api.action -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError, - RepairAction -) - - -class ValidateTransformZero( - plugin.BlenderInstancePlugin, - OptionalPyblishPluginMixin -): - """Transforms can't have any values""" - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model"] - label = "Transform Zero" - actions = [ayon_blender.api.action.SelectInvalidAction, - RepairAction] - - _identity = mathutils.Matrix() - - @classmethod - def get_invalid(cls, instance) -> List: - invalid = [] - for obj in instance: - if ( - isinstance(obj, bpy.types.Object) - and obj.matrix_basis != cls._identity - ): - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - names = ", ".join(obj.name for obj in invalid) - raise PublishValidationError( - "Objects found in instance which do not" - f" have transform set to zero: {names}", - description=self.get_description() - ) - - @classmethod - def repair(cls, instance): - - invalid = cls.get_invalid(instance) - if not invalid: - return - - context = plugin.create_blender_context( - active=invalid[0], selected=invalid - ) - with lib.maintained_selection(): - with bpy.context.temp_override(**context): - plugin.deselect_all() - for obj in invalid: - obj.select_set(True) - - # TODO: Preferably this does allow custom pivot point locations - # and if so, this should likely apply to the delta instead - # using `bpy.ops.object.transforms_to_deltas(mode="ALL")` - bpy.ops.object.transform_apply(location=True, - rotation=True, - scale=True) - - def get_description(self): - return inspect.cleandoc( - """## Transforms can't have any values. - - The location, rotation and scale on the transform must be at - the default values. This also goes for the delta transforms. - - To solve this issue, try freezing the transforms: - - `Object` > `Apply` > `All Transforms` - - Using the Repair action directly will do the same. - - So long as the transforms, rotation and scale values are zero, - you're all good. 
- """ - ) diff --git a/server_addon/blender/client/ayon_blender/version.py b/server_addon/blender/client/ayon_blender/version.py deleted file mode 100644 index c21b06a8de..0000000000 --- a/server_addon/blender/client/ayon_blender/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'blender' version.""" -__version__ = "0.2.1" diff --git a/server_addon/blender/package.py b/server_addon/blender/package.py deleted file mode 100644 index 1b595e22da..0000000000 --- a/server_addon/blender/package.py +++ /dev/null @@ -1,11 +0,0 @@ -name = "blender" -title = "Blender" -version = "0.2.1" - -client_dir = "ayon_blender" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} - diff --git a/server_addon/blender/server/__init__.py b/server_addon/blender/server/__init__.py deleted file mode 100644 index b274e3bc29..0000000000 --- a/server_addon/blender/server/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Type - -from ayon_server.addons import BaseServerAddon - -from .settings import BlenderSettings, DEFAULT_VALUES - - -class BlenderAddon(BaseServerAddon): - settings_model: Type[BlenderSettings] = BlenderSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_VALUES) diff --git a/server_addon/blender/server/settings/__init__.py b/server_addon/blender/server/settings/__init__.py deleted file mode 100644 index 3d51e5c3e1..0000000000 --- a/server_addon/blender/server/settings/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .main import ( - BlenderSettings, - DEFAULT_VALUES, -) - - -__all__ = ( - "BlenderSettings", - "DEFAULT_VALUES", -) diff --git a/server_addon/blender/server/settings/imageio.py b/server_addon/blender/server/settings/imageio.py deleted file mode 100644 index 06eec09e7b..0000000000 --- a/server_addon/blender/server/settings/imageio.py +++ /dev/null @@ -1,63 +0,0 @@ -from pydantic import validator -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.settings.validators import ensure_unique_names - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." 
- ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class BlenderImageIOModel(BaseSettingsModel): - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) diff --git a/server_addon/blender/server/settings/main.py b/server_addon/blender/server/settings/main.py deleted file mode 100644 index 3cca22ae3b..0000000000 --- a/server_addon/blender/server/settings/main.py +++ /dev/null @@ -1,70 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - TemplateWorkfileBaseOptions, -) - -from .imageio import BlenderImageIOModel -from .publish_plugins import ( - PublishPluginsModel, - DEFAULT_BLENDER_PUBLISH_SETTINGS -) -from .render_settings import ( - RenderSettingsModel, - DEFAULT_RENDER_SETTINGS -) - - -class UnitScaleSettingsModel(BaseSettingsModel): - enabled: bool = SettingsField(True, title="Enabled") - apply_on_opening: bool = SettingsField( - False, title="Apply on Opening Existing Files") - base_file_unit_scale: float = SettingsField( - 1.0, title="Base File Unit Scale" - ) - - -class BlenderSettings(BaseSettingsModel): - unit_scale_settings: UnitScaleSettingsModel = SettingsField( - default_factory=UnitScaleSettingsModel, - title="Set Unit Scale" - ) - set_resolution_startup: bool = SettingsField( - True, - title="Set Resolution on Startup" - ) - set_frames_startup: bool = SettingsField( - True, - title="Set Start/End Frames and FPS on Startup" - ) - imageio: BlenderImageIOModel = SettingsField( - default_factory=BlenderImageIOModel, - title="Color Management (ImageIO)" - ) - RenderSettings: RenderSettingsModel = SettingsField( - default_factory=RenderSettingsModel, title="Render Settings") - workfile_builder: TemplateWorkfileBaseOptions = SettingsField( - default_factory=TemplateWorkfileBaseOptions, - title="Workfile Builder" - ) - publish: PublishPluginsModel = SettingsField( - default_factory=PublishPluginsModel, - title="Publish Plugins" - ) - - -DEFAULT_VALUES = { - "unit_scale_settings": { - "enabled": True, - "apply_on_opening": False, - "base_file_unit_scale": 1.00 - }, - "set_frames_startup": True, - "set_resolution_startup": True, - "RenderSettings": DEFAULT_RENDER_SETTINGS, - "publish": DEFAULT_BLENDER_PUBLISH_SETTINGS, - "workfile_builder": { - "create_first_version": False, - "custom_templates": [] - } -} diff --git a/server_addon/blender/server/settings/publish_plugins.py b/server_addon/blender/server/settings/publish_plugins.py deleted file mode 100644 index 8db8c5be46..0000000000 --- a/server_addon/blender/server/settings/publish_plugins.py +++ /dev/null @@ -1,361 +0,0 @@ -import json -from pydantic import validator -from ayon_server.exceptions import BadRequestException -from ayon_server.settings import BaseSettingsModel, 
SettingsField - - -def validate_json_dict(value): - if not value.strip(): - return "{}" - try: - converted_value = json.loads(value) - success = isinstance(converted_value, dict) - except json.JSONDecodeError: - success = False - - if not success: - raise BadRequestException( - "Environment's can't be parsed as json object" - ) - return value - - -class ValidatePluginModel(BaseSettingsModel): - enabled: bool = SettingsField(True) - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - - -class ValidateFileSavedModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateFileSaved") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - exclude_families: list[str] = SettingsField( - default_factory=list, - title="Exclude product types" - ) - - -class ExtractBlendModel(BaseSettingsModel): - enabled: bool = SettingsField(True) - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - families: list[str] = SettingsField( - default_factory=list, - title="Families" - ) - compress: bool = SettingsField(True, title="Compress") - - -class ExtractBlendAnimationModel(BaseSettingsModel): - enabled: bool = SettingsField(True) - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - compress: bool = SettingsField(False, title="Compress") - - -class ExtractPlayblastModel(BaseSettingsModel): - enabled: bool = SettingsField(True) - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - presets: str = SettingsField("", title="Presets", widget="textarea") - compress: bool = SettingsField(False, title="Compress") - - @validator("presets") - def validate_json(cls, value): - return validate_json_dict(value) - - -class PublishPluginsModel(BaseSettingsModel): - ValidateCameraZeroKeyframe: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Camera Zero Keyframe", - section="General Validators" - ) - ValidateFileSaved: ValidateFileSavedModel = SettingsField( - default_factory=ValidateFileSavedModel, - title="Validate File Saved", - ) - ValidateInstanceEmpty: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Instance is not Empty" - ) - ValidateMeshHasUvs: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Mesh Has Uvs", - section="Model Validators" - ) - ValidateMeshNoNegativeScale: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Mesh No Negative Scale" - ) - ValidateModelMeshUvMap1: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Model Mesh Has UV map named map1" - ) - ValidateTransformZero: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Transform Zero" - ) - ValidateNoColonsInName: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate No Colons In Name" - ) - ValidateRenderCameraIsSet: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Render Camera Is Set", - section="Render Validators" - ) - ValidateDeadlinePublish: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Validate Render Output for Deadline", - ) - ExtractBlend: ExtractBlendModel = SettingsField( - default_factory=ExtractBlendModel, - 
title="Extract Blend", - section="Extractors" - ) - ExtractFBX: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Extract FBX" - ) - ExtractModelABC: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Extract ABC" - ) - ExtractBlendAnimation: ExtractBlendAnimationModel = SettingsField( - default_factory=ExtractBlendAnimationModel, - title="Extract Blend Animation" - ) - ExtractAnimationFBX: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Extract Animation FBX" - ) - ExtractCamera: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Extract Camera" - ) - ExtractCameraABC: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Extract Camera as ABC" - ) - ExtractLayout: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Extract Layout (JSON)" - ) - ExtractThumbnail: ExtractPlayblastModel = SettingsField( - default_factory=ExtractPlayblastModel, - title="Extract Thumbnail" - ) - ExtractPlayblast: ExtractPlayblastModel = SettingsField( - default_factory=ExtractPlayblastModel, - title="Extract Playblast" - ) - ExtractModelUSD: ValidatePluginModel = SettingsField( - default_factory=ValidatePluginModel, - title="Extract Model USD" - ) - - -DEFAULT_BLENDER_PUBLISH_SETTINGS = { - "ValidateCameraZeroKeyframe": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateFileSaved": { - "enabled": True, - "optional": False, - "active": True, - "exclude_families": [] - }, - "ValidateRenderCameraIsSet": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateDeadlinePublish": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateMeshHasUvs": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateMeshNoNegativeScale": { - "enabled": True, - "optional": False, - "active": True - }, - "ValidateModelMeshUvMap1": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateTransformZero": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateNoColonsInName": { - "enabled": False, - "optional": True, - "active": True - }, - "ValidateInstanceEmpty": { - "enabled": True, - "optional": False, - "active": True - }, - "ExtractBlend": { - "enabled": True, - "optional": True, - "active": True, - "families": [ - "model", - "camera", - "rig", - "action", - "layout", - "blendScene" - ], - "compress": False - }, - "ExtractFBX": { - "enabled": False, - "optional": True, - "active": True - }, - "ExtractModelABC": { - "enabled": True, - "optional": True, - "active": True - }, - "ExtractBlendAnimation": { - "enabled": True, - "optional": True, - "active": True, - "compress": False - }, - "ExtractAnimationFBX": { - "enabled": False, - "optional": True, - "active": True - }, - "ExtractCamera": { - "enabled": True, - "optional": True, - "active": True - }, - "ExtractCameraABC": { - "enabled": True, - "optional": True, - "active": True - }, - "ExtractLayout": { - "enabled": True, - "optional": True, - "active": False - }, - "ExtractThumbnail": { - "enabled": True, - "optional": True, - "active": True, - "presets": json.dumps( - { - "model": { - "image_settings": { - "file_format": "JPEG", - "color_mode": "RGB", - "quality": 100 - }, - "display_options": { - "shading": { - "light": "STUDIO", - "studio_light": "Default", - "type": "SOLID", - "color_type": "OBJECT", - "show_xray": False, - "show_shadows": False, - 
"show_cavity": True - }, - "overlay": { - "show_overlays": False - } - } - }, - "rig": { - "image_settings": { - "file_format": "JPEG", - "color_mode": "RGB", - "quality": 100 - }, - "display_options": { - "shading": { - "light": "STUDIO", - "studio_light": "Default", - "type": "SOLID", - "color_type": "OBJECT", - "show_xray": True, - "show_shadows": False, - "show_cavity": False - }, - "overlay": { - "show_overlays": True, - "show_ortho_grid": False, - "show_floor": False, - "show_axis_x": False, - "show_axis_y": False, - "show_axis_z": False, - "show_text": False, - "show_stats": False, - "show_cursor": False, - "show_annotation": False, - "show_extras": False, - "show_relationship_lines": False, - "show_outline_selected": False, - "show_motion_paths": False, - "show_object_origins": False, - "show_bones": True - } - } - } - }, - indent=4, - ) - }, - "ExtractPlayblast": { - "enabled": True, - "optional": True, - "active": True, - "presets": json.dumps( - { - "default": { - "image_settings": { - "file_format": "PNG", - "color_mode": "RGB", - "color_depth": "8", - "compression": 15 - }, - "display_options": { - "shading": { - "type": "MATERIAL", - "render_pass": "COMBINED" - }, - "overlay": { - "show_overlays": False - } - } - } - }, - indent=4 - ) - }, - "ExtractModelUSD": { - "enabled": True, - "optional": True, - "active": True - } -} diff --git a/server_addon/blender/server/settings/render_settings.py b/server_addon/blender/server/settings/render_settings.py deleted file mode 100644 index f992ea6fcc..0000000000 --- a/server_addon/blender/server/settings/render_settings.py +++ /dev/null @@ -1,158 +0,0 @@ -"""Providing models and values for Blender Render Settings.""" -from ayon_server.settings import BaseSettingsModel, SettingsField - - -def aov_separators_enum(): - return [ - {"value": "dash", "label": "- (dash)"}, - {"value": "underscore", "label": "_ (underscore)"}, - {"value": "dot", "label": ". 
(dot)"} - ] - - -def image_format_enum(): - return [ - {"value": "exr", "label": "OpenEXR"}, - {"value": "bmp", "label": "BMP"}, - {"value": "rgb", "label": "Iris"}, - {"value": "png", "label": "PNG"}, - {"value": "jpg", "label": "JPEG"}, - {"value": "jp2", "label": "JPEG 2000"}, - {"value": "tga", "label": "Targa"}, - {"value": "tif", "label": "TIFF"}, - ] - - -def renderers_enum(): - return [ - {"value": "CYCLES", "label": "Cycles"}, - {"value": "BLENDER_EEVEE", "label": "Eevee"}, - ] - - -def aov_list_enum(): - return [ - {"value": "empty", "label": "< none >"}, - {"value": "combined", "label": "Combined"}, - {"value": "z", "label": "Z"}, - {"value": "mist", "label": "Mist"}, - {"value": "normal", "label": "Normal"}, - {"value": "position", "label": "Position (Cycles Only)"}, - {"value": "vector", "label": "Vector (Cycles Only)"}, - {"value": "uv", "label": "UV (Cycles Only)"}, - {"value": "denoising", "label": "Denoising Data (Cycles Only)"}, - {"value": "object_index", "label": "Object Index (Cycles Only)"}, - {"value": "material_index", "label": "Material Index (Cycles Only)"}, - {"value": "sample_count", "label": "Sample Count (Cycles Only)"}, - {"value": "diffuse_light", "label": "Diffuse Light/Direct"}, - { - "value": "diffuse_indirect", - "label": "Diffuse Indirect (Cycles Only)" - }, - {"value": "diffuse_color", "label": "Diffuse Color"}, - {"value": "specular_light", "label": "Specular (Glossy) Light/Direct"}, - { - "value": "specular_indirect", - "label": "Specular (Glossy) Indirect (Cycles Only)" - }, - {"value": "specular_color", "label": "Specular (Glossy) Color"}, - { - "value": "transmission_light", - "label": "Transmission Light/Direct (Cycles Only)" - }, - { - "value": "transmission_indirect", - "label": "Transmission Indirect (Cycles Only)" - }, - { - "value": "transmission_color", - "label": "Transmission Color (Cycles Only)" - }, - {"value": "volume_light", "label": "Volume Light/Direct"}, - {"value": "volume_indirect", "label": "Volume Indirect (Cycles Only)"}, - {"value": "emission", "label": "Emission"}, - {"value": "environment", "label": "Environment"}, - {"value": "shadow", "label": "Shadow/Shadow Catcher"}, - {"value": "ao", "label": "Ambient Occlusion"}, - {"value": "bloom", "label": "Bloom (Eevee Only)"}, - {"value": "transparent", "label": "Transparent (Eevee Only)"}, - {"value": "cryptomatte_object", "label": "Cryptomatte Object"}, - {"value": "cryptomatte_material", "label": "Cryptomatte Material"}, - {"value": "cryptomatte_asset", "label": "Cryptomatte Asset"}, - { - "value": "cryptomatte_accurate", - "label": "Cryptomatte Accurate Mode (Eevee Only)" - }, - ] - - -def custom_passes_types_enum(): - return [ - {"value": "COLOR", "label": "Color"}, - {"value": "VALUE", "label": "Value"}, - ] - - -class CustomPassesModel(BaseSettingsModel): - """Custom Passes""" - _layout = "compact" - - attribute: str = SettingsField("", title="Attribute name") - value: str = SettingsField( - "COLOR", - title="Type", - enum_resolver=custom_passes_types_enum - ) - - -class RenderSettingsModel(BaseSettingsModel): - default_render_image_folder: str = SettingsField( - title="Default Render Image Folder" - ) - aov_separator: str = SettingsField( - "underscore", - title="AOV Separator Character", - enum_resolver=aov_separators_enum - ) - image_format: str = SettingsField( - "exr", - title="Image Format", - enum_resolver=image_format_enum - ) - multilayer_exr: bool = SettingsField( - title="Multilayer (EXR)" - ) - renderer: str = SettingsField( - "CYCLES", - title="Renderer", - 
enum_resolver=renderers_enum - ) - compositing: bool = SettingsField( - title="Enable Compositing" - ) - aov_list: list[str] = SettingsField( - default_factory=list, - enum_resolver=aov_list_enum, - title="AOVs to create" - ) - custom_passes: list[CustomPassesModel] = SettingsField( - default_factory=list, - title="Custom Passes", - description=( - "Add custom AOVs. They are added to the view layer and in the " - "Compositing Nodetree,\nbut they need to be added manually to " - "the Shader Nodetree." - ) - ) - - -DEFAULT_RENDER_SETTINGS = { - "default_render_image_folder": "renders/blender", - "aov_separator": "underscore", - "image_format": "exr", - "multilayer_exr": True, - "renderer": "CYCLES", - "compositing": True, - "aov_list": ["combined"], - "custom_passes": [] -} From 9f629a26391c30258ad0fbce61878f364cf87527 Mon Sep 17 00:00:00 2001 From: Petr Kalis Date: Wed, 3 Jul 2024 17:56:38 +0200 Subject: [PATCH 08/10] Standardize IS_TEST to AYON_IN_TESTS ayon-core/client/ayon_core/lib/ayon_info.py expects AYON_IN_TESTS which is more descriptive than just IS_TEST --- .../plugins/publish/submit_aftereffects_deadline.py | 2 +- .../plugins/publish/submit_blender_deadline.py | 2 +- .../ayon_deadline/plugins/publish/submit_fusion_deadline.py | 2 +- .../plugins/publish/submit_harmony_deadline.py | 2 +- .../ayon_deadline/plugins/publish/submit_max_deadline.py | 2 +- .../ayon_deadline/plugins/publish/submit_maya_deadline.py | 6 +++--- .../plugins/publish/submit_publish_cache_job.py | 2 +- .../ayon_deadline/plugins/publish/submit_publish_job.py | 2 +- .../repository/custom/plugins/GlobalJobPreLoad.py | 6 +++--- 9 files changed, 13 insertions(+), 13 deletions(-) diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_aftereffects_deadline.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_aftereffects_deadline.py index a7be38bf3b..45d907cbba 100644 --- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_aftereffects_deadline.py @@ -87,7 +87,7 @@ class AfterEffectsSubmitDeadline( "AYON_WORKDIR", "AYON_APP_NAME", "AYON_LOG_NO_COLORS", - "IS_TEST" + "AYON_IN_TESTS" ] environment = { diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_blender_deadline.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_blender_deadline.py index 479dffdcc4..073de909b4 100644 --- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_blender_deadline.py +++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_blender_deadline.py @@ -110,7 +110,7 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, "AYON_TASK_NAME", "AYON_WORKDIR", "AYON_APP_NAME", - "IS_TEST" + "AYON_IN_TESTS" ] environment = { diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_fusion_deadline.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_fusion_deadline.py index f90bb9b597..bf9df40edc 100644 --- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_fusion_deadline.py +++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_fusion_deadline.py @@ -217,7 +217,7 @@ class FusionSubmitDeadline( "AYON_WORKDIR", "AYON_APP_NAME", "AYON_LOG_NO_COLORS", - "IS_TEST", + "AYON_IN_TESTS", "AYON_BUNDLE_NAME", ] diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_harmony_deadline.py 
b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_harmony_deadline.py index 1a40f6f302..bc91483c4f 100644 --- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_harmony_deadline.py +++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_harmony_deadline.py @@ -282,7 +282,7 @@ class HarmonySubmitDeadline( "AYON_WORKDIR", "AYON_APP_NAME", "AYON_LOG_NO_COLORS" - "IS_TEST" + "AYON_IN_TESTS" ] environment = { diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_max_deadline.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_max_deadline.py index a287630dc5..6a369eb001 100644 --- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_max_deadline.py +++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_max_deadline.py @@ -114,7 +114,7 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, "AYON_TASK_NAME", "AYON_WORKDIR", "AYON_APP_NAME", - "IS_TEST", + "AYON_IN_TESTS", ] environment = { diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_maya_deadline.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_maya_deadline.py index beed19e007..d50b0147d9 100644 --- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_maya_deadline.py +++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_maya_deadline.py @@ -216,7 +216,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, "AYON_TASK_NAME", "AYON_WORKDIR", "AYON_APP_NAME", - "IS_TEST" + "AYON_IN_TESTS" ] environment = { @@ -236,7 +236,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, job_info.EnvironmentKeyValue["AYON_LOG_NO_COLORS"] = "1" # Adding file dependencies. - if not bool(os.environ.get("IS_TEST")) and self.asset_dependencies: + if not is_in_tests() and self.asset_dependencies: dependencies = instance.context.data["fileDependencies"] for dependency in dependencies: job_info.AssetDependency += dependency @@ -589,7 +589,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, job_info = copy.deepcopy(self.job_info) - if not bool(os.environ.get("IS_TEST")) and self.asset_dependencies: + if not is_in_tests() and self.asset_dependencies: # Asset dependency to wait for at least the scene file to sync. 
             job_info.AssetDependency += self.scene_path
diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py
index 2bb138d849..ddfd21dc78 100644
--- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py
+++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py
@@ -128,7 +128,7 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
             "AYON_TASK_NAME": instance.context.data["task"],
             "AYON_USERNAME": instance.context.data["user"],
             "AYON_LOG_NO_COLORS": "1",
-            "IS_TEST": str(int(is_in_tests())),
+            "AYON_IN_TESTS": str(int(is_in_tests())),
             "AYON_PUBLISH_JOB": "1",
             "AYON_RENDER_JOB": "0",
             "AYON_REMOTE_PUBLISH": "0",
diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py
index 6aafb8f1c6..11bdc461f6 100644
--- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py
+++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py
@@ -206,7 +206,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
             "AYON_TASK_NAME": instance.context.data["task"],
             "AYON_USERNAME": instance.context.data["user"],
             "AYON_LOG_NO_COLORS": "1",
-            "IS_TEST": str(int(is_in_tests())),
+            "AYON_IN_TESTS": str(int(is_in_tests())),
             "AYON_PUBLISH_JOB": "1",
             "AYON_RENDER_JOB": "0",
             "AYON_REMOTE_PUBLISH": "0",
diff --git a/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py b/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
index ac04407f5b..cb34d4aa40 100644
--- a/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
+++ b/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
@@ -13,7 +13,7 @@ from Deadline.Scripting import (
     FileUtils,
     DirectoryUtils,
 )
-__version__ = "1.1.0"
+__version__ = "1.1.1"
 VERSION_REGEX = re.compile(
     r"(?P<major>0|[1-9]\d*)"
     r"\.(?P<minor>0|[1-9]\d*)"
     r"\.(?P<patch>0|[1-9]\d*)"
@@ -342,7 +342,7 @@ def inject_openpype_environment(deadlinePlugin):
         "envgroup": "farm"
     }

-    if job.GetJobEnvironmentKeyValue("IS_TEST"):
+    if job.GetJobEnvironmentKeyValue("AYON_IN_TESTS"):
         args.append("--automatic-tests")

     if all(add_kwargs.values()):
@@ -501,7 +501,7 @@ def inject_ayon_environment(deadlinePlugin):
             "extractenvironments",
             export_url
         ]
-        if job.GetJobEnvironmentKeyValue("IS_TEST"):
+        if job.GetJobEnvironmentKeyValue("AYON_IN_TESTS"):
             args.append("--automatic-tests")

         for key, value in add_kwargs.items():

From 7b25502044e88c485a959c3a2aed86d7aa16a585 Mon Sep 17 00:00:00 2001
From: Petr Kalis
Date: Wed, 3 Jul 2024 18:00:12 +0200
Subject: [PATCH 09/10] Use legacy IS_TEST env var to mark automatic tests for OP

---
 .../repository/custom/plugins/GlobalJobPreLoad.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py b/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
index cb34d4aa40..4081f02015 100644
--- a/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
+++ b/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
@@ -342,7 +342,8 @@ def inject_openpype_environment(deadlinePlugin):
         "envgroup": "farm"
     }

-    if job.GetJobEnvironmentKeyValue("AYON_IN_TESTS"):
+    # use legacy IS_TEST env var to mark automatic tests for OP
+    if job.GetJobEnvironmentKeyValue("IS_TEST"):
         args.append("--automatic-tests")

     if all(add_kwargs.values()):

From 237d9a18e47c6d489bc2d785375fab7c714fbcb8 Mon Sep 17 00:00:00 2001
From: Petr Kalis
Date: Thu, 4 Jul 2024 12:25:42 +0200
Subject: [PATCH 10/10] Use only AYON_IN_TESTS to drive automatic tests

--automatic-tests is removed in AYON
---
 .../plugins/publish/submit_publish_cache_job.py | 3 ---
 .../ayon_deadline/plugins/publish/submit_publish_job.py | 3 ---
 .../repository/custom/plugins/GlobalJobPreLoad.py | 6 ++++--
 3 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py
index ddfd21dc78..d93592a6a3 100644
--- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py
+++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_cache_job.py
@@ -156,9 +156,6 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
             "--targets", "farm"
         ]

-        if is_in_tests():
-            args.append("--automatic-tests")
-
         # Generate the payload for Deadline submission
         secondary_pool = (
             self.deadline_pool_secondary or instance.data.get("secondaryPool")
diff --git a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py
index 11bdc461f6..643dcc1c46 100644
--- a/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py
+++ b/server_addon/deadline/client/ayon_deadline/plugins/publish/submit_publish_job.py
@@ -234,9 +234,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
             "--targets", "farm"
         ]

-        if is_in_tests():
-            args.append("--automatic-tests")
-
         # Generate the payload for Deadline submission
         secondary_pool = (
             self.deadline_pool_secondary or instance.data.get("secondaryPool")
diff --git a/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py b/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
index 4081f02015..dbd1798608 100644
--- a/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
+++ b/server_addon/deadline/client/ayon_deadline/repository/custom/plugins/GlobalJobPreLoad.py
@@ -502,8 +502,6 @@ def inject_ayon_environment(deadlinePlugin):
             "extractenvironments",
             export_url
         ]
-        if job.GetJobEnvironmentKeyValue("AYON_IN_TESTS"):
-            args.append("--automatic-tests")

         for key, value in add_kwargs.items():
             args.extend(["--{}".format(key), value])
@@ -517,6 +515,10 @@ def inject_ayon_environment(deadlinePlugin):
             "AYON_API_KEY": ayon_api_key,
             "AYON_BUNDLE_NAME": ayon_bundle_name,
         }
+
+        automatic_tests = job.GetJobEnvironmentKeyValue("AYON_IN_TESTS")
+        if automatic_tests:
+            environment["AYON_IN_TESTS"] = automatic_tests
         for env, val in environment.items():
             # Add the env var for the Render Plugin that is about to render
             deadlinePlugin.SetEnvironmentVariable(env, val)
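
For reference, the `is_in_tests()` helper called by the submitter and publish-job plugins above is provided by ayon-core (`ayon_core/lib/ayon_info.py`, per the PATCH 08 message) and is not part of this diff. A minimal sketch of the behaviour these patches assume; the authoritative implementation lives in ayon-core and may differ:

    import os

    def is_in_tests():
        # Assumed behaviour: the submit plugins write
        # str(int(is_in_tests())) ("0" or "1") into the job environment as
        # AYON_IN_TESTS, and GlobalJobPreLoad.py re-exports it to the render
        # process, so a test run is marked by the value "1".
        return os.environ.get("AYON_IN_TESTS") == "1"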