diff --git a/.gitignore b/.gitignore
index acbc3e2572..41389755f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,7 +34,6 @@ Temporary Items
 
 # Package dirs
 ###########
-/server_addon/packages/*
 /package/*
 /.venv
 
diff --git a/client/ayon_core/host/dirmap.py b/client/ayon_core/host/dirmap.py
index 8766e7485d..b90b414240 100644
--- a/client/ayon_core/host/dirmap.py
+++ b/client/ayon_core/host/dirmap.py
@@ -181,25 +181,23 @@ class HostDirmap(object):
             cached=False)
 
         # overrides for roots set in `Site Settings`
-        active_roots = sitesync_addon.get_site_root_overrides(
-            project_name, active_site)
-        remote_roots = sitesync_addon.get_site_root_overrides(
-            project_name, remote_site)
+        active_roots_overrides = self._get_site_root_overrides(
+            sitesync_addon, project_name, active_site)
 
-        self.log.debug("active roots overrides {}".format(active_roots))
-        self.log.debug("remote roots overrides {}".format(remote_roots))
+        remote_roots_overrides = self._get_site_root_overrides(
+            sitesync_addon, project_name, remote_site)
 
         current_platform = platform.system().lower()
         remote_provider = sitesync_addon.get_provider_for_site(
             project_name, remote_site
         )
         # dirmap has sense only with regular disk provider, in the workfile
-        # won't be root on cloud or sftp provider
+        # won't be root on cloud or sftp provider so fallback to studio
        if remote_provider != "local_drive":
             remote_site = "studio"
-        for root_name, active_site_dir in active_roots.items():
+        for root_name, active_site_dir in active_roots_overrides.items():
             remote_site_dir = (
-                remote_roots.get(root_name)
+                remote_roots_overrides.get(root_name)
                 or sync_settings["sites"][remote_site]["root"][root_name]
             )
 
@@ -220,3 +218,22 @@ class HostDirmap(object):
 
         self.log.debug("local sync mapping:: {}".format(mapping))
         return mapping
+
+    def _get_site_root_overrides(
+        self, sitesync_addon, project_name, site_name
+    ):
+        """Safely handle root overrides.
+
+        SiteSync raises ValueError for non local or studio sites.
+        """
+        # TODO: could be removed when `get_site_root_overrides` is not raising
+        #   an Error but just returns {}
+        try:
+            site_roots_overrides = sitesync_addon.get_site_root_overrides(
+                project_name, site_name)
+        except ValueError:
+            site_roots_overrides = {}
+        self.log.debug("{} roots overrides {}".format(
+            site_name, site_roots_overrides))
+
+        return site_roots_overrides
diff --git a/client/ayon_core/pipeline/load/utils.py b/client/ayon_core/pipeline/load/utils.py
index 7f2bec6d34..9ba407193e 100644
--- a/client/ayon_core/pipeline/load/utils.py
+++ b/client/ayon_core/pipeline/load/utils.py
@@ -587,6 +587,21 @@ def switch_container(container, representation, loader_plugin=None):
     return loader.switch(container, context)
 
 
+def _fix_representation_context_compatibility(repre_context):
+    """Helper function to fix representation context compatibility.
+
+    Args:
+        repre_context (dict): Representation context.
+
+    """
+    # Auto-fix 'udim' being list of integers
+    # - This is a legacy issue for old representation entities,
+    #   added 24/07/10
+    udim = repre_context.get("udim")
+    if isinstance(udim, list):
+        repre_context["udim"] = udim[0]
+
+
 def get_representation_path_from_context(context):
     """Preparation wrapper using only context as a argument"""
     from ayon_core.pipeline import get_current_project_name
@@ -638,7 +653,9 @@ def get_representation_path_with_anatomy(repre_entity, anatomy):
 
     try:
         context = repre_entity["context"]
+        _fix_representation_context_compatibility(context)
         context["root"] = anatomy.roots
+
         path = StringTemplate.format_strict_template(template, context)
 
     except TemplateUnsolved as exc:
@@ -681,6 +698,9 @@ def get_representation_path(representation, root=None):
 
         try:
             context = representation["context"]
+
+            _fix_representation_context_compatibility(context)
+
             context["root"] = root
             path = StringTemplate.format_strict_template(
                 template, context
diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
index bb94d87483..4a1f3a84da 100644
--- a/client/ayon_core/pipeline/workfile/workfile_template_builder.py
+++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
@@ -3,7 +3,7 @@
 Build templates are manually prepared using plugin definitions which create
 placeholders inside the template which are populated on import.
 
-This approach is very explicit to achive very specific build logic that can be
+This approach is very explicit to achieve very specific build logic that can be
 targeted by task types and names.
 
 Placeholders are created using placeholder plugins which should care about
@@ -87,7 +87,7 @@ class AbstractTemplateBuilder(object):
     """Abstraction of Template Builder.
 
     Builder cares about context, shared data, cache, discovery of plugins
-    and trigger logic. Provides public api for host workfile build systen.
+    and trigger logic. Provides public api for host workfile build system.
 
     Rest of logic is based on plugins that care about collection and creation
     of placeholder items.
@@ -806,7 +806,7 @@ class AbstractTemplateBuilder(object):
         )
 
     def get_template_preset(self):
-        """Unified way how template preset is received usign settings.
+        """Unified way how template preset is received using settings.
 
         Method is dependent on '_get_build_profiles' which should return
         filter profiles to resolve path to a template. Default implementation looks
@@ -1427,7 +1427,7 @@ class PlaceholderLoadMixin(object):
                 placeholder='{"camera":"persp", "lights":True}',
                 tooltip=(
                     "Loader"
-                    "\nDefines a dictionnary of arguments used to load assets."
+                    "\nDefines a dictionary of arguments used to load assets."
                     "\nUseable arguments depend on current placeholder Loader."
                     "\nField should be a valid python dict."
                     " Anything else will be ignored."
@@ -1472,7 +1472,7 @@
         ]
 
     def parse_loader_args(self, loader_args):
-        """Helper function to parse string of loader arugments.
+        """Helper function to parse string of loader arguments.
 
         Empty dictionary is returned if conversion fails.
 
@@ -1872,7 +1872,7 @@ class PlaceholderCreateMixin(object):
                 creator_plugin.identifier,
                 create_variant,
                 folder_entity,
-                task_name=task_name,
+                task_entity,
                 pre_create_data=pre_create_data
             )
 
diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py
index 1a4cda4dbb..2da33bfb19 100644
--- a/client/ayon_core/plugins/publish/integrate.py
+++ b/client/ayon_core/plugins/publish/integrate.py
@@ -789,11 +789,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
             if value is not None:
                 repre_context[key] = value
 
-        # Explicitly store the full list even though template data might
-        # have a different value because it uses just a single udim tile
-        if repre.get("udim"):
-            repre_context["udim"] = repre.get("udim")  # store list
-
         # Use previous representation's id if there is a name match
         existing = existing_repres_by_name.get(repre["name"].lower())
         repre_id = None
diff --git a/client/ayon_core/tools/tray/dialogs.py b/client/ayon_core/tools/tray/dialogs.py
index 67348284a1..d37188a845 100644
--- a/client/ayon_core/tools/tray/dialogs.py
+++ b/client/ayon_core/tools/tray/dialogs.py
@@ -83,7 +83,7 @@ class UpdateDialog(QtWidgets.QDialog):
         top_layout.addWidget(label_widget, 1)
 
         ignore_btn = QtWidgets.QPushButton("Ignore", self)
-        restart_btn = QtWidgets.QPushButton("Restart && Change", self)
+        restart_btn = QtWidgets.QPushButton("Restart && Update", self)
         restart_btn.setObjectName("TrayRestartButton")
 
         btns_layout = QtWidgets.QHBoxLayout()
diff --git a/client/ayon_core/tools/utils/lib.py b/client/ayon_core/tools/utils/lib.py
index 676f853a32..8689a97451 100644
--- a/client/ayon_core/tools/utils/lib.py
+++ b/client/ayon_core/tools/utils/lib.py
@@ -485,7 +485,10 @@ class _IconsCache:
             parts = [icon_type, icon_def["path"]]
 
         elif icon_type in {"awesome-font", "material-symbols"}:
-            parts = [icon_type, icon_def["name"], icon_def["color"]]
+            color = icon_def["color"] or ""
+            if isinstance(color, QtGui.QColor):
+                color = color.name()
+            parts = [icon_type, icon_def["name"] or "", color]
         return "|".join(parts)
 
     @classmethod
diff --git a/client/ayon_core/version.py b/client/ayon_core/version.py
index f0e4b9a10f..42fd6a5c72 100644
--- a/client/ayon_core/version.py
+++ b/client/ayon_core/version.py
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON core addon version."""
-__version__ = "0.4.1-dev.1"
+__version__ = "0.4.2-dev.1"
diff --git a/package.py b/package.py
index e5e567b8e8..5da8e36064 100644
--- a/package.py
+++ b/package.py
@@ -1,6 +1,6 @@
 name = "core"
 title = "Core"
-version = "0.4.1-dev.1"
+version = "0.4.2-dev.1"
 client_dir = "ayon_core"
diff --git a/pyproject.toml b/pyproject.toml
index 82f0fc364e..cdedd878a0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@
 
 [tool.poetry]
 name = "ayon-core"
-version = "0.3.1"
+version = "0.4.2-dev.1"
 description = ""
 authors = ["Ynput Team "]
 readme = "README.md"
@@ -79,11 +79,7 @@ dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
 
 exclude = [
     "client/ayon_core/modules/click_wrap.py",
-    "client/ayon_core/scripts/slates/__init__.py",
-    "server_addon/deadline/client/ayon_deadline/repository/custom/plugins/CelAction/*",
-    "server_addon/deadline/client/ayon_deadline/repository/custom/plugins/HarmonyAYON/*",
-    "server_addon/hiero/client/ayon_hiero/api/startup/*",
-    "server_addon/aftereffects/client/ayon_aftereffects/api/extension/js/libs/*"
+    "client/ayon_core/scripts/slates/__init__.py"
 ]
 
 [tool.ruff.lint.per-file-ignores]
diff --git a/server_addon/README.md b/server_addon/README.md
deleted file mode 100644
index c6d467adaa..0000000000
--- a/server_addon/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Addons for AYON server
-Preparation of AYON addons based on OpenPype codebase. The output is a bunch of zip files in `./packages` directory that can be uploaded to AYON server. One of the packages is `openpype` which is OpenPype code converted to AYON addon. The addon is must have requirement to be able to use `ayon-launcher`. The versioning of `openpype` addon is following versioning of OpenPype. The other addons contain only settings models.
-
-## Intro
-OpenPype is transitioning to AYON, a dedicated server with its own database, moving away from MongoDB. During this transition period, OpenPype will remain compatible with both MongoDB and AYON. However, we will gradually update the codebase to align with AYON's data structure and separate individual components into addons.
-
-Currently, OpenPype has an AYON mode, which means it utilizes the AYON server instead of MongoDB through conversion utilities. Initially, we added the AYON executable alongside the OpenPype executables to enable AYON mode. While this approach worked, updating to new code versions would require a complete reinstallation. To address this, we have decided to create a new repository specifically for the base desktop application logic, which we currently refer to as the AYON Launcher. This Launcher will replace the executables generated by the OpenPype build and convert the OpenPype code into a server addon, resulting in smaller updates.
-
-Since the implementation of the AYON Launcher is not yet fully completed, we will maintain both methods of starting AYON mode for now. Once the AYON Launcher is finished, we will remove the AYON executables from the OpenPype codebase entirely.
-
-During this transitional period, the AYON Launcher addon will be a requirement as the entry point for using the AYON Launcher.
-
-## How to start
-There is a `create_ayon_addons.py` python file which contains logic how to create server addon from OpenPype codebase. Just run the code.
-```shell
-./.poetry/bin/poetry run python ./server_addon/create_ayon_addons.py
-```
-
-It will create directory `./packages/.zip` files for AYON server. You can then copy upload the zip files to AYON server. Restart server to update addons information, add the addon version to server bundle and set the bundle for production or staging usage.
-
-Once addon is on server and is enabled, you can just run AYON launcher. Content will be downloaded and used automatically.
-
-### Additional arguments
-Additional arguments are useful for development purposes.
-
-To skip zip creation to keep only server ready folder structure, pass `--skip-zip` argument.
-```shell
-./.poetry/bin/poetry run python ./server_addon/create_ayon_addons.py --skip-zip
-```
-
-To create both zips and keep folder structure, pass `--keep-sources` argument.
-```shell -./.poetry/bin/poetry run python ./server_addon/create_ayon_addons.py --keep-sources -``` diff --git a/server_addon/create_ayon_addons.py b/server_addon/create_ayon_addons.py deleted file mode 100644 index 73d0b54770..0000000000 --- a/server_addon/create_ayon_addons.py +++ /dev/null @@ -1,376 +0,0 @@ -import io -import os -import sys -import re -import shutil -import argparse -import zipfile -import types -import importlib.machinery -import platform -import collections -from pathlib import Path -from typing import Optional, Iterable, Pattern, List, Tuple - -# Patterns of directories to be skipped for server part of addon -IGNORE_DIR_PATTERNS: List[Pattern] = [ - re.compile(pattern) - for pattern in { - # Skip directories starting with '.' - r"^\.", - # Skip any pycache folders - "^__pycache__$" - } -] - -# Patterns of files to be skipped for server part of addon -IGNORE_FILE_PATTERNS: List[Pattern] = [ - re.compile(pattern) - for pattern in { - # Skip files starting with '.' - # NOTE this could be an issue in some cases - r"^\.", - # Skip '.pyc' files - r"\.pyc$" - } -] - -IGNORED_HOSTS = [ - "flame", - "harmony", -] - -IGNORED_MODULES = [] - -PACKAGE_PY_TEMPLATE = """name = "{addon_name}" -version = "{addon_version}" -plugin_for = ["ayon_server"] -""" - -CLIENT_VERSION_CONTENT = '''# -*- coding: utf-8 -*- -"""Package declaring AYON addon '{}' version.""" -__version__ = "{}" -''' - - -class ZipFileLongPaths(zipfile.ZipFile): - """Allows longer paths in zip files. - - Regular DOS paths are limited to MAX_PATH (260) characters, including - the string's terminating NUL character. - That limit can be exceeded by using an extended-length path that - starts with the '\\?\' prefix. - """ - _is_windows = platform.system().lower() == "windows" - - def _extract_member(self, member, tpath, pwd): - if self._is_windows: - tpath = os.path.abspath(tpath) - if tpath.startswith("\\\\"): - tpath = "\\\\?\\UNC\\" + tpath[2:] - else: - tpath = "\\\\?\\" + tpath - - return super()._extract_member(member, tpath, pwd) - - -def _value_match_regexes(value: str, regexes: Iterable[Pattern]) -> bool: - return any( - regex.search(value) - for regex in regexes - ) - - -def find_files_in_subdir( - src_path: str, - ignore_file_patterns: Optional[List[Pattern]] = None, - ignore_dir_patterns: Optional[List[Pattern]] = None, - include_empty_dirs: bool = True -): - """Find all files to copy in subdirectories of given path. - - All files that match any of the patterns in 'ignore_file_patterns' will - be skipped and any directories that match any of the patterns in - 'ignore_dir_patterns' will be skipped with all subfiles. - - Args: - src_path (str): Path to directory to search in. - ignore_file_patterns (Optional[List[Pattern]]): List of regexes - to match files to ignore. - ignore_dir_patterns (Optional[List[Pattern]]): List of regexes - to match directories to ignore. - include_empty_dirs (Optional[bool]): Do not skip empty directories. - - Returns: - List[Tuple[str, str]]: List of tuples with path to file and parent - directories relative to 'src_path'. 
- """ - if not os.path.exists(src_path): - return [] - - if ignore_file_patterns is None: - ignore_file_patterns = IGNORE_FILE_PATTERNS - - if ignore_dir_patterns is None: - ignore_dir_patterns = IGNORE_DIR_PATTERNS - output: List[Tuple[str, str]] = [] - - hierarchy_queue = collections.deque() - hierarchy_queue.append((src_path, [])) - while hierarchy_queue: - item: Tuple[str, List[str]] = hierarchy_queue.popleft() - dirpath, parents = item - subnames = list(os.listdir(dirpath)) - if not subnames and include_empty_dirs: - output.append((dirpath, os.path.sep.join(parents))) - - for name in subnames: - path = os.path.join(dirpath, name) - if os.path.isfile(path): - if not _value_match_regexes(name, ignore_file_patterns): - items = list(parents) - items.append(name) - output.append((path, os.path.sep.join(items))) - continue - - if not _value_match_regexes(name, ignore_dir_patterns): - items = list(parents) - items.append(name) - hierarchy_queue.append((path, items)) - - return output - - -def create_addon_zip( - output_dir: Path, - addon_name: str, - addon_version: str, - files_mapping: List[Tuple[str, str]], - client_zip_content: io.BytesIO -): - zip_filepath = output_dir / f"{addon_name}-{addon_version}.zip" - - with ZipFileLongPaths(zip_filepath, "w", zipfile.ZIP_DEFLATED) as zipf: - for src_path, dst_subpath in files_mapping: - zipf.write(src_path, dst_subpath) - - if client_zip_content is not None: - zipf.writestr("private/client.zip", client_zip_content.getvalue()) - - -def prepare_client_zip( - addon_dir: Path, - addon_name: str, - addon_version: str, - client_dir: str -): - if not client_dir: - return None - client_dir_obj = addon_dir / "client" / client_dir - if not client_dir_obj.exists(): - return None - - # Update version.py with server version if 'version.py' is available - version_path = client_dir_obj / "version.py" - if version_path.exists(): - with open(version_path, "w") as stream: - stream.write( - CLIENT_VERSION_CONTENT.format(addon_name, addon_version) - ) - - zip_content = io.BytesIO() - with ZipFileLongPaths(zip_content, "a", zipfile.ZIP_DEFLATED) as zipf: - # Add client code content to zip - for path, sub_path in find_files_in_subdir( - str(client_dir_obj), include_empty_dirs=False - ): - sub_path = os.path.join(client_dir, sub_path) - zipf.write(path, sub_path) - - zip_content.seek(0) - return zip_content - - -def import_filepath(path: Path, module_name: Optional[str] = None): - if not module_name: - module_name = os.path.splitext(path.name)[0] - - # Convert to string - path = str(path) - module = types.ModuleType(module_name) - module.__file__ = path - - # Use loader so module has full specs - module_loader = importlib.machinery.SourceFileLoader( - module_name, path - ) - module_loader.exec_module(module) - return module - - -def _get_server_mapping( - addon_dir: Path, addon_version: str -) -> List[Tuple[str, str]]: - server_dir = addon_dir / "server" - public_dir = addon_dir / "public" - src_package_py = addon_dir / "package.py" - pyproject_toml = addon_dir / "client" / "pyproject.toml" - - mapping: List[Tuple[str, str]] = [ - (src_path, f"server/{sub_path}") - for src_path, sub_path in find_files_in_subdir(str(server_dir)) - ] - mapping.extend([ - (src_path, f"public/{sub_path}") - for src_path, sub_path in find_files_in_subdir(str(public_dir)) - ]) - mapping.append((src_package_py.as_posix(), "package.py")) - if pyproject_toml.exists(): - mapping.append((pyproject_toml.as_posix(), "private/pyproject.toml")) - - return mapping - - -def create_addon_package( - 
addon_dir: Path, - output_dir: Path, - create_zip: bool, -): - src_package_py = addon_dir / "package.py" - - package = import_filepath(src_package_py) - addon_name = package.name - addon_version = package.version - - files_mapping = _get_server_mapping(addon_dir, addon_version) - - client_dir = getattr(package, "client_dir", None) - client_zip_content = prepare_client_zip( - addon_dir, addon_name, addon_version, client_dir - ) - - if create_zip: - create_addon_zip( - output_dir, - addon_name, - addon_version, - files_mapping, - client_zip_content - ) - - else: - addon_output_dir = output_dir / addon_dir.name / addon_version - if addon_output_dir.exists(): - shutil.rmtree(str(addon_output_dir)) - - addon_output_dir.mkdir(parents=True, exist_ok=True) - - for src_path, dst_subpath in files_mapping: - dst_path = addon_output_dir / dst_subpath - dst_path.parent.mkdir(parents=True, exist_ok=True) - shutil.copy2(src_path, dst_path) - - if client_zip_content is not None: - private_dir = addon_output_dir / "private" - private_dir.mkdir(parents=True, exist_ok=True) - with open(private_dir / "client.zip", "wb") as stream: - stream.write(client_zip_content.read()) - - -def main( - output_dir=None, - skip_zip=True, - clear_output_dir=False, - addons=None, -): - current_dir = Path(os.path.dirname(os.path.abspath(__file__))) - create_zip = not skip_zip - - if output_dir: - output_dir = Path(output_dir) - else: - output_dir = current_dir / "packages" - - if output_dir.exists() and clear_output_dir: - shutil.rmtree(str(output_dir)) - - print("Package creation started...") - print(f"Output directory: {output_dir}") - - # Make sure output dir is created - output_dir.mkdir(parents=True, exist_ok=True) - ignored_addons = set(IGNORED_HOSTS) | set(IGNORED_MODULES) - for addon_dir in current_dir.iterdir(): - if not addon_dir.is_dir(): - continue - - if addons and addon_dir.name not in addons: - continue - - if addon_dir.name in ignored_addons: - continue - - server_dir = addon_dir / "server" - if not server_dir.exists(): - continue - - create_addon_package(addon_dir, output_dir, create_zip) - - print(f"- package '{addon_dir.name}' created") - print(f"Package creation finished. Output directory: {output_dir}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--skip-zip", - dest="skip_zip", - action="store_true", - help=( - "Skip zipping server package and create only" - " server folder structure." - ) - ) - parser.add_argument( - "--keep-sources", - dest="keep_sources", - action="store_true", - help=( - "Keep folder structure when server package is created." - ) - ) - parser.add_argument( - "-o", "--output", - dest="output_dir", - default=None, - help=( - "Directory path where package will be created" - " (Will be purged if already exists!)" - ) - ) - parser.add_argument( - "-c", "--clear-output-dir", - dest="clear_output_dir", - action="store_true", - help=( - "Clear output directory before package creation." 
- ) - ) - parser.add_argument( - "-a", - "--addon", - dest="addons", - action="append", - help="Limit addon creation to given addon name", - ) - - args = parser.parse_args(sys.argv[1:]) - if args.keep_sources: - print("Keeping sources is not supported anymore!") - - main( - args.output_dir, - args.skip_zip, - args.clear_output_dir, - args.addons, - ) diff --git a/server_addon/max/client/ayon_max/__init__.py b/server_addon/max/client/ayon_max/__init__.py deleted file mode 100644 index 77293f9aa9..0000000000 --- a/server_addon/max/client/ayon_max/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - MaxAddon, - MAX_HOST_DIR, -) - - -__all__ = ( - "__version__", - - "MaxAddon", - "MAX_HOST_DIR", -) diff --git a/server_addon/max/client/ayon_max/addon.py b/server_addon/max/client/ayon_max/addon.py deleted file mode 100644 index 9cc0cda1ee..0000000000 --- a/server_addon/max/client/ayon_max/addon.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- -import os -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ - -MAX_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class MaxAddon(AYONAddon, IHostAddon): - name = "max" - version = __version__ - host_name = "max" - - def add_implementation_envs(self, env, _app): - # Remove auto screen scale factor for Qt - # - let 3dsmax decide it's value - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - - def get_workfile_extensions(self): - return [".max"] - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(MAX_HOST_DIR, "hooks") - ] diff --git a/server_addon/max/client/ayon_max/api/__init__.py b/server_addon/max/client/ayon_max/api/__init__.py deleted file mode 100644 index 92097cc98b..0000000000 --- a/server_addon/max/client/ayon_max/api/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -"""Public API for 3dsmax""" - -from .pipeline import ( - MaxHost, -) - - -from .lib import ( - maintained_selection, - lsattr, - get_all_children -) - -__all__ = [ - "MaxHost", - "maintained_selection", - "lsattr", - "get_all_children" -] diff --git a/server_addon/max/client/ayon_max/api/action.py b/server_addon/max/client/ayon_max/api/action.py deleted file mode 100644 index bed72bc493..0000000000 --- a/server_addon/max/client/ayon_max/api/action.py +++ /dev/null @@ -1,42 +0,0 @@ -from pymxs import runtime as rt - -import pyblish.api - -from ayon_core.pipeline.publish import get_errored_instances_from_context - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid objects in Blender when a publish plug-in failed.""" - label = "Select Invalid" - on = "failed" - icon = "search" - - def process(self, context, plugin): - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes...") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning( - "Failed plug-in doesn't have any selectable objects." 
- ) - - if not invalid: - self.log.info("No invalid nodes found.") - return - invalid_names = [obj.name for obj in invalid if not isinstance(obj, tuple)] - if not invalid_names: - invalid_names = [obj.name for obj, _ in invalid] - invalid = [obj for obj, _ in invalid] - self.log.info( - "Selecting invalid objects: %s", ", ".join(invalid_names) - ) - - rt.Select(invalid) diff --git a/server_addon/max/client/ayon_max/api/colorspace.py b/server_addon/max/client/ayon_max/api/colorspace.py deleted file mode 100644 index fafee4ee04..0000000000 --- a/server_addon/max/client/ayon_max/api/colorspace.py +++ /dev/null @@ -1,50 +0,0 @@ -import attr -from pymxs import runtime as rt - - -@attr.s -class LayerMetadata(object): - """Data class for Render Layer metadata.""" - frameStart = attr.ib() - frameEnd = attr.ib() - - -@attr.s -class RenderProduct(object): - """Getting Colorspace as - Specific Render Product Parameter for submitting - publish job. - """ - colorspace = attr.ib() # colorspace - view = attr.ib() - productName = attr.ib(default=None) - - -class ARenderProduct(object): - - def __init__(self): - """Constructor.""" - # Initialize - self.layer_data = self._get_layer_data() - self.layer_data.products = self.get_colorspace_data() - - def _get_layer_data(self): - return LayerMetadata( - frameStart=int(rt.rendStart), - frameEnd=int(rt.rendEnd), - ) - - def get_colorspace_data(self): - """To be implemented by renderer class. - This should return a list of RenderProducts. - Returns: - list: List of RenderProduct - """ - colorspace_data = [ - RenderProduct( - colorspace="sRGB", - view="ACES 1.0", - productName="" - ) - ] - return colorspace_data diff --git a/server_addon/max/client/ayon_max/api/lib.py b/server_addon/max/client/ayon_max/api/lib.py deleted file mode 100644 index 7acc18196f..0000000000 --- a/server_addon/max/client/ayon_max/api/lib.py +++ /dev/null @@ -1,589 +0,0 @@ -# -*- coding: utf-8 -*- -"""Library of functions useful for 3dsmax pipeline.""" -import contextlib -import logging -import json -from typing import Any, Dict, Union - -import six - -from ayon_core.pipeline import ( - get_current_project_name, - colorspace -) -from ayon_core.settings import get_project_settings -from ayon_core.pipeline.context_tools import ( - get_current_task_entity -) -from ayon_core.style import load_stylesheet -from pymxs import runtime as rt - - -JSON_PREFIX = "JSON::" -log = logging.getLogger("ayon_max") - - -def get_main_window(): - """Acquire Max's main window""" - from qtpy import QtWidgets - top_widgets = QtWidgets.QApplication.topLevelWidgets() - name = "QmaxApplicationWindow" - for widget in top_widgets: - if ( - widget.inherits("QMainWindow") - and widget.metaObject().className() == name - ): - return widget - raise RuntimeError('Count not find 3dsMax main window.') - - -def imprint(node_name: str, data: dict) -> bool: - node = rt.GetNodeByName(node_name) - if not node: - return False - - for k, v in data.items(): - if isinstance(v, (dict, list)): - rt.SetUserProp(node, k, f"{JSON_PREFIX}{json.dumps(v)}") - else: - rt.SetUserProp(node, k, v) - - return True - - -def lsattr( - attr: str, - value: Union[str, None] = None, - root: Union[str, None] = None) -> list: - """List nodes having attribute with specified value. - - Args: - attr (str): Attribute name to match. - value (str, Optional): Value to match, of omitted, all nodes - with specified attribute are returned no matter of value. - root (str, Optional): Root node name. If omitted, scene root is used. - - Returns: - list of nodes. 
- """ - root = rt.RootNode if root is None else rt.GetNodeByName(root) - - def output_node(node, nodes): - nodes.append(node) - for child in node.Children: - output_node(child, nodes) - - nodes = [] - output_node(root, nodes) - return [ - n for n in nodes - if rt.GetUserProp(n, attr) == value - ] if value else [ - n for n in nodes - if rt.GetUserProp(n, attr) - ] - - -def read(container) -> dict: - data = {} - props = rt.GetUserPropBuffer(container) - # this shouldn't happen but let's guard against it anyway - if not props: - return data - - for line in props.split("\r\n"): - try: - key, value = line.split("=") - except ValueError: - # if the line cannot be split we can't really parse it - continue - - value = value.strip() - if isinstance(value.strip(), six.string_types) and \ - value.startswith(JSON_PREFIX): - with contextlib.suppress(json.JSONDecodeError): - value = json.loads(value[len(JSON_PREFIX):]) - - # default value behavior - # convert maxscript boolean values - if value == "true": - value = True - elif value == "false": - value = False - - data[key.strip()] = value - - data["instance_node"] = container.Name - - return data - - -@contextlib.contextmanager -def maintained_selection(): - previous_selection = rt.GetCurrentSelection() - try: - yield - finally: - if previous_selection: - rt.Select(previous_selection) - else: - rt.Select() - - -def get_all_children(parent, node_type=None): - """Handy function to get all the children of a given node - - Args: - parent (3dsmax Node1): Node to get all children of. - node_type (None, runtime.class): give class to check for - e.g. rt.FFDBox/rt.GeometryClass etc. - - Returns: - list: list of all children of the parent node - """ - def list_children(node): - children = [] - for c in node.Children: - children.append(c) - children = children + list_children(c) - return children - child_list = list_children(parent) - - return ([x for x in child_list if rt.SuperClassOf(x) == node_type] - if node_type else child_list) - - -def get_current_renderer(): - """ - Notes: - Get current renderer for Max - - Returns: - "{Current Renderer}:{Current Renderer}" - e.g. "Redshift_Renderer:Redshift_Renderer" - """ - return rt.renderers.production - - -def get_default_render_folder(project_setting=None): - return (project_setting["max"] - ["RenderSettings"] - ["default_render_image_folder"]) - - -def set_render_frame_range(start_frame, end_frame): - """ - Note: - Frame range can be specified in different types. Possible values are: - * `1` - Single frame. - * `2` - Active time segment ( animationRange ). - * `3` - User specified Range. - * `4` - User specified Frame pickup string (for example `1,3,5-12`). - - Todo: - Current type is hard-coded, there should be a custom setting for this. - """ - rt.rendTimeType = 3 - if start_frame is not None and end_frame is not None: - rt.rendStart = int(start_frame) - rt.rendEnd = int(end_frame) - - -def get_multipass_setting(project_setting=None): - return (project_setting["max"] - ["RenderSettings"] - ["multipass"]) - - -def set_scene_resolution(width: int, height: int): - """Set the render resolution - - Args: - width(int): value of the width - height(int): value of the height - - Returns: - None - - """ - # make sure the render dialog is closed - # for the update of resolution - # Changing the Render Setup dialog settings should be done - # with the actual Render Setup dialog in a closed state. 
- if rt.renderSceneDialog.isOpen(): - rt.renderSceneDialog.close() - - rt.renderWidth = width - rt.renderHeight = height - - -def reset_scene_resolution(): - """Apply the scene resolution from the project definition - - scene resolution can be overwritten by a folder if the folder.attrib - contains any information regarding scene resolution. - """ - task_attributes = get_current_task_entity(fields={"attrib"})["attrib"] - width = int(task_attributes["resolutionWidth"]) - height = int(task_attributes["resolutionHeight"]) - - set_scene_resolution(width, height) - - -def get_frame_range(task_entity=None) -> Union[Dict[str, Any], None]: - """Get the current task frame range and handles - - Args: - task_entity (dict): Task Entity. - - Returns: - dict: with frame start, frame end, handle start, handle end. - """ - # Set frame start/end - if task_entity is None: - task_entity = get_current_task_entity(fields={"attrib"}) - task_attributes = task_entity["attrib"] - frame_start = int(task_attributes["frameStart"]) - frame_end = int(task_attributes["frameEnd"]) - handle_start = int(task_attributes["handleStart"]) - handle_end = int(task_attributes["handleEnd"]) - frame_start_handle = frame_start - handle_start - frame_end_handle = frame_end + handle_end - - return { - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStartHandle": frame_start_handle, - "frameEndHandle": frame_end_handle, - } - - -def reset_frame_range(fps: bool = True): - """Set frame range to current folder. - This is part of 3dsmax documentation: - - animationRange: A System Global variable which lets you get and - set an Interval value that defines the start and end frames - of the Active Time Segment. - frameRate: A System Global variable which lets you get - and set an Integer value that defines the current - scene frame rate in frames-per-second. - """ - if fps: - rt.frameRate = float(get_fps_for_current_context()) - - frame_range = get_frame_range() - - set_timeline( - frame_range["frameStartHandle"], frame_range["frameEndHandle"]) - set_render_frame_range( - frame_range["frameStartHandle"], frame_range["frameEndHandle"]) - - -def get_fps_for_current_context(): - """Get fps that should be set for current context. - - Todos: - - Skip project value. - - Merge logic with 'get_frame_range' and 'reset_scene_resolution' -> - all the values in the functions can be collected at one place as - they have same requirements. - - Returns: - Union[int, float]: FPS value. 
- """ - task_entity = get_current_task_entity(fields={"attrib"}) - return task_entity["attrib"]["fps"] - - -def reset_unit_scale(): - """Apply the unit scale setting to 3dsMax - """ - project_name = get_current_project_name() - settings = get_project_settings(project_name).get("max") - scene_scale = settings.get("unit_scale_settings", - {}).get("scene_unit_scale") - if scene_scale: - rt.units.DisplayType = rt.Name("Metric") - rt.units.MetricType = rt.Name(scene_scale) - else: - rt.units.DisplayType = rt.Name("Generic") - - -def convert_unit_scale(): - """Convert system unit scale in 3dsMax - for fbx export - - Returns: - str: unit scale - """ - unit_scale_dict = { - "millimeters": "mm", - "centimeters": "cm", - "meters": "m", - "kilometers": "km" - } - current_unit_scale = rt.Execute("units.MetricType as string") - return unit_scale_dict[current_unit_scale] - - -def set_context_setting(): - """Apply the project settings from the project definition - - Settings can be overwritten by an folder if the folder.attrib contains - any information regarding those settings. - - Examples of settings: - frame range - resolution - - Returns: - None - """ - reset_scene_resolution() - reset_frame_range() - reset_colorspace() - reset_unit_scale() - - -def get_max_version(): - """ - Args: - get max version date for deadline - - Returns: - #(25000, 62, 0, 25, 0, 0, 997, 2023, "") - max_info[7] = max version date - """ - max_info = rt.MaxVersion() - return max_info[7] - - -def is_headless(): - """Check if 3dsMax runs in batch mode. - If it returns True, it runs in 3dsbatch.exe - If it returns False, it runs in 3dsmax.exe - """ - return rt.maxops.isInNonInteractiveMode() - - -def set_timeline(frameStart, frameEnd): - """Set frame range for timeline editor in Max - """ - rt.animationRange = rt.interval(int(frameStart), int(frameEnd)) - return rt.animationRange - - -def reset_colorspace(): - """OCIO Configuration - Supports in 3dsMax 2024+ - - """ - if int(get_max_version()) < 2024: - return - - max_config_data = colorspace.get_current_context_imageio_config_preset() - if max_config_data: - ocio_config_path = max_config_data["path"] - colorspace_mgr = rt.ColorPipelineMgr - colorspace_mgr.Mode = rt.Name("OCIO_Custom") - colorspace_mgr.OCIOConfigPath = ocio_config_path - - -def check_colorspace(): - parent = get_main_window() - if parent is None: - log.info("Skipping outdated pop-up " - "because Max main window can't be found.") - if int(get_max_version()) >= 2024: - color_mgr = rt.ColorPipelineMgr - max_config_data = colorspace.get_current_context_imageio_config_preset() - if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"): - if not is_headless(): - from ayon_core.tools.utils import SimplePopup - dialog = SimplePopup(parent=parent) - dialog.setWindowTitle("Warning: Wrong OCIO Mode") - dialog.set_message("This scene has wrong OCIO " - "Mode setting.") - dialog.set_button_text("Fix") - dialog.setStyleSheet(load_stylesheet()) - dialog.on_clicked.connect(reset_colorspace) - dialog.show() - -def unique_namespace(namespace, format="%02d", - prefix="", suffix="", con_suffix="CON"): - """Return unique namespace - - Arguments: - namespace (str): Name of namespace to consider - format (str, optional): Formatting of the given iteration number - suffix (str, optional): Only consider namespaces with this suffix. 
- con_suffix: max only, for finding the name of the master container - - >>> unique_namespace("bar") - # bar01 - >>> unique_namespace(":hello") - # :hello01 - >>> unique_namespace("bar:", suffix="_NS") - # bar01_NS: - - """ - - def current_namespace(): - current = namespace - # When inside a namespace Max adds no trailing : - if not current.endswith(":"): - current += ":" - return current - - # Always check against the absolute namespace root - # There's no clash with :x if we're defining namespace :a:x - ROOT = ":" if namespace.startswith(":") else current_namespace() - - # Strip trailing `:` tokens since we might want to add a suffix - start = ":" if namespace.startswith(":") else "" - end = ":" if namespace.endswith(":") else "" - namespace = namespace.strip(":") - if ":" in namespace: - # Split off any nesting that we don't uniqify anyway. - parents, namespace = namespace.rsplit(":", 1) - start += parents + ":" - ROOT += start - - iteration = 1 - increment_version = True - while increment_version: - nr_namespace = namespace + format % iteration - unique = prefix + nr_namespace + suffix - container_name = f"{unique}:{namespace}{con_suffix}" - if not rt.getNodeByName(container_name): - name_space = start + unique + end - increment_version = False - return name_space - else: - increment_version = True - iteration += 1 - - -def get_namespace(container_name): - """Get the namespace and name of the sub-container - - Args: - container_name (str): the name of master container - - Raises: - RuntimeError: when there is no master container found - - Returns: - namespace (str): namespace of the sub-container - name (str): name of the sub-container - """ - node = rt.getNodeByName(container_name) - if not node: - raise RuntimeError("Master Container Not Found..") - name = rt.getUserProp(node, "name") - namespace = rt.getUserProp(node, "namespace") - return namespace, name - - -def object_transform_set(container_children): - """A function which allows to store the transform of - previous loaded object(s) - Args: - container_children(list): A list of nodes - - Returns: - transform_set (dict): A dict with all transform data of - the previous loaded object(s) - """ - transform_set = {} - for node in container_children: - name = f"{node}.transform" - transform_set[name] = node.pos - name = f"{node}.scale" - transform_set[name] = node.scale - return transform_set - - -def get_plugins() -> list: - """Get all loaded plugins in 3dsMax - - Returns: - plugin_info_list: a list of loaded plugins - """ - manager = rt.PluginManager - count = manager.pluginDllCount - plugin_info_list = [] - for p in range(1, count + 1): - plugin_info = manager.pluginDllName(p) - plugin_info_list.append(plugin_info) - - return plugin_info_list - - -def update_modifier_node_names(event, node): - """Update the name of the nodes after renaming - - Args: - event (pymxs.MXSWrapperBase): Event Name ( - Mandatory argument for rt.NodeEventCallback) - node (list): Event Number ( - Mandatory argument for rt.NodeEventCallback) - - """ - containers = [ - obj - for obj in rt.Objects - if ( - rt.ClassOf(obj) == rt.Container - and rt.getUserProp(obj, "id") == "pyblish.avalon.instance" - and rt.getUserProp(obj, "productType") not in { - "workfile", "tyflow" - } - ) - ] - if not containers: - return - for container in containers: - ayon_data = container.modifiers[0].openPypeData - updated_node_names = [str(node.node) for node - in ayon_data.all_handles] - rt.setProperty(ayon_data, "sel_list", updated_node_names) - - -@contextlib.contextmanager -def 
render_resolution(width, height): - """Set render resolution option during context - - Args: - width (int): render width - height (int): render height - """ - current_renderWidth = rt.renderWidth - current_renderHeight = rt.renderHeight - try: - rt.renderWidth = width - rt.renderHeight = height - yield - finally: - rt.renderWidth = current_renderWidth - rt.renderHeight = current_renderHeight - - -@contextlib.contextmanager -def suspended_refresh(): - """Suspended refresh for scene and modify panel redraw. - """ - if is_headless(): - yield - return - rt.disableSceneRedraw() - rt.suspendEditing() - try: - yield - - finally: - rt.enableSceneRedraw() - rt.resumeEditing() diff --git a/server_addon/max/client/ayon_max/api/lib_renderproducts.py b/server_addon/max/client/ayon_max/api/lib_renderproducts.py deleted file mode 100644 index 82a6a0c20c..0000000000 --- a/server_addon/max/client/ayon_max/api/lib_renderproducts.py +++ /dev/null @@ -1,275 +0,0 @@ -# Render Element Example : For scanline render, VRay -# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC -# arnold -# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html -import os - -from pymxs import runtime as rt - -from ayon_max.api.lib import get_current_renderer -from ayon_core.pipeline import get_current_project_name -from ayon_core.settings import get_project_settings - - -class RenderProducts(object): - - def __init__(self, project_settings=None): - self._project_settings = project_settings - if not self._project_settings: - self._project_settings = get_project_settings( - get_current_project_name() - ) - - def get_beauty(self, container): - render_dir = os.path.dirname(rt.rendOutputFilename) - - output_file = os.path.join(render_dir, container) - - setting = self._project_settings - img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa - - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - - return { - "beauty": self.get_expected_beauty( - output_file, start_frame, end_frame, img_fmt - ) - } - - def get_multiple_beauty(self, outputs, cameras): - beauty_output_frames = dict() - for output, camera in zip(outputs, cameras): - filename, ext = os.path.splitext(output) - filename = filename.replace(".", "") - ext = ext.replace(".", "") - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - new_beauty = self.get_expected_beauty( - filename, start_frame, end_frame, ext - ) - beauty_output = ({ - f"{camera}_beauty": new_beauty - }) - beauty_output_frames.update(beauty_output) - return beauty_output_frames - - def get_multiple_aovs(self, outputs, cameras): - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - aovs_frames = {} - for output, camera in zip(outputs, cameras): - filename, ext = os.path.splitext(output) - filename = filename.replace(".", "") - ext = ext.replace(".", "") - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - - if renderer in [ - "ART_Renderer", - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3", - "Default_Scanline_Renderer", - "Quicksilver_Hardware_Renderer", - ]: - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - elif renderer == "Redshift_Renderer": - render_name = self.get_render_elements_name() - if render_name: - rs_aov_files = 
rt.Execute("renderers.current.separateAovFiles") # noqa - # this doesn't work, always returns False - # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles - if ext == "exr" and not rs_aov_files: - for name in render_name: - if name == "RsCryptomatte": - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - else: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - elif renderer == "Arnold": - render_name = self.get_arnold_product_name() - if render_name: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_arnold_product( # noqa - filename, name, start_frame, - end_frame, ext) - }) - elif renderer in [ - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3" - ]: - if ext != "exr": - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - - return aovs_frames - - def get_aovs(self, container): - render_dir = os.path.dirname(rt.rendOutputFilename) - - output_file = os.path.join(render_dir, - container) - - setting = self._project_settings - img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa - - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - render_dict = {} - - if renderer in [ - "ART_Renderer", - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3", - "Default_Scanline_Renderer", - "Quicksilver_Hardware_Renderer", - ]: - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) - }) - elif renderer == "Redshift_Renderer": - render_name = self.get_render_elements_name() - if render_name: - rs_aov_files = rt.Execute("renderers.current.separateAovFiles") - # this doesn't work, always returns False - # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles - if img_fmt == "exr" and not rs_aov_files: - for name in render_name: - if name == "RsCryptomatte": - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) - }) - else: - for name in render_name: - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) - }) - - elif renderer == "Arnold": - render_name = self.get_arnold_product_name() - if render_name: - for name in render_name: - render_dict.update({ - name: self.get_expected_arnold_product( - output_file, name, start_frame, - end_frame, img_fmt) - }) - elif renderer in [ - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3" - ]: - if img_fmt != "exr": - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) # noqa - }) - - return render_dict - - def get_expected_beauty(self, folder, start_frame, end_frame, fmt): - beauty_frame_range = [] - for f in range(start_frame, end_frame): - frame = "%04d" % f - beauty_output = f"{folder}.{frame}.{fmt}" - beauty_output = beauty_output.replace("\\", "/") - beauty_frame_range.append(beauty_output) - - return beauty_frame_range - - def get_arnold_product_name(self): - """Get all the Arnold AOVs name""" 
- aov_name = [] - - amw = rt.MaxToAOps.AOVsManagerWindow() - aov_mgr = rt.renderers.current.AOVManager - # Check if there is any aov group set in AOV manager - aov_group_num = len(aov_mgr.drivers) - if aov_group_num < 1: - return - for i in range(aov_group_num): - # get the specific AOV group - aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list) - # close the AOVs manager window - amw.close() - - return aov_name - - def get_expected_arnold_product(self, folder, name, - start_frame, end_frame, fmt): - """Get all the expected Arnold AOVs""" - aov_list = [] - for f in range(start_frame, end_frame): - frame = "%04d" % f - render_element = f"{folder}_{name}.{frame}.{fmt}" - render_element = render_element.replace("\\", "/") - aov_list.append(render_element) - - return aov_list - - def get_render_elements_name(self): - """Get all the render element names for general """ - render_name = [] - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 1: - return - # get render elements from the renders - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - if renderlayer_name.enabled: - target, renderpass = str(renderlayer_name).split(":") - render_name.append(renderpass) - - return render_name - - def get_expected_aovs(self, folder, name, - start_frame, end_frame, fmt): - """Get all the expected render element output files. """ - render_elements = [] - for f in range(start_frame, end_frame): - frame = "%04d" % f - render_element = f"{folder}_{name}.{frame}.{fmt}" - render_element = render_element.replace("\\", "/") - render_elements.append(render_element) - - return render_elements - - def image_format(self): - return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa diff --git a/server_addon/max/client/ayon_max/api/lib_rendersettings.py b/server_addon/max/client/ayon_max/api/lib_rendersettings.py deleted file mode 100644 index 4b65e1397e..0000000000 --- a/server_addon/max/client/ayon_max/api/lib_rendersettings.py +++ /dev/null @@ -1,227 +0,0 @@ -import os -from pymxs import runtime as rt -from ayon_core.lib import Logger -from ayon_core.settings import get_project_settings -from ayon_core.pipeline import get_current_project_name -from ayon_core.pipeline.context_tools import get_current_folder_entity - -from ayon_max.api.lib import ( - set_render_frame_range, - get_current_renderer, - get_default_render_folder -) - - -class RenderSettings(object): - - log = Logger.get_logger("RenderSettings") - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - - def __init__(self, project_settings=None): - """ - Set up the naming convention for the render - elements for the deadline submission - """ - - self._project_settings = project_settings - if not self._project_settings: - self._project_settings = get_project_settings( - get_current_project_name() - ) - - def set_render_camera(self, selection): - for sel in selection: - # to avoid Attribute Error from pymxs wrapper - if rt.classOf(sel) in rt.Camera.classes: - rt.viewport.setCamera(sel) - return - raise RuntimeError("Active Camera not found") - - def render_output(self, container): - folder = rt.maxFilePath - # hard-coded, should be customized in the setting - file = rt.maxFileName - folder = folder.replace("\\", "/") - # hard-coded, set the renderoutput path - setting = self._project_settings - render_folder = get_default_render_folder(setting) - filename, ext = os.path.splitext(file) - output_dir = 
os.path.join(folder, - render_folder, - filename) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - # hard-coded, should be customized in the setting - folder_attributes = get_current_folder_entity()["attrib"] - - # get project resolution - width = folder_attributes.get("resolutionWidth") - height = folder_attributes.get("resolutionHeight") - # Set Frame Range - frame_start = folder_attributes.get("frame_start") - frame_end = folder_attributes.get("frame_end") - set_render_frame_range(frame_start, frame_end) - # get the production render - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - output = os.path.join(output_dir, container) - try: - aov_separator = self._aov_chars[( - self._project_settings["max"] - ["RenderSettings"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "." - output_filename = f"{output}..{img_fmt}" - output_filename = output_filename.replace("{aov_separator}", - aov_separator) - rt.rendOutputFilename = output_filename - if renderer == "VUE_File_Renderer": - return - # TODO: Finish the arnold render setup - if renderer == "Arnold": - self.arnold_setup() - - if renderer in [ - "ART_Renderer", - "Redshift_Renderer", - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3", - "Default_Scanline_Renderer", - "Quicksilver_Hardware_Renderer", - ]: - self.render_element_layer(output, width, height, img_fmt) - - rt.rendSaveFile = True - - if rt.renderSceneDialog.isOpen(): - rt.renderSceneDialog.close() - - def arnold_setup(self): - # get Arnold RenderView run in the background - # for setting up renderable camera - arv = rt.MAXToAOps.ArnoldRenderView() - render_camera = rt.viewport.GetCamera() - if render_camera: - arv.setOption("Camera", str(render_camera)) - - # TODO: add AOVs and extension - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - setup_cmd = ( - f""" - amw = MaxtoAOps.AOVsManagerWindow() - amw.close() - aovmgr = renderers.current.AOVManager - aovmgr.drivers = #() - img_fmt = "{img_fmt}" - if img_fmt == "png" then driver = ArnoldPNGDriver() - if img_fmt == "jpg" then driver = ArnoldJPEGDriver() - if img_fmt == "exr" then driver = ArnoldEXRDriver() - if img_fmt == "tif" then driver = ArnoldTIFFDriver() - if img_fmt == "tiff" then driver = ArnoldTIFFDriver() - append aovmgr.drivers driver - aovmgr.drivers[1].aov_list = #() - """) - - rt.execute(setup_cmd) - arv.close() - - def render_element_layer(self, dir, width, height, ext): - """For Renderers with render elements""" - rt.renderWidth = width - rt.renderHeight = height - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - target, renderpass = str(renderlayer_name).split(":") - aov_name = f"{dir}_{renderpass}..{ext}" - render_elem.SetRenderElementFileName(i, aov_name) - - def get_render_output(self, container, output_dir): - output = os.path.join(output_dir, container) - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - output_filename = f"{output}..{img_fmt}" - return output_filename - - def get_render_element(self): - orig_render_elem = [] - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - - for i in range(render_elem_num): - render_element = 
render_elem.GetRenderElementFilename(i) - orig_render_elem.append(render_element) - - return orig_render_elem - - def get_batch_render_elements(self, container, - output_dir, camera): - render_element_list = list() - output = os.path.join(output_dir, container) - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - target, renderpass = str(renderlayer_name).split(":") - aov_name = f"{output}_{camera}_{renderpass}..{img_fmt}" - render_element_list.append(aov_name) - return render_element_list - - def get_batch_render_output(self, camera): - target_layer_no = rt.batchRenderMgr.FindView(camera) - target_layer = rt.batchRenderMgr.GetView(target_layer_no) - return target_layer.outputFilename - - def batch_render_elements(self, camera): - target_layer_no = rt.batchRenderMgr.FindView(camera) - target_layer = rt.batchRenderMgr.GetView(target_layer_no) - outputfilename = target_layer.outputFilename - directory = os.path.dirname(outputfilename) - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - ext = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - target, renderpass = str(renderlayer_name).split(":") - aov_name = f"{directory}_{camera}_{renderpass}..{ext}" - render_elem.SetRenderElementFileName(i, aov_name) - - def batch_render_layer(self, container, - output_dir, cameras): - outputs = list() - output = os.path.join(output_dir, container) - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - for cam in cameras: - camera = rt.getNodeByName(cam) - layer_no = rt.batchRenderMgr.FindView(cam) - renderlayer = None - if layer_no == 0: - renderlayer = rt.batchRenderMgr.CreateView(camera) - else: - renderlayer = rt.batchRenderMgr.GetView(layer_no) - # use camera name as renderlayer name - renderlayer.name = cam - renderlayer.outputFilename = f"{output}_{cam}..{img_fmt}" - outputs.append(renderlayer.outputFilename) - return outputs diff --git a/server_addon/max/client/ayon_max/api/menu.py b/server_addon/max/client/ayon_max/api/menu.py deleted file mode 100644 index 25dd39fd84..0000000000 --- a/server_addon/max/client/ayon_max/api/menu.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -"""3dsmax menu definition of AYON.""" -import os -from qtpy import QtWidgets, QtCore -from pymxs import runtime as rt - -from ayon_core.tools.utils import host_tools -from ayon_max.api import lib - - -class AYONMenu(object): - """Object representing AYON menu. - - This is using "hack" to inject itself before "Help" menu of 3dsmax. - For some reason `postLoadingMenus` event doesn't fire, and main menu - if probably re-initialized by menu templates, se we wait for at least - 1 event Qt event loop before trying to insert. - - """ - - def __init__(self): - super().__init__() - self.main_widget = self.get_main_widget() - self.menu = None - - timer = QtCore.QTimer() - # set number of event loops to wait. 
- timer.setInterval(1) - timer.timeout.connect(self._on_timer) - timer.start() - - self._timer = timer - self._counter = 0 - - def _on_timer(self): - if self._counter < 1: - self._counter += 1 - return - - self._counter = 0 - self._timer.stop() - self._build_ayon_menu() - - @staticmethod - def get_main_widget(): - """Get 3dsmax main window.""" - return QtWidgets.QWidget.find(rt.windows.getMAXHWND()) - - def get_main_menubar(self) -> QtWidgets.QMenuBar: - """Get main Menubar by 3dsmax main window.""" - return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0] - - def _get_or_create_ayon_menu( - self, name: str = "&AYON", - before: str = "&Help") -> QtWidgets.QAction: - """Create AYON menu. - - Args: - name (str, Optional): AYON menu name. - before (str, Optional): Name of the 3dsmax main menu item to - add AYON menu before. - - Returns: - QtWidgets.QAction: AYON menu action. - - """ - if self.menu is not None: - return self.menu - - menu_bar = self.get_main_menubar() - menu_items = menu_bar.findChildren( - QtWidgets.QMenu, options=QtCore.Qt.FindDirectChildrenOnly) - help_action = None - for item in menu_items: - if name in item.title(): - # we already have AYON menu - return item - - if before in item.title(): - help_action = item.menuAction() - tab_menu_label = os.environ.get("AYON_MENU_LABEL") or "AYON" - op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label)) - menu_bar.insertMenu(help_action, op_menu) - - self.menu = op_menu - return op_menu - - def _build_ayon_menu(self) -> QtWidgets.QAction: - """Build items in AYON menu.""" - ayon_menu = self._get_or_create_ayon_menu() - load_action = QtWidgets.QAction("Load...", ayon_menu) - load_action.triggered.connect(self.load_callback) - ayon_menu.addAction(load_action) - - publish_action = QtWidgets.QAction("Publish...", ayon_menu) - publish_action.triggered.connect(self.publish_callback) - ayon_menu.addAction(publish_action) - - manage_action = QtWidgets.QAction("Manage...", ayon_menu) - manage_action.triggered.connect(self.manage_callback) - ayon_menu.addAction(manage_action) - - library_action = QtWidgets.QAction("Library...", ayon_menu) - library_action.triggered.connect(self.library_callback) - ayon_menu.addAction(library_action) - - ayon_menu.addSeparator() - - workfiles_action = QtWidgets.QAction("Work Files...", ayon_menu) - workfiles_action.triggered.connect(self.workfiles_callback) - ayon_menu.addAction(workfiles_action) - - ayon_menu.addSeparator() - - res_action = QtWidgets.QAction("Set Resolution", ayon_menu) - res_action.triggered.connect(self.resolution_callback) - ayon_menu.addAction(res_action) - - frame_action = QtWidgets.QAction("Set Frame Range", ayon_menu) - frame_action.triggered.connect(self.frame_range_callback) - ayon_menu.addAction(frame_action) - - colorspace_action = QtWidgets.QAction("Set Colorspace", ayon_menu) - colorspace_action.triggered.connect(self.colorspace_callback) - ayon_menu.addAction(colorspace_action) - - unit_scale_action = QtWidgets.QAction("Set Unit Scale", ayon_menu) - unit_scale_action.triggered.connect(self.unit_scale_callback) - ayon_menu.addAction(unit_scale_action) - - return ayon_menu - - def load_callback(self): - """Callback to show Loader tool.""" - host_tools.show_loader(parent=self.main_widget) - - def publish_callback(self): - """Callback to show Publisher tool.""" - host_tools.show_publisher(parent=self.main_widget) - - def manage_callback(self): - """Callback to show Scene Manager/Inventory tool.""" - host_tools.show_scene_inventory(parent=self.main_widget) - - def 
library_callback(self): - """Callback to show Library Loader tool.""" - host_tools.show_library_loader(parent=self.main_widget) - - def workfiles_callback(self): - """Callback to show Workfiles tool.""" - host_tools.show_workfiles(parent=self.main_widget) - - def resolution_callback(self): - """Callback to reset scene resolution""" - return lib.reset_scene_resolution() - - def frame_range_callback(self): - """Callback to reset frame range""" - return lib.reset_frame_range() - - def colorspace_callback(self): - """Callback to reset colorspace""" - return lib.reset_colorspace() - - def unit_scale_callback(self): - """Callback to reset unit scale""" - return lib.reset_unit_scale() diff --git a/server_addon/max/client/ayon_max/api/pipeline.py b/server_addon/max/client/ayon_max/api/pipeline.py deleted file mode 100644 index a87cd657ce..0000000000 --- a/server_addon/max/client/ayon_max/api/pipeline.py +++ /dev/null @@ -1,297 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pipeline tools for AYON 3ds max integration.""" -import os -import logging -from operator import attrgetter - -import json - -from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -import pyblish.api -from ayon_core.pipeline import ( - register_creator_plugin_path, - register_loader_plugin_path, - AVALON_CONTAINER_ID, - AYON_CONTAINER_ID, -) -from ayon_max.api.menu import AYONMenu -from ayon_max.api import lib -from ayon_max.api.plugin import MS_CUSTOM_ATTRIB -from ayon_max import MAX_HOST_DIR - -from pymxs import runtime as rt # noqa - -log = logging.getLogger("ayon_max") - -PLUGINS_DIR = os.path.join(MAX_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - - name = "max" - menu = None - - def __init__(self): - super(MaxHost, self).__init__() - self._op_events = {} - self._has_been_setup = False - - def install(self): - pyblish.api.register_host("max") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - # self._register_callbacks() - self.menu = AYONMenu() - - self._has_been_setup = True - - rt.callbacks.addScript(rt.Name('systemPostNew'), on_new) - - rt.callbacks.addScript(rt.Name('filePostOpen'), - lib.check_colorspace) - - rt.callbacks.addScript(rt.Name('postWorkspaceChange'), - self._deferred_menu_creation) - rt.NodeEventCallback( - nameChanged=lib.update_modifier_node_names) - - def workfile_has_unsaved_changes(self): - return rt.getSaveRequired() - - def get_workfile_extensions(self): - return [".max"] - - def save_workfile(self, dst_path=None): - rt.saveMaxFile(dst_path) - return dst_path - - def open_workfile(self, filepath): - rt.checkForSave() - rt.loadMaxFile(filepath) - return filepath - - def get_current_workfile(self): - return os.path.join(rt.maxFilePath, rt.maxFileName) - - def get_containers(self): - return ls() - - def _register_callbacks(self): - rt.callbacks.removeScripts(id=rt.name("OpenPypeCallbacks")) - - rt.callbacks.addScript( - rt.Name("postLoadingMenus"), - self._deferred_menu_creation, id=rt.Name('OpenPypeCallbacks')) - - def _deferred_menu_creation(self): - self.log.info("Building menu ...") - self.menu = AYONMenu() - - @staticmethod - def create_context_node(): - """Helper for creating context holding node.""" - - root_scene = rt.rootScene - - 
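# The MaxScript assigned below defines a scripted custom attribute named
# "OpenPypeContext" and attaches it to rt.rootScene. Its single string
# parameter is the storage that update_context_data() fills with
# json.dumps(data) and that get_context_data() reads back with json.loads(),
# so the publish context is kept on the scene root.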
create_attr_script = (""" -attributes "OpenPypeContext" -( - parameters main rollout:params - ( - context type: #string - ) - - rollout params "OpenPype Parameters" - ( - editText editTextContext "Context" type: #string - ) -) - """) - - attr = rt.execute(create_attr_script) - rt.custAttributes.add(root_scene, attr) - - return root_scene.OpenPypeContext.context - - def update_context_data(self, data, changes): - try: - _ = rt.rootScene.OpenPypeContext.context - except AttributeError: - # context node doesn't exists - self.create_context_node() - - rt.rootScene.OpenPypeContext.context = json.dumps(data) - - def get_context_data(self): - try: - context = rt.rootScene.OpenPypeContext.context - except AttributeError: - # context node doesn't exists - context = self.create_context_node() - if not context: - context = "{}" - return json.loads(context) - - def save_file(self, dst_path=None): - # Force forwards slashes to avoid segfault - dst_path = dst_path.replace("\\", "/") - rt.saveMaxFile(dst_path) - - -def parse_container(container): - """Return the container node's full container data. - - Args: - container (str): A container node name. - - Returns: - dict: The container schema data for this container node. - - """ - data = lib.read(container) - - # Backwards compatibility pre-schemas for containers - data["schema"] = data.get("schema", "openpype:container-3.0") - - # Append transient data - data["objectName"] = container.Name - return data - - -def ls(): - """Get all AYON containers.""" - objs = rt.objects - containers = [ - obj for obj in objs - if rt.getUserProp(obj, "id") in { - AYON_CONTAINER_ID, AVALON_CONTAINER_ID - } - ] - - for container in sorted(containers, key=attrgetter("name")): - yield parse_container(container) - - -def on_new(): - lib.set_context_setting() - if rt.checkForSave(): - rt.resetMaxFile(rt.Name("noPrompt")) - rt.clearUndoBuffer() - rt.redrawViews() - - -def containerise(name: str, nodes: list, context, - namespace=None, loader=None, suffix="_CON"): - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or "", - "loader": loader, - "representation": context["representation"]["id"], - } - container_name = f"{namespace}:{name}{suffix}" - container = rt.container(name=container_name) - import_custom_attribute_data(container, nodes) - if not lib.imprint(container_name, data): - print(f"imprinting of {container_name} failed.") - return container - - -def load_custom_attribute_data(): - """Re-loading the AYON custom parameter built by the creator - - Returns: - attribute: re-loading the custom OP attributes set in Maxscript - """ - return rt.Execute(MS_CUSTOM_ATTRIB) - - -def import_custom_attribute_data(container: str, selections: list): - """Importing the Openpype/AYON custom parameter built by the creator - - Args: - container (str): target container which adds custom attributes - selections (list): nodes to be added into - group in custom attributes - """ - attrs = load_custom_attribute_data() - modifier = rt.EmptyModifier() - rt.addModifier(container, modifier) - container.modifiers[0].name = "OP Data" - rt.custAttributes.add(container.modifiers[0], attrs) - node_list = [] - sel_list = [] - for i in selections: - node_ref = rt.NodeTransformMonitor(node=i) - node_list.append(node_ref) - sel_list.append(str(i)) - - # Setting the property - rt.setProperty( - container.modifiers[0].openPypeData, - "all_handles", node_list) - rt.setProperty( - container.modifiers[0].openPypeData, - "sel_list", sel_list) - - 
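# Illustrative only: a minimal sketch of how the loader plugins later in this
# patch combine the helpers above. load_nodes_into_container and
# "ExampleLoader" are hypothetical names; `nodes` is assumed to be the list of
# freshly imported scene nodes.
from ayon_max.api.lib import unique_namespace
from ayon_max.api.pipeline import containerise


def load_nodes_into_container(name, context, nodes):
    # A namespace keeps repeated loads of the same product apart.
    namespace = unique_namespace(name + "_", suffix="_")

    # Prefix every imported node so it stays traceable to this load.
    for node in nodes:
        node.name = f"{namespace}:{node.name}"

    # containerise() creates the container node, attaches the "OP Data"
    # custom attribute via import_custom_attribute_data() and imprints the
    # representation id, so the container can later be updated or removed.
    return containerise(
        name, nodes, context,
        namespace=namespace, loader="ExampleLoader")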
-def update_custom_attribute_data(container: str, selections: list): - """Updating the AYON custom parameter built by the creator - - Args: - container (str): target container which adds custom attributes - selections (list): nodes to be added into - group in custom attributes - """ - if container.modifiers[0].name == "OP Data": - rt.deleteModifier(container, container.modifiers[0]) - import_custom_attribute_data(container, selections) - - -def get_previous_loaded_object(container: str): - """Get previous loaded_object through the OP data - - Args: - container (str): the container which stores the OP data - - Returns: - node_list(list): list of nodes which are previously loaded - """ - node_list = [] - node_transform_monitor_list = rt.getProperty( - container.modifiers[0].openPypeData, "all_handles") - for node_transform_monitor in node_transform_monitor_list: - node_list.append(node_transform_monitor.node) - return node_list - - -def remove_container_data(container_node: str): - """Function to remove container data after updating, switching or deleting it. - - Args: - container_node (str): container node - """ - if container_node.modifiers[0].name == "OP Data": - all_set_members_names = [ - member.node for member - in container_node.modifiers[0].openPypeData.all_handles] - # clean up the children of alembic dummy objects - for current_set_member in all_set_members_names: - shape_list = [members for members in current_set_member.Children - if rt.ClassOf(members) == rt.AlembicObject - or rt.isValidNode(members)] - if shape_list: # noqa - rt.Delete(shape_list) - rt.Delete(current_set_member) - rt.deleteModifier(container_node, container_node.modifiers[0]) - - rt.Delete(container_node) - rt.redrawViews() diff --git a/server_addon/max/client/ayon_max/api/plugin.py b/server_addon/max/client/ayon_max/api/plugin.py deleted file mode 100644 index e5d12ce87d..0000000000 --- a/server_addon/max/client/ayon_max/api/plugin.py +++ /dev/null @@ -1,298 +0,0 @@ -# -*- coding: utf-8 -*- -"""3dsmax specific AYON/Pyblish plugin definitions.""" -from abc import ABCMeta - -import six -from pymxs import runtime as rt - -from ayon_core.lib import BoolDef -from ayon_core.pipeline import ( - CreatedInstance, - Creator, - CreatorError, - AYON_INSTANCE_ID, - AVALON_INSTANCE_ID, -) - -from .lib import imprint, lsattr, read - -MS_CUSTOM_ATTRIB = """attributes "openPypeData" -( - parameters main rollout:OPparams - ( - all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on - sel_list type:#stringTab tabSize:0 tabSizeVariable:on - ) - - rollout OPparams "OP Parameters" - ( - listbox list_node "Node References" items:#() - button button_add "Add to Container" - button button_del "Delete from Container" - - fn node_to_name the_node = - ( - handle = the_node.handle - obj_name = the_node.name - handle_name = obj_name + "<" + handle as string + ">" - return handle_name - ) - fn nodes_to_add node = - ( - sceneObjs = #() - if classOf node == Container do return false - n = node as string - for obj in Objects do - ( - tmp_obj = obj as string - append sceneObjs tmp_obj - ) - if sel_list != undefined do - ( - for obj in sel_list do - ( - idx = findItem sceneObjs obj - if idx do - ( - deleteItem sceneObjs idx - ) - ) - ) - idx = findItem sceneObjs n - if idx then return true else false - ) - - fn nodes_to_rmv node = - ( - n = node as string - idx = findItem sel_list n - if idx then return true else false - ) - - on button_add pressed do - ( - current_sel = selectByName title:"Select Objects to add to - the Container" 
buttontext:"Add" filter:nodes_to_add - if current_sel == undefined then return False - temp_arr = #() - i_node_arr = #() - for c in current_sel do - ( - handle_name = node_to_name c - node_ref = NodeTransformMonitor node:c - idx = finditem list_node.items handle_name - if idx do ( - continue - ) - name = c as string - append temp_arr handle_name - append i_node_arr node_ref - append sel_list name - ) - all_handles = join i_node_arr all_handles - list_node.items = join temp_arr list_node.items - ) - - on button_del pressed do - ( - current_sel = selectByName title:"Select Objects to remove - from the Container" buttontext:"Remove" filter: nodes_to_rmv - if current_sel == undefined or current_sel.count == 0 then - ( - return False - ) - temp_arr = #() - i_node_arr = #() - new_i_node_arr = #() - new_temp_arr = #() - - for c in current_sel do - ( - node_ref = NodeTransformMonitor node:c as string - handle_name = node_to_name c - n = c as string - tmp_all_handles = #() - for i in all_handles do - ( - tmp = i as string - append tmp_all_handles tmp - ) - idx = finditem tmp_all_handles node_ref - if idx do - ( - new_i_node_arr = DeleteItem all_handles idx - - ) - idx = finditem list_node.items handle_name - if idx do - ( - new_temp_arr = DeleteItem list_node.items idx - ) - idx = finditem sel_list n - if idx do - ( - sel_list = DeleteItem sel_list idx - ) - ) - all_handles = join i_node_arr new_i_node_arr - list_node.items = join temp_arr new_temp_arr - ) - - on OPparams open do - ( - if all_handles.count != 0 then - ( - temp_arr = #() - for x in all_handles do - ( - if x.node == undefined do continue - handle_name = node_to_name x.node - append temp_arr handle_name - ) - list_node.items = temp_arr - ) - ) - ) -)""" - - -class MaxCreatorBase(object): - - @staticmethod - def cache_instance_data(shared_data): - if shared_data.get("max_cached_instances") is not None: - return shared_data - - shared_data["max_cached_instances"] = {} - - cached_instances = [] - for id_type in [AYON_INSTANCE_ID, AVALON_INSTANCE_ID]: - cached_instances.extend(lsattr("id", id_type)) - - for i in cached_instances: - creator_id = rt.GetUserProp(i, "creator_identifier") - if creator_id not in shared_data["max_cached_instances"]: - shared_data["max_cached_instances"][creator_id] = [i.name] - else: - shared_data[ - "max_cached_instances"][creator_id].append(i.name) - return shared_data - - @staticmethod - def create_instance_node(node): - """Create instance node. - - If the supplied node is existing node, it will be used to hold the - instance, otherwise new node of type Dummy will be created. - - Args: - node (rt.MXSWrapperBase, str): Node or node name to use. 
- - Returns: - instance - """ - if isinstance(node, str): - node = rt.Container(name=node) - - attrs = rt.Execute(MS_CUSTOM_ATTRIB) - modifier = rt.EmptyModifier() - rt.addModifier(node, modifier) - node.modifiers[0].name = "OP Data" - rt.custAttributes.add(node.modifiers[0], attrs) - - return node - - -@six.add_metaclass(ABCMeta) -class MaxCreator(Creator, MaxCreatorBase): - selected_nodes = [] - - def create(self, product_name, instance_data, pre_create_data): - if pre_create_data.get("use_selection"): - self.selected_nodes = rt.GetCurrentSelection() - if rt.getNodeByName(product_name): - raise CreatorError(f"'{product_name}' is already created..") - - instance_node = self.create_instance_node(product_name) - instance_data["instance_node"] = instance_node.name - instance = CreatedInstance( - self.product_type, - product_name, - instance_data, - self - ) - if pre_create_data.get("use_selection"): - - node_list = [] - sel_list = [] - for i in self.selected_nodes: - node_ref = rt.NodeTransformMonitor(node=i) - node_list.append(node_ref) - sel_list.append(str(i)) - - # Setting the property - rt.setProperty( - instance_node.modifiers[0].openPypeData, - "all_handles", node_list) - rt.setProperty( - instance_node.modifiers[0].openPypeData, - "sel_list", sel_list) - - self._add_instance_to_context(instance) - imprint(instance_node.name, instance.data_to_store()) - - return instance - - def collect_instances(self): - self.cache_instance_data(self.collection_shared_data) - for instance in self.collection_shared_data["max_cached_instances"].get(self.identifier, []): # noqa - created_instance = CreatedInstance.from_existing( - read(rt.GetNodeByName(instance)), self - ) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, changes in update_list: - instance_node = created_inst.get("instance_node") - new_values = { - key: changes[key].new_value - for key in changes.changed_keys - } - product_name = new_values.get("productName", "") - if product_name and instance_node != product_name: - node = rt.getNodeByName(instance_node) - new_product_name = new_values["productName"] - if rt.getNodeByName(new_product_name): - raise CreatorError( - "The product '{}' already exists.".format( - new_product_name)) - instance_node = new_product_name - created_inst["instance_node"] = instance_node - node.name = instance_node - - imprint( - instance_node, - created_inst.data_to_store(), - ) - - def remove_instances(self, instances): - """Remove specified instance from the scene. - - This is only removing `id` parameter so instance is no longer - instance, because it might contain valuable data for artist. 
- - """ - for instance in instances: - instance_node = rt.GetNodeByName( - instance.data.get("instance_node")) - if instance_node: - count = rt.custAttributes.count(instance_node.modifiers[0]) - rt.custAttributes.delete(instance_node.modifiers[0], count) - rt.Delete(instance_node) - - self._remove_instance_from_context(instance) - - def get_pre_create_attr_defs(self): - return [ - BoolDef("use_selection", label="Use selection") - ] diff --git a/server_addon/max/client/ayon_max/api/preview_animation.py b/server_addon/max/client/ayon_max/api/preview_animation.py deleted file mode 100644 index acda5360a1..0000000000 --- a/server_addon/max/client/ayon_max/api/preview_animation.py +++ /dev/null @@ -1,344 +0,0 @@ -import logging -import contextlib -from pymxs import runtime as rt -from .lib import get_max_version, render_resolution - -log = logging.getLogger("ayon_max") - - -@contextlib.contextmanager -def play_preview_when_done(has_autoplay): - """Set preview playback option during context - - Args: - has_autoplay (bool): autoplay during creating - preview animation - """ - current_playback = rt.preferences.playPreviewWhenDone - try: - rt.preferences.playPreviewWhenDone = has_autoplay - yield - finally: - rt.preferences.playPreviewWhenDone = current_playback - - -@contextlib.contextmanager -def viewport_layout_and_camera(camera, layout="layout_1"): - """Set viewport layout and camera during context - ***For 3dsMax 2024+ - Args: - camera (str): viewport camera - layout (str): layout to use in viewport, defaults to `layout_1` - Use None to not change viewport layout during context. - """ - needs_maximise = 0 - # Set to first active non extended viewport - rt.viewport.activeViewportEx(1) - original_camera = rt.viewport.getCamera() - original_type = rt.viewport.getType() - review_camera = rt.getNodeByName(camera) - - try: - if rt.viewport.getLayout() != rt.name(layout): - rt.execute("max tool maximize") - needs_maximise = 1 - rt.viewport.setCamera(review_camera) - yield - finally: - if needs_maximise == 1: - rt.execute("max tool maximize") - if original_type == rt.Name("view_camera"): - rt.viewport.setCamera(original_camera) - else: - rt.viewport.setType(original_type) - - -@contextlib.contextmanager -def viewport_preference_setting(general_viewport, - nitrous_manager, - nitrous_viewport, - vp_button_mgr): - """Function to set viewport setting during context - ***For Max Version < 2024 - Args: - camera (str): Viewport camera for review render - general_viewport (dict): General viewport setting - nitrous_manager (dict): Nitrous graphic manager - nitrous_viewport (dict): Nitrous setting for - preview animation - vp_button_mgr (dict): Viewport button manager Setting - preview_preferences (dict): Preview Preferences Setting - """ - orig_vp_grid = rt.viewport.getGridVisibility(1) - orig_vp_bkg = rt.viewport.IsSolidBackgroundColorMode() - - nitrousGraphicMgr = rt.NitrousGraphicsManager - viewport_setting = nitrousGraphicMgr.GetActiveViewportSetting() - vp_button_mgr_original = { - key: getattr(rt.ViewportButtonMgr, key) for key in vp_button_mgr - } - nitrous_manager_original = { - key: getattr(nitrousGraphicMgr, key) for key in nitrous_manager - } - nitrous_viewport_original = { - key: getattr(viewport_setting, key) for key in nitrous_viewport - } - - try: - rt.viewport.setGridVisibility(1, general_viewport["dspGrid"]) - rt.viewport.EnableSolidBackgroundColorMode(general_viewport["dspBkg"]) - for key, value in vp_button_mgr.items(): - setattr(rt.ViewportButtonMgr, key, value) - for key, value in 
nitrous_manager.items(): - setattr(nitrousGraphicMgr, key, value) - for key, value in nitrous_viewport.items(): - if nitrous_viewport[key] != nitrous_viewport_original[key]: - setattr(viewport_setting, key, value) - yield - - finally: - rt.viewport.setGridVisibility(1, orig_vp_grid) - rt.viewport.EnableSolidBackgroundColorMode(orig_vp_bkg) - for key, value in vp_button_mgr_original.items(): - setattr(rt.ViewportButtonMgr, key, value) - for key, value in nitrous_manager_original.items(): - setattr(nitrousGraphicMgr, key, value) - for key, value in nitrous_viewport_original.items(): - setattr(viewport_setting, key, value) - - -def _render_preview_animation_max_2024( - filepath, start, end, percentSize, ext, viewport_options): - """Render viewport preview with MaxScript using `CreateAnimation`. - ****For 3dsMax 2024+ - Args: - filepath (str): filepath for render output without frame number and - extension, for example: /path/to/file - start (int): startFrame - end (int): endFrame - percentSize (float): render resolution multiplier by 100 - e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x - viewport_options (dict): viewport setting options, e.g. - {"vpStyle": "defaultshading", "vpPreset": "highquality"} - Returns: - list: Created files - """ - # the percentSize argument must be integer - percent = int(percentSize) - filepath = filepath.replace("\\", "/") - preview_output = f"{filepath}..{ext}" - frame_template = f"{filepath}.{{:04d}}.{ext}" - job_args = [] - for key, value in viewport_options.items(): - if isinstance(value, bool): - if value: - job_args.append(f"{key}:{value}") - elif isinstance(value, str): - if key == "vpStyle": - if value == "Realistic": - value = "defaultshading" - elif value == "Shaded": - log.warning( - "'Shaded' Mode not supported in " - "preview animation in Max 2024.\n" - "Using 'defaultshading' instead.") - value = "defaultshading" - elif value == "ConsistentColors": - value = "flatcolor" - else: - value = value.lower() - elif key == "vpPreset": - if value == "Quality": - value = "highquality" - elif value == "Customize": - value = "userdefined" - else: - value = value.lower() - job_args.append(f"{key}: #{value}") - - job_str = ( - f'CreatePreview filename:"{preview_output}" outputAVI:false ' - f"percentSize:{percent} start:{start} end:{end} " - f"{' '.join(job_args)} " - "autoPlay:false" - ) - rt.completeRedraw() - rt.execute(job_str) - # Return the created files - return [frame_template.format(frame) for frame in range(start, end + 1)] - - -def _render_preview_animation_max_pre_2024( - filepath, startFrame, endFrame, - width, height, percentSize, ext): - """Render viewport animation by creating bitmaps - ***For 3dsMax Version <2024 - Args: - filepath (str): filepath without frame numbers and extension - startFrame (int): start frame - endFrame (int): end frame - width (int): render resolution width - height (int): render resolution height - percentSize (float): render resolution multiplier by 100 - e.g. 
100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x - ext (str): image extension - Returns: - list: Created filepaths - """ - - # get the screenshot - percent = percentSize / 100.0 - res_width = width * percent - res_height = height * percent - frame_template = "{}.{{:04}}.{}".format(filepath, ext) - frame_template.replace("\\", "/") - files = [] - user_cancelled = False - for frame in range(startFrame, endFrame + 1): - rt.sliderTime = frame - filepath = frame_template.format(frame) - preview_res = rt.bitmap( - res_width, res_height, filename=filepath - ) - dib = rt.gw.getViewportDib() - dib_width = float(dib.width) - dib_height = float(dib.height) - # aspect ratio - viewportRatio = dib_width / dib_height - renderRatio = float(res_width / res_height) - if viewportRatio < renderRatio: - heightCrop = (dib_width / renderRatio) - topEdge = int((dib_height - heightCrop) / 2.0) - tempImage_bmp = rt.bitmap(dib_width, heightCrop) - src_box_value = rt.Box2(0, topEdge, dib_width, heightCrop) - rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0)) - rt.copy(tempImage_bmp, preview_res) - rt.close(tempImage_bmp) - elif viewportRatio > renderRatio: - widthCrop = dib_height * renderRatio - leftEdge = int((dib_width - widthCrop) / 2.0) - tempImage_bmp = rt.bitmap(widthCrop, dib_height) - src_box_value = rt.Box2(leftEdge, 0, widthCrop, dib_height) - rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0)) - rt.copy(tempImage_bmp, preview_res) - rt.close(tempImage_bmp) - else: - rt.copy(dib, preview_res) - rt.save(preview_res) - rt.close(preview_res) - rt.close(dib) - files.append(filepath) - if rt.keyboard.escPressed: - user_cancelled = True - break - # clean up the cache - rt.gc(delayed=True) - if user_cancelled: - raise RuntimeError("User cancelled rendering of viewport animation.") - return files - - -def render_preview_animation( - filepath, - ext, - camera, - start_frame=None, - end_frame=None, - percentSize=100.0, - width=1920, - height=1080, - viewport_options=None): - """Render camera review animation - Args: - filepath (str): filepath to render to, without frame number and - extension - ext (str): output file extension - camera (str): viewport camera for preview render - start_frame (int): start frame - end_frame (int): end frame - percentSize (float): render resolution multiplier by 100 - e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x - width (int): render resolution width - height (int): render resolution height - viewport_options (dict): viewport setting options - Returns: - list: Rendered output files - """ - if start_frame is None: - start_frame = int(rt.animationRange.start) - if end_frame is None: - end_frame = int(rt.animationRange.end) - - if viewport_options is None: - viewport_options = viewport_options_for_preview_animation() - with play_preview_when_done(False): - with viewport_layout_and_camera(camera): - if int(get_max_version()) < 2024: - with viewport_preference_setting( - viewport_options["general_viewport"], - viewport_options["nitrous_manager"], - viewport_options["nitrous_viewport"], - viewport_options["vp_btn_mgr"] - ): - return _render_preview_animation_max_pre_2024( - filepath, - start_frame, - end_frame, - width, - height, - percentSize, - ext - ) - else: - with render_resolution(width, height): - return _render_preview_animation_max_2024( - filepath, - start_frame, - end_frame, - percentSize, - ext, - viewport_options - ) - - -def viewport_options_for_preview_animation(): - """Get default viewport options for `render_preview_animation`. 
- - Returns: - dict: viewport setting options - """ - # viewport_options should be the dictionary - if int(get_max_version()) < 2024: - return { - "visualStyleMode": "defaultshading", - "viewportPreset": "highquality", - "vpTexture": False, - "dspGeometry": True, - "dspShapes": False, - "dspLights": False, - "dspCameras": False, - "dspHelpers": False, - "dspParticles": True, - "dspBones": False, - "dspBkg": True, - "dspGrid": False, - "dspSafeFrame": False, - "dspFrameNums": False - } - else: - viewport_options = {} - viewport_options["general_viewport"] = { - "dspBkg": True, - "dspGrid": False - } - viewport_options["nitrous_manager"] = { - "AntialiasingQuality": "None" - } - viewport_options["nitrous_viewport"] = { - "VisualStyleMode": "defaultshading", - "ViewportPreset": "highquality", - "UseTextureEnabled": False - } - viewport_options["vp_btn_mgr"] = { - "EnableButtons": False} - return viewport_options diff --git a/server_addon/max/client/ayon_max/hooks/force_startup_script.py b/server_addon/max/client/ayon_max/hooks/force_startup_script.py deleted file mode 100644 index 1699ea300a..0000000000 --- a/server_addon/max/client/ayon_max/hooks/force_startup_script.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pre-launch to force 3ds max startup script.""" -import os -from ayon_max import MAX_HOST_DIR -from ayon_applications import PreLaunchHook, LaunchTypes - - -class ForceStartupScript(PreLaunchHook): - """Inject AYON environment to 3ds max. - - Note that this works in combination whit 3dsmax startup script that - is translating it back to PYTHONPATH for cases when 3dsmax drops PYTHONPATH - environment. - - Hook `GlobalHostDataHook` must be executed before this hook. - """ - app_groups = {"3dsmax", "adsk_3dsmax"} - order = 11 - launch_types = {LaunchTypes.local} - - def execute(self): - startup_args = [ - "-U", - "MAXScript", - os.path.join(MAX_HOST_DIR, "startup", "startup.ms"), - ] - self.launch_context.launch_args.append(startup_args) diff --git a/server_addon/max/client/ayon_max/hooks/inject_python.py b/server_addon/max/client/ayon_max/hooks/inject_python.py deleted file mode 100644 index fc9626ab87..0000000000 --- a/server_addon/max/client/ayon_max/hooks/inject_python.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pre-launch hook to inject python environment.""" -import os -from ayon_applications import PreLaunchHook, LaunchTypes - - -class InjectPythonPath(PreLaunchHook): - """Inject AYON environment to 3dsmax. - - Note that this works in combination whit 3dsmax startup script that - is translating it back to PYTHONPATH for cases when 3dsmax drops PYTHONPATH - environment. - - Hook `GlobalHostDataHook` must be executed before this hook. - """ - app_groups = {"3dsmax", "adsk_3dsmax"} - launch_types = {LaunchTypes.local} - - def execute(self): - self.launch_context.env["MAX_PYTHONPATH"] = os.environ["PYTHONPATH"] diff --git a/server_addon/max/client/ayon_max/hooks/set_paths.py b/server_addon/max/client/ayon_max/hooks/set_paths.py deleted file mode 100644 index f066de092e..0000000000 --- a/server_addon/max/client/ayon_max/hooks/set_paths.py +++ /dev/null @@ -1,18 +0,0 @@ -from ayon_applications import PreLaunchHook, LaunchTypes - - -class SetPath(PreLaunchHook): - """Set current dir to workdir. - - Hook `GlobalHostDataHook` must be executed before this hook. 
- """ - app_groups = {"max"} - launch_types = {LaunchTypes.local} - - def execute(self): - workdir = self.launch_context.env.get("AYON_WORKDIR", "") - if not workdir: - self.log.warning("BUG: Workdir is not filled.") - return - - self.launch_context.kwargs["cwd"] = workdir diff --git a/server_addon/max/client/ayon_max/plugins/__init__.py b/server_addon/max/client/ayon_max/plugins/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/max/client/ayon_max/plugins/create/create_camera.py b/server_addon/max/client/ayon_max/plugins/create/create_camera.py deleted file mode 100644 index 451e178afc..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_camera.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating camera.""" -from ayon_max.api import plugin - - -class CreateCamera(plugin.MaxCreator): - """Creator plugin for Camera.""" - identifier = "io.openpype.creators.max.camera" - label = "Camera" - product_type = "camera" - icon = "gear" - - settings_category = "max" diff --git a/server_addon/max/client/ayon_max/plugins/create/create_maxScene.py b/server_addon/max/client/ayon_max/plugins/create/create_maxScene.py deleted file mode 100644 index ee58ef663d..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_maxScene.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating raw max scene.""" -from ayon_max.api import plugin - - -class CreateMaxScene(plugin.MaxCreator): - """Creator plugin for 3ds max scenes.""" - identifier = "io.openpype.creators.max.maxScene" - label = "Max Scene" - product_type = "maxScene" - icon = "gear" - - settings_category = "max" diff --git a/server_addon/max/client/ayon_max/plugins/create/create_model.py b/server_addon/max/client/ayon_max/plugins/create/create_model.py deleted file mode 100644 index f48182ecd7..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_model.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for model.""" -from ayon_max.api import plugin - - -class CreateModel(plugin.MaxCreator): - """Creator plugin for Model.""" - identifier = "io.openpype.creators.max.model" - label = "Model" - product_type = "model" - icon = "gear" - - settings_category = "max" diff --git a/server_addon/max/client/ayon_max/plugins/create/create_pointcache.py b/server_addon/max/client/ayon_max/plugins/create/create_pointcache.py deleted file mode 100644 index 6d7aabe12c..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_pointcache.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating pointcache alembics.""" -from ayon_max.api import plugin - - -class CreatePointCache(plugin.MaxCreator): - """Creator plugin for Point caches.""" - identifier = "io.openpype.creators.max.pointcache" - label = "Point Cache" - product_type = "pointcache" - icon = "gear" - - settings_category = "max" diff --git a/server_addon/max/client/ayon_max/plugins/create/create_pointcloud.py b/server_addon/max/client/ayon_max/plugins/create/create_pointcloud.py deleted file mode 100644 index 52014d77b2..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_pointcloud.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating point cloud.""" -from ayon_max.api import plugin - - -class CreatePointCloud(plugin.MaxCreator): - """Creator plugin for Point Clouds.""" - identifier = "io.openpype.creators.max.pointcloud" 
- label = "Point Cloud" - product_type = "pointcloud" - icon = "gear" - - settings_category = "max" diff --git a/server_addon/max/client/ayon_max/plugins/create/create_redshift_proxy.py b/server_addon/max/client/ayon_max/plugins/create/create_redshift_proxy.py deleted file mode 100644 index bcc96c7efe..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_redshift_proxy.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating camera.""" -from ayon_max.api import plugin - - -class CreateRedshiftProxy(plugin.MaxCreator): - identifier = "io.openpype.creators.max.redshiftproxy" - label = "Redshift Proxy" - product_type = "redshiftproxy" - icon = "gear" - - settings_category = "max" diff --git a/server_addon/max/client/ayon_max/plugins/create/create_render.py b/server_addon/max/client/ayon_max/plugins/create/create_render.py deleted file mode 100644 index d1e236f3ef..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_render.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating camera.""" -import os -from ayon_max.api import plugin -from ayon_core.lib import BoolDef -from ayon_max.api.lib_rendersettings import RenderSettings - - -class CreateRender(plugin.MaxCreator): - """Creator plugin for Renders.""" - identifier = "io.openpype.creators.max.render" - label = "Render" - product_type = "maxrender" - icon = "gear" - - settings_category = "max" - - def create(self, product_name, instance_data, pre_create_data): - from pymxs import runtime as rt - file = rt.maxFileName - filename, _ = os.path.splitext(file) - instance_data["AssetName"] = filename - instance_data["multiCamera"] = pre_create_data.get("multi_cam") - num_of_renderlayer = rt.batchRenderMgr.numViews - if num_of_renderlayer > 0: - rt.batchRenderMgr.DeleteView(num_of_renderlayer) - - instance = super(CreateRender, self).create( - product_name, - instance_data, - pre_create_data) - - container_name = instance.data.get("instance_node") - # set output paths for rendering(mandatory for deadline) - RenderSettings().render_output(container_name) - # TODO: create multiple camera options - if self.selected_nodes: - selected_nodes_name = [] - for sel in self.selected_nodes: - name = sel.name - selected_nodes_name.append(name) - RenderSettings().batch_render_layer( - container_name, filename, - selected_nodes_name) - - def get_pre_create_attr_defs(self): - attrs = super(CreateRender, self).get_pre_create_attr_defs() - return attrs + [ - BoolDef("multi_cam", - label="Multiple Cameras Submission", - default=False), - ] diff --git a/server_addon/max/client/ayon_max/plugins/create/create_review.py b/server_addon/max/client/ayon_max/plugins/create/create_review.py deleted file mode 100644 index a49490519a..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_review.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating review in Max.""" -from ayon_max.api import plugin -from ayon_core.lib import BoolDef, EnumDef, NumberDef - - -class CreateReview(plugin.MaxCreator): - """Review in 3dsMax""" - - identifier = "io.openpype.creators.max.review" - label = "Review" - product_type = "review" - icon = "video-camera" - - settings_category = "max" - - review_width = 1920 - review_height = 1080 - percentSize = 100 - keep_images = False - image_format = "png" - visual_style = "Realistic" - viewport_preset = "Quality" - vp_texture = True - anti_aliasing = "None" - - def apply_settings(self, 
project_settings): - settings = project_settings["max"]["CreateReview"] # noqa - - # Take some defaults from settings - self.review_width = settings.get("review_width", self.review_width) - self.review_height = settings.get("review_height", self.review_height) - self.percentSize = settings.get("percentSize", self.percentSize) - self.keep_images = settings.get("keep_images", self.keep_images) - self.image_format = settings.get("image_format", self.image_format) - self.visual_style = settings.get("visual_style", self.visual_style) - self.viewport_preset = settings.get( - "viewport_preset", self.viewport_preset) - self.anti_aliasing = settings.get( - "anti_aliasing", self.anti_aliasing) - self.vp_texture = settings.get("vp_texture", self.vp_texture) - - def create(self, product_name, instance_data, pre_create_data): - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - for key in ["imageFormat", - "keepImages", - "review_width", - "review_height", - "percentSize", - "visualStyleMode", - "viewportPreset", - "antialiasingQuality", - "vpTexture"]: - if key in pre_create_data: - creator_attributes[key] = pre_create_data[key] - - super(CreateReview, self).create( - product_name, - instance_data, - pre_create_data) - - def get_instance_attr_defs(self): - image_format_enum = ["exr", "jpg", "png", "tga"] - - visual_style_preset_enum = [ - "Realistic", "Shaded", "Facets", - "ConsistentColors", "HiddenLine", - "Wireframe", "BoundingBox", "Ink", - "ColorInk", "Acrylic", "Tech", "Graphite", - "ColorPencil", "Pastel", "Clay", "ModelAssist" - ] - preview_preset_enum = [ - "Quality", "Standard", "Performance", - "DXMode", "Customize"] - anti_aliasing_enum = ["None", "2X", "4X", "8X"] - - return [ - NumberDef("review_width", - label="Review width", - decimals=0, - minimum=0, - default=self.review_width), - NumberDef("review_height", - label="Review height", - decimals=0, - minimum=0, - default=self.review_height), - NumberDef("percentSize", - label="Percent of Output", - default=self.percentSize, - minimum=1, - decimals=0), - BoolDef("keepImages", - label="Keep Image Sequences", - default=self.keep_images), - EnumDef("imageFormat", - image_format_enum, - default=self.image_format, - label="Image Format Options"), - EnumDef("visualStyleMode", - visual_style_preset_enum, - default=self.visual_style, - label="Preference"), - EnumDef("viewportPreset", - preview_preset_enum, - default=self.viewport_preset, - label="Preview Preset"), - EnumDef("antialiasingQuality", - anti_aliasing_enum, - default=self.anti_aliasing, - label="Anti-aliasing Quality"), - BoolDef("vpTexture", - label="Viewport Texture", - default=self.vp_texture) - ] - - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attributes - attrs = super().get_pre_create_attr_defs() - return attrs + self.get_instance_attr_defs() diff --git a/server_addon/max/client/ayon_max/plugins/create/create_tycache.py b/server_addon/max/client/ayon_max/plugins/create/create_tycache.py deleted file mode 100644 index cbdd94e272..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_tycache.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating TyCache.""" -from ayon_max.api import plugin - - -class CreateTyCache(plugin.MaxCreator): - """Creator plugin for TyCache.""" - identifier = "io.openpype.creators.max.tycache" - label = "TyCache" - product_type = "tycache" - icon = "gear" - - settings_category = "max" diff 
--git a/server_addon/max/client/ayon_max/plugins/create/create_workfile.py b/server_addon/max/client/ayon_max/plugins/create/create_workfile.py deleted file mode 100644 index 35c41f0fcc..0000000000 --- a/server_addon/max/client/ayon_max/plugins/create/create_workfile.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating workfiles.""" -import ayon_api - -from ayon_core.pipeline import CreatedInstance, AutoCreator -from ayon_max.api import plugin -from ayon_max.api.lib import read, imprint -from pymxs import runtime as rt - - -class CreateWorkfile(plugin.MaxCreatorBase, AutoCreator): - """Workfile auto-creator.""" - identifier = "io.ayon.creators.max.workfile" - label = "Workfile" - product_type = "workfile" - icon = "fa5.file" - - default_variant = "Main" - - settings_category = "max" - - def create(self): - variant = self.default_variant - current_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), None) - project_name = self.project_name - folder_path = self.create_context.get_current_folder_path() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - if current_instance is None: - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": variant - } - - data.update( - self.get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - current_instance) - ) - self.log.info("Auto-creating workfile instance...") - instance_node = self.create_node(product_name) - data["instance_node"] = instance_node.name - current_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - self._add_instance_to_context(current_instance) - imprint(instance_node.name, current_instance.data) - elif ( - current_instance["folderPath"] != folder_path - or current_instance["task"] != task_name - ): - # Update instance context if is not the same - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - - current_instance["folderPath"] = folder_entity["path"] - current_instance["task"] = task_name - current_instance["productName"] = product_name - - def collect_instances(self): - self.cache_instance_data(self.collection_shared_data) - cached_instances = self.collection_shared_data["max_cached_instances"] - for instance in cached_instances.get(self.identifier, []): - if not rt.getNodeByName(instance): - continue - created_instance = CreatedInstance.from_existing( - read(rt.GetNodeByName(instance)), self - ) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, _ in update_list: - instance_node = created_inst.get("instance_node") - imprint( - instance_node, - created_inst.data_to_store() - ) - - def create_node(self, product_name): - if rt.getNodeByName(product_name): - node = rt.getNodeByName(product_name) - return node - node = rt.Container(name=product_name) - node.isHidden = True - return node diff --git 
a/server_addon/max/client/ayon_max/plugins/load/load_camera_fbx.py b/server_addon/max/client/ayon_max/plugins/load/load_camera_fbx.py deleted file mode 100644 index 81ea15d52a..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_camera_fbx.py +++ /dev/null @@ -1,101 +0,0 @@ -import os - -from ayon_max.api import lib -from ayon_max.api.lib import ( - unique_namespace, - get_namespace, - object_transform_set -) -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) -from ayon_core.pipeline import get_representation_path, load - - -class FbxLoader(load.LoaderPlugin): - """Fbx Loader.""" - - product_types = {"camera"} - representations = {"fbx"} - order = -9 - icon = "code-fork" - color = "white" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - filepath = self.filepath_from_context(context) - filepath = os.path.normpath(filepath) - rt.FBXImporterSetParam("Animation", True) - rt.FBXImporterSetParam("Camera", True) - rt.FBXImporterSetParam("AxisConversionMethod", True) - rt.FBXImporterSetParam("Mode", rt.Name("create")) - rt.FBXImporterSetParam("Preserveinstances", True) - rt.ImportFile( - filepath, - rt.name("noPrompt"), - using=rt.FBXIMP) - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - selections = rt.GetCurrentSelection() - - for selection in selections: - selection.name = f"{namespace}:{selection.name}" - - return containerise( - name, selections, context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node_name = container["instance_node"] - node = rt.getNodeByName(node_name) - namespace, _ = get_namespace(node_name) - - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - prev_fbx_objects = rt.GetCurrentSelection() - transform_data = object_transform_set(prev_fbx_objects) - for prev_fbx_obj in prev_fbx_objects: - if rt.isValidNode(prev_fbx_obj): - rt.Delete(prev_fbx_obj) - - rt.FBXImporterSetParam("Animation", True) - rt.FBXImporterSetParam("Camera", True) - rt.FBXImporterSetParam("Mode", rt.Name("merge")) - rt.FBXImporterSetParam("AxisConversionMethod", True) - rt.FBXImporterSetParam("Preserveinstances", True) - rt.ImportFile( - path, rt.name("noPrompt"), using=rt.FBXIMP) - current_fbx_objects = rt.GetCurrentSelection() - fbx_objects = [] - for fbx_object in current_fbx_objects: - fbx_object.name = f"{namespace}:{fbx_object.name}" - fbx_objects.append(fbx_object) - fbx_transform = f"{fbx_object.name}.transform" - if fbx_transform in transform_data.keys(): - fbx_object.pos = transform_data[fbx_transform] or 0 - fbx_object.scale = transform_data[ - f"{fbx_object.name}.scale"] or 0 - - update_custom_attribute_data(node, fbx_objects) - lib.imprint(container["instance_node"], { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_max_scene.py b/server_addon/max/client/ayon_max/plugins/load/load_max_scene.py deleted file mode 100644 index 7fca69b193..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_max_scene.py +++ /dev/null @@ -1,178 +0,0 @@ 
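# The module below (MaxSceneLoader) merges .max files via rt.MergeMaxFile; on
# update/switch it shows MaterialDupOptionsWindow so the artist can choose how
# duplicate materials are handled, while headless sessions appear to skip the
# dialog and fall back to the "promptMtlDups" default.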
-import os -from qtpy import QtWidgets, QtCore -from ayon_core.lib.attribute_definitions import EnumDef -from ayon_max.api import lib -from ayon_max.api.lib import ( - unique_namespace, - get_namespace, - object_transform_set, - is_headless -) -from ayon_max.api.pipeline import ( - containerise, get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) -from ayon_core.pipeline import get_representation_path, load - - -class MaterialDupOptionsWindow(QtWidgets.QDialog): - """The pop-up dialog allows users to choose material - duplicate options for importing Max objects when updating - or switching assets. - """ - def __init__(self, material_options): - super(MaterialDupOptionsWindow, self).__init__() - self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) - - self.material_option = None - self.material_options = material_options - - self.widgets = { - "label": QtWidgets.QLabel( - "Select material duplicate options before loading the max scene."), - "material_options_list": QtWidgets.QListWidget(), - "warning": QtWidgets.QLabel("No material options selected!"), - "buttons": QtWidgets.QWidget(), - "okButton": QtWidgets.QPushButton("Ok"), - "cancelButton": QtWidgets.QPushButton("Cancel") - } - for key, value in material_options.items(): - item = QtWidgets.QListWidgetItem(value) - self.widgets["material_options_list"].addItem(item) - item.setData(QtCore.Qt.UserRole, key) - # Build buttons. - layout = QtWidgets.QHBoxLayout(self.widgets["buttons"]) - layout.addWidget(self.widgets["okButton"]) - layout.addWidget(self.widgets["cancelButton"]) - # Build layout. - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(self.widgets["label"]) - layout.addWidget(self.widgets["material_options_list"]) - layout.addWidget(self.widgets["buttons"]) - - self.widgets["okButton"].pressed.connect(self.on_ok_pressed) - self.widgets["cancelButton"].pressed.connect(self.on_cancel_pressed) - self.widgets["material_options_list"].itemPressed.connect( - self.on_material_options_pressed) - - def on_material_options_pressed(self, item): - self.material_option = item.data(QtCore.Qt.UserRole) - - def on_ok_pressed(self): - if self.material_option is None: - self.widgets["warning"].setVisible(True) - return - self.close() - - def on_cancel_pressed(self): - self.material_option = "promptMtlDups" - self.close() - -class MaxSceneLoader(load.LoaderPlugin): - """Max Scene Loader.""" - - product_types = { - "camera", - "maxScene", - "model", - } - - representations = {"max"} - order = -8 - icon = "code-fork" - color = "green" - mtl_dup_default = "promptMtlDups" - mtl_dup_enum_dict = { - "promptMtlDups": "Prompt on Duplicate Materials", - "useMergedMtlDups": "Use Incoming Material", - "useSceneMtlDups": "Use Scene Material", - "renameMtlDups": "Merge and Rename Incoming Material" - } - @classmethod - def get_options(cls, contexts): - return [ - EnumDef("mtldup", - items=cls.mtl_dup_enum_dict, - default=cls.mtl_dup_default, - label="Material Duplicate Options") - ] - - def load(self, context, name=None, namespace=None, options=None): - from pymxs import runtime as rt - mat_dup_options = options.get("mtldup", self.mtl_dup_default) - path = self.filepath_from_context(context) - path = os.path.normpath(path) - # import the max scene by using "merge file" - path = path.replace('\\', '/') - rt.MergeMaxFile(path, rt.Name(mat_dup_options), - quiet=True, includeFullGroup=True) - max_objects = rt.getLastMergedNodes() - max_object_names = [obj.name for obj in max_objects] - # implement the 
OP/AYON custom attributes before load - max_container = [] - namespace = unique_namespace( - name + "_", - suffix="_", - ) - for max_obj, obj_name in zip(max_objects, max_object_names): - max_obj.name = f"{namespace}:{obj_name}" - max_container.append(max_obj) - return containerise( - name, max_container, context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node_name = container["instance_node"] - node = rt.getNodeByName(node_name) - namespace, _ = get_namespace(node_name) - # delete the old container with attribute - # delete old duplicate - # use the modifier OP data to delete the data - node_list = get_previous_loaded_object(node) - rt.select(node_list) - prev_max_objects = rt.GetCurrentSelection() - transform_data = object_transform_set(prev_max_objects) - - for prev_max_obj in prev_max_objects: - if rt.isValidNode(prev_max_obj): # noqa - rt.Delete(prev_max_obj) - material_option = self.mtl_dup_default - if not is_headless(): - window = MaterialDupOptionsWindow(self.mtl_dup_enum_dict) - window.exec_() - material_option = window.material_option - rt.MergeMaxFile(path, rt.Name(material_option), quiet=True) - - current_max_objects = rt.getLastMergedNodes() - - current_max_object_names = [obj.name for obj - in current_max_objects] - - max_objects = [] - for max_obj, obj_name in zip(current_max_objects, - current_max_object_names): - max_obj.name = f"{namespace}:{obj_name}" - max_objects.append(max_obj) - max_transform = f"{max_obj}.transform" - if max_transform in transform_data.keys(): - max_obj.pos = transform_data[max_transform] or 0 - max_obj.scale = transform_data[ - f"{max_obj}.scale"] or 0 - - update_custom_attribute_data(node, max_objects) - lib.imprint(container["instance_node"], { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_model.py b/server_addon/max/client/ayon_max/plugins/load/load_model.py deleted file mode 100644 index 2a6bc45c18..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_model.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -from ayon_core.pipeline import load, get_representation_path -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - remove_container_data -) -from ayon_max.api import lib -from ayon_max.api.lib import ( - maintained_selection, unique_namespace -) - - -class ModelAbcLoader(load.LoaderPlugin): - """Loading model with the Alembic loader.""" - - product_types = {"model"} - label = "Load Model with Alembic" - representations = {"abc"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - - file_path = os.path.normpath(self.filepath_from_context(context)) - - abc_before = { - c - for c in rt.rootNode.Children - if rt.classOf(c) == rt.AlembicContainer - } - - rt.AlembicImport.ImportToRoot = False - rt.AlembicImport.CustomAttributes = True - rt.AlembicImport.UVs = True - rt.AlembicImport.VertexColors = True - rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport) - - abc_after = { - c - for c in rt.rootNode.Children - if rt.classOf(c) == 
rt.AlembicContainer - } - - # This should yield new AlembicContainer node - abc_containers = abc_after.difference(abc_before) - - if len(abc_containers) != 1: - self.log.error("Something failed when loading.") - - abc_container = abc_containers.pop() - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - abc_objects = [] - for abc_object in abc_container.Children: - abc_object.name = f"{namespace}:{abc_object.name}" - abc_objects.append(abc_object) - # rename the abc container with namespace - abc_container_name = f"{namespace}:{name}" - abc_container.name = abc_container_name - abc_objects.append(abc_container) - - return containerise( - name, abc_objects, context, - namespace, loader=self.__class__.__name__ - ) - - def update(self, container, context): - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node = rt.GetNodeByName(container["instance_node"]) - node_list = [n for n in get_previous_loaded_object(node) - if rt.ClassOf(n) == rt.AlembicContainer] - with maintained_selection(): - rt.Select(node_list) - - for alembic in rt.Selection: - abc = rt.GetNodeByName(alembic.name) - rt.Select(abc.Children) - for abc_con in abc.Children: - abc_con.source = path - rt.Select(abc_con.Children) - for abc_obj in abc_con.Children: - abc_obj.source = path - lib.imprint( - container["instance_node"], - {"representation": repre_entity["id"]}, - ) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) - - - @staticmethod - def get_container_children(parent, type_name): - from pymxs import runtime as rt - - def list_children(node): - children = [] - for c in node.Children: - children.append(c) - children += list_children(c) - return children - - filtered = [] - for child in list_children(parent): - class_type = str(rt.ClassOf(child.baseObject)) - if class_type == type_name: - filtered.append(child) - - return filtered diff --git a/server_addon/max/client/ayon_max/plugins/load/load_model_fbx.py b/server_addon/max/client/ayon_max/plugins/load/load_model_fbx.py deleted file mode 100644 index 2775e1b453..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_model_fbx.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -from ayon_core.pipeline import load, get_representation_path -from ayon_max.api.pipeline import ( - containerise, get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) -from ayon_max.api import lib -from ayon_max.api.lib import ( - unique_namespace, - get_namespace, - object_transform_set -) -from ayon_max.api.lib import maintained_selection - - -class FbxModelLoader(load.LoaderPlugin): - """Fbx Model Loader.""" - - product_types = {"model"} - representations = {"fbx"} - order = -9 - icon = "code-fork" - color = "white" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - filepath = self.filepath_from_context(context) - filepath = os.path.normpath(filepath) - rt.FBXImporterSetParam("Animation", False) - rt.FBXImporterSetParam("Cameras", False) - rt.FBXImporterSetParam("Mode", rt.Name("create")) - rt.FBXImporterSetParam("Preserveinstances", True) - rt.importFile( - filepath, rt.name("noPrompt"), using=rt.FBXIMP) - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - selections = rt.GetCurrentSelection() - - for selection in selections: 
- selection.name = f"{namespace}:{selection.name}" - - return containerise( - name, selections, context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node_name = container["instance_node"] - node = rt.getNodeByName(node_name) - if not node: - rt.Container(name=node_name) - namespace, _ = get_namespace(node_name) - - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - prev_fbx_objects = rt.GetCurrentSelection() - transform_data = object_transform_set(prev_fbx_objects) - for prev_fbx_obj in prev_fbx_objects: - if rt.isValidNode(prev_fbx_obj): - rt.Delete(prev_fbx_obj) - - rt.FBXImporterSetParam("Animation", False) - rt.FBXImporterSetParam("Cameras", False) - rt.FBXImporterSetParam("Mode", rt.Name("create")) - rt.FBXImporterSetParam("Preserveinstances", True) - rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP) - current_fbx_objects = rt.GetCurrentSelection() - fbx_objects = [] - for fbx_object in current_fbx_objects: - fbx_object.name = f"{namespace}:{fbx_object.name}" - fbx_objects.append(fbx_object) - fbx_transform = f"{fbx_object}.transform" - if fbx_transform in transform_data.keys(): - fbx_object.pos = transform_data[fbx_transform] or 0 - fbx_object.scale = transform_data[ - f"{fbx_object}.scale"] or 0 - - with maintained_selection(): - rt.Select(node) - update_custom_attribute_data(node, fbx_objects) - lib.imprint(container["instance_node"], { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_model_obj.py b/server_addon/max/client/ayon_max/plugins/load/load_model_obj.py deleted file mode 100644 index d38aadb5bc..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_model_obj.py +++ /dev/null @@ -1,89 +0,0 @@ -import os - -from ayon_max.api import lib -from ayon_max.api.lib import ( - unique_namespace, - get_namespace, - maintained_selection, - object_transform_set -) -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) -from ayon_core.pipeline import get_representation_path, load - - -class ObjLoader(load.LoaderPlugin): - """Obj Loader.""" - - product_types = {"model"} - representations = {"obj"} - order = -9 - icon = "code-fork" - color = "white" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - - filepath = os.path.normpath(self.filepath_from_context(context)) - self.log.debug("Executing command to import..") - - rt.Execute(f'importFile @"{filepath}" #noPrompt using:ObjImp') - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - # create "missing" container for obj import - selections = rt.GetCurrentSelection() - # get current selection - for selection in selections: - selection.name = f"{namespace}:{selection.name}" - return containerise( - name, selections, context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node_name = container["instance_node"] - node = 
rt.getNodeByName(node_name) - namespace, _ = get_namespace(node_name) - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - previous_objects = rt.GetCurrentSelection() - transform_data = object_transform_set(previous_objects) - for prev_obj in previous_objects: - if rt.isValidNode(prev_obj): - rt.Delete(prev_obj) - - rt.Execute(f'importFile @"{path}" #noPrompt using:ObjImp') - # get current selection - selections = rt.GetCurrentSelection() - for selection in selections: - selection.name = f"{namespace}:{selection.name}" - selection_transform = f"{selection}.transform" - if selection_transform in transform_data.keys(): - selection.pos = transform_data[selection_transform] or 0 - selection.scale = transform_data[ - f"{selection}.scale"] or 0 - update_custom_attribute_data(node, selections) - with maintained_selection(): - rt.Select(node) - - lib.imprint(node_name, { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_model_usd.py b/server_addon/max/client/ayon_max/plugins/load/load_model_usd.py deleted file mode 100644 index f4dd41d5db..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_model_usd.py +++ /dev/null @@ -1,120 +0,0 @@ -import os - -from pymxs import runtime as rt -from ayon_core.pipeline.load import LoadError -from ayon_max.api import lib -from ayon_max.api.lib import ( - unique_namespace, - get_namespace, - object_transform_set, - get_plugins -) -from ayon_max.api.lib import maintained_selection -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) -from ayon_core.pipeline import get_representation_path, load - - -class ModelUSDLoader(load.LoaderPlugin): - """Loading model with the USD loader.""" - - product_types = {"model"} - label = "Load Model(USD)" - representations = {"usda"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - # asset_filepath - plugin_info = get_plugins() - if "usdimport.dli" not in plugin_info: - raise LoadError("No USDImporter loaded/installed in Max..") - filepath = os.path.normpath(self.filepath_from_context(context)) - import_options = rt.USDImporter.CreateOptions() - base_filename = os.path.basename(filepath) - _, ext = os.path.splitext(base_filename) - log_filepath = filepath.replace(ext, "txt") - - rt.LogPath = log_filepath - rt.LogLevel = rt.Name("info") - rt.USDImporter.importFile(filepath, - importOptions=import_options) - namespace = unique_namespace( - name + "_", - suffix="_", - ) - asset = rt.GetNodeByName(name) - usd_objects = [] - - for usd_asset in asset.Children: - usd_asset.name = f"{namespace}:{usd_asset.name}" - usd_objects.append(usd_asset) - - asset_name = f"{namespace}:{name}" - asset.name = asset_name - # need to get the correct container after renamed - asset = rt.GetNodeByName(asset_name) - usd_objects.append(asset) - - return containerise( - name, usd_objects, context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node_name = container["instance_node"] - node = rt.GetNodeByName(node_name) - namespace, name = 
get_namespace(node_name) - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - prev_objects = [sel for sel in rt.GetCurrentSelection() - if sel != rt.Container - and sel.name != node_name] - transform_data = object_transform_set(prev_objects) - for n in prev_objects: - rt.Delete(n) - - import_options = rt.USDImporter.CreateOptions() - base_filename = os.path.basename(path) - _, ext = os.path.splitext(base_filename) - log_filepath = path.replace(ext, "txt") - - rt.LogPath = log_filepath - rt.LogLevel = rt.Name("info") - rt.USDImporter.importFile( - path, importOptions=import_options) - - asset = rt.GetNodeByName(name) - usd_objects = [] - for children in asset.Children: - children.name = f"{namespace}:{children.name}" - usd_objects.append(children) - children_transform = f"{children}.transform" - if children_transform in transform_data.keys(): - children.pos = transform_data[children_transform] or 0 - children.scale = transform_data[ - f"{children}.scale"] or 0 - - asset.name = f"{namespace}:{asset.name}" - usd_objects.append(asset) - update_custom_attribute_data(node, usd_objects) - with maintained_selection(): - rt.Select(node) - - lib.imprint(node_name, { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_pointcache.py b/server_addon/max/client/ayon_max/plugins/load/load_pointcache.py deleted file mode 100644 index 87ea5c75bc..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_pointcache.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -"""Simple alembic loader for 3dsmax. - -Because of limited api, alembics can be only loaded, but not easily updated. 
- -""" -import os -from ayon_core.pipeline import load, get_representation_path -from ayon_max.api import lib, maintained_selection -from ayon_max.api.lib import unique_namespace, reset_frame_range -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - remove_container_data -) - - -class AbcLoader(load.LoaderPlugin): - """Alembic loader.""" - - product_types = {"camera", "animation", "pointcache"} - label = "Load Alembic" - representations = {"abc"} - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - - abc_before = { - c - for c in rt.rootNode.Children - if rt.classOf(c) == rt.AlembicContainer - } - - rt.AlembicImport.ImportToRoot = False - # TODO: it will be removed after the improvement - # on the post-system setup - reset_frame_range() - rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport) - - abc_after = { - c - for c in rt.rootNode.Children - if rt.classOf(c) == rt.AlembicContainer - } - - # This should yield new AlembicContainer node - abc_containers = abc_after.difference(abc_before) - - if len(abc_containers) != 1: - self.log.error("Something failed when loading.") - - abc_container = abc_containers.pop() - selections = rt.GetCurrentSelection() - for abc in selections: - for cam_shape in abc.Children: - cam_shape.playbackType = 0 - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - abc_objects = [] - for abc_object in abc_container.Children: - abc_object.name = f"{namespace}:{abc_object.name}" - abc_objects.append(abc_object) - # rename the abc container with namespace - abc_container_name = f"{namespace}:{name}" - abc_container.name = abc_container_name - abc_objects.append(abc_container) - - return containerise( - name, abc_objects, context, - namespace, loader=self.__class__.__name__ - ) - - def update(self, container, context): - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node = rt.GetNodeByName(container["instance_node"]) - abc_container = [n for n in get_previous_loaded_object(node) - if rt.ClassOf(n) == rt.AlembicContainer] - with maintained_selection(): - rt.Select(abc_container) - - for alembic in rt.Selection: - abc = rt.GetNodeByName(alembic.name) - rt.Select(abc.Children) - for abc_con in abc.Children: - abc_con.source = path - rt.Select(abc_con.Children) - for abc_obj in abc_con.Children: - abc_obj.source = path - lib.imprint( - container["instance_node"], - {"representation": repre_entity["id"]}, - ) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) - - - @staticmethod - def get_container_children(parent, type_name): - from pymxs import runtime as rt - - def list_children(node): - children = [] - for c in node.Children: - children.append(c) - children += list_children(c) - return children - - filtered = [] - for child in list_children(parent): - class_type = str(rt.classOf(child.baseObject)) - if class_type == type_name: - filtered.append(child) - - return filtered diff --git a/server_addon/max/client/ayon_max/plugins/load/load_pointcache_ornatrix.py b/server_addon/max/client/ayon_max/plugins/load/load_pointcache_ornatrix.py deleted file mode 100644 index 
bc997951c1..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_pointcache_ornatrix.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from ayon_core.pipeline import load, get_representation_path -from ayon_core.pipeline.load import LoadError -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) - -from ayon_max.api.lib import ( - unique_namespace, - get_namespace, - object_transform_set, - get_plugins -) -from ayon_max.api import lib -from pymxs import runtime as rt - - -class OxAbcLoader(load.LoaderPlugin): - """Ornatrix Alembic loader.""" - - product_types = {"camera", "animation", "pointcache"} - label = "Load Alembic with Ornatrix" - representations = {"abc"} - order = -10 - icon = "code-fork" - color = "orange" - postfix = "param" - - def load(self, context, name=None, namespace=None, data=None): - plugin_list = get_plugins() - if "ephere.plugins.autodesk.max.ornatrix.dlo" not in plugin_list: - raise LoadError("Ornatrix plugin not " - "found/installed in Max yet..") - - file_path = os.path.normpath(self.filepath_from_context(context)) - rt.AlembicImport.ImportToRoot = True - rt.AlembicImport.CustomAttributes = True - rt.importFile( - file_path, rt.name("noPrompt"), - using=rt.Ornatrix_Alembic_Importer) - - scene_object = [] - for obj in rt.rootNode.Children: - obj_type = rt.ClassOf(obj) - if str(obj_type).startswith("Ox_"): - scene_object.append(obj) - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - abc_container = [] - for abc in scene_object: - abc.name = f"{namespace}:{abc.name}" - abc_container.append(abc) - - return containerise( - name, abc_container, context, - namespace, loader=self.__class__.__name__ - ) - - def update(self, container, context): - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node_name = container["instance_node"] - namespace, name = get_namespace(node_name) - node = rt.getNodeByName(node_name) - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - selections = rt.getCurrentSelection() - transform_data = object_transform_set(selections) - for prev_obj in selections: - if rt.isValidNode(prev_obj): - rt.Delete(prev_obj) - - rt.AlembicImport.ImportToRoot = False - rt.AlembicImport.CustomAttributes = True - rt.importFile( - path, rt.name("noPrompt"), - using=rt.Ornatrix_Alembic_Importer) - - scene_object = [] - for obj in rt.rootNode.Children: - obj_type = rt.ClassOf(obj) - if str(obj_type).startswith("Ox_"): - scene_object.append(obj) - ox_abc_objects = [] - for abc in scene_object: - abc.Parent = container - abc.name = f"{namespace}:{abc.name}" - ox_abc_objects.append(abc) - ox_transform = f"{abc}.transform" - if ox_transform in transform_data.keys(): - abc.pos = transform_data[ox_transform] or 0 - abc.scale = transform_data[f"{abc}.scale"] or 0 - update_custom_attribute_data(node, ox_abc_objects) - lib.imprint( - container["instance_node"], - {"representation": repre_entity["id"]}, - ) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_pointcloud.py b/server_addon/max/client/ayon_max/plugins/load/load_pointcloud.py deleted file mode 100644 index 0fb506d5bd..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_pointcloud.py +++ 
/dev/null @@ -1,69 +0,0 @@ -import os - -from ayon_max.api import lib, maintained_selection -from ayon_max.api.lib import ( - unique_namespace, - -) -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) -from ayon_core.pipeline import get_representation_path, load - - -class PointCloudLoader(load.LoaderPlugin): - """Point Cloud Loader.""" - - product_types = {"pointcloud"} - representations = {"prt"} - order = -8 - icon = "code-fork" - color = "green" - postfix = "param" - - def load(self, context, name=None, namespace=None, data=None): - """load point cloud by tyCache""" - from pymxs import runtime as rt - filepath = os.path.normpath(self.filepath_from_context(context)) - obj = rt.tyCache() - obj.filename = filepath - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - obj.name = f"{namespace}:{obj.name}" - - return containerise( - name, [obj], context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - """update the container""" - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node = rt.GetNodeByName(container["instance_node"]) - node_list = get_previous_loaded_object(node) - update_custom_attribute_data( - node, node_list) - with maintained_selection(): - rt.Select(node_list) - for prt in rt.Selection: - prt.filename = path - lib.imprint(container["instance_node"], { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - """remove the container""" - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_redshift_proxy.py b/server_addon/max/client/ayon_max/plugins/load/load_redshift_proxy.py deleted file mode 100644 index 3fd84b7538..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_redshift_proxy.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -import clique - -from ayon_core.pipeline import ( - load, - get_representation_path -) -from ayon_core.pipeline.load import LoadError -from ayon_max.api.pipeline import ( - containerise, - update_custom_attribute_data, - get_previous_loaded_object, - remove_container_data -) -from ayon_max.api import lib -from ayon_max.api.lib import ( - unique_namespace, - get_plugins -) - - -class RedshiftProxyLoader(load.LoaderPlugin): - """Load rs files with Redshift Proxy""" - - label = "Load Redshift Proxy" - product_types = {"redshiftproxy"} - representations = {"rs"} - order = -9 - icon = "code-fork" - color = "white" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - plugin_info = get_plugins() - if "redshift4max.dlr" not in plugin_info: - raise LoadError("Redshift not loaded/installed in Max..") - filepath = self.filepath_from_context(context) - rs_proxy = rt.RedshiftProxy() - rs_proxy.file = filepath - files_in_folder = os.listdir(os.path.dirname(filepath)) - collections, remainder = clique.assemble(files_in_folder) - if collections: - rs_proxy.is_sequence = True - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - rs_proxy.name = f"{namespace}:{rs_proxy.name}" - - return containerise( - name, [rs_proxy], context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - from pymxs import runtime as rt - - 
repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node = rt.getNodeByName(container["instance_node"]) - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - update_custom_attribute_data( - node, rt.Selection) - for proxy in rt.Selection: - proxy.file = path - - lib.imprint(container["instance_node"], { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/load/load_tycache.py b/server_addon/max/client/ayon_max/plugins/load/load_tycache.py deleted file mode 100644 index e087d5599a..0000000000 --- a/server_addon/max/client/ayon_max/plugins/load/load_tycache.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -from ayon_max.api import lib, maintained_selection -from ayon_max.api.lib import ( - unique_namespace, - -) -from ayon_max.api.pipeline import ( - containerise, - get_previous_loaded_object, - update_custom_attribute_data, - remove_container_data -) -from ayon_core.pipeline import get_representation_path, load - - -class TyCacheLoader(load.LoaderPlugin): - """TyCache Loader.""" - - product_types = {"tycache"} - representations = {"tyc"} - order = -8 - icon = "code-fork" - color = "green" - - def load(self, context, name=None, namespace=None, data=None): - """Load tyCache""" - from pymxs import runtime as rt - filepath = os.path.normpath(self.filepath_from_context(context)) - obj = rt.tyCache() - obj.filename = filepath - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - obj.name = f"{namespace}:{obj.name}" - - return containerise( - name, [obj], context, - namespace, loader=self.__class__.__name__) - - def update(self, container, context): - """update the container""" - from pymxs import runtime as rt - - repre_entity = context["representation"] - path = get_representation_path(repre_entity) - node = rt.GetNodeByName(container["instance_node"]) - node_list = get_previous_loaded_object(node) - update_custom_attribute_data(node, node_list) - with maintained_selection(): - for tyc in node_list: - tyc.filename = path - lib.imprint(container["instance_node"], { - "representation": repre_entity["id"] - }) - - def switch(self, container, context): - self.update(container, context) - - def remove(self, container): - """remove the container""" - from pymxs import runtime as rt - node = rt.GetNodeByName(container["instance_node"]) - remove_container_data(node) diff --git a/server_addon/max/client/ayon_max/plugins/publish/collect_current_file.py b/server_addon/max/client/ayon_max/plugins/publish/collect_current_file.py deleted file mode 100644 index 6f8b8dda4b..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/collect_current_file.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import pyblish.api - -from pymxs import runtime as rt - - -class CollectCurrentFile(pyblish.api.ContextPlugin): - """Inject the current working file.""" - - order = pyblish.api.CollectorOrder - 0.5 - label = "Max Current File" - hosts = ['max'] - - def process(self, context): - """Inject the current working file""" - folder = rt.maxFilePath - file = rt.maxFileName - if not folder or not file: - self.log.error("Scene is not saved.") - current_file = os.path.join(folder, file) - - context.data["currentFile"] = current_file - self.log.debug("Scene path: {}".format(current_file)) 
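Illustrative sketch, not part of the diff above: the removed collectors such as collect_current_file.py all follow the standard pyblish ContextPlugin shape shown below. The environment-variable fallback is an assumption so the sketch runs outside 3ds Max; the real plugin reads rt.maxFilePath and rt.maxFileName through pymxs.

import os
import pyblish.api


class CollectSceneFileSketch(pyblish.api.ContextPlugin):
    """Minimal stand-in for the deleted 'Max Current File' collector."""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Collect Scene File (sketch)"

    def process(self, context):
        # The deleted plugin queries rt.maxFilePath / rt.maxFileName via pymxs;
        # environment variables stand in here so the example is host-agnostic.
        folder = os.environ.get("SCENE_DIR", "")
        file = os.environ.get("SCENE_FILE", "")
        if not folder or not file:
            self.log.error("Scene is not saved.")
        context.data["currentFile"] = os.path.join(folder, file)

Registered with pyblish.api.register_plugin(CollectSceneFileSketch), it would run during the collection step, exactly like the collectors being removed here.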
diff --git a/server_addon/max/client/ayon_max/plugins/publish/collect_frame_range.py b/server_addon/max/client/ayon_max/plugins/publish/collect_frame_range.py deleted file mode 100644 index 6fc8de90d1..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/collect_frame_range.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from pymxs import runtime as rt - - -class CollectFrameRange(pyblish.api.InstancePlugin): - """Collect Frame Range.""" - - order = pyblish.api.CollectorOrder + 0.01 - label = "Collect Frame Range" - hosts = ['max'] - families = ["camera", "maxrender", - "pointcache", "pointcloud", - "review", "redshiftproxy"] - - def process(self, instance): - if instance.data["productType"] == "maxrender": - instance.data["frameStartHandle"] = int(rt.rendStart) - instance.data["frameEndHandle"] = int(rt.rendEnd) - else: - instance.data["frameStartHandle"] = int(rt.animationRange.start) - instance.data["frameEndHandle"] = int(rt.animationRange.end) diff --git a/server_addon/max/client/ayon_max/plugins/publish/collect_members.py b/server_addon/max/client/ayon_max/plugins/publish/collect_members.py deleted file mode 100644 index 010b3cd3e1..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/collect_members.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect instance members.""" -import pyblish.api -from pymxs import runtime as rt - - -class CollectMembers(pyblish.api.InstancePlugin): - """Collect Set Members.""" - - order = pyblish.api.CollectorOrder + 0.01 - label = "Collect Instance Members" - hosts = ['max'] - - def process(self, instance): - if instance.data["productType"] == "workfile": - self.log.debug( - "Skipping Collecting Members for workfile product type." - ) - return - if instance.data.get("instance_node"): - container = rt.GetNodeByName(instance.data["instance_node"]) - instance.data["members"] = [ - member.node for member - in container.modifiers[0].openPypeData.all_handles - ] - self.log.debug("{}".format(instance.data["members"])) diff --git a/server_addon/max/client/ayon_max/plugins/publish/collect_render.py b/server_addon/max/client/ayon_max/plugins/publish/collect_render.py deleted file mode 100644 index a5e8d65df2..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/collect_render.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect Render""" -import os -import pyblish.api - -from pymxs import runtime as rt -from ayon_core.pipeline.publish import KnownPublishError -from ayon_max.api import colorspace -from ayon_max.api.lib import get_max_version, get_current_renderer -from ayon_max.api.lib_rendersettings import RenderSettings -from ayon_max.api.lib_renderproducts import RenderProducts - - -class CollectRender(pyblish.api.InstancePlugin): - """Collect Render for Deadline""" - - order = pyblish.api.CollectorOrder + 0.02 - label = "Collect 3dsmax Render Layers" - hosts = ['max'] - families = ["maxrender"] - - def process(self, instance): - context = instance.context - folder = rt.maxFilePath - file = rt.maxFileName - current_file = os.path.join(folder, file) - filepath = current_file.replace("\\", "/") - context.data['currentFile'] = current_file - - files_by_aov = RenderProducts().get_beauty(instance.name) - aovs = RenderProducts().get_aovs(instance.name) - files_by_aov.update(aovs) - - camera = rt.viewport.GetCamera() - if instance.data.get("members"): - camera_list = [member for member in instance.data["members"] - if rt.ClassOf(member) == rt.Camera.Classes] - 
if camera_list: - camera = camera_list[-1] - - instance.data["cameras"] = [camera.name] if camera else None # noqa - - if instance.data.get("multiCamera"): - cameras = instance.data.get("members") - if not cameras: - raise KnownPublishError("There should be at least" - " one renderable camera in container") - sel_cam = [ - c.name for c in cameras - if rt.classOf(c) in rt.Camera.classes] - container_name = instance.data.get("instance_node") - render_dir = os.path.dirname(rt.rendOutputFilename) - outputs = RenderSettings().batch_render_layer( - container_name, render_dir, sel_cam - ) - - instance.data["cameras"] = sel_cam - - files_by_aov = RenderProducts().get_multiple_beauty( - outputs, sel_cam) - aovs = RenderProducts().get_multiple_aovs( - outputs, sel_cam) - files_by_aov.update(aovs) - - if "expectedFiles" not in instance.data: - instance.data["expectedFiles"] = list() - instance.data["files"] = list() - instance.data["expectedFiles"].append(files_by_aov) - instance.data["files"].append(files_by_aov) - - img_format = RenderProducts().image_format() - # OCIO config not support in - # most of the 3dsmax renderers - # so this is currently hard coded - # TODO: add options for redshift/vray ocio config - instance.data["colorspaceConfig"] = "" - instance.data["colorspaceDisplay"] = "sRGB" - instance.data["colorspaceView"] = "ACES 1.0 SDR-video" - - if int(get_max_version()) >= 2024: - colorspace_mgr = rt.ColorPipelineMgr # noqa - display = next( - (display for display in colorspace_mgr.GetDisplayList())) - view_transform = next( - (view for view in colorspace_mgr.GetViewList(display))) - instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath - instance.data["colorspaceDisplay"] = display - instance.data["colorspaceView"] = view_transform - - instance.data["renderProducts"] = colorspace.ARenderProduct() - instance.data["publishJobState"] = "Suspended" - instance.data["attachTo"] = [] - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - product_type = "maxrender" - # also need to get the render dir for conversion - data = { - "folderPath": instance.data["folderPath"], - "productName": str(instance.name), - "publish": True, - "maxversion": str(get_max_version()), - "imageFormat": img_format, - "productType": product_type, - "family": product_type, - "families": [product_type], - "renderer": renderer, - "source": filepath, - "plugin": "3dsmax", - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"], - "farm": True - } - instance.data.update(data) - - # TODO: this should be unified with maya and its "multipart" flag - # on instance. 
- if renderer == "Redshift_Renderer": - instance.data.update( - {"separateAovFiles": rt.Execute( - "renderers.current.separateAovFiles")}) - - self.log.info("data: {0}".format(data)) diff --git a/server_addon/max/client/ayon_max/plugins/publish/collect_review.py b/server_addon/max/client/ayon_max/plugins/publish/collect_review.py deleted file mode 100644 index 321aa7439c..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/collect_review.py +++ /dev/null @@ -1,153 +0,0 @@ -# dont forget getting the focal length for burnin -"""Collect Review""" -import pyblish.api - -from pymxs import runtime as rt -from ayon_core.lib import BoolDef -from ayon_max.api.lib import get_max_version -from ayon_core.pipeline.publish import ( - AYONPyblishPluginMixin, - KnownPublishError -) - - -class CollectReview(pyblish.api.InstancePlugin, - AYONPyblishPluginMixin): - """Collect Review Data for Preview Animation""" - - order = pyblish.api.CollectorOrder + 0.02 - label = "Collect Review Data" - hosts = ['max'] - families = ["review"] - - def process(self, instance): - nodes = instance.data["members"] - - def is_camera(node): - is_camera_class = rt.classOf(node) in rt.Camera.classes - return is_camera_class and rt.isProperty(node, "fov") - - # Use first camera in instance - cameras = [node for node in nodes if is_camera(node)] - if cameras: - if len(cameras) > 1: - self.log.warning( - "Found more than one camera in instance, using first " - f"one found: {cameras[0]}" - ) - camera = cameras[0] - camera_name = camera.name - focal_length = camera.fov - else: - raise KnownPublishError( - "Unable to find a valid camera in 'Review' container." - " Only native max Camera supported. " - f"Found objects: {nodes}" - ) - creator_attrs = instance.data["creator_attributes"] - attr_values = self.get_attr_values_from_data(instance.data) - - general_preview_data = { - "review_camera": camera_name, - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"], - "percentSize": creator_attrs["percentSize"], - "imageFormat": creator_attrs["imageFormat"], - "keepImages": creator_attrs["keepImages"], - "fps": instance.context.data["fps"], - "review_width": creator_attrs["review_width"], - "review_height": creator_attrs["review_height"], - } - - if int(get_max_version()) >= 2024: - colorspace_mgr = rt.ColorPipelineMgr # noqa - display = next( - (display for display in colorspace_mgr.GetDisplayList())) - view_transform = next( - (view for view in colorspace_mgr.GetViewList(display))) - instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath - instance.data["colorspaceDisplay"] = display - instance.data["colorspaceView"] = view_transform - - preview_data = { - "vpStyle": creator_attrs["visualStyleMode"], - "vpPreset": creator_attrs["viewportPreset"], - "vpTextures": creator_attrs["vpTexture"], - "dspGeometry": attr_values.get("dspGeometry"), - "dspShapes": attr_values.get("dspShapes"), - "dspLights": attr_values.get("dspLights"), - "dspCameras": attr_values.get("dspCameras"), - "dspHelpers": attr_values.get("dspHelpers"), - "dspParticles": attr_values.get("dspParticles"), - "dspBones": attr_values.get("dspBones"), - "dspBkg": attr_values.get("dspBkg"), - "dspGrid": attr_values.get("dspGrid"), - "dspSafeFrame": attr_values.get("dspSafeFrame"), - "dspFrameNums": attr_values.get("dspFrameNums") - } - else: - general_viewport = { - "dspBkg": attr_values.get("dspBkg"), - "dspGrid": attr_values.get("dspGrid") - } - nitrous_manager = { - "AntialiasingQuality": 
creator_attrs["antialiasingQuality"], - } - nitrous_viewport = { - "VisualStyleMode": creator_attrs["visualStyleMode"], - "ViewportPreset": creator_attrs["viewportPreset"], - "UseTextureEnabled": creator_attrs["vpTexture"] - } - preview_data = { - "general_viewport": general_viewport, - "nitrous_manager": nitrous_manager, - "nitrous_viewport": nitrous_viewport, - "vp_btn_mgr": {"EnableButtons": False} - } - - # Enable ftrack functionality - instance.data.setdefault("families", []).append('ftrack') - - burnin_members = instance.data.setdefault("burninDataMembers", {}) - burnin_members["focalLength"] = focal_length - - instance.data.update(general_preview_data) - instance.data["viewport_options"] = preview_data - - @classmethod - def get_attribute_defs(cls): - return [ - BoolDef("dspGeometry", - label="Geometry", - default=True), - BoolDef("dspShapes", - label="Shapes", - default=False), - BoolDef("dspLights", - label="Lights", - default=False), - BoolDef("dspCameras", - label="Cameras", - default=False), - BoolDef("dspHelpers", - label="Helpers", - default=False), - BoolDef("dspParticles", - label="Particle Systems", - default=True), - BoolDef("dspBones", - label="Bone Objects", - default=False), - BoolDef("dspBkg", - label="Background", - default=True), - BoolDef("dspGrid", - label="Active Grid", - default=False), - BoolDef("dspSafeFrame", - label="Safe Frames", - default=False), - BoolDef("dspFrameNums", - label="Frame Numbers", - default=False) - ] diff --git a/server_addon/max/client/ayon_max/plugins/publish/collect_tycache_attributes.py b/server_addon/max/client/ayon_max/plugins/publish/collect_tycache_attributes.py deleted file mode 100644 index 4855e952d8..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/collect_tycache_attributes.py +++ /dev/null @@ -1,76 +0,0 @@ -import pyblish.api - -from ayon_core.lib import EnumDef, TextDef -from ayon_core.pipeline.publish import AYONPyblishPluginMixin - - -class CollectTyCacheData(pyblish.api.InstancePlugin, - AYONPyblishPluginMixin): - """Collect Channel Attributes for TyCache Export""" - - order = pyblish.api.CollectorOrder + 0.02 - label = "Collect tyCache attribute Data" - hosts = ['max'] - families = ["tycache"] - - def process(self, instance): - attr_values = self.get_attr_values_from_data(instance.data) - attributes = {} - for attr_key in attr_values.get("tycacheAttributes", []): - attributes[attr_key] = True - - for key in ["tycacheLayer", "tycacheObjectName"]: - attributes[key] = attr_values.get(key, "") - - # Collect the selected channel data before exporting - instance.data["tyc_attrs"] = attributes - self.log.debug( - f"Found tycache attributes: {attributes}" - ) - - @classmethod - def get_attribute_defs(cls): - # TODO: Support the attributes with maxObject array - tyc_attr_enum = ["tycacheChanAge", "tycacheChanGroups", - "tycacheChanPos", "tycacheChanRot", - "tycacheChanScale", "tycacheChanVel", - "tycacheChanSpin", "tycacheChanShape", - "tycacheChanMatID", "tycacheChanMapping", - "tycacheChanMaterials", "tycacheChanCustomFloat" - "tycacheChanCustomVector", "tycacheChanCustomTM", - "tycacheChanPhysX", "tycacheMeshBackup", - "tycacheCreateObject", - "tycacheCreateObjectIfNotCreated", - "tycacheAdditionalCloth", - "tycacheAdditionalSkin", - "tycacheAdditionalSkinID", - "tycacheAdditionalSkinIDValue", - "tycacheAdditionalTerrain", - "tycacheAdditionalVDB", - "tycacheAdditionalSplinePaths", - "tycacheAdditionalGeo", - "tycacheAdditionalGeoActivateModifiers", - "tycacheSplines", - "tycacheSplinesAdditionalSplines" - ] - 
tyc_default_attrs = ["tycacheChanGroups", "tycacheChanPos", - "tycacheChanRot", "tycacheChanScale", - "tycacheChanVel", "tycacheChanShape", - "tycacheChanMatID", "tycacheChanMapping", - "tycacheChanMaterials", - "tycacheCreateObjectIfNotCreated"] - return [ - EnumDef("tycacheAttributes", - tyc_attr_enum, - default=tyc_default_attrs, - multiselection=True, - label="TyCache Attributes"), - TextDef("tycacheLayer", - label="TyCache Layer", - tooltip="Name of tycache layer", - default="$(tyFlowLayer)"), - TextDef("tycacheObjectName", - label="TyCache Object Name", - tooltip="TyCache Object Name", - default="$(tyFlowName)_tyCache") - ] diff --git a/server_addon/max/client/ayon_max/plugins/publish/collect_workfile.py b/server_addon/max/client/ayon_max/plugins/publish/collect_workfile.py deleted file mode 100644 index 6eec0f7292..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect current work file.""" -import os -import pyblish.api - -from pymxs import runtime as rt - - -class CollectWorkfile(pyblish.api.InstancePlugin): - """Inject the current working file into context""" - - order = pyblish.api.CollectorOrder - 0.01 - label = "Collect 3dsmax Workfile" - hosts = ['max'] - families = ["workfile"] - - def process(self, instance): - """Inject the current working file.""" - context = instance.context - folder = rt.maxFilePath - file = rt.maxFileName - if not folder or not file: - self.log.error("Scene is not saved.") - ext = os.path.splitext(file)[-1].lstrip(".") - - data = {} - - data.update({ - "setMembers": context.data["currentFile"], - "frameStart": context.data["frameStart"], - "frameEnd": context.data["frameEnd"], - "handleStart": context.data["handleStart"], - "handleEnd": context.data["handleEnd"] - }) - - data["representations"] = [{ - "name": ext, - "ext": ext, - "files": file, - "stagingDir": folder, - }] - - instance.data.update(data) - self.log.debug("Collected data: {}".format(data)) - self.log.debug("Collected instance: {}".format(file)) - self.log.debug("staging Dir: {}".format(folder)) diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_alembic.py b/server_addon/max/client/ayon_max/plugins/publish/extract_alembic.py deleted file mode 100644 index b0999e5a78..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_alembic.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Export alembic file. - -Note: - Parameters on AlembicExport (AlembicExport.Parameter): - - ParticleAsMesh (bool): Sets whether particle shapes are exported - as meshes. - AnimTimeRange (enum): How animation is saved: - #CurrentFrame: saves current frame - #TimeSlider: saves the active time segments on time slider (default) - #StartEnd: saves a range specified by the Step - StartFrame (int) - EnFrame (int) - ShapeSuffix (bool): When set to true, appends the string "Shape" to the - name of each exported mesh. This property is set to false by default. - SamplesPerFrame (int): Sets the number of animation samples per frame. - Hidden (bool): When true, export hidden geometry. - UVs (bool): When true, export the mesh UV map channel. - Normals (bool): When true, export the mesh normals. 
- VertexColors (bool): When true, export the mesh vertex color map 0 and the - current vertex color display data when it differs - ExtraChannels (bool): When true, export the mesh extra map channels - (map channels greater than channel 1) - Velocity (bool): When true, export the meh vertex and particle velocity - data. - MaterialIDs (bool): When true, export the mesh material ID as - Alembic face sets. - Visibility (bool): When true, export the node visibility data. - LayerName (bool): When true, export the node layer name as an Alembic - object property. - MaterialName (bool): When true, export the geometry node material name as - an Alembic object property - ObjectID (bool): When true, export the geometry node g-buffer object ID as - an Alembic object property. - CustomAttributes (bool): When true, export the node and its modifiers - custom attributes into an Alembic object compound property. -""" -import os -import pyblish.api -from ayon_core.pipeline import publish, OptionalPyblishPluginMixin -from pymxs import runtime as rt -from ayon_max.api import maintained_selection -from ayon_max.api.lib import suspended_refresh -from ayon_core.lib import BoolDef - - -class ExtractAlembic(publish.Extractor, - OptionalPyblishPluginMixin): - order = pyblish.api.ExtractorOrder - label = "Extract Pointcache" - hosts = ["max"] - families = ["pointcache"] - optional = True - active = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - parent_dir = self.staging_dir(instance) - file_name = "{name}.abc".format(**instance.data) - path = os.path.join(parent_dir, file_name) - - with suspended_refresh(): - self._set_abc_attributes(instance) - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - rt.exportFile( - path, - rt.name("noPrompt"), - selectedOnly=True, - using=rt.AlembicExport, - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "abc", - "ext": "abc", - "files": file_name, - "stagingDir": parent_dir, - } - instance.data["representations"].append(representation) - - def _set_abc_attributes(self, instance): - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - attr_values = self.get_attr_values_from_data(instance.data) - custom_attrs = attr_values.get("custom_attrs", False) - if not custom_attrs: - self.log.debug( - "No Custom Attributes included in this abc export...") - rt.AlembicExport.ArchiveType = rt.Name("ogawa") - rt.AlembicExport.CoordinateSystem = rt.Name("maya") - rt.AlembicExport.StartFrame = start - rt.AlembicExport.EndFrame = end - rt.AlembicExport.CustomAttributes = custom_attrs - - @classmethod - def get_attribute_defs(cls): - defs = super(ExtractAlembic, cls).get_attribute_defs() - defs.extend([ - BoolDef("custom_attrs", - label="Custom Attributes", - default=False), - ]) - return defs - - -class ExtractCameraAlembic(ExtractAlembic): - """Extract Camera with AlembicExport.""" - label = "Extract Alembic Camera" - families = ["camera"] - optional = True - - -class ExtractModelAlembic(ExtractAlembic): - """Extract Geometry in Alembic Format""" - label = "Extract Geometry (Alembic)" - families = ["model"] - optional = True - - def _set_abc_attributes(self, instance): - attr_values = self.get_attr_values_from_data(instance.data) - custom_attrs = attr_values.get("custom_attrs", False) - if not custom_attrs: - self.log.debug( - "No Custom Attributes included in this abc export...") - 
rt.AlembicExport.ArchiveType = rt.name("ogawa") - rt.AlembicExport.CoordinateSystem = rt.name("maya") - rt.AlembicExport.CustomAttributes = custom_attrs - rt.AlembicExport.UVs = True - rt.AlembicExport.VertexColors = True - rt.AlembicExport.PreserveInstances = True diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_fbx.py b/server_addon/max/client/ayon_max/plugins/publish/extract_fbx.py deleted file mode 100644 index bdfc1d0d78..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_fbx.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline import publish, OptionalPyblishPluginMixin -from pymxs import runtime as rt -from ayon_max.api import maintained_selection -from ayon_max.api.lib import convert_unit_scale - - -class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin): - """ - Extract Geometry in FBX Format - """ - - order = pyblish.api.ExtractorOrder - 0.05 - label = "Extract FBX" - hosts = ["max"] - families = ["model"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - stagingdir = self.staging_dir(instance) - filename = "{name}.fbx".format(**instance.data) - filepath = os.path.join(stagingdir, filename) - self._set_fbx_attributes() - - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - rt.exportFile( - filepath, - rt.name("noPrompt"), - selectedOnly=True, - using=rt.FBXEXP, - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "fbx", - "ext": "fbx", - "files": filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - self.log.info( - "Extracted instance '%s' to: %s" % (instance.name, filepath) - ) - - def _set_fbx_attributes(self): - unit_scale = convert_unit_scale() - rt.FBXExporterSetParam("Animation", False) - rt.FBXExporterSetParam("Cameras", False) - rt.FBXExporterSetParam("Lights", False) - rt.FBXExporterSetParam("PointCache", False) - rt.FBXExporterSetParam("AxisConversionMethod", "Animation") - rt.FBXExporterSetParam("UpAxis", "Y") - rt.FBXExporterSetParam("Preserveinstances", True) - if unit_scale: - rt.FBXExporterSetParam("ConvertUnit", unit_scale) - - -class ExtractCameraFbx(ExtractModelFbx): - """Extract Camera with FbxExporter.""" - - order = pyblish.api.ExtractorOrder - 0.2 - label = "Extract Fbx Camera" - families = ["camera"] - optional = True - - def _set_fbx_attributes(self): - unit_scale = convert_unit_scale() - rt.FBXExporterSetParam("Animation", True) - rt.FBXExporterSetParam("Cameras", True) - rt.FBXExporterSetParam("AxisConversionMethod", "Animation") - rt.FBXExporterSetParam("UpAxis", "Y") - rt.FBXExporterSetParam("Preserveinstances", True) - if unit_scale: - rt.FBXExporterSetParam("ConvertUnit", unit_scale) diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_max_scene_raw.py b/server_addon/max/client/ayon_max/plugins/publish/extract_max_scene_raw.py deleted file mode 100644 index ecde6d2ce9..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_max_scene_raw.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline import publish, OptionalPyblishPluginMixin -from pymxs import runtime as rt - - -class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin): - """ - Extract Raw Max Scene with SaveSelected - """ - - order = pyblish.api.ExtractorOrder - 0.2 - label = 
"Extract Max Scene (Raw)" - hosts = ["max"] - families = ["camera", "maxScene", "model"] - optional = True - - settings_category = "max" - - def process(self, instance): - if not self.is_active(instance.data): - return - - # publish the raw scene for camera - self.log.debug("Extracting Raw Max Scene ...") - - stagingdir = self.staging_dir(instance) - filename = "{name}.max".format(**instance.data) - - max_path = os.path.join(stagingdir, filename) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - nodes = instance.data["members"] - rt.saveNodes(nodes, max_path, quiet=True) - - self.log.info("Performing Extraction ...") - - representation = { - "name": "max", - "ext": "max", - "files": filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - self.log.info( - "Extracted instance '%s' to: %s" % (instance.name, max_path) - ) diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_model_obj.py b/server_addon/max/client/ayon_max/plugins/publish/extract_model_obj.py deleted file mode 100644 index 6556bd7809..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_model_obj.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline import publish, OptionalPyblishPluginMixin -from pymxs import runtime as rt -from ayon_max.api import maintained_selection -from ayon_max.api.lib import suspended_refresh -from ayon_core.pipeline.publish import KnownPublishError - - -class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin): - """ - Extract Geometry in OBJ Format - """ - - order = pyblish.api.ExtractorOrder - 0.05 - label = "Extract OBJ" - hosts = ["max"] - families = ["model"] - optional = True - - settings_category = "max" - - def process(self, instance): - if not self.is_active(instance.data): - return - - stagingdir = self.staging_dir(instance) - filename = "{name}.obj".format(**instance.data) - filepath = os.path.join(stagingdir, filename) - - with suspended_refresh(): - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - rt.exportFile( - filepath, - rt.name("noPrompt"), - selectedOnly=True, - using=rt.ObjExp, - ) - if not os.path.exists(filepath): - raise KnownPublishError( - "File {} wasn't produced by 3ds max, please check the logs.") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "obj", - "ext": "obj", - "files": filename, - "stagingDir": stagingdir, - } - - instance.data["representations"].append(representation) - self.log.info( - "Extracted instance '%s' to: %s" % (instance.name, filepath) - ) diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_model_usd.py b/server_addon/max/client/ayon_max/plugins/publish/extract_model_usd.py deleted file mode 100644 index a48126c6e5..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_model_usd.py +++ /dev/null @@ -1,94 +0,0 @@ -import os - -import pyblish.api -from pymxs import runtime as rt - -from ayon_max.api import maintained_selection -from ayon_core.pipeline import OptionalPyblishPluginMixin, publish - - -class ExtractModelUSD(publish.Extractor, - OptionalPyblishPluginMixin): - """Extract Geometry in USDA Format.""" - - order = pyblish.api.ExtractorOrder - 0.05 - label = "Extract Geometry (USD)" - hosts = ["max"] - families = ["model"] - optional = True - - settings_category = "max" - - def process(self, instance): - if 
not self.is_active(instance.data): - return - - self.log.info("Extracting Geometry ...") - - stagingdir = self.staging_dir(instance) - asset_filename = "{name}.usda".format(**instance.data) - asset_filepath = os.path.join(stagingdir, - asset_filename) - self.log.info(f"Writing USD '{asset_filepath}' to '{stagingdir}'") - - log_filename = "{name}.txt".format(**instance.data) - log_filepath = os.path.join(stagingdir, - log_filename) - self.log.info(f"Writing log '{log_filepath}' to '{stagingdir}'") - - # get the nodes which need to be exported - export_options = self.get_export_options(log_filepath) - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - rt.USDExporter.ExportFile(asset_filepath, - exportOptions=export_options, - contentSource=rt.Name("selected"), - nodeList=node_list) - - self.log.info("Performing Extraction ...") - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'usda', - 'ext': 'usda', - 'files': asset_filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - log_representation = { - 'name': 'txt', - 'ext': 'txt', - 'files': log_filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(log_representation) - - self.log.info( - f"Extracted instance '{instance.name}' to: {asset_filepath}") - - @staticmethod - def get_export_options(log_path): - """Set Export Options for USD Exporter""" - - export_options = rt.USDExporter.createOptions() - - export_options.Meshes = True - export_options.Shapes = False - export_options.Lights = False - export_options.Cameras = False - export_options.Materials = False - export_options.MeshFormat = rt.Name('fromScene') - export_options.FileFormat = rt.Name('ascii') - export_options.UpAxis = rt.Name('y') - export_options.LogLevel = rt.Name('info') - export_options.LogPath = log_path - export_options.PreserveEdgeOrientation = True - export_options.TimeMode = rt.Name('current') - - rt.USDexporter.UIOptions = export_options - - return export_options diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_pointcloud.py b/server_addon/max/client/ayon_max/plugins/publish/extract_pointcloud.py deleted file mode 100644 index f763325eb9..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_pointcloud.py +++ /dev/null @@ -1,242 +0,0 @@ -import os - -import pyblish.api -from pymxs import runtime as rt - -from ayon_max.api import maintained_selection -from ayon_core.pipeline import publish - - -class ExtractPointCloud(publish.Extractor): - """ - Extract PRT format with tyFlow operators. - - Notes: - Currently only works for the default partition setting - - Args: - self.export_particle(): sets up all job arguments for attributes - to be exported in MAXscript - - self.get_operators(): get the export_particle operator - - self.get_custom_attr(): get all custom channel attributes from Openpype - setting and sets it as job arguments before exporting - - self.get_files(): get the files with tyFlow naming convention - before publishing - - self.partition_output_name(): get the naming with partition settings. 
- - self.get_partition(): get partition value - - """ - - order = pyblish.api.ExtractorOrder - 0.2 - label = "Extract Point Cloud" - hosts = ["max"] - families = ["pointcloud"] - settings = [] - - def process(self, instance): - self.settings = self.get_setting(instance) - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - self.log.info("Extracting PRT...") - - stagingdir = self.staging_dir(instance) - filename = "{name}.prt".format(**instance.data) - path = os.path.join(stagingdir, filename) - - with maintained_selection(): - job_args = self.export_particle(instance.data["members"], - start, - end, - path) - - for job in job_args: - rt.Execute(job) - - self.log.info("Performing Extraction ...") - if "representations" not in instance.data: - instance.data["representations"] = [] - - self.log.info("Writing PRT with TyFlow Plugin...") - filenames = self.get_files( - instance.data["members"], path, start, end) - self.log.debug(f"filenames: {filenames}") - - partition = self.partition_output_name( - instance.data["members"]) - - representation = { - 'name': 'prt', - 'ext': 'prt', - 'files': filenames if len(filenames) > 1 else filenames[0], - "stagingDir": stagingdir, - "outputName": partition # partition value - } - instance.data["representations"].append(representation) - self.log.info(f"Extracted instance '{instance.name}' to: {path}") - - def export_particle(self, - members, - start, - end, - filepath): - """Sets up all job arguments for attributes. - - Those attributes are to be exported in MAX Script. - - Args: - members (list): Member nodes of the instance. - start (int): Start frame. - end (int): End frame. - filepath (str): Path to PRT file. - - Returns: - list of arguments for MAX Script. - - """ - job_args = [] - opt_list = self.get_operators(members) - for operator in opt_list: - start_frame = f"{operator}.frameStart={start}" - job_args.append(start_frame) - end_frame = f"{operator}.frameEnd={end}" - job_args.append(end_frame) - filepath = filepath.replace("\\", "/") - prt_filename = f'{operator}.PRTFilename="{filepath}"' - job_args.append(prt_filename) - # Partition - mode = f"{operator}.PRTPartitionsMode=2" - job_args.append(mode) - - additional_args = self.get_custom_attr(operator) - job_args.extend(iter(additional_args)) - prt_export = f"{operator}.exportPRT()" - job_args.append(prt_export) - - return job_args - - @staticmethod - def get_operators(members): - """Get Export Particles Operator. - - Args: - members (list): Instance members. 
- - Returns: - list of particle operators - - """ - opt_list = [] - for member in members: - obj = member.baseobject - # TODO: to see if it can be used maxscript instead - anim_names = rt.GetSubAnimNames(obj) - for anim_name in anim_names: - sub_anim = rt.GetSubAnim(obj, anim_name) - boolean = rt.IsProperty(sub_anim, "Export_Particles") - if boolean: - event_name = sub_anim.Name - opt = f"${member.Name}.{event_name}.export_particles" - opt_list.append(opt) - - return opt_list - - @staticmethod - def get_setting(instance): - project_setting = instance.context.data["project_settings"] - return project_setting["max"]["PointCloud"] - - def get_custom_attr(self, operator): - """Get Custom Attributes""" - - custom_attr_list = [] - attr_settings = self.settings["attribute"] - for attr in attr_settings: - key = attr["name"] - value = attr["value"] - custom_attr = "{0}.PRTChannels_{1}=True".format(operator, - value) - self.log.debug( - "{0} will be added as custom attribute".format(key) - ) - custom_attr_list.append(custom_attr) - - return custom_attr_list - - def get_files(self, - container, - path, - start_frame, - end_frame): - """Get file names for tyFlow. - - Set the filenames accordingly to the tyFlow file - naming extension for the publishing purpose - - Actual File Output from tyFlow:: - __partof..prt - - e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt - - Args: - container: Instance node. - path (str): Output directory. - start_frame (int): Start frame. - end_frame (int): End frame. - - Returns: - list of filenames - - """ - filenames = [] - filename = os.path.basename(path) - orig_name, ext = os.path.splitext(filename) - partition_count, partition_start = self.get_partition(container) - for frame in range(int(start_frame), int(end_frame) + 1): - actual_name = "{}__part{:03}of{}_{:05}".format(orig_name, - partition_start, - partition_count, - frame) - actual_filename = path.replace(orig_name, actual_name) - filenames.append(os.path.basename(actual_filename)) - - return filenames - - def partition_output_name(self, container): - """Get partition output name. - - Partition output name set for mapping - the published file output. - - Todo: - Customizes the setting for the output. - - Args: - container: Instance node. - - Returns: - str: Partition name. - - """ - partition_count, partition_start = self.get_partition(container) - return f"_part{partition_start:03}of{partition_count}" - - def get_partition(self, container): - """Get Partition value. - - Args: - container: Instance node. - - """ - opt_list = self.get_operators(container) - # TODO: This looks strange? Iterating over - # the opt_list but returning from inside? 
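-        # Note: only the first export_particles operator in opt_list is
-        # queried; its partition count and start frame are what is returned.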
- for operator in opt_list: - count = rt.Execute(f'{operator}.PRTPartitionsCount') - start = rt.Execute(f'{operator}.PRTPartitionsFrom') - - return count, start diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_redshift_proxy.py b/server_addon/max/client/ayon_max/plugins/publish/extract_redshift_proxy.py deleted file mode 100644 index dfb3527be1..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_redshift_proxy.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline import publish -from pymxs import runtime as rt -from ayon_max.api import maintained_selection - - -class ExtractRedshiftProxy(publish.Extractor): - """ - Extract Redshift Proxy with rsProxy - """ - - order = pyblish.api.ExtractorOrder - 0.1 - label = "Extract RedShift Proxy" - hosts = ["max"] - families = ["redshiftproxy"] - - def process(self, instance): - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - self.log.debug("Extracting Redshift Proxy...") - stagingdir = self.staging_dir(instance) - rs_filename = "{name}.rs".format(**instance.data) - rs_filepath = os.path.join(stagingdir, rs_filename) - rs_filepath = rs_filepath.replace("\\", "/") - - rs_filenames = self.get_rsfiles(instance, start, end) - - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - # Redshift rsProxy command - # rsProxy fp selected compress connectivity startFrame endFrame - # camera warnExisting transformPivotToOrigin - rt.rsProxy(rs_filepath, 1, 0, 0, start, end, 0, 1, 1) - - self.log.info("Performing Extraction ...") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'rs', - 'ext': 'rs', - 'files': rs_filenames if len(rs_filenames) > 1 else rs_filenames[0], # noqa - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - self.log.info("Extracted instance '%s' to: %s" % (instance.name, - stagingdir)) - - def get_rsfiles(self, instance, startFrame, endFrame): - rs_filenames = [] - rs_name = instance.data["name"] - for frame in range(startFrame, endFrame + 1): - rs_filename = "%s.%04d.rs" % (rs_name, frame) - rs_filenames.append(rs_filename) - - return rs_filenames diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_review_animation.py b/server_addon/max/client/ayon_max/plugins/publish/extract_review_animation.py deleted file mode 100644 index b6397d404e..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_review_animation.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline import publish -from ayon_max.api.preview_animation import ( - render_preview_animation -) - - -class ExtractReviewAnimation(publish.Extractor): - """ - Extract Review by Review Animation - """ - - order = pyblish.api.ExtractorOrder + 0.001 - label = "Extract Review Animation" - hosts = ["max"] - families = ["review"] - - def process(self, instance): - staging_dir = self.staging_dir(instance) - ext = instance.data.get("imageFormat") - start = int(instance.data["frameStart"]) - end = int(instance.data["frameEnd"]) - filepath = os.path.join(staging_dir, instance.name) - self.log.debug( - "Writing Review Animation to '{}'".format(filepath)) - - review_camera = instance.data["review_camera"] - viewport_options = instance.data.get("viewport_options", {}) - files = render_preview_animation( - filepath, - ext, - review_camera, - start, - 
end, - percentSize=instance.data["percentSize"], - width=instance.data["review_width"], - height=instance.data["review_height"], - viewport_options=viewport_options) - - filenames = [os.path.basename(path) for path in files] - - tags = ["review"] - if not instance.data.get("keepImages"): - tags.append("delete") - - self.log.debug("Performing Extraction ...") - - representation = { - "name": instance.data["imageFormat"], - "ext": instance.data["imageFormat"], - "files": filenames, - "stagingDir": staging_dir, - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"], - "tags": tags, - "preview": True, - "camera_name": review_camera - } - self.log.debug(f"{representation}") - - if "representations" not in instance.data: - instance.data["representations"] = [] - instance.data["representations"].append(representation) diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_thumbnail.py b/server_addon/max/client/ayon_max/plugins/publish/extract_thumbnail.py deleted file mode 100644 index 183e381be2..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline import publish -from ayon_max.api.preview_animation import render_preview_animation - - -class ExtractThumbnail(publish.Extractor): - """Extract Thumbnail for Review - """ - - order = pyblish.api.ExtractorOrder - label = "Extract Thumbnail" - hosts = ["max"] - families = ["review"] - - def process(self, instance): - ext = instance.data.get("imageFormat") - frame = int(instance.data["frameStart"]) - staging_dir = self.staging_dir(instance) - filepath = os.path.join( - staging_dir, f"{instance.name}_thumbnail") - self.log.debug("Writing Thumbnail to '{}'".format(filepath)) - - review_camera = instance.data["review_camera"] - viewport_options = instance.data.get("viewport_options", {}) - files = render_preview_animation( - filepath, - ext, - review_camera, - start_frame=frame, - end_frame=frame, - percentSize=instance.data["percentSize"], - width=instance.data["review_width"], - height=instance.data["review_height"], - viewport_options=viewport_options) - - thumbnail = next(os.path.basename(path) for path in files) - - representation = { - "name": "thumbnail", - "ext": ext, - "files": thumbnail, - "stagingDir": staging_dir, - "thumbnail": True - } - - self.log.debug(f"{representation}") - - if "representations" not in instance.data: - instance.data["representations"] = [] - instance.data["representations"].append(representation) diff --git a/server_addon/max/client/ayon_max/plugins/publish/extract_tycache.py b/server_addon/max/client/ayon_max/plugins/publish/extract_tycache.py deleted file mode 100644 index 576abe32a2..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/extract_tycache.py +++ /dev/null @@ -1,157 +0,0 @@ -import os - -import pyblish.api -from pymxs import runtime as rt - -from ayon_max.api import maintained_selection -from ayon_core.pipeline import publish - - -class ExtractTyCache(publish.Extractor): - """Extract tycache format with tyFlow operators. - Notes: - - TyCache only works for TyFlow Pro Plugin. 
- - Methods: - self.get_export_particles_job_args(): sets up all job arguments - for attributes to be exported in MAXscript - - self.get_operators(): get the export_particle operator - - self.get_files(): get the files with tyFlow naming convention - before publishing - """ - - order = pyblish.api.ExtractorOrder - 0.2 - label = "Extract TyCache" - hosts = ["max"] - families = ["tycache"] - - def process(self, instance): - # TODO: let user decide the param - start = int(instance.context.data["frameStart"]) - end = int(instance.context.data.get("frameEnd")) - self.log.debug("Extracting Tycache...") - - stagingdir = self.staging_dir(instance) - filename = "{name}.tyc".format(**instance.data) - path = os.path.join(stagingdir, filename) - filenames = self.get_files(instance, start, end) - additional_attributes = instance.data.get("tyc_attrs", {}) - - with maintained_selection(): - job_args = self.get_export_particles_job_args( - instance.data["members"], - start, end, path, - additional_attributes) - for job in job_args: - rt.Execute(job) - representations = instance.data.setdefault("representations", []) - representation = { - 'name': 'tyc', - 'ext': 'tyc', - 'files': filenames if len(filenames) > 1 else filenames[0], - "stagingDir": stagingdir, - } - representations.append(representation) - - # Get the tyMesh filename for extraction - mesh_filename = f"{instance.name}__tyMesh.tyc" - mesh_repres = { - 'name': 'tyMesh', - 'ext': 'tyc', - 'files': mesh_filename, - "stagingDir": stagingdir, - "outputName": '__tyMesh' - } - representations.append(mesh_repres) - self.log.debug(f"Extracted instance '{instance.name}' to: {filenames}") - - def get_files(self, instance, start_frame, end_frame): - """Get file names for tyFlow in tyCache format. - - Set the filenames accordingly to the tyCache file - naming extension(.tyc) for the publishing purpose - - Actual File Output from tyFlow in tyCache format: - __tyPart_.tyc - - e.g. tycacheMain__tyPart_00000.tyc - - Args: - instance (pyblish.api.Instance): instance. - start_frame (int): Start frame. - end_frame (int): End frame. - - Returns: - filenames(list): list of filenames - - """ - filenames = [] - for frame in range(int(start_frame), int(end_frame) + 1): - filename = f"{instance.name}__tyPart_{frame:05}.tyc" - filenames.append(filename) - return filenames - - def get_export_particles_job_args(self, members, start, end, - filepath, additional_attributes): - """Sets up all job arguments for attributes. - - Those attributes are to be exported in MAX Script. - - Args: - members (list): Member nodes of the instance. - start (int): Start frame. - end (int): End frame. - filepath (str): Output path of the TyCache file. - additional_attributes (dict): channel attributes data - which needed to be exported - - Returns: - list of arguments for MAX Script. - - """ - settings = { - "exportMode": 2, - "frameStart": start, - "frameEnd": end, - "tyCacheFilename": filepath.replace("\\", "/") - } - settings.update(additional_attributes) - - job_args = [] - for operator in self.get_operators(members): - for key, value in settings.items(): - if isinstance(value, str): - # embed in quotes - value = f'"{value}"' - - job_args.append(f"{operator}.{key}={value}") - job_args.append(f"{operator}.exportTyCache()") - return job_args - - @staticmethod - def get_operators(members): - """Get Export Particles Operator. - - Args: - members (list): Instance members. 
- - Returns: - list of particle operators - - """ - opt_list = [] - for member in members: - obj = member.baseobject - # TODO: see if it can use maxscript instead - anim_names = rt.GetSubAnimNames(obj) - for anim_name in anim_names: - sub_anim = rt.GetSubAnim(obj, anim_name) - boolean = rt.IsProperty(sub_anim, "Export_Particles") - if boolean: - event_name = sub_anim.Name - opt = f"${member.Name}.{event_name}.export_particles" - opt_list.append(opt) - - return opt_list diff --git a/server_addon/max/client/ayon_max/plugins/publish/help/validate_model_name.xml b/server_addon/max/client/ayon_max/plugins/publish/help/validate_model_name.xml deleted file mode 100644 index e41146910a..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/help/validate_model_name.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - -Invalid Model Name -## Nodes found with Invalid Model Name - -Nodes were detected in your scene which have invalid model name which does not -match the regex you preset in AYON setting. -### How to repair? -Make sure the model name aligns with validation regex in your AYON setting. - - - -### Invalid nodes - -{nodes} - - -### How could this happen? - -This often happens if you have mesh with the model naming does not match -with regex in the setting. - - - - \ No newline at end of file diff --git a/server_addon/max/client/ayon_max/plugins/publish/increment_workfile_version.py b/server_addon/max/client/ayon_max/plugins/publish/increment_workfile_version.py deleted file mode 100644 index c7c3f49626..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/increment_workfile_version.py +++ /dev/null @@ -1,19 +0,0 @@ -import pyblish.api -from ayon_core.lib import version_up -from pymxs import runtime as rt - - -class IncrementWorkfileVersion(pyblish.api.ContextPlugin): - """Increment current workfile version.""" - - order = pyblish.api.IntegratorOrder + 0.9 - label = "Increment Workfile Version" - hosts = ["max"] - families = ["maxrender", "workfile"] - - def process(self, context): - path = context.data["currentFile"] - filepath = version_up(path) - - rt.saveMaxFile(filepath) - self.log.info("Incrementing file version") diff --git a/server_addon/max/client/ayon_max/plugins/publish/save_scene.py b/server_addon/max/client/ayon_max/plugins/publish/save_scene.py deleted file mode 100644 index fe2c7f50f4..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/save_scene.py +++ /dev/null @@ -1,25 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import registered_host - - -class SaveCurrentScene(pyblish.api.InstancePlugin): - """Save current scene""" - - label = "Save current file" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["max"] - families = ["maxrender", "workfile"] - - def process(self, instance): - host = registered_host() - current_file = host.get_current_workfile() - - assert instance.context.data["currentFile"] == current_file - if instance.data["productType"] == "maxrender": - host.save_workfile(current_file) - - elif host.workfile_has_unsaved_changes(): - self.log.info(f"Saving current file: {current_file}") - host.save_workfile(current_file) - else: - self.log.debug("No unsaved changes, skipping file save..") \ No newline at end of file diff --git a/server_addon/max/client/ayon_max/plugins/publish/save_scenes_for_cameras.py b/server_addon/max/client/ayon_max/plugins/publish/save_scenes_for_cameras.py deleted file mode 100644 index a211210550..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/save_scenes_for_cameras.py +++ /dev/null 
@@ -1,105 +0,0 @@ -import pyblish.api -import os -import sys -import tempfile - -from pymxs import runtime as rt -from ayon_core.lib import run_subprocess -from ayon_max.api.lib_rendersettings import RenderSettings -from ayon_max.api.lib_renderproducts import RenderProducts - - -class SaveScenesForCamera(pyblish.api.InstancePlugin): - """Save scene files for multiple cameras without - editing the original scene before deadline submission - - """ - - label = "Save Scene files for cameras" - order = pyblish.api.ExtractorOrder - 0.48 - hosts = ["max"] - families = ["maxrender"] - - def process(self, instance): - if not instance.data.get("multiCamera"): - self.log.debug( - "Multi Camera disabled. " - "Skipping to save scene files for cameras") - return - current_folder = rt.maxFilePath - current_filename = rt.maxFileName - current_filepath = os.path.join(current_folder, current_filename) - camera_scene_files = [] - scripts = [] - filename, ext = os.path.splitext(current_filename) - fmt = RenderProducts().image_format() - cameras = instance.data.get("cameras") - if not cameras: - return - new_folder = f"{current_folder}_{filename}" - os.makedirs(new_folder, exist_ok=True) - for camera in cameras: - new_output = RenderSettings().get_batch_render_output(camera) # noqa - new_output = new_output.replace("\\", "/") - new_filename = f"{filename}_{camera}{ext}" - new_filepath = os.path.join(new_folder, new_filename) - new_filepath = new_filepath.replace("\\", "/") - camera_scene_files.append(new_filepath) - RenderSettings().batch_render_elements(camera) - rt.rendOutputFilename = new_output - rt.saveMaxFile(current_filepath) - script = (""" -from pymxs import runtime as rt -import os -filename = "{filename}" -new_filepath = "{new_filepath}" -new_output = "{new_output}" -camera = "{camera}" -rt.rendOutputFilename = new_output -directory = os.path.dirname(rt.rendOutputFilename) -directory = os.path.join(directory, filename) -render_elem = rt.maxOps.GetCurRenderElementMgr() -render_elem_num = render_elem.NumRenderElements() -if render_elem_num > 0: - ext = "{ext}" - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - target, renderpass = str(renderlayer_name).split(":") - aov_name = f"{{directory}}_{camera}_{{renderpass}}..{ext}" - render_elem.SetRenderElementFileName(i, aov_name) -rt.saveMaxFile(new_filepath) - """).format(filename=instance.name, - new_filepath=new_filepath, - new_output=new_output, - camera=camera, - ext=fmt) - scripts.append(script) - - maxbatch_exe = os.path.join( - os.path.dirname(sys.executable), "3dsmaxbatch") - maxbatch_exe = maxbatch_exe.replace("\\", "/") - if sys.platform == "windows": - maxbatch_exe += ".exe" - maxbatch_exe = os.path.normpath(maxbatch_exe) - with tempfile.TemporaryDirectory() as tmp_dir_name: - tmp_script_path = os.path.join( - tmp_dir_name, "extract_scene_files.py") - self.log.info("Using script file: {}".format(tmp_script_path)) - - with open(tmp_script_path, "wt") as tmp: - for script in scripts: - tmp.write(script + "\n") - - try: - current_filepath = current_filepath.replace("\\", "/") - tmp_script_path = tmp_script_path.replace("\\", "/") - run_subprocess([maxbatch_exe, tmp_script_path, - "-sceneFile", current_filepath]) - except RuntimeError: - self.log.debug("Checking the scene files existing") - - for camera_scene in camera_scene_files: - if not os.path.exists(camera_scene): - self.log.error("Camera scene files not existed yet!") - raise RuntimeError("MaxBatch.exe doesn't run as expected") - self.log.debug(f"Found 
Camera scene:{camera_scene}") diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_attributes.py b/server_addon/max/client/ayon_max/plugins/publish/validate_attributes.py deleted file mode 100644 index a489533b2c..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_attributes.py +++ /dev/null @@ -1,143 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for Attributes.""" -import json - -from pyblish.api import ContextPlugin, ValidatorOrder -from pymxs import runtime as rt - -from ayon_core.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairContextAction -) - - -def has_property(object_name, property_name): - """Return whether an object has a property with given name""" - return rt.Execute(f'isProperty {object_name} "{property_name}"') - - -def is_matching_value(object_name, property_name, value): - """Return whether an existing property matches value `value""" - property_value = rt.Execute(f"{object_name}.{property_name}") - - # Wrap property value if value is a string valued attributes - # starting with a `#` - if ( - isinstance(value, str) and - value.startswith("#") and - not value.endswith(")") - ): - # prefix value with `#` - # not applicable for #() array value type - # and only applicable for enum i.e. #bob, #sally - property_value = f"#{property_value}" - - return property_value == value - - -class ValidateAttributes(OptionalPyblishPluginMixin, - ContextPlugin): - """Validates attributes in the project setting are consistent - with the nodes from MaxWrapper Class in 3ds max. - E.g. "renderers.current.separateAovFiles", - "renderers.production.PrimaryGIEngine" - Admin(s) need to put the dict below and enable this validator for a check: - { - "renderers.current":{ - "separateAovFiles" : True - }, - "renderers.production":{ - "PrimaryGIEngine": "#RS_GIENGINE_BRUTE_FORCE" - } - .... - } - - """ - - order = ValidatorOrder - hosts = ["max"] - label = "Attributes" - actions = [RepairContextAction] - optional = True - - settings_category = "max" - - @classmethod - def get_invalid(cls, context): - attributes = json.loads( - context.data - ["project_settings"] - ["max"] - ["publish"] - ["ValidateAttributes"] - ["attributes"] - ) - if not attributes: - return - invalid = [] - for object_name, required_properties in attributes.items(): - if not rt.Execute(f"isValidValue {object_name}"): - # Skip checking if the node does not - # exist in MaxWrapper Class - cls.log.debug(f"Unable to find '{object_name}'." 
-                              " Skipping validation of attributes.")
-                continue
-
-            for property_name, value in required_properties.items():
-                if not has_property(object_name, property_name):
-                    cls.log.error(
-                        "Non-existing property: "
-                        f"{object_name}.{property_name}")
-                    invalid.append((object_name, property_name))
-
-                if not is_matching_value(object_name, property_name, value):
-                    cls.log.error(
-                        f"Invalid value for: {object_name}.{property_name}"
-                        f" should be: {value}")
-                    invalid.append((object_name, property_name))
-
-        return invalid
-
-    def process(self, context):
-        if not self.is_active(context.data):
-            self.log.debug("Skipping Validate Attributes...")
-            return
-        invalid_attributes = self.get_invalid(context)
-        if invalid_attributes:
-            bullet_point_invalid_statement = "\n".join(
-                "- {}".format(invalid) for invalid
-                in invalid_attributes
-            )
-            report = (
-                "Required Attribute(s) have invalid value(s).\n\n"
-                f"{bullet_point_invalid_statement}\n\n"
-                "You can use repair action to fix them if they are not\n"
-                "unknown property value(s)."
-            )
-            raise PublishValidationError(
-                report, title="Invalid Value(s) for Required Attribute(s)")
-
-    @classmethod
-    def repair(cls, context):
-        attributes = json.loads(
-            context.data
-            ["project_settings"]
-            ["max"]
-            ["publish"]
-            ["ValidateAttributes"]
-            ["attributes"]
-        )
-        invalid_attributes = cls.get_invalid(context)
-        for attrs in invalid_attributes:
-            prop, attr = attrs
-            value = attributes[prop][attr]
-            if isinstance(value, str) and not value.startswith("#"):
-                attribute_fix = '{}.{}="{}"'.format(
-                    prop, attr, value
-                )
-            else:
-                attribute_fix = "{}.{}={}".format(
-                    prop, attr, value
-                )
-            rt.Execute(attribute_fix)
diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_camera_attributes.py b/server_addon/max/client/ayon_max/plugins/publish/validate_camera_attributes.py
deleted file mode 100644
index 63a2ef39a7..0000000000
--- a/server_addon/max/client/ayon_max/plugins/publish/validate_camera_attributes.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import pyblish.api
-from pymxs import runtime as rt
-
-from ayon_core.pipeline.publish import (
-    RepairAction,
-    OptionalPyblishPluginMixin,
-    PublishValidationError
-)
-from ayon_max.api.action import SelectInvalidAction
-
-
-class ValidateCameraAttributes(OptionalPyblishPluginMixin,
-                               pyblish.api.InstancePlugin):
-    """Validates that the camera has no invalid attribute properties
-    or values (for 3dsMax cameras only).
-
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ['camera']
-    hosts = ['max']
-    label = 'Validate Camera Attributes'
-    actions = [SelectInvalidAction, RepairAction]
-    optional = True
-
-    settings_category = "max"
-
-    DEFAULTS = ["fov", "nearrange", "farrange",
-                "nearclip", "farclip"]
-    CAM_TYPE = ["Freecamera", "Targetcamera",
-                "Physical"]
-
-    @classmethod
-    def get_invalid(cls, instance):
-        invalid = []
-        if rt.units.DisplayType != rt.Name("Generic"):
-            cls.log.warning(
-                "Generic Type is not used as a scene unit\n\n"
-                "Make sure you tweak the settings with your own values\n\n"
-                "before validation.")
-        cameras = instance.data["members"]
-        project_settings = instance.context.data["project_settings"].get("max")
-        cam_attr_settings = (
-            project_settings["publish"]["ValidateCameraAttributes"]
-        )
-        for camera in cameras:
-            if str(rt.ClassOf(camera)) not in cls.CAM_TYPE:
-                cls.log.debug(
-                    "Skipping camera created from an external plugin.")
-                continue
-            for attr in cls.DEFAULTS:
-                default_value = cam_attr_settings.get(attr)
-                if default_value == float(0):
-                    cls.log.debug(
-                        f"The value of {attr} in settings is set to"
-                        " zero. Skipping the check.")
-                    continue
-                if round(rt.getProperty(camera, attr), 1) != default_value:
-                    cls.log.error(
-                        f"Invalid attribute value for {camera.name}:{attr} "
-                        f"(should be: {default_value})")
-                    invalid.append(camera)
-
-        return invalid
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            self.log.debug("Skipping Validate Camera Attributes.")
-            return
-        invalid = self.get_invalid(instance)
-
-        if invalid:
-            raise PublishValidationError(
-                "Invalid camera attributes found. See log.")
-
-    @classmethod
-    def repair(cls, instance):
-        invalid_cameras = cls.get_invalid(instance)
-        project_settings = instance.context.data["project_settings"].get("max")
-        cam_attr_settings = (
-            project_settings["publish"]["ValidateCameraAttributes"]
-        )
-        for camera in invalid_cameras:
-            for attr in cls.DEFAULTS:
-                expected_value = cam_attr_settings.get(attr)
-                if expected_value == float(0):
-                    cls.log.debug(
-                        f"The value of {attr} in settings is set to zero.")
-                    continue
-                rt.setProperty(camera, attr, expected_value)
diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_camera_contents.py b/server_addon/max/client/ayon_max/plugins/publish/validate_camera_contents.py
deleted file mode 100644
index 334e7dcec9..0000000000
--- a/server_addon/max/client/ayon_max/plugins/publish/validate_camera_contents.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-import pyblish.api
-
-from ayon_core.pipeline import PublishValidationError
-
-
-class ValidateCameraContent(pyblish.api.InstancePlugin):
-    """Validates Camera instance contents.
-
-    A Camera instance may only hold a SINGLE camera's transform
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ["camera", "review"]
-    hosts = ["max"]
-    label = "Camera Contents"
-    camera_type = ["$Free_Camera", "$Target_Camera",
-                   "$Physical_Camera", "$Target"]
-
-    def process(self, instance):
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(("Camera instance must only include "
-                                          "camera (and camera target). 
" - f"Invalid content {invalid}")) - - def get_invalid(self, instance): - """ - Get invalid nodes if the instance is not camera - """ - invalid = [] - container = instance.data["instance_node"] - self.log.info(f"Validating camera content for {container}") - - selection_list = instance.data["members"] - for sel in selection_list: - # to avoid Attribute Error from pymxs wrapper - sel_tmp = str(sel) - found = any(sel_tmp.startswith(cam) for cam in self.camera_type) - if not found: - self.log.error("Camera not found") - invalid.append(sel) - return invalid diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_extended_viewport.py b/server_addon/max/client/ayon_max/plugins/publish/validate_extended_viewport.py deleted file mode 100644 index ed476ec874..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_extended_viewport.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_core.pipeline import PublishValidationError -from pymxs import runtime as rt - - -class ValidateExtendedViewport(pyblish.api.ContextPlugin): - """Validate if the first viewport is an extended viewport.""" - - order = pyblish.api.ValidatorOrder - families = ["review"] - hosts = ["max"] - label = "Validate Extended Viewport" - - def process(self, context): - try: - rt.viewport.activeViewportEx(1) - except RuntimeError: - raise PublishValidationError( - "Please make sure one viewport is not an extended viewport", - description = ( - "Please make sure at least one viewport is not an " - "extended viewport but a 3dsmax supported viewport " - "i.e camera/persp/orthographic view.\n\n" - "To rectify it, please go to view in the top menubar, " - "go to Views -> Viewports Configuration -> Layout and " - "right click on one of the panels to change it." - )) - diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_frame_range.py b/server_addon/max/client/ayon_max/plugins/publish/validate_frame_range.py deleted file mode 100644 index 9a9f22dd3e..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_frame_range.py +++ /dev/null @@ -1,90 +0,0 @@ -import pyblish.api - -from pymxs import runtime as rt -from ayon_core.pipeline import ( - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - KnownPublishError -) -from ayon_max.api.lib import get_frame_range, set_timeline - - -class ValidateFrameRange(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validates the frame ranges. - - This is an optional validator checking if the frame range on instance - matches the frame range specified for the folder. - - It also validates render frame ranges of render layers. - - Repair action will change everything to match the folder frame range. - - This can be turned off by the artist to allow custom ranges. 
- """ - - label = "Validate Frame Range" - order = ValidateContentsOrder - families = ["camera", "maxrender", - "pointcache", "pointcloud", - "review", "redshiftproxy"] - hosts = ["max"] - optional = True - actions = [RepairAction] - - settings_category = "max" - - def process(self, instance): - if not self.is_active(instance.data): - self.log.debug("Skipping Validate Frame Range...") - return - - frame_range = get_frame_range( - instance.data["taskEntity"]) - - inst_frame_start = instance.data.get("frameStartHandle") - inst_frame_end = instance.data.get("frameEndHandle") - if inst_frame_start is None or inst_frame_end is None: - raise KnownPublishError( - "Missing frame start and frame end on " - "instance to to validate." - ) - frame_start_handle = frame_range["frameStartHandle"] - frame_end_handle = frame_range["frameEndHandle"] - errors = [] - if frame_start_handle != inst_frame_start: - errors.append( - f"Start frame ({inst_frame_start}) on instance does not match " # noqa - f"with the start frame ({frame_start_handle}) set on the folder attributes. ") # noqa - if frame_end_handle != inst_frame_end: - errors.append( - f"End frame ({inst_frame_end}) on instance does not match " - f"with the end frame ({frame_end_handle}) " - "from the folder attributes. ") - - if errors: - bullet_point_errors = "\n".join( - "- {}".format(error) for error in errors - ) - report = ( - "Frame range settings are incorrect.\n\n" - f"{bullet_point_errors}\n\n" - "You can use repair action to fix it." - ) - raise PublishValidationError(report, title="Frame Range incorrect") - - @classmethod - def repair(cls, instance): - frame_range = get_frame_range() - frame_start_handle = frame_range["frameStartHandle"] - frame_end_handle = frame_range["frameEndHandle"] - - if instance.data["productType"] == "maxrender": - rt.rendStart = frame_start_handle - rt.rendEnd = frame_end_handle - else: - set_timeline(frame_start_handle, frame_end_handle) diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_instance_has_members.py b/server_addon/max/client/ayon_max/plugins/publish/validate_instance_has_members.py deleted file mode 100644 index 552e9ea0e2..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_instance_has_members.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_core.pipeline import PublishValidationError - - -class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): - """Validates Instance has members. - - Check if MaxScene containers includes any contents underneath. 
- """ - - order = pyblish.api.ValidatorOrder - families = ["camera", - "model", - "maxScene", - "review", - "pointcache", - "pointcloud", - "redshiftproxy"] - hosts = ["max"] - label = "Container Contents" - - def process(self, instance): - if not instance.data["members"]: - raise PublishValidationError("No content found in the container") diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_instance_in_context.py b/server_addon/max/client/ayon_max/plugins/publish/validate_instance_in_context.py deleted file mode 100644 index d5bdfe4eb0..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_instance_in_context.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validate if instance context is the same as current context.""" -import pyblish.api -from ayon_core.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_max.api.action import SelectInvalidAction -from pymxs import runtime as rt - - -class ValidateInstanceInContext(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validator to check if instance context match current context. - - When working in per-shot style you always publish data in context of - current context (shot). This validator checks if this is so. It is optional - so it can be disabled when needed. - - Action on this validator will select invalid instances. - """ - order = ValidateContentsOrder - label = "Instance in same Context" - optional = True - hosts = ["max"] - actions = [SelectInvalidAction, RepairAction] - - settings_category = "max" - - def process(self, instance): - if not self.is_active(instance.data): - return - - folderPath = instance.data.get("folderPath") - task = instance.data.get("task") - context = self.get_context(instance) - if (folderPath, task) != context: - context_label = "{} > {}".format(*context) - instance_label = "{} > {}".format(folderPath, task) - message = ( - "Instance '{}' publishes to different context(folder or task) " - "than current context: {}. Current context: {}".format( - instance.name, instance_label, context_label - ) - ) - raise PublishValidationError( - message=message, - description=( - "## Publishing to a different context data(folder or task)\n" - "There are publish instances present which are publishing " - "into a different folder path or task than your current context.\n\n" - "Usually this is not what you want but there can be cases " - "where you might want to publish into another context or " - "shot. If that's the case you can disable the validation " - "on the instance to ignore it." 
- ) - ) - - @classmethod - def get_invalid(cls, instance): - invalid = [] - folderPath = instance.data.get("folderPath") - task = instance.data.get("task") - context = cls.get_context(instance) - if (folderPath, task) != context: - invalid.append(rt.getNodeByName(instance.name)) - return invalid - - @classmethod - def repair(cls, instance): - context_asset = instance.context.data["folderPath"] - context_task = instance.context.data["task"] - instance_node = rt.getNodeByName(instance.data.get( - "instance_node", "")) - if not instance_node: - return - rt.SetUserProp(instance_node, "folderPath", context_asset) - rt.SetUserProp(instance_node, "task", context_task) - - @staticmethod - def get_context(instance): - """Return asset, task from publishing context data""" - context = instance.context - return context.data["folderPath"], context.data["task"] diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_loaded_plugin.py b/server_addon/max/client/ayon_max/plugins/publish/validate_loaded_plugin.py deleted file mode 100644 index 1fddc7998d..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_loaded_plugin.py +++ /dev/null @@ -1,143 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for Loaded Plugin.""" -import os -import pyblish.api -from pymxs import runtime as rt - -from ayon_core.pipeline.publish import ( - RepairAction, - OptionalPyblishPluginMixin, - PublishValidationError -) -from ayon_max.api.lib import get_plugins - - -class ValidateLoadedPlugin(OptionalPyblishPluginMixin, - pyblish.api.InstancePlugin): - """Validates if the specific plugin is loaded in 3ds max. - Studio Admin(s) can add the plugins they want to check in validation - via studio defined project settings - """ - - order = pyblish.api.ValidatorOrder - hosts = ["max"] - label = "Validate Loaded Plugins" - optional = True - actions = [RepairAction] - - settings_category = "max" - - family_plugins_mapping = [] - - @classmethod - def get_invalid(cls, instance): - """Plugin entry point.""" - family_plugins_mapping = cls.family_plugins_mapping - if not family_plugins_mapping: - return - - # Backward compatibility - settings did have 'product_types' - if "product_types" in family_plugins_mapping: - family_plugins_mapping["families"] = family_plugins_mapping.pop( - "product_types" - ) - - invalid = [] - # Find all plug-in requirements for current instance - instance_families = {instance.data["productType"]} - instance_families.update(instance.data.get("families", [])) - cls.log.debug("Checking plug-in validation " - f"for instance families: {instance_families}") - all_required_plugins = set() - - for mapping in family_plugins_mapping: - # Check for matching families - if not mapping: - return - - match_families = { - fam.strip() for fam in mapping["families"] - } - has_match = "*" in match_families or match_families.intersection( - instance_families) - - if not has_match: - continue - - cls.log.debug( - f"Found plug-in family requirements: {match_families}") - required_plugins = [ - # match lowercase and format with os.environ to allow - # plugin names defined by max version, e.g. 
{3DSMAX_VERSION} - plugin.format(**os.environ).lower() - for plugin in mapping["plugins"] - # ignore empty fields in settings - if plugin.strip() - ] - - all_required_plugins.update(required_plugins) - - if not all_required_plugins: - # Instance has no plug-in requirements - return - - # get all DLL loaded plugins in Max and their plugin index - available_plugins = { - plugin_name.lower(): index for index, plugin_name in enumerate( - get_plugins()) - } - # validate the required plug-ins - for plugin in sorted(all_required_plugins): - plugin_index = available_plugins.get(plugin) - if plugin_index is None: - debug_msg = ( - f"Plugin {plugin} does not exist" - " in 3dsMax Plugin List." - ) - invalid.append((plugin, debug_msg)) - continue - if not rt.pluginManager.isPluginDllLoaded(plugin_index): - debug_msg = f"Plugin {plugin} not loaded." - invalid.append((plugin, debug_msg)) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - self.log.debug("Skipping Validate Loaded Plugin...") - return - invalid = self.get_invalid(instance) - if invalid: - bullet_point_invalid_statement = "\n".join( - "- {}".format(message) for _, message in invalid - ) - report = ( - "Required plugins are not loaded.\n\n" - f"{bullet_point_invalid_statement}\n\n" - "You can use repair action to load the plugin." - ) - raise PublishValidationError( - report, title="Missing Required Plugins") - - @classmethod - def repair(cls, instance): - # get all DLL loaded plugins in Max and their plugin index - invalid = cls.get_invalid(instance) - if not invalid: - return - - # get all DLL loaded plugins in Max and their plugin index - available_plugins = { - plugin_name.lower(): index for index, plugin_name in enumerate( - get_plugins()) - } - - for invalid_plugin, _ in invalid: - plugin_index = available_plugins.get(invalid_plugin) - - if plugin_index is None: - cls.log.warning( - f"Can't enable missing plugin: {invalid_plugin}") - continue - - if not rt.pluginManager.isPluginDllLoaded(plugin_index): - rt.pluginManager.loadPluginDll(plugin_index) diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_mesh_has_uv.py b/server_addon/max/client/ayon_max/plugins/publish/validate_mesh_has_uv.py deleted file mode 100644 index 31143a60c0..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_mesh_has_uv.py +++ /dev/null @@ -1,62 +0,0 @@ - -import pyblish.api -from ayon_max.api.action import SelectInvalidAction -from ayon_core.pipeline.publish import ( - ValidateMeshOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) -from pymxs import runtime as rt - - -class ValidateMeshHasUVs(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - - """Validate the current mesh has UVs. - - This validator only checks if the mesh has UVs but not - whether all the individual faces of the mesh have UVs. - - It validates whether the current mesh has texture vertices. - If the mesh does not have texture vertices, it does not - have UVs in Max. 
-
-    """
-
-    order = ValidateMeshOrder
-    hosts = ['max']
-    families = ['model']
-    label = 'Validate Mesh Has UVs'
-    actions = [SelectInvalidAction]
-    optional = True
-
-    settings_category = "max"
-
-    @classmethod
-    def get_invalid(cls, instance):
-        meshes = [member for member in instance.data["members"]
-                  if rt.isProperty(member, "mesh")]
-        invalid = [member for member in meshes
-                   if member.mesh.numTVerts == 0]
-        return invalid
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-        invalid = self.get_invalid(instance)
-        if invalid:
-            bullet_point_invalid_statement = "\n".join(
-                "- {}".format(invalid.name) for invalid
-                in invalid
-            )
-            report = (
-                "Model meshes are required to have UVs.\n\n"
-                "Meshes detected with invalid or missing UVs:\n"
-                f"{bullet_point_invalid_statement}\n"
-            )
-            raise PublishValidationError(
-                report,
-                description=(
-                    "Model meshes are required to have UVs.\n\n"
-                    "Meshes detected with no texture vertices or missing UVs"),
-                title="Non-mesh objects found or meshes with missing UVs")
diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_model_contents.py b/server_addon/max/client/ayon_max/plugins/publish/validate_model_contents.py
deleted file mode 100644
index 9a4d988aa4..0000000000
--- a/server_addon/max/client/ayon_max/plugins/publish/validate_model_contents.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# -*- coding: utf-8 -*-
-import pyblish.api
-from pymxs import runtime as rt
-
-from ayon_core.pipeline import PublishValidationError
-
-
-class ValidateModelContent(pyblish.api.InstancePlugin):
-    """Validates Model instance contents.
-
-    A model instance may only hold either geometry-related
-    objects (excluding Shapes) or editable meshes.
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ["model"]
-    hosts = ["max"]
-    label = "Model Contents"
-
-    def process(self, instance):
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(("Model instance must only include "
-                                          "Geometry and Editable Mesh. "
-                                          f"Invalid types on: {invalid}"))
-
-    def get_invalid(self, instance):
-        """
-        Get nodes that are not valid model content (cameras, lights, shapes)
-        """
-        invalid = []
-        container = instance.data["instance_node"]
-        self.log.info(f"Validating model content for {container}")
-
-        selection_list = instance.data["members"]
-        for sel in selection_list:
-            if rt.ClassOf(sel) in rt.Camera.classes:
-                invalid.append(sel)
-            if rt.ClassOf(sel) in rt.Light.classes:
-                invalid.append(sel)
-            if rt.ClassOf(sel) in rt.Shape.classes:
-                invalid.append(sel)
-
-        return invalid
diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_model_name.py b/server_addon/max/client/ayon_max/plugins/publish/validate_model_name.py
deleted file mode 100644
index d691b739b7..0000000000
--- a/server_addon/max/client/ayon_max/plugins/publish/validate_model_name.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Validate model nodes names."""
-import re
-
-import pyblish.api
-
-from ayon_max.api.action import SelectInvalidAction
-
-from ayon_core.pipeline.publish import (
-    OptionalPyblishPluginMixin,
-    PublishXmlValidationError,
-    ValidateContentsOrder
-)
-
-class ValidateModelName(pyblish.api.InstancePlugin,
-                        OptionalPyblishPluginMixin):
-    """Validate Model Name.
-
-    Validation regex is `(.*)_(?P<subset>.*)_(GEO)` by default.
-    The setting supports the following regex group names:
-    - project
-    - asset
-    - subset
-
-    Examples:
-        `{SOME_RANDOM_NAME}_{YOUR_SUBSET_NAME}_GEO` should be your
-        default model name.
-        The regex of `(?P<subset>.*)` can be replaced by `(?P<asset>.*)`
-        and `(?P<project>.*)`.
-        `(.*)_(?P<asset>.*)_(GEO)` checks if your model name is
-        `{SOME_RANDOM_NAME}_{CURRENT_ASSET_NAME}_GEO`
-        `(.*)_(?P<project>.*)_(GEO)` checks if your model name is
-        `{SOME_RANDOM_NAME}_{CURRENT_PROJECT_NAME}_GEO`
-
-    """
-    optional = True
-    order = ValidateContentsOrder
-    hosts = ["max"]
-    families = ["model"]
-    label = "Validate Model Name"
-    actions = [SelectInvalidAction]
-
-    settings_category = "max"
-
-    # defined by settings
-    regex = r"(.*)_(?P<subset>.*)_(GEO)"
-    # cache
-    regex_compiled = None
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            names = "\n".join(
-                "- {}".format(node.name) for node in invalid
-            )
-            raise PublishXmlValidationError(
-                plugin=self,
-                message="Nodes found with invalid model names: {}".format(invalid),
-                formatting_data={"nodes": names}
-            )
-
-    @classmethod
-    def get_invalid(cls, instance):
-        if not cls.regex:
-            cls.log.warning("No regex pattern set. Nothing to validate.")
-            return
-
-        members = instance.data.get("members")
-        if not members:
-            cls.log.error("No members found in the instance.")
-            return
-
-        cls.regex_compiled = re.compile(cls.regex)
-
-        invalid = []
-        for obj in members:
-            if cls.invalid_name(instance, obj):
-                invalid.append(obj)
-        return invalid
-
-    @classmethod
-    def invalid_name(cls, instance, obj):
-        """Check whether the object has an invalid name
-        with regard to the validation regex in the AYON settings.
-
-        Args:
-            instance (pyblish.api.instance): Instance
-            obj: Scene node to check.
-
-        Returns:
-            The invalid node, or None if the name is valid.
-        """
-        regex = cls.regex_compiled
-        name = obj.name
-        match = regex.match(name)
-
-        if match is None:
-            cls.log.error("Invalid model name on: %s", name)
-            cls.log.error("Name doesn't match regex {}".format(regex.pattern))
-            return obj
-
-        # Validate regex groups
-        invalid = False
-        compare = {
-            "project": instance.context.data["projectName"],
-            "asset": instance.data["folderPath"],
-            "subset": instance.data["productName"]
-        }
-        for key, required_value in compare.items():
-            if key in regex.groupindex:
-                if match.group(key) != required_value:
-                    cls.log.error(
-                        "Invalid %s name for the model %s, "
-                        "required name is %s",
-                        key, name, required_value
-                    )
-                    invalid = True
-
-        if invalid:
-            return obj
diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_no_animation.py b/server_addon/max/client/ayon_max/plugins/publish/validate_no_animation.py
deleted file mode 100644
index 26384954ca..0000000000
--- a/server_addon/max/client/ayon_max/plugins/publish/validate_no_animation.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-import pyblish.api
-from pymxs import runtime as rt
-from ayon_core.pipeline import (
-    PublishValidationError,
-    OptionalPyblishPluginMixin
-)
-from ayon_max.api.action import SelectInvalidAction
-
-
-def get_invalid_keys(obj):
-    """Check whether there are keyframes on the object's transforms.
-
-    Args:
-        obj: node to check for keyframes
-
-    Returns:
-        bool: whether invalid keyframe(s) exist
-    """
-    for transform in ["Position", "Rotation", "Scale"]:
-        num_of_key = rt.NumKeys(rt.getPropertyController(
-            obj.controller, transform))
-        if num_of_key > 0:
-            return True
-    return False
-
-
-class ValidateNoAnimation(pyblish.api.InstancePlugin,
-                          OptionalPyblishPluginMixin):
-    """Validates No Animation
-
-    Ensure no keyframes on nodes in the Instance
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ["model"]
-    hosts = ["max"]
-    optional
= True - label = "Validate No Animation" - actions = [SelectInvalidAction] - - settings_category = "max" - - def process(self, instance): - if not self.is_active(instance.data): - return - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Keyframes found on:\n\n{0}".format(invalid) - , - title="Keyframes on model" - ) - - @staticmethod - def get_invalid(instance): - """Get invalid object(s) which have keyframe(s) - - - Args: - instance (pyblish.api.instance): Instance - - Returns: - list: list of invalid objects - """ - invalid = [invalid for invalid in instance.data["members"] - if invalid.isAnimated or get_invalid_keys(invalid)] - - return invalid diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_pointcloud.py b/server_addon/max/client/ayon_max/plugins/publish/validate_pointcloud.py deleted file mode 100644 index 73b18984ed..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_pointcloud.py +++ /dev/null @@ -1,126 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import PublishValidationError -from pymxs import runtime as rt - - -class ValidatePointCloud(pyblish.api.InstancePlugin): - """Validate that work file was saved.""" - - order = pyblish.api.ValidatorOrder - families = ["pointcloud"] - hosts = ["max"] - label = "Validate Point Cloud" - - def process(self, instance): - """ - Notes: - 1. Validate if the export mode of Export Particle is at PRT format - 2. Validate the partition count and range set as default value - Partition Count : 100 - Partition Range : 1 to 1 - 3. Validate if the custom attribute(s) exist as parameter(s) - of export_particle operator - - """ - report = [] - - if self.validate_export_mode(instance): - report.append("The export mode is not at PRT") - - if self.validate_partition_value(instance): - report.append(("tyFlow Partition setting is " - "not at the default value")) - - invalid_attribute = self.validate_custom_attribute(instance) - if invalid_attribute: - report.append(("Custom Attribute not found " - f":{invalid_attribute}")) - - if report: - raise PublishValidationError(f"{report}") - - def validate_custom_attribute(self, instance): - invalid = [] - container = instance.data["instance_node"] - self.log.info( - f"Validating tyFlow custom attributes for {container}") - - selection_list = instance.data["members"] - - project_settings = instance.context.data["project_settings"] - attr_settings = project_settings["max"]["PointCloud"]["attribute"] - for sel in selection_list: - obj = sel.baseobject - anim_names = rt.GetSubAnimNames(obj) - for anim_name in anim_names: - # get all the names of the related tyFlow nodes - sub_anim = rt.GetSubAnim(obj, anim_name) - if rt.IsProperty(sub_anim, "Export_Particles"): - event_name = sub_anim.name - opt = "${0}.{1}.export_particles".format(sel.name, - event_name) - for attr in attr_settings: - key = attr["name"] - value = attr["value"] - custom_attr = "{0}.PRTChannels_{1}".format(opt, - value) - try: - rt.Execute(custom_attr) - except RuntimeError: - invalid.append(key) - - return invalid - - def validate_partition_value(self, instance): - invalid = [] - container = instance.data["instance_node"] - self.log.info( - f"Validating tyFlow partition value for {container}") - - selection_list = instance.data["members"] - for sel in selection_list: - obj = sel.baseobject - anim_names = rt.GetSubAnimNames(obj) - for anim_name in anim_names: - # get all the names of the related tyFlow nodes - sub_anim = rt.GetSubAnim(obj, anim_name) - if 
rt.IsProperty(sub_anim, "Export_Particles"): - event_name = sub_anim.name - opt = "${0}.{1}.export_particles".format(sel.name, - event_name) - count = rt.Execute(f'{opt}.PRTPartitionsCount') - if count != 100: - invalid.append(count) - start = rt.Execute(f'{opt}.PRTPartitionsFrom') - if start != 1: - invalid.append(start) - end = rt.Execute(f'{opt}.PRTPartitionsTo') - if end != 1: - invalid.append(end) - - return invalid - - def validate_export_mode(self, instance): - invalid = [] - container = instance.data["instance_node"] - self.log.info( - f"Validating tyFlow export mode for {container}") - - con = rt.GetNodeByName(container) - selection_list = list(con.Children) - for sel in selection_list: - obj = sel.baseobject - anim_names = rt.GetSubAnimNames(obj) - for anim_name in anim_names: - # get all the names of the related tyFlow nodes - sub_anim = rt.GetSubAnim(obj, anim_name) - # check if there is export particle operator - boolean = rt.IsProperty(sub_anim, "Export_Particles") - event_name = sub_anim.name - if boolean: - opt = f"${sel.name}.{event_name}.export_particles" - export_mode = rt.Execute(f'{opt}.exportMode') - if export_mode != 1: - invalid.append(export_mode) - - return invalid diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_renderable_camera.py b/server_addon/max/client/ayon_max/plugins/publish/validate_renderable_camera.py deleted file mode 100644 index dc05771e1b..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_renderable_camera.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_core.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin) -from ayon_core.pipeline.publish import RepairAction -from ayon_max.api.lib import get_current_renderer - -from pymxs import runtime as rt - - -class ValidateRenderableCamera(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validates Renderable Camera - - Check if the renderable camera used for rendering - """ - - order = pyblish.api.ValidatorOrder - families = ["maxrender"] - hosts = ["max"] - label = "Renderable Camera" - optional = True - actions = [RepairAction] - - def process(self, instance): - if not self.is_active(instance.data): - return - if not instance.data["cameras"]: - raise PublishValidationError( - "No renderable Camera found in scene." 
- ) - - @classmethod - def repair(cls, instance): - - rt.viewport.setType(rt.Name("view_camera")) - camera = rt.viewport.GetCamera() - cls.log.info(f"Camera {camera} set as renderable camera") - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - if renderer == "Arnold": - arv = rt.MAXToAOps.ArnoldRenderView() - arv.setOption("Camera", str(camera)) - arv.close() - instance.data["cameras"] = [camera.name] diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_renderer_redshift_proxy.py b/server_addon/max/client/ayon_max/plugins/publish/validate_renderer_redshift_proxy.py deleted file mode 100644 index 66c69bc100..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_renderer_redshift_proxy.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_core.pipeline import PublishValidationError -from pymxs import runtime as rt -from ayon_core.pipeline.publish import RepairAction -from ayon_max.api.lib import get_current_renderer - - -class ValidateRendererRedshiftProxy(pyblish.api.InstancePlugin): - """ - Validates Redshift as the current renderer for creating - Redshift Proxy - """ - - order = pyblish.api.ValidatorOrder - families = ["redshiftproxy"] - hosts = ["max"] - label = "Redshift Renderer" - actions = [RepairAction] - - def process(self, instance): - invalid = self.get_redshift_renderer(instance) - if invalid: - raise PublishValidationError("Please install Redshift for 3dsMax" - " before using the Redshift proxy instance") # noqa - invalid = self.get_current_renderer(instance) - if invalid: - raise PublishValidationError("The Redshift proxy extraction" - "discontinued since the current renderer is not Redshift") # noqa - - def get_redshift_renderer(self, instance): - invalid = list() - max_renderers_list = str(rt.RendererClass.classes) - if "Redshift_Renderer" not in max_renderers_list: - invalid.append(max_renderers_list) - - return invalid - - def get_current_renderer(self, instance): - invalid = list() - renderer_class = get_current_renderer() - current_renderer = str(renderer_class).split(":")[0] - if current_renderer != "Redshift_Renderer": - invalid.append(current_renderer) - - return invalid - - @classmethod - def repair(cls, instance): - for Renderer in rt.RendererClass.classes: - renderer = Renderer() - if "Redshift_Renderer" in str(renderer): - rt.renderers.production = renderer - break diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_renderpasses.py b/server_addon/max/client/ayon_max/plugins/publish/validate_renderpasses.py deleted file mode 100644 index d0d47c6340..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_renderpasses.py +++ /dev/null @@ -1,187 +0,0 @@ -import os -import pyblish.api -from pymxs import runtime as rt -from ayon_core.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) -from ayon_max.api.lib_rendersettings import RenderSettings - - -class ValidateRenderPasses(OptionalPyblishPluginMixin, - pyblish.api.InstancePlugin): - """Validates Render Passes before farm submission - """ - - order = ValidateContentsOrder - families = ["maxrender"] - hosts = ["max"] - label = "Validate Render Passes" - actions = [RepairAction] - - settings_category = "max" - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - bullet_point_invalid_statement = "\n".join( - f"- {err_type}: {filepath}" for err_type, filepath - in 
invalid - ) - report = ( - "Invalid render passes found.\n\n" - f"{bullet_point_invalid_statement}\n\n" - "You can use repair action to fix the invalid filepath." - ) - raise PublishValidationError( - report, title="Invalid Render Passes") - - @classmethod - def get_invalid(cls, instance): - """Function to get invalid beauty render outputs and - render elements. - - 1. Check Render Output Folder matches the name of - the current Max Scene, e.g. - The name of the current Max scene: - John_Doe.max - The expected render output directory: - {root[work]}/{project[name]}/{hierarchy}/{asset}/ - work/{task[name]}/render/3dsmax/John_Doe/ - - 2. Check image extension(s) of the render output(s) - matches the image format in OP/AYON setting, e.g. - The current image format in settings: png - The expected render outputs: John_Doe.png - - 3. Check filename of render element ends with the name of - render element from the 3dsMax Render Element Manager. - e.g. The name of render element: RsCryptomatte - The expected filename: {InstanceName}_RsCryptomatte.png - - Args: - instance (pyblish.api.Instance): instance - workfile_name (str): filename of the Max scene - - Returns: - list: list of invalid filename which doesn't match - with the project name - """ - invalid = [] - file = rt.maxFileName - workfile_name, ext = os.path.splitext(file) - if workfile_name not in rt.rendOutputFilename: - cls.log.error( - "Render output folder must include" - f" the max scene name {workfile_name} " - ) - invalid_folder_name = os.path.dirname( - rt.rendOutputFilename).replace( - "\\", "/").split("/")[-1] - invalid.append(("Invalid Render Output Folder", - invalid_folder_name)) - beauty_fname = os.path.basename(rt.rendOutputFilename) - beauty_name, ext = os.path.splitext(beauty_fname) - invalid_filenames = cls.get_invalid_filenames( - instance, beauty_name) - invalid.extend(invalid_filenames) - invalid_image_format = cls.get_invalid_image_format( - instance, ext.lstrip(".")) - invalid.extend(invalid_image_format) - renderer = instance.data["renderer"] - if renderer in [ - "ART_Renderer", - "Redshift_Renderer", - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3", - "Default_Scanline_Renderer", - "Quicksilver_Hardware_Renderer", - ]: - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - renderpass = str(renderlayer_name).rsplit(":", 1)[-1] - rend_file = render_elem.GetRenderElementFilename(i) - if not rend_file: - continue - - rend_fname, ext = os.path.splitext( - os.path.basename(rend_file)) - invalid_filenames = cls.get_invalid_filenames( - instance, rend_fname, renderpass=renderpass) - invalid.extend(invalid_filenames) - invalid_image_format = cls.get_invalid_image_format( - instance, ext) - invalid.extend(invalid_image_format) - elif renderer == "Arnold": - cls.log.debug( - "Renderpass validation does not support Arnold yet," - " validation skipped...") - else: - cls.log.debug( - "Skipping render element validation " - f"for renderer: {renderer}") - return invalid - - @classmethod - def get_invalid_filenames(cls, instance, file_name, renderpass=None): - """Function to get invalid filenames from render outputs. - - Args: - instance (pyblish.api.Instance): instance - file_name (str): name of the file - renderpass (str, optional): name of the renderpass. - Defaults to None. 
- - Returns: - list: invalid filenames - """ - invalid = [] - if instance.name not in file_name: - cls.log.error("The renderpass filename should contain the instance name.") - invalid.append(("Invalid instance name", - file_name)) - if renderpass is not None: - if not file_name.rstrip(".").endswith(renderpass): - cls.log.error( - f"Filename for {renderpass} should " - f"end with {renderpass}: {file_name}" - ) - invalid.append((f"Invalid {renderpass}", - os.path.basename(file_name))) - return invalid - - @classmethod - def get_invalid_image_format(cls, instance, ext): - """Function to check if the image format of the render outputs - aligns with that in the setting. - - Args: - instance (pyblish.api.Instance): instance - ext (str): image extension - - Returns: - list: list of files with invalid image format - """ - invalid = [] - settings = instance.context.data["project_settings"].get("max") - image_format = settings["RenderSettings"]["image_format"] - ext = ext.lstrip(".") - if ext != image_format: - msg = ( - f"Invalid image format {ext} for render outputs.\n" - f"Should be: {image_format}") - cls.log.error(msg) - invalid.append((msg, ext)) - return invalid - - @classmethod - def repair(cls, instance): - container = instance.data.get("instance_node") - # TODO: need to rename the function of render_output - RenderSettings().render_output(container) - cls.log.debug("Finished repairing the render output " - "folder and filenames.") diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_resolution_setting.py b/server_addon/max/client/ayon_max/plugins/publish/validate_resolution_setting.py deleted file mode 100644 index 9f7ec17dd9..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_resolution_setting.py +++ /dev/null @@ -1,92 +0,0 @@ -import pyblish.api -from pymxs import runtime as rt -from ayon_core.pipeline import ( - OptionalPyblishPluginMixin -) -from ayon_core.pipeline.publish import ( - RepairAction, - PublishValidationError -) -from ayon_max.api.lib import ( - reset_scene_resolution, - imprint -) - - -class ValidateResolutionSetting(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validate the resolution setting aligned with DB""" - - order = pyblish.api.ValidatorOrder - 0.01 - families = ["maxrender"] - hosts = ["max"] - label = "Validate Resolution Setting" - optional = True - actions = [RepairAction] - - def process(self, instance): - if not self.is_active(instance.data): - return - width, height = self.get_folder_resolution(instance) - current_width, current_height = ( - self.get_current_resolution(instance) - ) - - if current_width != width and current_height != height: - raise PublishValidationError("Resolution Setting " - "not matching resolution " - "set on asset or shot.") - if current_width != width: - raise PublishValidationError("Width in Resolution Setting " - "not matching resolution set " - "on asset or shot.") - - if current_height != height: - raise PublishValidationError("Height in Resolution Setting " - "not matching resolution set " - "on asset or shot.") - - def get_current_resolution(self, instance): - return rt.renderWidth, rt.renderHeight - - @classmethod - def get_folder_resolution(cls, instance): - task_entity = instance.data.get("taskEntity") - if task_entity: - task_attributes = task_entity["attrib"] - width = task_attributes["resolutionWidth"] - height = task_attributes["resolutionHeight"] - return int(width), int(height) - - # Defaults if not found in folder entity - return 1920, 1080 - - @classmethod 
- def repair(cls, instance): - reset_scene_resolution() - - -class ValidateReviewResolutionSetting(ValidateResolutionSetting): - families = ["review"] - optional = True - actions = [RepairAction] - - def get_current_resolution(self, instance): - current_width = instance.data["review_width"] - current_height = instance.data["review_height"] - return current_width, current_height - - @classmethod - def repair(cls, instance): - context_width, context_height = ( - cls.get_folder_resolution(instance) - ) - creator_attrs = instance.data["creator_attributes"] - creator_attrs["review_width"] = context_width - creator_attrs["review_height"] = context_height - creator_attrs_data = { - "creator_attributes": creator_attrs - } - # update the width and height of review - # data in creator_attributes - imprint(instance.data["instance_node"], creator_attrs_data) diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_scene_saved.py b/server_addon/max/client/ayon_max/plugins/publish/validate_scene_saved.py deleted file mode 100644 index 3028a55337..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_scene_saved.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from ayon_core.pipeline import PublishValidationError -from pymxs import runtime as rt - - -class ValidateSceneSaved(pyblish.api.InstancePlugin): - """Validate that workfile was saved.""" - - order = pyblish.api.ValidatorOrder - families = ["workfile"] - hosts = ["max"] - label = "Validate Workfile is saved" - - def process(self, instance): - if not rt.maxFilePath or not rt.maxFileName: - raise PublishValidationError( - "Workfile is not saved", title=self.label) diff --git a/server_addon/max/client/ayon_max/plugins/publish/validate_tyflow_data.py b/server_addon/max/client/ayon_max/plugins/publish/validate_tyflow_data.py deleted file mode 100644 index 8dd8a1bb68..0000000000 --- a/server_addon/max/client/ayon_max/plugins/publish/validate_tyflow_data.py +++ /dev/null @@ -1,88 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import PublishValidationError -from pymxs import runtime as rt - - -class ValidateTyFlowData(pyblish.api.InstancePlugin): - """Validate TyFlow plugins or relevant operators are set correctly.""" - - order = pyblish.api.ValidatorOrder - families = ["pointcloud", "tycache"] - hosts = ["max"] - label = "TyFlow Data" - - def process(self, instance): - """ - Notes: - 1. Validate the container only include tyFlow objects - 2. Validate if tyFlow operator Export Particle exists - - """ - - invalid_object = self.get_tyflow_object(instance) - if invalid_object: - self.log.error(f"Non tyFlow object found: {invalid_object}") - - invalid_operator = self.get_tyflow_operator(instance) - if invalid_operator: - self.log.error( - "Operator 'Export Particles' not found in tyFlow editor.") - if invalid_object or invalid_operator: - raise PublishValidationError( - "issues occurred", - description="Container should only include tyFlow object " - "and tyflow operator 'Export Particle' should be in " - "the tyFlow editor.") - - def get_tyflow_object(self, instance): - """Get the nodes which are not tyFlow object(s) - and editable mesh(es) - - Args: - instance (pyblish.api.Instance): instance - - Returns: - list: invalid nodes which are not tyFlow - object(s) and editable mesh(es). 
- """ - container = instance.data["instance_node"] - self.log.debug(f"Validating tyFlow container for {container}") - - allowed_classes = [rt.tyFlow, rt.Editable_Mesh] - return [ - member for member in instance.data["members"] - if rt.ClassOf(member) not in allowed_classes - ] - - def get_tyflow_operator(self, instance): - """Check if the Export Particle Operators in the node - connections. - - Args: - instance (str): instance node - - Returns: - invalid(list): list of invalid nodes which do - not consist of Export Particle Operators as parts - of the node connections - """ - invalid = [] - members = instance.data["members"] - for member in members: - obj = member.baseobject - - # There must be at least one animation with export - # particles enabled - has_export_particles = False - anim_names = rt.GetSubAnimNames(obj) - for anim_name in anim_names: - # get name of the related tyFlow node - sub_anim = rt.GetSubAnim(obj, anim_name) - # check if there is export particle operator - if rt.IsProperty(sub_anim, "Export_Particles"): - has_export_particles = True - break - - if not has_export_particles: - invalid.append(member) - return invalid diff --git a/server_addon/max/client/ayon_max/startup/startup.ms b/server_addon/max/client/ayon_max/startup/startup.ms deleted file mode 100644 index c5b4f0e526..0000000000 --- a/server_addon/max/client/ayon_max/startup/startup.ms +++ /dev/null @@ -1,15 +0,0 @@ --- AYON Init Script -( - local sysPath = dotNetClass "System.IO.Path" - local sysDir = dotNetClass "System.IO.Directory" - local localScript = getThisScriptFilename() - local startup = sysPath.Combine (sysPath.GetDirectoryName localScript) "startup.py" - - local pythonpath = systemTools.getEnvVariable "MAX_PYTHONPATH" - systemTools.setEnvVariable "PYTHONPATH" pythonpath - - /*opens the create menu on startup to ensure users are presented with a useful default view.*/ - max create mode - - python.ExecuteFile startup -) diff --git a/server_addon/max/client/ayon_max/startup/startup.py b/server_addon/max/client/ayon_max/startup/startup.py deleted file mode 100644 index 1462cc93b7..0000000000 --- a/server_addon/max/client/ayon_max/startup/startup.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import sys -from ayon_max.api import MaxHost -from ayon_core.pipeline import install_host -# this might happen in some 3dsmax version where PYTHONPATH isn't added -# to sys.path automatically -for path in os.environ["PYTHONPATH"].split(os.pathsep): - if path and path not in sys.path: - sys.path.append(path) - -host = MaxHost() -install_host(host) diff --git a/server_addon/max/client/ayon_max/version.py b/server_addon/max/client/ayon_max/version.py deleted file mode 100644 index acb68bbdfc..0000000000 --- a/server_addon/max/client/ayon_max/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'max' version.""" -__version__ = "0.2.1" diff --git a/server_addon/max/package.py b/server_addon/max/package.py deleted file mode 100644 index 09e86f8d50..0000000000 --- a/server_addon/max/package.py +++ /dev/null @@ -1,9 +0,0 @@ -name = "max" -title = "Max" -version = "0.2.1" -client_dir = "ayon_max" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} diff --git a/server_addon/max/server/__init__.py b/server_addon/max/server/__init__.py deleted file mode 100644 index d03b29d249..0000000000 --- a/server_addon/max/server/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Type - -from ayon_server.addons import 
BaseServerAddon - -from .settings import MaxSettings, DEFAULT_VALUES - - -class MaxAddon(BaseServerAddon): - settings_model: Type[MaxSettings] = MaxSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_VALUES) diff --git a/server_addon/max/server/settings/__init__.py b/server_addon/max/server/settings/__init__.py deleted file mode 100644 index 986b1903a5..0000000000 --- a/server_addon/max/server/settings/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .main import ( - MaxSettings, - DEFAULT_VALUES, -) - - -__all__ = ( - "MaxSettings", - "DEFAULT_VALUES", -) diff --git a/server_addon/max/server/settings/create_review_settings.py b/server_addon/max/server/settings/create_review_settings.py deleted file mode 100644 index 807976a391..0000000000 --- a/server_addon/max/server/settings/create_review_settings.py +++ /dev/null @@ -1,91 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -def image_format_enum(): - """Return enumerator for image output formats.""" - return [ - {"label": "exr", "value": "exr"}, - {"label": "jpg", "value": "jpg"}, - {"label": "png", "value": "png"}, - {"label": "tga", "value": "tga"} - ] - - -def visual_style_enum(): - """Return enumerator for viewport visual style.""" - return [ - {"label": "Realistic", "value": "Realistic"}, - {"label": "Shaded", "value": "Shaded"}, - {"label": "Facets", "value": "Facets"}, - {"label": "ConsistentColors", - "value": "ConsistentColors"}, - {"label": "Wireframe", "value": "Wireframe"}, - {"label": "BoundingBox", "value": "BoundingBox"}, - {"label": "Ink", "value": "Ink"}, - {"label": "ColorInk", "value": "ColorInk"}, - {"label": "Acrylic", "value": "Acrylic"}, - {"label": "Tech", "value": "Tech"}, - {"label": "Graphite", "value": "Graphite"}, - {"label": "ColorPencil", "value": "ColorPencil"}, - {"label": "Pastel", "value": "Pastel"}, - {"label": "Clay", "value": "Clay"}, - {"label": "ModelAssist", "value": "ModelAssist"} - ] - - -def preview_preset_enum(): - """Return enumerator for viewport visual preset.""" - return [ - {"label": "Quality", "value": "Quality"}, - {"label": "Standard", "value": "Standard"}, - {"label": "Performance", "value": "Performance"}, - {"label": "DXMode", "value": "DXMode"}, - {"label": "Customize", "value": "Customize"}, - ] - - -def anti_aliasing_enum(): - """Return enumerator for viewport anti-aliasing.""" - return [ - {"label": "None", "value": "None"}, - {"label": "2X", "value": "2X"}, - {"label": "4X", "value": "4X"}, - {"label": "8X", "value": "8X"} - ] - - -class CreateReviewModel(BaseSettingsModel): - review_width: int = SettingsField(1920, title="Review Width") - review_height: int = SettingsField(1080, title="Review Height") - percentSize: float = SettingsField(100.0, title="Percent of Output") - keep_images: bool = SettingsField(False, title="Keep Image Sequences") - image_format: str = SettingsField( - enum_resolver=image_format_enum, - title="Image Format Options" - ) - visual_style: str = SettingsField( - enum_resolver=visual_style_enum, - title="Preference" - ) - viewport_preset: str = SettingsField( - enum_resolver=preview_preset_enum, - title="Preview Preset" - ) - anti_aliasing: str = SettingsField( - enum_resolver=anti_aliasing_enum, - title="Anti-aliasing Quality" - ) - vp_texture: bool = SettingsField(True, title="Viewport Texture") - - -DEFAULT_CREATE_REVIEW_SETTINGS = { - "review_width": 1920, - "review_height": 1080, - "percentSize": 100.0, - "keep_images": False, - 
"image_format": "png", - "visual_style": "Realistic", - "viewport_preset": "Quality", - "anti_aliasing": "None", - "vp_texture": True -} diff --git a/server_addon/max/server/settings/imageio.py b/server_addon/max/server/settings/imageio.py deleted file mode 100644 index 9c6f1b6409..0000000000 --- a/server_addon/max/server/settings/imageio.py +++ /dev/null @@ -1,63 +0,0 @@ -from pydantic import validator -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.settings.validators import ensure_unique_names - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class ImageIOSettings(BaseSettingsModel): - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) diff --git a/server_addon/max/server/settings/main.py b/server_addon/max/server/settings/main.py deleted file mode 100644 index 7b0bfc6421..0000000000 --- a/server_addon/max/server/settings/main.py +++ /dev/null @@ -1,94 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField -from .imageio import ImageIOSettings -from .render_settings import ( - RenderSettingsModel, DEFAULT_RENDER_SETTINGS -) -from .create_review_settings import ( - CreateReviewModel, DEFAULT_CREATE_REVIEW_SETTINGS -) -from .publishers import ( - PublishersModel, DEFAULT_PUBLISH_SETTINGS -) - - -def unit_scale_enum(): - """Return enumerator for scene unit scale.""" - return [ - {"label": "mm", "value": "Millimeters"}, - {"label": "cm", "value": "Centimeters"}, - {"label": "m", "value": "Meters"}, - {"label": "km", "value": "Kilometers"} - ] - - -class UnitScaleSettings(BaseSettingsModel): - enabled: bool = SettingsField(True, title="Enabled") - scene_unit_scale: str = SettingsField( - "Centimeters", - title="Scene Unit Scale", - enum_resolver=unit_scale_enum - ) - - -class PRTAttributesModel(BaseSettingsModel): - _layout = "compact" - name: str = SettingsField(title="Name") - value: str = SettingsField(title="Attribute") - - -class 
PointCloudSettings(BaseSettingsModel): - attribute: list[PRTAttributesModel] = SettingsField( - default_factory=list, title="Channel Attribute") - - -class MaxSettings(BaseSettingsModel): - unit_scale_settings: UnitScaleSettings = SettingsField( - default_factory=UnitScaleSettings, - title="Set Unit Scale" - ) - imageio: ImageIOSettings = SettingsField( - default_factory=ImageIOSettings, - title="Color Management (ImageIO)" - ) - RenderSettings: RenderSettingsModel = SettingsField( - default_factory=RenderSettingsModel, - title="Render Settings" - ) - CreateReview: CreateReviewModel = SettingsField( - default_factory=CreateReviewModel, - title="Create Review" - ) - PointCloud: PointCloudSettings = SettingsField( - default_factory=PointCloudSettings, - title="Point Cloud" - ) - publish: PublishersModel = SettingsField( - default_factory=PublishersModel, - title="Publish Plugins") - - -DEFAULT_VALUES = { - "unit_scale_settings": { - "enabled": True, - "scene_unit_scale": "Centimeters" - }, - "RenderSettings": DEFAULT_RENDER_SETTINGS, - "CreateReview": DEFAULT_CREATE_REVIEW_SETTINGS, - "PointCloud": { - "attribute": [ - {"name": "Age", "value": "age"}, - {"name": "Radius", "value": "radius"}, - {"name": "Position", "value": "position"}, - {"name": "Rotation", "value": "rotation"}, - {"name": "Scale", "value": "scale"}, - {"name": "Velocity", "value": "velocity"}, - {"name": "Color", "value": "color"}, - {"name": "TextureCoordinate", "value": "texcoord"}, - {"name": "MaterialID", "value": "matid"}, - {"name": "custFloats", "value": "custFloats"}, - {"name": "custVecs", "value": "custVecs"}, - ] - }, - "publish": DEFAULT_PUBLISH_SETTINGS - -} diff --git a/server_addon/max/server/settings/publishers.py b/server_addon/max/server/settings/publishers.py deleted file mode 100644 index 5e1b348d92..0000000000 --- a/server_addon/max/server/settings/publishers.py +++ /dev/null @@ -1,222 +0,0 @@ -import json -from pydantic import validator - -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.exceptions import BadRequestException - - -class ValidateAttributesModel(BaseSettingsModel): - enabled: bool = SettingsField(title="ValidateAttributes") - attributes: str = SettingsField( - "{}", title="Attributes", widget="textarea") - - @validator("attributes") - def validate_json(cls, value): - if not value.strip(): - return "{}" - try: - converted_value = json.loads(value) - success = isinstance(converted_value, dict) - except json.JSONDecodeError: - success = False - - if not success: - raise BadRequestException( - "The attibutes can't be parsed as json object" - ) - return value - - -class ValidateCameraAttributesModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - fov: float = SettingsField(0.0, title="Focal Length") - nearrange: float = SettingsField(0.0, title="Near Range") - farrange: float = SettingsField(0.0, title="Far Range") - nearclip: float = SettingsField(0.0, title="Near Clip") - farclip: float = SettingsField(0.0, title="Far Clip") - - -class FamilyMappingItemModel(BaseSettingsModel): - families: list[str] = SettingsField( - default_factory=list, - title="Families" - ) - plugins: list[str] = SettingsField( - default_factory=list, - title="Plugins" - ) - - -class ValidateModelNameModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = 
SettingsField(title="Active") - regex: str = SettingsField( - "(.*)_(?P<asset>.*)_(GEO)", - title="Validation regex", - description=( - "Regex for validating model name. You can use named " - " capturing groups:(?P<asset>.*) for Asset name" - ) - ) - - -class ValidateLoadedPluginModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - family_plugins_mapping: list[FamilyMappingItemModel] = SettingsField( - default_factory=list, - title="Family Plugins Mapping" - ) - - -class BasicValidateModel(BaseSettingsModel): - enabled: bool = SettingsField(title="Enabled") - optional: bool = SettingsField(title="Optional") - active: bool = SettingsField(title="Active") - - -class PublishersModel(BaseSettingsModel): - ValidateInstanceInContext: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Instance In Context", - section="Validators" - ) - ValidateFrameRange: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Frame Range" - ) - ValidateAttributes: ValidateAttributesModel = SettingsField( - default_factory=ValidateAttributesModel, - title="Validate Attributes" - ) - ValidateCameraAttributes: ValidateCameraAttributesModel = SettingsField( - default_factory=ValidateCameraAttributesModel, - title="Validate Camera Attributes", - description=( - "If the value of the camera attributes set to 0, " - "the system automatically skips checking it" - ) - ) - ValidateNoAnimation: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate No Animation" - ) - ValidateLoadedPlugin: ValidateLoadedPluginModel = SettingsField( - default_factory=ValidateLoadedPluginModel, - title="Validate Loaded Plugin" - ) - ValidateMeshHasUVs: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh Has UVs" - ) - ValidateModelName: ValidateModelNameModel = SettingsField( - default_factory=ValidateModelNameModel, - title="Validate Model Name" - ) - ValidateRenderPasses: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Render Passes" - ) - ExtractModelObj: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Extract OBJ", - section="Extractors" - ) - ExtractModelFbx: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Extract FBX" - ) - ExtractModelUSD: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Extract Geometry (USD)" - ) - ExtractModel: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Extract Geometry (Alembic)" - ) - ExtractMaxSceneRaw: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Extract Max Scene (Raw)" - ) - - -DEFAULT_PUBLISH_SETTINGS = { - "ValidateInstanceInContext": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateFrameRange": { - "enabled": True, - "optional": True, - "active": True - }, - "ValidateAttributes": { - "enabled": False, - "attributes": "{}" - }, - "ValidateCameraAttributes": { - "enabled": True, - "optional": True, - "active": False, - "fov": 45.0, - "nearrange": 0.0, - "farrange": 1000.0, - "nearclip": 1.0, - "farclip": 1000.0 - }, - "ValidateModelName": { - "enabled": True, - "optional": True, - "active": False, - "regex": "(.*)_(?P<asset>.*)_(GEO)" - }, - "ValidateLoadedPlugin": { - "enabled": False, - "optional": True, - "family_plugins_mapping": [] - }, - 
"ValidateMeshHasUVs": { - "enabled": True, - "optional": True, - "active": False - }, - "ValidateNoAnimation": { - "enabled": True, - "optional": True, - "active": False, - }, - "ValidateRenderPasses": { - "enabled": True, - "optional": False, - "active": True - }, - "ExtractModelObj": { - "enabled": True, - "optional": True, - "active": False - }, - "ExtractModelFbx": { - "enabled": True, - "optional": True, - "active": False - }, - "ExtractModelUSD": { - "enabled": True, - "optional": True, - "active": False - }, - "ExtractModel": { - "enabled": True, - "optional": True, - "active": True - }, - "ExtractMaxSceneRaw": { - "enabled": True, - "optional": True, - "active": True - } -} diff --git a/server_addon/max/server/settings/render_settings.py b/server_addon/max/server/settings/render_settings.py deleted file mode 100644 index 19d36dd0f8..0000000000 --- a/server_addon/max/server/settings/render_settings.py +++ /dev/null @@ -1,47 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -def aov_separators_enum(): - return [ - {"value": "dash", "label": "- (dash)"}, - {"value": "underscore", "label": "_ (underscore)"}, - {"value": "dot", "label": ". (dot)"} - ] - - -def image_format_enum(): - """Return enumerator for image output formats.""" - return [ - {"label": "bmp", "value": "bmp"}, - {"label": "exr", "value": "exr"}, - {"label": "tif", "value": "tif"}, - {"label": "tiff", "value": "tiff"}, - {"label": "jpg", "value": "jpg"}, - {"label": "png", "value": "png"}, - {"label": "tga", "value": "tga"}, - {"label": "dds", "value": "dds"} - ] - - -class RenderSettingsModel(BaseSettingsModel): - default_render_image_folder: str = SettingsField( - title="Default render image folder" - ) - aov_separator: str = SettingsField( - "underscore", - title="AOV Separator character", - enum_resolver=aov_separators_enum - ) - image_format: str = SettingsField( - enum_resolver=image_format_enum, - title="Output Image Format" - ) - multipass: bool = SettingsField(title="multipass") - - -DEFAULT_RENDER_SETTINGS = { - "default_render_image_folder": "renders/3dsmax", - "aov_separator": "underscore", - "image_format": "exr", - "multipass": True -}