diff --git a/.github/workflows/milestone_assign.yml b/.github/workflows/milestone_assign.yml index c5a231e59e..4b52dfc30d 100644 --- a/.github/workflows/milestone_assign.yml +++ b/.github/workflows/milestone_assign.yml @@ -2,7 +2,7 @@ name: Milestone - assign to PRs on: pull_request_target: - types: [opened, reopened, edited, synchronize] + types: [closed] jobs: run_if_release: diff --git a/CHANGELOG.md b/CHANGELOG.md index 707b61676f..0c5f2cf8b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,66 @@ # Changelog -## [3.14.6](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...HEAD) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.6...3.14.7) + +**🆕 New features** + +- Hiero: loading effect family to timeline [\#4055](https://github.com/pypeclub/OpenPype/pull/4055) + +**🚀 Enhancements** + +- Photoshop: bug with pop-up window on Instance Creator [\#4121](https://github.com/pypeclub/OpenPype/pull/4121) +- Publisher: Open on specific tab [\#4120](https://github.com/pypeclub/OpenPype/pull/4120) +- Publisher: Hide unknown publish values [\#4116](https://github.com/pypeclub/OpenPype/pull/4116) +- Ftrack: Event server status give more information about version locations [\#4112](https://github.com/pypeclub/OpenPype/pull/4112) +- General: Allow higher numbers in frames and clips [\#4101](https://github.com/pypeclub/OpenPype/pull/4101) +- Publisher: Settings for validate frame range [\#4097](https://github.com/pypeclub/OpenPype/pull/4097) +- Publisher: Ignore escape button [\#4090](https://github.com/pypeclub/OpenPype/pull/4090) +- Flame: Loading clip with native colorspace resolved from mapping [\#4079](https://github.com/pypeclub/OpenPype/pull/4079) +- General: Extract review single frame output [\#4064](https://github.com/pypeclub/OpenPype/pull/4064) +- Publisher: Prepared common function for instance data cache [\#4063](https://github.com/pypeclub/OpenPype/pull/4063) +- Publisher: Easy access to publish page from create page [\#4058](https://github.com/pypeclub/OpenPype/pull/4058) +- General/TVPaint: Attribute defs dialog [\#4052](https://github.com/pypeclub/OpenPype/pull/4052) +- Publisher: Better reset defer [\#4048](https://github.com/pypeclub/OpenPype/pull/4048) +- Publisher: Add thumbnail sources [\#4042](https://github.com/pypeclub/OpenPype/pull/4042) + +**🐛 Bug fixes** + +- General: Move default settings for template name [\#4119](https://github.com/pypeclub/OpenPype/pull/4119) +- Slack: notification fail in new tray publisher [\#4118](https://github.com/pypeclub/OpenPype/pull/4118) +- Nuke: loaded nodes set to first tab [\#4114](https://github.com/pypeclub/OpenPype/pull/4114) +- Nuke: load image first frame [\#4113](https://github.com/pypeclub/OpenPype/pull/4113) +- Files Widget: Ignore case sensitivity of extensions [\#4096](https://github.com/pypeclub/OpenPype/pull/4096) +- Webpublisher: extension is lowercased in Setting and in uploaded files [\#4095](https://github.com/pypeclub/OpenPype/pull/4095) +- Publish Report Viewer: Fix small bugs [\#4086](https://github.com/pypeclub/OpenPype/pull/4086) +- Igniter: fix regex to match semver better [\#4085](https://github.com/pypeclub/OpenPype/pull/4085) +- Maya: aov filtering [\#4083](https://github.com/pypeclub/OpenPype/pull/4083) +- Flame/Flare: Loading to multiple batches [\#4080](https://github.com/pypeclub/OpenPype/pull/4080) +- hiero: creator from settings with set maximum 
[\#4077](https://github.com/pypeclub/OpenPype/pull/4077) +- Nuke: resolve hashes in file name only for frame token [\#4074](https://github.com/pypeclub/OpenPype/pull/4074) +- Publisher: Fix cache of asset docs [\#4070](https://github.com/pypeclub/OpenPype/pull/4070) +- Webpublisher: cleanup wp extract thumbnail [\#4067](https://github.com/pypeclub/OpenPype/pull/4067) +- Settings UI: Locked setting can't bypass lock [\#4066](https://github.com/pypeclub/OpenPype/pull/4066) +- Loader: Fix comparison of repre name [\#4053](https://github.com/pypeclub/OpenPype/pull/4053) +- Deadline: Extract environment subprocess failure [\#4050](https://github.com/pypeclub/OpenPype/pull/4050) + +**🔀 Refactored code** + +- General: Collect entities plugin minor changes [\#4089](https://github.com/pypeclub/OpenPype/pull/4089) +- General: Direct interfaces import [\#4065](https://github.com/pypeclub/OpenPype/pull/4065) + +**Merged pull requests:** + +- Bump loader-utils from 1.4.1 to 1.4.2 in /website [\#4100](https://github.com/pypeclub/OpenPype/pull/4100) +- Online family for Tray Publisher [\#4093](https://github.com/pypeclub/OpenPype/pull/4093) +- Bump loader-utils from 1.4.0 to 1.4.1 in /website [\#4081](https://github.com/pypeclub/OpenPype/pull/4081) +- remove underscore from subset name [\#4059](https://github.com/pypeclub/OpenPype/pull/4059) +- Alembic Loader as Arnold Standin [\#4047](https://github.com/pypeclub/OpenPype/pull/4047) + +## [3.14.6](https://github.com/pypeclub/OpenPype/tree/3.14.6) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...3.14.6) ### 📖 Documentation diff --git a/HISTORY.md b/HISTORY.md index f6cc74e114..04a1073c07 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,99 @@ # Changelog + +## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.6...3.14.7) + +**🆕 New features** + +- Hiero: loading effect family to timeline [\#4055](https://github.com/pypeclub/OpenPype/pull/4055) + +**🚀 Enhancements** + +- Photoshop: bug with pop-up window on Instance Creator [\#4121](https://github.com/pypeclub/OpenPype/pull/4121) +- Publisher: Open on specific tab [\#4120](https://github.com/pypeclub/OpenPype/pull/4120) +- Publisher: Hide unknown publish values [\#4116](https://github.com/pypeclub/OpenPype/pull/4116) +- Ftrack: Event server status give more information about version locations [\#4112](https://github.com/pypeclub/OpenPype/pull/4112) +- General: Allow higher numbers in frames and clips [\#4101](https://github.com/pypeclub/OpenPype/pull/4101) +- Publisher: Settings for validate frame range [\#4097](https://github.com/pypeclub/OpenPype/pull/4097) +- Publisher: Ignore escape button [\#4090](https://github.com/pypeclub/OpenPype/pull/4090) +- Flame: Loading clip with native colorspace resolved from mapping [\#4079](https://github.com/pypeclub/OpenPype/pull/4079) +- General: Extract review single frame output [\#4064](https://github.com/pypeclub/OpenPype/pull/4064) +- Publisher: Prepared common function for instance data cache [\#4063](https://github.com/pypeclub/OpenPype/pull/4063) +- Publisher: Easy access to publish page from create page [\#4058](https://github.com/pypeclub/OpenPype/pull/4058) +- General/TVPaint: Attribute defs dialog [\#4052](https://github.com/pypeclub/OpenPype/pull/4052) +- Publisher: Better reset defer [\#4048](https://github.com/pypeclub/OpenPype/pull/4048) +- Publisher: Add thumbnail sources [\#4042](https://github.com/pypeclub/OpenPype/pull/4042) + +**🐛 Bug fixes** + 
+- General: Move default settings for template name [\#4119](https://github.com/pypeclub/OpenPype/pull/4119) +- Slack: notification fail in new tray publisher [\#4118](https://github.com/pypeclub/OpenPype/pull/4118) +- Nuke: loaded nodes set to first tab [\#4114](https://github.com/pypeclub/OpenPype/pull/4114) +- Nuke: load image first frame [\#4113](https://github.com/pypeclub/OpenPype/pull/4113) +- Files Widget: Ignore case sensitivity of extensions [\#4096](https://github.com/pypeclub/OpenPype/pull/4096) +- Webpublisher: extension is lowercased in Setting and in uploaded files [\#4095](https://github.com/pypeclub/OpenPype/pull/4095) +- Publish Report Viewer: Fix small bugs [\#4086](https://github.com/pypeclub/OpenPype/pull/4086) +- Igniter: fix regex to match semver better [\#4085](https://github.com/pypeclub/OpenPype/pull/4085) +- Maya: aov filtering [\#4083](https://github.com/pypeclub/OpenPype/pull/4083) +- Flame/Flare: Loading to multiple batches [\#4080](https://github.com/pypeclub/OpenPype/pull/4080) +- hiero: creator from settings with set maximum [\#4077](https://github.com/pypeclub/OpenPype/pull/4077) +- Nuke: resolve hashes in file name only for frame token [\#4074](https://github.com/pypeclub/OpenPype/pull/4074) +- Publisher: Fix cache of asset docs [\#4070](https://github.com/pypeclub/OpenPype/pull/4070) +- Webpublisher: cleanup wp extract thumbnail [\#4067](https://github.com/pypeclub/OpenPype/pull/4067) +- Settings UI: Locked setting can't bypass lock [\#4066](https://github.com/pypeclub/OpenPype/pull/4066) +- Loader: Fix comparison of repre name [\#4053](https://github.com/pypeclub/OpenPype/pull/4053) +- Deadline: Extract environment subprocess failure [\#4050](https://github.com/pypeclub/OpenPype/pull/4050) + +**🔀 Refactored code** + +- General: Collect entities plugin minor changes [\#4089](https://github.com/pypeclub/OpenPype/pull/4089) +- General: Direct interfaces import [\#4065](https://github.com/pypeclub/OpenPype/pull/4065) + +**Merged pull requests:** + +- Bump loader-utils from 1.4.1 to 1.4.2 in /website [\#4100](https://github.com/pypeclub/OpenPype/pull/4100) +- Online family for Tray Publisher [\#4093](https://github.com/pypeclub/OpenPype/pull/4093) +- Bump loader-utils from 1.4.0 to 1.4.1 in /website [\#4081](https://github.com/pypeclub/OpenPype/pull/4081) +- remove underscore from subset name [\#4059](https://github.com/pypeclub/OpenPype/pull/4059) +- Alembic Loader as Arnold Standin [\#4047](https://github.com/pypeclub/OpenPype/pull/4047) + +## [3.14.6](https://github.com/pypeclub/OpenPype/tree/3.14.6) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...3.14.6) + +### 📖 Documentation + +- Documentation: Minor updates to dev\_requirements.md [\#4025](https://github.com/pypeclub/OpenPype/pull/4025) + +**🆕 New features** + +- Nuke: add 13.2 variant [\#4041](https://github.com/pypeclub/OpenPype/pull/4041) + +**🚀 Enhancements** + +- Publish Report Viewer: Store reports locally on machine [\#4040](https://github.com/pypeclub/OpenPype/pull/4040) +- General: More specific error in burnins script [\#4026](https://github.com/pypeclub/OpenPype/pull/4026) +- General: Extract review does not crash with old settings overrides [\#4023](https://github.com/pypeclub/OpenPype/pull/4023) +- Publisher: Convertors for legacy instances [\#4020](https://github.com/pypeclub/OpenPype/pull/4020) +- workflows: adding milestone creator and assigner [\#4018](https://github.com/pypeclub/OpenPype/pull/4018) +- Publisher: Catch creator errors 
[\#4015](https://github.com/pypeclub/OpenPype/pull/4015) + +**🐛 Bug fixes** + +- Hiero - effect collection fixes [\#4038](https://github.com/pypeclub/OpenPype/pull/4038) +- Nuke - loader clip correct hash conversion in path [\#4037](https://github.com/pypeclub/OpenPype/pull/4037) +- Maya: Soft fail when applying capture preset [\#4034](https://github.com/pypeclub/OpenPype/pull/4034) +- Igniter: handle missing directory [\#4032](https://github.com/pypeclub/OpenPype/pull/4032) +- StandalonePublisher: Fix thumbnail publishing [\#4029](https://github.com/pypeclub/OpenPype/pull/4029) +- Experimental Tools: Fix publisher import [\#4027](https://github.com/pypeclub/OpenPype/pull/4027) +- Houdini: fix wrong path in ASS loader [\#4016](https://github.com/pypeclub/OpenPype/pull/4016) + +**🔀 Refactored code** + +- General: Import lib functions from lib [\#4017](https://github.com/pypeclub/OpenPype/pull/4017) + ## [3.14.5](https://github.com/pypeclub/OpenPype/tree/3.14.5) (2022-10-24) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.4...3.14.5) diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index addcbed24c..077f56d769 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -63,7 +63,8 @@ class OpenPypeVersion(semver.VersionInfo): """ staging = False path = None - _VERSION_REGEX = re.compile(r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?") # noqa: E501 + # this should match any string complying with https://semver.org/ + _VERSION_REGEX = re.compile(r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?") # noqa: E501 _installed_version = None def __init__(self, *args, **kwargs): @@ -211,6 +212,8 @@ class OpenPypeVersion(semver.VersionInfo): OpenPypeVersion: of detected or None. """ + # strip .zip ext if present + string = re.sub(r"\.zip$", "", string, flags=re.IGNORECASE) m = re.search(OpenPypeVersion._VERSION_REGEX, string) if not m: return None diff --git a/openpype/action.py b/openpype/action.py index de9cdee010..15c96404b6 100644 --- a/openpype/action.py +++ b/openpype/action.py @@ -72,17 +72,19 @@ def get_errored_plugins_from_data(context): return get_errored_plugins_from_context(context) -# 'RepairAction' and 'RepairContextAction' were moved to -# 'openpype.pipeline.publish' please change you imports. -# There is no "reasonable" way hot mark these classes as deprecated to show -# warning of wrong import. -# Deprecated since 3.14.* will be removed in 3.16.* class RepairAction(pyblish.api.Action): """Repairs the action To process the repairing this requires a static `repair(instance)` method is available on the plugin. + Deprecated: + 'RepairAction' and 'RepairContextAction' were moved to + 'openpype.pipeline.publish' please change your imports. + There is no "reasonable" way to mark these classes as deprecated + to show warning of wrong import. Deprecated since 3.14.* will be + removed in 3.16.* + """ label = "Repair" on = "failed" # This action is only available on a failed plug-in @@ -103,13 +105,19 @@ class RepairAction(pyblish.api.Action): plugin.repair(instance) -# Deprecated since 3.14.* will be removed in 3.16.* class RepairContextAction(pyblish.api.Action): """Repairs the action To process the repairing this requires a static `repair(instance)` method is available on the plugin.
+ Deprecated: + 'RepairAction' and 'RepairContextAction' were moved to + 'openpype.pipeline.publish' please change your imports. + There is no "reasonable" way to mark these classes as deprecated + to show warning of wrong import. Deprecated since 3.14.* will be + removed in 3.16.* + """ label = "Repair" on = "failed" # This action is only available on a failed plug-in diff --git a/openpype/client/entities.py b/openpype/client/entities.py index 43afccf2f1..c415be8816 100644 --- a/openpype/client/entities.py +++ b/openpype/client/entities.py @@ -389,10 +389,11 @@ def get_subset_by_name(project_name, subset_name, asset_id, fields=None): returned if 'None' is passed. Returns: - None: If subset with specified filters was not found. - Dict: Subset document which can be reduced to specified 'fields'. - """ + Union[None, Dict[str, Any]]: None if subset with specified filters was + not found or dict subset document which can be reduced to + specified 'fields'. + """ if not subset_name: return None diff --git a/openpype/hooks/pre_copy_last_published_workfile.py b/openpype/hooks/pre_copy_last_published_workfile.py new file mode 100644 index 0000000000..26b43c39cb --- /dev/null +++ b/openpype/hooks/pre_copy_last_published_workfile.py @@ -0,0 +1,177 @@ +import os +import shutil +from time import sleep +from openpype.client.entities import ( + get_last_version_by_subset_id, + get_representations, + get_subsets, +) +from openpype.lib import PreLaunchHook +from openpype.lib.local_settings import get_local_site_id +from openpype.lib.profiles_filtering import filter_profiles +from openpype.pipeline.load.utils import get_representation_path +from openpype.settings.lib import get_project_settings + + +class CopyLastPublishedWorkfile(PreLaunchHook): + """Copy last published workfile as first workfile. + + Prelaunch hook runs only if the last workfile path points to a file that does not exist yet. + - That is possible only if it is the first version. + """ + + # Before `AddLastWorkfileToLaunchArgs` + order = -1 + app_groups = ["blender", "photoshop", "tvpaint", "aftereffects"] + + def execute(self): + """Copy the last published workfile if no local workfile exists yet. + + 1- Check if setting for this feature is enabled + 2- Check if workfile in work area doesn't exist + 3- Check if published workfile exists and is copied locally in publish + 4- Substitute copied published workfile as first workfile + + Returns: + None: This is a void method. + """ + + sync_server = self.modules_manager.get("sync_server") + if not sync_server or not sync_server.enabled: + self.log.debug("Sync server module is not enabled or available") + return + + # Check there is no workfile available + last_workfile = self.data.get("last_workfile_path") + if os.path.exists(last_workfile): + self.log.debug( + "Last workfile exists.
Skipping {} process.".format( + self.__class__.__name__ + ) + ) + return + + # Get data + project_name = self.data["project_name"] + task_name = self.data["task_name"] + task_type = self.data["task_type"] + host_name = self.application.host_name + + # Check if the feature is enabled in settings + project_settings = get_project_settings(project_name) + profiles = project_settings["global"]["tools"]["Workfiles"][ + "last_workfile_on_startup" + ] + filter_data = { + "tasks": task_name, + "task_types": task_type, + "hosts": host_name, + } + last_workfile_settings = filter_profiles(profiles, filter_data) + use_last_published_workfile = last_workfile_settings.get( + "use_last_published_workfile" + ) + if use_last_published_workfile is None: + self.log.info( + ( + "Seems like old version of settings is used." + ' Can\'t access "use_last_published_workfile" in host "{}".'.format( + host_name + ) + ) + ) + return + elif use_last_published_workfile is False: + self.log.info( + ( + 'Project "{}" has disabled using last published' + ' workfile as first workfile for host "{}"'.format( + project_name, host_name + ) + ) + ) + return + + self.log.info("Trying to fetch last published workfile...") + + project_doc = self.data.get("project_doc") + asset_doc = self.data.get("asset_doc") + anatomy = self.data.get("anatomy") + + # Check it can proceed + if not project_doc and not asset_doc: + return + + # Get subset id + subset_id = next( + ( + subset["_id"] + for subset in get_subsets( + project_name, + asset_ids=[asset_doc["_id"]], + fields=["_id", "data.family", "data.families"], + ) + if subset["data"].get("family") == "workfile" + # Legacy compatibility + or "workfile" in subset["data"].get("families", {}) + ), + None, + ) + if not subset_id: + self.log.debug( + 'No workfile subset for asset "{}".'.format(asset_doc["name"]) + ) + return + + # Get workfile representation + last_version_doc = get_last_version_by_subset_id( + project_name, subset_id, fields=["_id"] + ) + if not last_version_doc: + self.log.debug("Subset does not have any versions") + return + + workfile_representation = next( + ( + representation + for representation in get_representations( + project_name, version_ids=[last_version_doc["_id"]] + ) + if representation["context"]["task"]["name"] == task_name + ), + None, + ) + + if not workfile_representation: + self.log.debug( + 'No published workfile for task "{}" and host "{}".'.format( + task_name, host_name + ) + ) + return + + local_site_id = get_local_site_id() + sync_server.add_site( + project_name, + workfile_representation["_id"], + local_site_id, + force=True, + priority=99, + reset_timer=True, + ) + + while not sync_server.is_representation_on_site( + project_name, workfile_representation["_id"], local_site_id + ): + sleep(5) + + # Get paths + published_workfile_path = get_representation_path( + workfile_representation, root=anatomy.roots + ) + local_workfile_dir = os.path.dirname(last_workfile) + + # Copy file and substitute path + self.data["last_workfile_path"] = shutil.copy( + published_workfile_path, local_workfile_dir + ) diff --git a/openpype/host/interfaces.py b/openpype/host/interfaces.py index 3b2df745d1..999aefd254 100644 --- a/openpype/host/interfaces.py +++ b/openpype/host/interfaces.py @@ -252,7 +252,7 @@ class IWorkfileHost: Remove when all usages are replaced. """ - self.save_workfile() + self.save_workfile(dst_path) def open_file(self, filepath): """Deprecated variant of 'open_workfile'.
diff --git a/openpype/hosts/aftereffects/addon.py b/openpype/hosts/aftereffects/addon.py index 94843e7dc5..79df550312 100644 --- a/openpype/hosts/aftereffects/addon.py +++ b/openpype/hosts/aftereffects/addon.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon class AfterEffectsAddon(OpenPypeModule, IHostAddon): diff --git a/openpype/hosts/blender/addon.py b/openpype/hosts/blender/addon.py index 3ee638a5bb..f1da9b808c 100644 --- a/openpype/hosts/blender/addon.py +++ b/openpype/hosts/blender/addon.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon BLENDER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/flame/addon.py b/openpype/hosts/flame/addon.py index 5a34413bb0..d9359fc5bf 100644 --- a/openpype/hosts/flame/addon.py +++ b/openpype/hosts/flame/addon.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon HOST_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/flame/api/menu.py b/openpype/hosts/flame/api/menu.py index f72a352bba..319ed7afb6 100644 --- a/openpype/hosts/flame/api/menu.py +++ b/openpype/hosts/flame/api/menu.py @@ -225,7 +225,8 @@ class FlameMenuUniversal(_FlameMenuApp): menu['actions'].append({ "name": "Load...", - "execute": lambda x: self.tools_helper.show_loader() + "execute": lambda x: callback_selection( + x, self.tools_helper.show_loader) }) menu['actions'].append({ "name": "Manage...", diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index 092ce9d106..26129ebaa6 100644 --- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -4,13 +4,13 @@ import shutil from copy import deepcopy from xml.etree import ElementTree as ET +import qargparse from Qt import QtCore, QtWidgets -import qargparse from openpype import style -from openpype.settings import get_current_project_settings from openpype.lib import Logger from openpype.pipeline import LegacyCreator, LoaderPlugin +from openpype.settings import get_current_project_settings from . import constants from . import lib as flib @@ -690,6 +690,54 @@ class ClipLoader(LoaderPlugin): ) ] + _mapping = None + + def get_colorspace(self, context): + """Get colorspace name + + Look either to version data or representation data. + + Args: + context (dict): version context data + + Returns: + str: colorspace name or None + """ + version = context['version'] + version_data = version.get("data", {}) + colorspace = version_data.get( + "colorspace", None + ) + + if ( + not colorspace + or colorspace == "Unknown" + ): + colorspace = context["representation"]["data"].get( + "colorspace", None) + + return colorspace + + @classmethod + def get_native_colorspace(cls, input_colorspace): + """Return native colorspace name. 
+ + Args: + input_colorspace (str | None): colorspace name + + Returns: + str: native colorspace name defined in mapping or None + """ + if not cls._mapping: + settings = get_current_project_settings()["flame"] + mapping = settings["imageio"]["profilesMapping"]["inputs"] + cls._mapping = { + input["ocioName"]: input["flameName"] + for input in mapping + } + + return cls._mapping.get(input_colorspace) + class OpenClipSolver(flib.MediaInfoFile): create_new_clip = False diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index 0843dde76a..f8cb7b3e11 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -36,14 +36,15 @@ class LoadClip(opfapi.ClipLoader): version = context['version'] version_data = version.get("data", {}) version_name = version.get("name", None) - colorspace = version_data.get("colorspace", None) + colorspace = self.get_colorspace(context) + clip_name = StringTemplate(self.clip_name_template).format( context["representation"]["context"]) - # TODO: settings in imageio # convert colorspace with ocio to flame mapping # in imageio flame section - colorspace = colorspace + colorspace = self.get_native_colorspace(colorspace) + self.log.info("Loading with colorspace: `{}`".format(colorspace)) # create workfile path workfile_dir = os.environ["AVALON_WORKDIR"] diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py index 3b049b861b..048ac19431 100644 --- a/openpype/hosts/flame/plugins/load/load_clip_batch.py +++ b/openpype/hosts/flame/plugins/load/load_clip_batch.py @@ -1,3 +1,4 @@ +from copy import deepcopy import os import flame from pprint import pformat @@ -22,7 +23,7 @@ class LoadClipBatch(opfapi.ClipLoader): # settings reel_name = "OP_LoadedReel" - clip_name_template = "{asset}_{subset}<_{output}>" + clip_name_template = "{batch}_{asset}_{subset}<_{output}>" def load(self, context, name, namespace, options): @@ -34,19 +35,22 @@ class LoadClipBatch(opfapi.ClipLoader): version = context['version'] version_data = version.get("data", {}) version_name = version.get("name", None) - colorspace = version_data.get("colorspace", None) + colorspace = self.get_colorspace(context) # in case output is not in context replace key to representation if not context["representation"]["context"].get("output"): self.clip_name_template.replace("output", "representation") - clip_name = StringTemplate(self.clip_name_template).format( - context["representation"]["context"]) + formating_data = deepcopy(context["representation"]["context"]) + formating_data["batch"] = self.batch.name.get_value() + + clip_name = StringTemplate(self.clip_name_template).format( + formating_data) - # TODO: settings in imageio # convert colorspace with ocio to flame mapping # in imageio flame section - colorspace = colorspace + colorspace = self.get_native_colorspace(colorspace) + self.log.info("Loading with colorspace: `{}`".format(colorspace)) # create workfile path workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"] @@ -56,6 +60,7 @@ class LoadClipBatch(opfapi.ClipLoader): openclip_path = os.path.join( openclip_dir, clip_name + ".clip" ) + if not os.path.exists(openclip_dir): os.makedirs(openclip_dir) diff --git a/openpype/hosts/fusion/addon.py b/openpype/hosts/fusion/addon.py index 1913cc2e30..d1bd1566b7 100644 --- a/openpype/hosts/fusion/addon.py +++ b/openpype/hosts/fusion/addon.py @@ -1,6 +1,5 @@ import os -from 
openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/harmony/addon.py b/openpype/hosts/harmony/addon.py index 872a7490b5..efef40ab92 100644 --- a/openpype/hosts/harmony/addon.py +++ b/openpype/hosts/harmony/addon.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon HARMONY_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/hiero/addon.py b/openpype/hosts/hiero/addon.py index 3523e9aed7..f5bb94dbaa 100644 --- a/openpype/hosts/hiero/addon.py +++ b/openpype/hosts/hiero/addon.py @@ -1,7 +1,6 @@ import os import platform -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon HIERO_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/hiero/api/__init__.py b/openpype/hosts/hiero/api/__init__.py index 781f846bbe..1fa40c9f74 100644 --- a/openpype/hosts/hiero/api/__init__.py +++ b/openpype/hosts/hiero/api/__init__.py @@ -30,9 +30,15 @@ from .lib import ( get_timeline_selection, get_current_track, get_track_item_tags, + get_track_openpype_tag, + set_track_openpype_tag, + get_track_openpype_data, get_track_item_pype_tag, set_track_item_pype_tag, get_track_item_pype_data, + get_trackitem_openpype_tag, + set_trackitem_openpype_tag, + get_trackitem_openpype_data, set_publish_attribute, get_publish_attribute, imprint, @@ -85,9 +91,12 @@ __all__ = [ "get_timeline_selection", "get_current_track", "get_track_item_tags", - "get_track_item_pype_tag", - "set_track_item_pype_tag", - "get_track_item_pype_data", + "get_track_openpype_tag", + "set_track_openpype_tag", + "get_track_openpype_data", + "get_trackitem_openpype_tag", + "set_trackitem_openpype_tag", + "get_trackitem_openpype_data", "set_publish_attribute", "get_publish_attribute", "imprint", @@ -99,6 +108,10 @@ __all__ = [ "apply_colorspace_project", "apply_colorspace_clips", "get_sequence_pattern_and_padding", + # depricated + "get_track_item_pype_tag", + "set_track_item_pype_tag", + "get_track_item_pype_data", # plugins "CreatorWidget", diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index e5d35945af..7f0cf8149a 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -7,11 +7,15 @@ import os import re import sys import platform +import functools +import warnings +import json import ast +import secrets import shutil import hiero -from Qt import QtWidgets +from Qt import QtWidgets, QtCore, QtXml from openpype.client import get_project from openpype.settings import get_project_settings @@ -20,15 +24,51 @@ from openpype.pipeline.load import filter_containers from openpype.lib import Logger from . import tags -try: - from PySide.QtCore import QFile, QTextStream - from PySide.QtXml import QDomDocument -except ImportError: - from PySide2.QtCore import QFile, QTextStream - from PySide2.QtXml import QDomDocument -# from opentimelineio import opentime -# from pprint import pformat +class DeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. 
+ """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", DeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=DeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + log = Logger.get_logger(__name__) @@ -301,7 +341,124 @@ def get_track_item_tags(track_item): return returning_tag_data +def _get_tag_unique_hash(): + # sourcery skip: avoid-builtin-shadow + return secrets.token_hex(nbytes=4) + + +def set_track_openpype_tag(track, data=None): + """ + Set openpype track tag to input track object. + + Attributes: + track (hiero.core.VideoTrack): hiero object + + Returns: + hiero.core.Tag + """ + data = data or {} + + # basic Tag's attribute + tag_data = { + "editable": "0", + "note": "OpenPype data container", + "icon": "openpype_icon.png", + "metadata": dict(data.items()) + } + # get available pype tag if any + _tag = get_track_openpype_tag(track) + + if _tag: + # it not tag then create one + tag = tags.update_tag(_tag, tag_data) + else: + # if pype tag available then update with input data + tag = tags.create_tag( + "{}_{}".format( + self.pype_tag_name, + _get_tag_unique_hash() + ), + tag_data + ) + # add it to the input track item + track.addTag(tag) + + return tag + + +def get_track_openpype_tag(track): + """ + Get pype track item tag created by creator or loader plugin. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + # get all tags from track item + _tags = track.tags() + if not _tags: + return None + for tag in _tags: + # return only correct tag defined by global name + if self.pype_tag_name in tag.name(): + return tag + + +def get_track_openpype_data(track, container_name=None): + """ + Get track's openpype tag data. 
+ + Attributes: + track (hiero.core.VideoTrack): hiero object + + Returns: + dict: data found on pype tag + """ + return_data = {} + # get openpype data tag from track + tag = get_track_openpype_tag(track) + + if not tag: + return None + + # get tag metadata attribute + tag_data = deepcopy(dict(tag.metadata())) + + for obj_name, obj_data in tag_data.items(): + obj_name = obj_name.replace("tag.", "") + + if obj_name in ["applieswhole", "note", "label"]: + continue + return_data[obj_name] = json.loads(obj_data) + + return ( + return_data[container_name] + if container_name + else return_data + ) + + +@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_tag") def get_track_item_pype_tag(track_item): + # backward compatibility alias + return get_trackitem_openpype_tag(track_item) + + +@deprecated("openpype.hosts.hiero.api.lib.set_trackitem_openpype_tag") +def set_track_item_pype_tag(track_item, data=None): + # backward compatibility alias + return set_trackitem_openpype_tag(track_item, data) + + +@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_data") +def get_track_item_pype_data(track_item): + # backward compatibility alias + return get_trackitem_openpype_data(track_item) + + +def get_trackitem_openpype_tag(track_item): """ Get pype track item tag created by creator or loader plugin. @@ -317,16 +474,16 @@ def get_track_item_pype_tag(track_item): return None for tag in _tags: # return only correct tag defined by global name - if tag.name() == self.pype_tag_name: + if self.pype_tag_name in tag.name(): return tag -def set_track_item_pype_tag(track_item, data=None): +def set_trackitem_openpype_tag(track_item, data=None): """ - Set pype track item tag to input track_item. + Set openpype tag to input track item. Attributes: - trackItem (hiero.core.TrackItem): hiero object + track_item (hiero.core.TrackItem): hiero object Returns: hiero.core.Tag """ @@ -341,21 +498,26 @@ def set_track_item_pype_tag(track_item, data=None): "metadata": dict(data.items()) } # get available pype tag if any - _tag = get_track_item_pype_tag(track_item) - + _tag = get_trackitem_openpype_tag(track_item) if _tag: # it not tag then create one tag = tags.update_tag(_tag, tag_data) else: # if pype tag available then update with input data - tag = tags.create_tag(self.pype_tag_name, tag_data) + tag = tags.create_tag( + "{}_{}".format( + self.pype_tag_name, + _get_tag_unique_hash() + ), + tag_data + ) # add it to the input track item track_item.addTag(tag) return tag -def get_track_item_pype_data(track_item): +def get_trackitem_openpype_data(track_item): """ Get track item's pype tag data.
@@ -367,7 +529,7 @@ def get_track_item_pype_data(track_item): """ data = {} # get pype data tag from track item - tag = get_track_item_pype_tag(track_item) + tag = get_trackitem_openpype_tag(track_item) if not tag: return None @@ -420,7 +582,7 @@ def imprint(track_item, data=None): """ data = data or {} - tag = set_track_item_pype_tag(track_item, data) + tag = set_trackitem_openpype_tag(track_item, data) # add publish attribute set_publish_attribute(tag, True) @@ -832,22 +994,22 @@ def set_selected_track_items(track_items_list, sequence=None): def _read_doc_from_path(path): - # reading QDomDocument from HROX path - hrox_file = QFile(path) - if not hrox_file.open(QFile.ReadOnly): + # reading QtXml.QDomDocument from HROX path + hrox_file = QtCore.QFile(path) + if not hrox_file.open(QtCore.QFile.ReadOnly): raise RuntimeError("Failed to open file for reading") - doc = QDomDocument() + doc = QtXml.QDomDocument() doc.setContent(hrox_file) hrox_file.close() return doc def _write_doc_to_path(doc, path): - # write QDomDocument to path as HROX - hrox_file = QFile(path) - if not hrox_file.open(QFile.WriteOnly): + # write QtXml.QDomDocument to path as HROX + hrox_file = QtCore.QFile(path) + if not hrox_file.open(QtCore.QFile.WriteOnly): raise RuntimeError("Failed to open file for writing") - stream = QTextStream(hrox_file) + stream = QtCore.QTextStream(hrox_file) doc.save(stream, 1) hrox_file.close() @@ -1030,7 +1192,7 @@ def sync_clip_name_to_data_asset(track_items_list): # get name and data ti_name = track_item.name() - data = get_track_item_pype_data(track_item) + data = get_trackitem_openpype_data(track_item) # ignore if no data on the clip or not publish instance if not data: @@ -1042,10 +1204,10 @@ def sync_clip_name_to_data_asset(track_items_list): if data["asset"] != ti_name: data["asset"] = ti_name # remove the original tag - tag = get_track_item_pype_tag(track_item) + tag = get_trackitem_openpype_tag(track_item) track_item.removeTag(tag) # create new tag with updated data - set_track_item_pype_tag(track_item, data) + set_trackitem_openpype_tag(track_item, data) print("asset was changed in clip: {}".format(ti_name)) @@ -1083,10 +1245,10 @@ def check_inventory_versions(track_items=None): project_name = legacy_io.active_project() filter_result = filter_containers(containers, project_name) for container in filter_result.latest: - set_track_color(container["_track_item"], clip_color) + set_track_color(container["_item"], clip_color) for container in filter_result.outdated: - set_track_color(container["_track_item"], clip_color_last) + set_track_color(container["_item"], clip_color_last) def selection_changed_timeline(event): diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index ea61dc4785..4ab73e7d19 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -1,6 +1,7 @@ """ Basic avalon integration """ +from copy import deepcopy import os import contextlib from collections import OrderedDict @@ -17,6 +18,7 @@ from openpype.pipeline import ( ) from openpype.tools.utils import host_tools from . 
import lib, menu, events +import hiero log = Logger.get_logger(__name__) @@ -106,7 +108,7 @@ def containerise(track_item, data_imprint.update({k: v}) log.debug("_ data_imprint: {}".format(data_imprint)) - lib.set_track_item_pype_tag(track_item, data_imprint) + lib.set_trackitem_openpype_tag(track_item, data_imprint) return track_item @@ -123,79 +125,131 @@ def ls(): """ # get all track items from current timeline - all_track_items = lib.get_track_items() + all_items = lib.get_track_items() - for track_item in all_track_items: - container = parse_container(track_item) - if container: - yield container + # append all video tracks + for track in lib.get_current_sequence(): + if type(track) != hiero.core.VideoTrack: + continue + all_items.append(track) + + for item in all_items: + container_data = parse_container(item) + + if isinstance(container_data, list): + for _c in container_data: + yield _c + elif container_data: + yield container_data -def parse_container(track_item, validate=True): +def parse_container(item, validate=True): """Return container data from track_item's pype tag. Args: - track_item (hiero.core.TrackItem): A containerised track item. + item (hiero.core.TrackItem or hiero.core.VideoTrack): + A containerised track item. validate (bool)[optional]: validating with avalon scheme Returns: dict: The container schema data for input containerized track item. """ + def data_to_container(item, data): + if ( + not data + or data.get("id") != "pyblish.avalon.container" + ): + return + + if validate and data and data.get("schema"): + schema.validate(data) + + if not isinstance(data, dict): + return + + # If not all required data return the empty container + required = ['schema', 'id', 'name', + 'namespace', 'loader', 'representation'] + + if any(key not in data for key in required): + return + + container = {key: data[key] for key in required} + + container["objectName"] = item.name() + + # Store reference to the node object + container["_item"] = item + + return container + # convert tag metadata to normal keys names - data = lib.get_track_item_pype_data(track_item) + if type(item) == hiero.core.VideoTrack: + return_list = [] + _data = lib.get_track_openpype_data(item) - if ( - not data - or data.get("id") != "pyblish.avalon.container" - ): - return + if not _data: + return + # convert the data to list and validate them + for _, obj_data in _data.items(): + container = data_to_container(item, obj_data) + return_list.append(container) + return return_list + else: + _data = lib.get_trackitem_openpype_data(item) + return data_to_container(item, _data) - if validate and data and data.get("schema"): - schema.validate(data) + if not isinstance(data, dict): - return - - # If not all required data return the empty container - required = ['schema', 'id', 'name', - 'namespace', 'loader', 'representation'] - - if not all(key in data for key in required): - return - - container = {key: data[key] for key in required} - - container["objectName"] = track_item.name() - - # Store reference to the node object - container["_track_item"] = track_item +def _update_container_data(container, data): + for key in container: + try: + container[key] = data[key] + except KeyError: + pass return container -def update_container(track_item, data=None): - """Update container data to input track_item's pype tag. +def update_container(item, data=None): + """Update container data to input track_item or track's + openpype tag. Args: - track_item (hiero.core.TrackItem): A containerised track item.
+ item (hiero.core.TrackItem or hiero.core.VideoTrack): + A containerised track item. data (dict)[optional]: dictionery with data to be updated Returns: bool: True if container was updated correctly """ - data = data or dict() - container = lib.get_track_item_pype_data(track_item) + data = data or {} + data = deepcopy(data) - for _key, _value in container.items(): - try: - container[_key] = data[_key] - except KeyError: - pass + if type(item) == hiero.core.VideoTrack: + # form object data for test + object_name = data["objectName"] - log.info("Updating container: `{}`".format(track_item.name())) - return bool(lib.set_track_item_pype_tag(track_item, container)) + # get all available containers + containers = lib.get_track_openpype_data(item) + container = lib.get_track_openpype_data(item, object_name) + + containers = deepcopy(containers) + container = deepcopy(container) + + # update data in container + updated_container = _update_container_data(container, data) + # merge updated container back to containers + containers.update({object_name: updated_container}) + + return bool(lib.set_track_openpype_tag(item, containers)) + else: + container = lib.get_trackitem_openpype_data(item) + updated_container = _update_container_data(container, data) + + log.info("Updating container: `{}`".format(item.name())) + return bool(lib.set_trackitem_openpype_tag(item, updated_container)) def launch_workfiles_app(*args): @@ -272,11 +326,11 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): instance, old_value, new_value)) from openpype.hosts.hiero.api import ( - get_track_item_pype_tag, + get_trackitem_openpype_tag, set_publish_attribute ) # Whether instances should be passthrough based on new value track_item = instance.data["item"] - tag = get_track_item_pype_tag(track_item) + tag = get_trackitem_openpype_tag(track_item) set_publish_attribute(tag, new_value) diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index ea8a9e836a..5ec1c78aaa 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -170,7 +170,10 @@ class CreatorWidget(QtWidgets.QDialog): for func, val in kwargs.items(): if getattr(item, func): func_attr = getattr(item, func) - func_attr(val) + if isinstance(val, tuple): + func_attr(*val) + else: + func_attr(val) # add to layout layout.addRow(label, item) @@ -273,8 +276,8 @@ class CreatorWidget(QtWidgets.QDialog): elif v["type"] == "QSpinBox": data[k]["value"] = self.create_row( content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMinimum=0, - setMaximum=100000, setToolTip=tool_tip) + setRange=(1, 9999999), setValue=v["value"], + setToolTip=tool_tip) return data diff --git a/openpype/hosts/hiero/api/tags.py b/openpype/hosts/hiero/api/tags.py index fac26da03a..cb7bc14edb 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/openpype/hosts/hiero/api/tags.py @@ -1,3 +1,4 @@ +import json import re import os import hiero @@ -85,17 +86,16 @@ def update_tag(tag, data): # get metadata key from data data_mtd = data.get("metadata", {}) - # due to hiero bug we have to make sure keys which are not existent in - # data are cleared of value by `None` - for _mk in mtd.dict().keys(): - if _mk.replace("tag.", "") not in data_mtd.keys(): - mtd.setValue(_mk, str(None)) - # set all data metadata to tag metadata - for k, v in data_mtd.items(): + for _k, _v in data_mtd.items(): + value = str(_v) + if type(_v) == dict: + value = json.dumps(_v) + + # set the value mtd.setValue( - "tag.{}".format(str(k)), - str(v) + 
"tag.{}".format(str(_k)), + value ) # set note description of tag diff --git a/openpype/hosts/hiero/plugins/load/load_effects.py b/openpype/hosts/hiero/plugins/load/load_effects.py new file mode 100644 index 0000000000..a3fcd63b5b --- /dev/null +++ b/openpype/hosts/hiero/plugins/load/load_effects.py @@ -0,0 +1,308 @@ +import json +from collections import OrderedDict +import six + +from openpype.client import ( + get_version_by_id +) + +from openpype.pipeline import ( + AVALON_CONTAINER_ID, + load, + legacy_io, + get_representation_path +) +from openpype.hosts.hiero import api as phiero +from openpype.lib import Logger + + +class LoadEffects(load.LoaderPlugin): + """Loading colorspace soft effect exported from nukestudio""" + + representations = ["effectJson"] + families = ["effect"] + + label = "Load Effects" + order = 0 + icon = "cc" + color = "white" + + log = Logger.get_logger(__name__) + + def load(self, context, name, namespace, data): + """ + Loading function to get the soft effects to particular read node + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: containerised nuke node object + """ + active_sequence = phiero.get_current_sequence() + active_track = phiero.get_current_track( + active_sequence, "Loaded_{}".format(name)) + + # get main variables + namespace = namespace or context["asset"]["name"] + object_name = "{}_{}".format(name, namespace) + clip_in = context["asset"]["data"]["clipIn"] + clip_out = context["asset"]["data"]["clipOut"] + + data_imprint = { + "objectName": object_name, + "children_names": [] + } + + # getting file path + file = self.fname.replace("\\", "/") + + if self._shared_loading( + file, + active_track, + clip_in, + clip_out, + data_imprint + ): + self.containerise( + active_track, + name=name, + namespace=namespace, + object_name=object_name, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def _shared_loading( + self, + file, + active_track, + clip_in, + clip_out, + data_imprint, + update=False + ): + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).items()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f) + + used_subtracks = { + stitem.name(): stitem + for stitem in phiero.flatten(active_track.subTrackItems()) + } + + loaded = False + for index_order, (ef_name, ef_val) in enumerate(nodes_order.items()): + new_name = "{}_loaded".format(ef_name) + if new_name not in used_subtracks: + effect_track_item = active_track.createEffect( + effectType=ef_val["class"], + timelineIn=clip_in, + timelineOut=clip_out, + subTrackIndex=index_order + + ) + effect_track_item.setName(new_name) + else: + effect_track_item = used_subtracks[new_name] + + node = effect_track_item.node() + for knob_name, knob_value in ef_val["node"].items(): + if ( + not knob_value + or knob_name == "name" + ): + continue + + try: + # assume list means animation + # except 4 values could be RGBA or vector + if isinstance(knob_value, list) and len(knob_value) > 4: + node[knob_name].setAnimated() + for i, value in enumerate(knob_value): + if isinstance(value, list): + # list can have vector animation + for ci, cv in enumerate(value): + node[knob_name].setValueAt( + cv, + (clip_in + i), + ci + ) + else: + # list is single values + 
node[knob_name].setValueAt( + value, + (clip_in + i) + ) + else: + node[knob_name].setValue(knob_value) + except NameError: + self.log.warning("Knob: {} cannot be set".format( + knob_name)) + + # register all loaded children + data_imprint["children_names"].append(new_name) + + # make sure containerisation will happen + loaded = True + + return loaded + + def update(self, container, representation): + """ Updating previously loaded effects + """ + active_track = container["_item"] + file = get_representation_path(representation).replace("\\", "/") + + # get main variables + name = container['name'] + namespace = container['namespace'] + + # get timeline in out data + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + version_data = version_doc["data"] + clip_in = version_data["clipIn"] + clip_out = version_data["clipOut"] + + object_name = "{}_{}".format(name, namespace) + + # Disable previously created nodes + used_subtracks = { + stitem.name(): stitem + for stitem in phiero.flatten(active_track.subTrackItems()) + } + container = phiero.get_track_openpype_data( + active_track, object_name + ) + + loaded_subtrack_items = container["children_names"] + for loaded_stitem in loaded_subtrack_items: + if loaded_stitem not in used_subtracks: + continue + item_to_remove = used_subtracks.pop(loaded_stitem) + # TODO: find a way to erase nodes + self.log.debug( + "This node needs to be removed: {}".format(item_to_remove)) + + data_imprint = { + "objectName": object_name, + "name": name, + "representation": str(representation["_id"]), + "children_names": [] + } + + if self._shared_loading( + file, + active_track, + clip_in, + clip_out, + data_imprint, + update=True + ): + return phiero.update_container(active_track, data_imprint) + + def reorder_nodes(self, data): + new_order = OrderedDict() + trackNums = [v["trackIndex"] for k, v in data.items() + if isinstance(v, dict)] + subTrackNums = [v["subTrackIndex"] for k, v in data.items() + if isinstance(v, dict)] + + for trackIndex in range( + min(trackNums), max(trackNums) + 1): + for subTrackIndex in range( + min(subTrackNums), max(subTrackNums) + 1): + item = self.get_item(data, trackIndex, subTrackIndex) + if item: + new_order.update(item) + return new_order + + def get_item(self, data, trackIndex, subTrackIndex): + return {key: val for key, val in data.items() + if isinstance(val, dict) + if subTrackIndex == val["subTrackIndex"] + if trackIndex == val["trackIndex"]} + + def byteify(self, input): + """ + Convert unicode strings to regular strings. + It goes recursively through the whole dictionary. + + Arguments: + input (dict/str): input + + Returns: + dict: with fixed values and keys + + """ + + if isinstance(input, dict): + return {self.byteify(key): self.byteify(value) + for key, value in input.items()} + elif isinstance(input, list): + return [self.byteify(element) for element in input] + elif isinstance(input, six.text_type): + return str(input) + else: + return input + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + pass + + def containerise( + self, + track, + name, + namespace, + object_name, + context, + loader=None, + data=None + ): + """Bundle Hiero's object into an assembly and imprint it with metadata + + Containerisation enables tracking of version, author and origin + for loaded assets.
+ + Arguments: + track (hiero.core.VideoTrack): object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + object_name (str): name of container + context (dict): Asset information + loader (str, optional): Name of node used to produce this + container. + + Returns: + track_item (hiero.core.TrackItem): containerised object + + """ + + data_imprint = { + object_name: { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + } + } + + if data: + for k, v in data.items(): + data_imprint[object_name].update({k: v}) + + self.log.debug("_ data_imprint: {}".format(data_imprint)) + phiero.set_track_openpype_tag(track, data_imprint) diff --git a/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py b/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py index 8d2ed9a9c2..9489b1c4fb 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py +++ b/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py @@ -16,6 +16,9 @@ class CollectClipEffects(pyblish.api.InstancePlugin): review_track_index = instance.context.data.get("reviewTrackIndex") item = instance.data["item"] + if "audio" in instance.data["family"]: + return + # frame range self.handle_start = instance.data["handleStart"] self.handle_end = instance.data["handleEnd"] diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index 1fc4b1f696..bb02919b35 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -48,7 +48,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): self.log.debug("clip_name: {}".format(clip_name)) # get openpype tag data - tag_data = phiero.get_track_item_pype_data(track_item) + tag_data = phiero.get_trackitem_openpype_data(track_item) self.log.debug("__ tag_data: {}".format(pformat(tag_data))) if not tag_data: diff --git a/openpype/hosts/houdini/addon.py b/openpype/hosts/houdini/addon.py index 8d88e83c56..80856b0624 100644 --- a/openpype/hosts/houdini/addon.py +++ b/openpype/hosts/houdini/addon.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon HOUDINI_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/houdini/api/__init__.py b/openpype/hosts/houdini/api/__init__.py index fddf7ab98d..2663a55f6f 100644 --- a/openpype/hosts/houdini/api/__init__.py +++ b/openpype/hosts/houdini/api/__init__.py @@ -1,24 +1,13 @@ from .pipeline import ( - install, - uninstall, - + HoudiniHost, ls, - containerise, + containerise ) from .plugin import ( Creator, ) -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root -) - from .lib import ( lsattr, lsattrs, @@ -29,22 +18,13 @@ from .lib import ( __all__ = [ - "install", - "uninstall", + "HoudiniHost", "ls", "containerise", "Creator", - # Workfiles API - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - # Utility functions "lsattr", "lsattrs", @@ -52,7 +32,3 @@ __all__ = [ "maintained_selection" ] - -# Backwards API compatibility -open = open_file -save = save_file diff --git 
a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index c8a7f92bb9..13f5a62ec3 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -1,6 +1,10 @@ +# -*- coding: utf-8 -*- +import sys +import os import uuid import logging from contextlib import contextmanager +import json import six @@ -8,10 +12,13 @@ from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io from openpype.pipeline.context_tools import get_current_project_asset - import hou + +self = sys.modules[__name__] +self._parent = None log = logging.getLogger(__name__) +JSON_PREFIX = "JSON:::" def get_asset_fps(): @@ -29,23 +36,18 @@ def set_id(node, unique_id, overwrite=False): def get_id(node): - """ - Get the `cbId` attribute of the given node + """Get the `cbId` attribute of the given node. + Args: node (hou.Node): the name of the node to retrieve the attribute from Returns: - str + str: cbId attribute of the node. """ - if node is None: - return - - id = node.parm("id") - if node is None: - return - return id + if node is not None: + return node.parm("id") def generate_ids(nodes, asset_id=None): @@ -281,7 +283,7 @@ def render_rop(ropnode): raise RuntimeError("Render failed: {0}".format(exc)) -def imprint(node, data): +def imprint(node, data, update=False): """Store attributes with value on a node Depending on the type of attribute it creates the correct parameter @@ -290,49 +292,76 @@ def imprint(node, data): http://www.sidefx.com/docs/houdini/hom/hou/ParmTemplate.html + Because of some update glitch where you cannot overwrite existing + ParmTemplates on node using: + `setParmTemplates()` and `parmTuplesInFolder()` + update is done in another pass. + Args: node(hou.Node): node object from Houdini data(dict): collection of attributes and their value + update (bool, optional): flag if imprint should update + already existing data or leave them untouched and only + add new. 
Returns: None """ + if not data: + return + if not node: + self.log.error("Node is not set, calling imprint on invalid data.") + return - parm_group = node.parmTemplateGroup() + current_parms = {p.name(): p for p in node.spareParms()} + update_parms = [] + templates = [] - parm_folder = hou.FolderParmTemplate("folder", "Extra") for key, value in data.items(): if value is None: continue - if isinstance(value, float): - parm = hou.FloatParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, bool): - parm = hou.ToggleParmTemplate(name=key, - label=key, - default_value=value) - elif isinstance(value, int): - parm = hou.IntParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, six.string_types): - parm = hou.StringParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - else: - raise TypeError("Unsupported type: %r" % type(value)) + parm = get_template_from_value(key, value) - parm_folder.addParmTemplate(parm) + if key in current_parms: + if node.evalParm(key) == data[key]: + continue + if not update: + log.debug(f"{key} already exists on {node}") + else: + log.debug(f"replacing {key}") + update_parms.append(parm) + continue + + templates.append(parm) + + parm_group = node.parmTemplateGroup() + parm_folder = parm_group.findFolder("Extra") + + # if folder doesn't exist yet, create one and append to it, + # else append to existing one + if not parm_folder: + parm_folder = hou.FolderParmTemplate("folder", "Extra") + parm_folder.setParmTemplates(templates) + parm_group.append(parm_folder) + else: + for template in templates: + parm_group.appendToFolder(parm_folder, template) + # this is needed because the pointer to folder + # is for some reason lost every call to `appendToFolder()` + parm_folder = parm_group.findFolder("Extra") - parm_group.append(parm_folder) node.setParmTemplateGroup(parm_group) + # TODO: Updating is done here, by calling probably deprecated functions. + # This needs to be addressed in the future. 
+ if not update_parms: + return + + for parm in update_parms: + node.replaceSpareParmTuple(parm.name(), parm) + def lsattr(attr, value=None, root="/"): """Return nodes that have `attr` @@ -397,8 +426,22 @@ def read(node): """ # `spareParms` returns a tuple of hou.Parm objects - return {parameter.name(): parameter.eval() for - parameter in node.spareParms()} + data = {} + if not node: + return data + for parameter in node.spareParms(): + value = parameter.eval() + # test if value is json encoded dict + if isinstance(value, six.string_types) and \ + value.startswith(JSON_PREFIX): + try: + value = json.loads(value[len(JSON_PREFIX):]) + except json.JSONDecodeError: + # not a json + pass + data[parameter.name()] = value + + return data @contextmanager @@ -460,3 +503,89 @@ def reset_framerange(): hou.playbar.setFrameRange(frame_start, frame_end) hou.playbar.setPlaybackRange(frame_start, frame_end) hou.setFrame(frame_start) + + +def get_main_window(): + """Acquire Houdini's main window""" + if self._parent is None: + self._parent = hou.ui.mainQtWindow() + return self._parent + + +def get_template_from_value(key, value): + if isinstance(value, float): + parm = hou.FloatParmTemplate(name=key, + label=key, + num_components=1, + default_value=(value,)) + elif isinstance(value, bool): + parm = hou.ToggleParmTemplate(name=key, + label=key, + default_value=value) + elif isinstance(value, int): + parm = hou.IntParmTemplate(name=key, + label=key, + num_components=1, + default_value=(value,)) + elif isinstance(value, six.string_types): + parm = hou.StringParmTemplate(name=key, + label=key, + num_components=1, + default_value=(value,)) + elif isinstance(value, (dict, list, tuple)): + parm = hou.StringParmTemplate(name=key, + label=key, + num_components=1, + default_value=( + JSON_PREFIX + json.dumps(value),)) + else: + raise TypeError("Unsupported type: %r" % type(value)) + + return parm + + +def get_frame_data(node): + """Get the frame data: start frame, end frame and steps. + + Args: + node(hou.Node) + + Returns: + dict: frame data for start, end and steps. + + """ + data = {} + + if node.parm("trange") is None: + + return data + + if node.evalParm("trange") == 0: + self.log.debug("trange is 0") + return data + + data["frameStart"] = node.evalParm("f1") + data["frameEnd"] = node.evalParm("f2") + data["steps"] = node.evalParm("f3") + + return data + + +def splitext(name, allowed_multidot_extensions): + # type: (str, list) -> tuple + """Split file name to name and extension. + + Args: + name (str): File name to split. + allowed_multidot_extensions (list of str): List of allowed multidot + extensions. + + Returns: + tuple: Name and extension.
+ """ + + for ext in allowed_multidot_extensions: + if name.endswith(ext): + return name[:-len(ext)], ext + + return os.path.splitext(name) diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index e4af1913ef..b0791fcb6c 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -1,9 +1,13 @@ +# -*- coding: utf-8 -*- +"""Pipeline tools for OpenPype Houdini integration.""" import os import sys import logging import contextlib -import hou +import hou # noqa + +from openpype.host import HostBase, IWorkfileHost, ILoadHost, INewPublisher import pyblish.api @@ -26,6 +30,7 @@ from .lib import get_asset_fps log = logging.getLogger("openpype.hosts.houdini") AVALON_CONTAINERS = "/obj/AVALON_CONTAINERS" +CONTEXT_CONTAINER = "/obj/OpenPypeContext" IS_HEADLESS = not hasattr(hou, "ui") PLUGINS_DIR = os.path.join(HOUDINI_HOST_DIR, "plugins") @@ -35,71 +40,139 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") -self = sys.modules[__name__] -self._has_been_setup = False -self._parent = None -self._events = dict() +class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher): + name = "houdini" + def __init__(self): + super(HoudiniHost, self).__init__() + self._op_events = {} + self._has_been_setup = False -def install(): - _register_callbacks() + def install(self): + pyblish.api.register_host("houdini") + pyblish.api.register_host("hython") + pyblish.api.register_host("hpython") - pyblish.api.register_host("houdini") - pyblish.api.register_host("hython") - pyblish.api.register_host("hpython") + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) + log.info("Installing callbacks ... ") + # register_event_callback("init", on_init) + self._register_callbacks() + register_event_callback("before.save", before_save) + register_event_callback("save", on_save) + register_event_callback("open", on_open) + register_event_callback("new", on_new) - log.info("Installing callbacks ... ") - # register_event_callback("init", on_init) - register_event_callback("before.save", before_save) - register_event_callback("save", on_save) - register_event_callback("open", on_open) - register_event_callback("new", on_new) + pyblish.api.register_callback( + "instanceToggled", on_pyblish_instance_toggled + ) - pyblish.api.register_callback( - "instanceToggled", on_pyblish_instance_toggled - ) + self._has_been_setup = True + # add houdini vendor packages + hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor") - self._has_been_setup = True - # add houdini vendor packages - hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor") + sys.path.append(hou_pythonpath) - sys.path.append(hou_pythonpath) + # Set asset settings for the empty scene directly after launch of + # Houdini so it initializes into the correct scene FPS, + # Frame Range, etc. + # TODO: make sure this doesn't trigger when + # opening with last workfile. + _set_context_settings() + shelves.generate_shelves() - # Set asset settings for the empty scene directly after launch of Houdini - # so it initializes into the correct scene FPS, Frame Range, etc. 
- # todo: make sure this doesn't trigger when opening with last workfile - _set_context_settings() - shelves.generate_shelves() + def has_unsaved_changes(self): + return hou.hipFile.hasUnsavedChanges() + def get_workfile_extensions(self): + return [".hip", ".hiplc", ".hipnc"] -def uninstall(): - """Uninstall Houdini-specific functionality of avalon-core. + def save_workfile(self, dst_path=None): + # Force forwards slashes to avoid segfault + if dst_path: + dst_path = dst_path.replace("\\", "/") + hou.hipFile.save(file_name=dst_path, + save_to_recent_files=True) + return dst_path - This function is called automatically on calling `api.uninstall()`. - """ + def open_workfile(self, filepath): + # Force forwards slashes to avoid segfault + filepath = filepath.replace("\\", "/") - pyblish.api.deregister_host("hython") - pyblish.api.deregister_host("hpython") - pyblish.api.deregister_host("houdini") + hou.hipFile.load(filepath, + suppress_save_prompt=True, + ignore_load_warnings=False) + return filepath -def _register_callbacks(): - for event in self._events.copy().values(): - if event is None: - continue + def get_current_workfile(self): + current_filepath = hou.hipFile.path() + if (os.path.basename(current_filepath) == "untitled.hip" and + not os.path.exists(current_filepath)): + # By default a new scene in houdini is saved in the current + # working directory as "untitled.hip" so we need to capture + # that and consider it 'not saved' when it's in that state. + return None - try: - hou.hipFile.removeEventCallback(event) - except RuntimeError as e: - log.info(e) + return current_filepath - self._events[on_file_event_callback] = hou.hipFile.addEventCallback( - on_file_event_callback - ) + def get_containers(self): + return ls() + + def _register_callbacks(self): + for event in self._op_events.copy().values(): + if event is None: + continue + + try: + hou.hipFile.removeEventCallback(event) + except RuntimeError as e: + log.info(e) + + self._op_events[on_file_event_callback] = hou.hipFile.addEventCallback( + on_file_event_callback + ) + + @staticmethod + def create_context_node(): + """Helper for creating context holding node. 
+ + Returns: + hou.Node: context node + + """ + obj_network = hou.node("/obj") + op_ctx = obj_network.createNode( + "null", node_name="OpenPypeContext") + op_ctx.moveToGoodPosition() + op_ctx.setBuiltExplicitly(False) + op_ctx.setCreatorState("OpenPype") + op_ctx.setComment("OpenPype node to hold context metadata") + op_ctx.setColor(hou.Color((0.081, 0.798, 0.810))) + op_ctx.hide(True) + return op_ctx + + def update_context_data(self, data, changes): + op_ctx = hou.node(CONTEXT_CONTAINER) + if not op_ctx: + op_ctx = self.create_context_node() + + lib.imprint(op_ctx, data) + + def get_context_data(self): + op_ctx = hou.node(CONTEXT_CONTAINER) + if not op_ctx: + op_ctx = self.create_context_node() + return lib.read(op_ctx) + + def save_file(self, dst_path=None): + # Force forwards slashes to avoid segfault + dst_path = dst_path.replace("\\", "/") + + hou.hipFile.save(file_name=dst_path, + save_to_recent_files=True) def on_file_event_callback(event): @@ -113,22 +186,6 @@ def on_file_event_callback(event): emit_event("new") -def get_main_window(): - """Acquire Houdini's main window""" - if self._parent is None: - self._parent = hou.ui.mainQtWindow() - return self._parent - - -def teardown(): - """Remove integration""" - if not self._has_been_setup: - return - - self._has_been_setup = False - print("pyblish: Integration torn down successfully") - - def containerise(name, namespace, nodes, @@ -251,7 +308,7 @@ def on_open(): log.warning("Scene has outdated content.") # Get main window - parent = get_main_window() + parent = lib.get_main_window() if parent is None: log.info("Skipping outdated content pop-up " "because Houdini window can't be found.") diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py index 2bbb65aa05..e15e27c83f 100644 --- a/openpype/hosts/houdini/api/plugin.py +++ b/openpype/hosts/houdini/api/plugin.py @@ -1,14 +1,19 @@ # -*- coding: utf-8 -*- """Houdini specific Avalon/Pyblish plugin definitions.""" import sys +from abc import ( + ABCMeta +) import six - import hou from openpype.pipeline import ( CreatorError, - LegacyCreator + LegacyCreator, + Creator as NewCreator, + CreatedInstance ) -from .lib import imprint +from openpype.lib import BoolDef +from .lib import imprint, read, lsattr class OpenPypeCreatorError(CreatorError): @@ -30,12 +35,15 @@ class Creator(LegacyCreator): when hovering over a node. The information is visible under the name of the node. + Deprecated: + This creator is deprecated and will be removed in future version. + """ defaults = ['Main'] def __init__(self, *args, **kwargs): super(Creator, self).__init__(*args, **kwargs) - self.nodes = list() + self.nodes = [] def process(self): """This is the base functionality to create instances in Houdini @@ -84,3 +92,187 @@ class Creator(LegacyCreator): OpenPypeCreatorError, OpenPypeCreatorError("Creator error: {}".format(er)), sys.exc_info()[2]) + + +class HoudiniCreatorBase(object): + @staticmethod + def cache_subsets(shared_data): + """Cache instances for Creators to shared data. + + Create `houdini_cached_subsets` key when needed in shared data and + fill it with all collected instances from the scene under its + respective creator identifiers. + + If legacy instances are detected in the scene, create + `houdini_cached_legacy_subsets` there and fill it with + all legacy subsets under family as a key. + + Args: + Dict[str, Any]: Shared data. + + Return: + Dict[str, Any]: Shared data dictionary. 
+ + """ + if shared_data.get("houdini_cached_subsets") is None: + shared_data["houdini_cached_subsets"] = {} + if shared_data.get("houdini_cached_legacy_subsets") is None: + shared_data["houdini_cached_legacy_subsets"] = {} + cached_instances = lsattr("id", "pyblish.avalon.instance") + for i in cached_instances: + if not i.parm("creator_identifier"): + # we have legacy instance + family = i.parm("family").eval() + if family not in shared_data[ + "houdini_cached_legacy_subsets"]: + shared_data["houdini_cached_legacy_subsets"][ + family] = [i] + else: + shared_data[ + "houdini_cached_legacy_subsets"][family].append(i) + continue + + creator_id = i.parm("creator_identifier").eval() + if creator_id not in shared_data["houdini_cached_subsets"]: + shared_data["houdini_cached_subsets"][creator_id] = [i] + else: + shared_data[ + "houdini_cached_subsets"][creator_id].append(i) # noqa + return shared_data + + @staticmethod + def create_instance_node( + node_name, parent, + node_type="geometry"): + # type: (str, str, str) -> hou.Node + """Create node representing instance. + + Arguments: + node_name (str): Name of the new node. + parent (str): Name of the parent node. + node_type (str, optional): Type of the node. + + Returns: + hou.Node: Newly created instance node. + + """ + parent_node = hou.node(parent) + instance_node = parent_node.createNode( + node_type, node_name=node_name) + instance_node.moveToGoodPosition() + return instance_node + + +@six.add_metaclass(ABCMeta) +class HoudiniCreator(NewCreator, HoudiniCreatorBase): + """Base class for most of the Houdini creator plugins.""" + selected_nodes = [] + + def create(self, subset_name, instance_data, pre_create_data): + try: + if pre_create_data.get("use_selection"): + self.selected_nodes = hou.selectedNodes() + + # Get the node type and remove it from the data, not needed + node_type = instance_data.pop("node_type", None) + if node_type is None: + node_type = "geometry" + + instance_node = self.create_instance_node( + subset_name, "/out", node_type) + + self.customize_node_look(instance_node) + + instance_data["instance_node"] = instance_node.path() + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self) + self._add_instance_to_context(instance) + imprint(instance_node, instance.data_to_store()) + return instance + + except hou.Error as er: + six.reraise( + OpenPypeCreatorError, + OpenPypeCreatorError("Creator error: {}".format(er)), + sys.exc_info()[2]) + + def lock_parameters(self, node, parameters): + """Lock list of specified parameters on the node. + + Args: + node (hou.Node): Houdini node to lock parameters on. + parameters (list of str): List of parameter names. 
+ + """ + for name in parameters: + try: + parm = node.parm(name) + parm.lock(True) + except AttributeError: + self.log.debug("missing lock pattern {}".format(name)) + + def collect_instances(self): + # cache instances if missing + self.cache_subsets(self.collection_shared_data) + for instance in self.collection_shared_data[ + "houdini_cached_subsets"].get(self.identifier, []): + created_instance = CreatedInstance.from_existing( + read(instance), self + ) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + instance_node = hou.node(created_inst.get("instance_node")) + + new_values = { + key: new_value + for key, (_old_value, new_value) in _changes.items() + } + imprint( + instance_node, + new_values, + update=True + ) + + def remove_instances(self, instances): + """Remove specified instance from the scene. + + This is only removing `id` parameter so instance is no longer + instance, because it might contain valuable data for artist. + + """ + for instance in instances: + instance_node = hou.node(instance.data.get("instance_node")) + if instance_node: + instance_node.destroy() + + self._remove_instance_from_context(instance) + + def get_pre_create_attr_defs(self): + return [ + BoolDef("use_selection", label="Use selection") + ] + + @staticmethod + def customize_node_look( + node, color=None, + shape="chevron_down"): + """Set custom look for instance nodes. + + Args: + node (hou.Node): Node to set look. + color (hou.Color, Optional): Color of the node. + shape (str, Optional): Shape name of the node. + + Returns: + None + + """ + if not color: + color = hou.Color((0.616, 0.871, 0.769)) + node.setUserData('nodeshape', shape) + node.setColor(color) diff --git a/openpype/hosts/houdini/api/workio.py b/openpype/hosts/houdini/api/workio.py deleted file mode 100644 index 5f7efff333..0000000000 --- a/openpype/hosts/houdini/api/workio.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Host API required Work Files tool""" -import os - -import hou - - -def file_extensions(): - return [".hip", ".hiplc", ".hipnc"] - - -def has_unsaved_changes(): - return hou.hipFile.hasUnsavedChanges() - - -def save_file(filepath): - - # Force forwards slashes to avoid segfault - filepath = filepath.replace("\\", "/") - - hou.hipFile.save(file_name=filepath, - save_to_recent_files=True) - - return filepath - - -def open_file(filepath): - - # Force forwards slashes to avoid segfault - filepath = filepath.replace("\\", "/") - - hou.hipFile.load(filepath, - suppress_save_prompt=True, - ignore_load_warnings=False) - - return filepath - - -def current_file(): - - current_filepath = hou.hipFile.path() - if (os.path.basename(current_filepath) == "untitled.hip" and - not os.path.exists(current_filepath)): - # By default a new scene in houdini is saved in the current - # working directory as "untitled.hip" so we need to capture - # that and consider it 'not saved' when it's in that state. 
- return None - - return current_filepath - - -def work_root(session): - work_dir = session["AVALON_WORKDIR"] - scene_dir = session.get("AVALON_SCENEDIR") - if scene_dir: - return os.path.join(work_dir, scene_dir) - else: - return work_dir diff --git a/openpype/hosts/houdini/plugins/create/convert_legacy.py b/openpype/hosts/houdini/plugins/create/convert_legacy.py new file mode 100644 index 0000000000..4b8041b4f5 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/convert_legacy.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +"""Convertor for legacy Houdini subsets.""" +from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin +from openpype.hosts.houdini.api.lib import imprint + + +class HoudiniLegacyConvertor(SubsetConvertorPlugin): + """Find and convert any legacy subsets in the scene. + + This Convertor will find all legacy subsets in the scene and will + transform them to the current system. Since the old subsets don't + retain any information about their original creators, the only mapping + we can do is based on their families. + + Its limitation is that you can have multiple creators creating subsets + of the same family and there is no way to handle it. This code should + nevertheless cover all creators that came with OpenPype. + + """ + identifier = "io.openpype.creators.houdini.legacy" + family_to_id = { + "camera": "io.openpype.creators.houdini.camera", + "ass": "io.openpype.creators.houdini.ass", + "imagesequence": "io.openpype.creators.houdini.imagesequence", + "hda": "io.openpype.creators.houdini.hda", + "pointcache": "io.openpype.creators.houdini.pointcache", + "redshiftproxy": "io.openpype.creators.houdini.redshiftproxy", + "redshift_rop": "io.openpype.creators.houdini.redshift_rop", + "usd": "io.openpype.creators.houdini.usd", + "usdrender": "io.openpype.creators.houdini.usdrender", + "vdbcache": "io.openpype.creators.houdini.vdbcache" + } + + def __init__(self, *args, **kwargs): + super(HoudiniLegacyConvertor, self).__init__(*args, **kwargs) + self.legacy_subsets = {} + + def find_instances(self): + """Find legacy subsets in the scene. + + Legacy subsets are the ones that don't have the `creator_identifier` + parameter on them. + + This is using cached entries done in + :py:meth:`~HoudiniCreatorBase.cache_subsets()` + + """ + self.legacy_subsets = self.collection_shared_data.get( + "houdini_cached_legacy_subsets") + if not self.legacy_subsets: + return + self.add_convertor_item("Found {} incompatible subset{}.".format( + len(self.legacy_subsets), "s" if len(self.legacy_subsets) > 1 else "") + ) + + def convert(self): + """Convert all legacy subsets to current. + + It is enough to add `creator_identifier` and `instance_node`.
+ + """ + if not self.legacy_subsets: + return + + for family, subsets in self.legacy_subsets.items(): + if family in self.family_to_id: + for subset in subsets: + data = { + "creator_identifier": self.family_to_id[family], + "instance_node": subset.path() + } + self.log.info("Converting {} to {}".format( + subset.path(), self.family_to_id[family])) + imprint(subset, data) diff --git a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py index eef86005f5..fec64eb4a1 100644 --- a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py +++ b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py @@ -1,46 +1,49 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating alembic camera subsets.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance, CreatorError -class CreateAlembicCamera(plugin.Creator): - """Single baked camera from Alembic ROP""" +class CreateAlembicCamera(plugin.HoudiniCreator): + """Single baked camera from Alembic ROP.""" - name = "camera" + identifier = "io.openpype.creators.houdini.camera" label = "Camera (Abc)" family = "camera" icon = "camera" - def __init__(self, *args, **kwargs): - super(CreateAlembicCamera, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + import hou - # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_data.pop("active", None) + instance_data.update({"node_type": "alembic"}) - # Set node type to create for output - self.data.update({"node_type": "alembic"}) + instance = super(CreateAlembicCamera, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - def _process(self, instance): - """Creator main entry point. - - Args: - instance (hou.Node): Created Houdini instance. - - """ + instance_node = hou.node(instance.get("instance_node")) parms = { - "filename": "$HIP/pyblish/%s.abc" % self.name, + "filename": hou.text.expandString( + "$HIP/pyblish/{}.abc".format(subset_name)), "use_sop_path": False, } - if self.nodes: - node = self.nodes[0] - path = node.path() + if self.selected_nodes: + if len(self.selected_nodes) > 1: + raise CreatorError("More than one item selected.") + path = self.selected_nodes[0].path() # Split the node path into the first root and the remainder # So we can set the root and objects parameters correctly _, root, remainder = path.split("/", 2) parms.update({"root": "/" + root, "objects": remainder}) - instance.setParms(parms) + instance_node.setParms(parms) # Lock the Use Sop Path setting so the # user doesn't accidentally enable it. 
- instance.parm("use_sop_path").lock(True) - instance.parm("trange").set(1) + to_lock = ["use_sop_path"] + self.lock_parameters(instance_node, to_lock) + + instance_node.parm("trange").set(1) diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_ass.py b/openpype/hosts/houdini/plugins/create/create_arnold_ass.py index 72088e43b0..8b310753d0 100644 --- a/openpype/hosts/houdini/plugins/create/create_arnold_ass.py +++ b/openpype/hosts/houdini/plugins/create/create_arnold_ass.py @@ -1,9 +1,12 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating Arnold ASS files.""" from openpype.hosts.houdini.api import plugin -class CreateArnoldAss(plugin.Creator): +class CreateArnoldAss(plugin.HoudiniCreator): """Arnold .ass Archive""" + identifier = "io.openpype.creators.houdini.ass" label = "Arnold ASS" family = "ass" icon = "magic" @@ -12,42 +15,39 @@ class CreateArnoldAss(plugin.Creator): # Default extension: `.ass` or `.ass.gz` ext = ".ass" - def __init__(self, *args, **kwargs): - super(CreateArnoldAss, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + import hou - # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_data.pop("active", None) + instance_data.update({"node_type": "arnold"}) - self.data.update({"node_type": "arnold"}) + instance = super(CreateArnoldAss, self).create( + subset_name, + instance_data, + pre_create_data) # type: plugin.CreatedInstance - def process(self): - node = super(CreateArnoldAss, self).process() - - basename = node.name() - node.setName(basename + "_ASS", unique_name=True) + instance_node = hou.node(instance.get("instance_node")) # Hide Properties Tab on Arnold ROP since that's used # for rendering instead of .ass Archive Export - parm_template_group = node.parmTemplateGroup() + parm_template_group = instance_node.parmTemplateGroup() parm_template_group.hideFolder("Properties", True) - node.setParmTemplateGroup(parm_template_group) + instance_node.setParmTemplateGroup(parm_template_group) - filepath = '$HIP/pyblish/`chs("subset")`.$F4{}'.format(self.ext) + filepath = "{}{}".format( + hou.text.expandString("$HIP/pyblish/"), + "{}.$F4{}".format(subset_name, self.ext) + ) parms = { # Render frame range "trange": 1, - # Arnold ROP settings "ar_ass_file": filepath, "ar_ass_export_enable": 1 } - node.setParms(parms) - # Lock the ASS export attribute - node.parm("ar_ass_export_enable").lock(True) + instance_node.setParms(parms) - # Lock some Avalon attributes - to_lock = ["family", "id"] - for name in to_lock: - parm = node.parm(name) - parm.lock(True) + # Lock any parameters in this list + to_lock = ["ar_ass_export_enable", "family", "id"] + self.lock_parameters(instance_node, to_lock) diff --git a/openpype/hosts/houdini/plugins/create/create_composite.py b/openpype/hosts/houdini/plugins/create/create_composite.py index e278708076..45af2b0630 100644 --- a/openpype/hosts/houdini/plugins/create/create_composite.py +++ b/openpype/hosts/houdini/plugins/create/create_composite.py @@ -1,44 +1,42 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating composite sequences.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance -class CreateCompositeSequence(plugin.Creator): +class CreateCompositeSequence(plugin.HoudiniCreator): """Composite ROP to Image Sequence""" + identifier = "io.openpype.creators.houdini.imagesequence" label = "Composite (Image Sequence)" family = "imagesequence" icon = "gears" - def 
__init__(self, *args, **kwargs): - super(CreateCompositeSequence, self).__init__(*args, **kwargs) + ext = ".exr" - # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + def create(self, subset_name, instance_data, pre_create_data): + import hou # noqa - # Type of ROP node to create - self.data.update({"node_type": "comp"}) + instance_data.pop("active", None) + instance_data.update({"node_type": "comp"}) - def _process(self, instance): - """Creator main entry point. + instance = super(CreateCompositeSequence, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - Args: - instance (hou.Node): Created Houdini instance. + instance_node = hou.node(instance.get("instance_node")) + filepath = "{}{}".format( + hou.text.expandString("$HIP/pyblish/"), + "{}.$F4{}".format(subset_name, self.ext) + ) + parms = { + "trange": 1, + "copoutput": filepath + } - """ - parms = {"copoutput": "$HIP/pyblish/%s.$F4.exr" % self.name} - - if self.nodes: - node = self.nodes[0] - parms.update({"coppath": node.path()}) - - instance.setParms(parms) + instance_node.setParms(parms) # Lock any parameters in this list to_lock = ["prim_to_detail_pattern"] - for name in to_lock: - try: - parm = instance.parm(name) - parm.lock(True) - except AttributeError: - # missing lock pattern - self.log.debug( - "missing lock pattern {}".format(name)) + self.lock_parameters(instance_node, to_lock) diff --git a/openpype/hosts/houdini/plugins/create/create_hda.py b/openpype/hosts/houdini/plugins/create/create_hda.py index b98da8b8bb..4bed83c2e9 100644 --- a/openpype/hosts/houdini/plugins/create/create_hda.py +++ b/openpype/hosts/houdini/plugins/create/create_hda.py @@ -1,28 +1,22 @@ # -*- coding: utf-8 -*- -import hou - +"""Creator plugin for creating publishable Houdini Digital Assets.""" from openpype.client import ( get_asset_by_name, get_subsets, ) from openpype.pipeline import legacy_io -from openpype.hosts.houdini.api import lib from openpype.hosts.houdini.api import plugin -class CreateHDA(plugin.Creator): +class CreateHDA(plugin.HoudiniCreator): """Publish Houdini Digital Asset file.""" - name = "hda" + identifier = "io.openpype.creators.houdini.hda" label = "Houdini Digital Asset (Hda)" family = "hda" icon = "gears" maintain_selection = False - def __init__(self, *args, **kwargs): - super(CreateHDA, self).__init__(*args, **kwargs) - self.data.pop("active", None) - def _check_existing(self, subset_name): # type: (str) -> bool """Check if existing subset name versions already exists.""" @@ -40,55 +34,51 @@ class CreateHDA(plugin.Creator): } return subset_name.lower() in existing_subset_names_low - def _process(self, instance): - subset_name = self.data["subset"] - # get selected nodes - out = hou.node("/obj") - self.nodes = hou.selectedNodes() + def _create_instance_node( + self, node_name, parent, node_type="geometry"): + import hou - if (self.options or {}).get("useSelection") and self.nodes: - # if we have `use selection` enabled and we have some + parent_node = hou.node("/obj") + if self.selected_nodes: + # if we have `use selection` enabled, and we have some # selected nodes ... 
- subnet = out.collapseIntoSubnet( - self.nodes, - subnet_name="{}_subnet".format(self.name)) + subnet = parent_node.collapseIntoSubnet( + self.selected_nodes, + subnet_name="{}_subnet".format(node_name)) subnet.moveToGoodPosition() to_hda = subnet else: - to_hda = out.createNode( - "subnet", node_name="{}_subnet".format(self.name)) + to_hda = parent_node.createNode( + "subnet", node_name="{}_subnet".format(node_name)) if not to_hda.type().definition(): # if node type has not its definition, it is not user # created hda. We test if hda can be created from the node. if not to_hda.canCreateDigitalAsset(): - raise Exception( + raise plugin.OpenPypeCreatorError( "cannot create hda from node {}".format(to_hda)) hda_node = to_hda.createDigitalAsset( - name=subset_name, - hda_file_name="$HIP/{}.hda".format(subset_name) + name=node_name, + hda_file_name="$HIP/{}.hda".format(node_name) ) hda_node.layoutChildren() - elif self._check_existing(subset_name): + elif self._check_existing(node_name): raise plugin.OpenPypeCreatorError( ("subset {} is already published with different HDA" - "definition.").format(subset_name)) + "definition.").format(node_name)) else: hda_node = to_hda - hda_node.setName(subset_name) - - # delete node created by Avalon in /out - # this needs to be addressed in future Houdini workflow refactor. - - hou.node("/out/{}".format(subset_name)).destroy() - - try: - lib.imprint(hda_node, self.data) - except hou.OperationFailed: - raise plugin.OpenPypeCreatorError( - ("Cannot set metadata on asset. Might be that it already is " - "OpenPype asset.") - ) - + hda_node.setName(node_name) + self.customize_node_look(hda_node) return hda_node + + def create(self, subset_name, instance_data, pre_create_data): + instance_data.pop("active", None) + + instance = super(CreateHDA, self).create( + subset_name, + instance_data, + pre_create_data) # type: plugin.CreatedInstance + + return instance diff --git a/openpype/hosts/houdini/plugins/create/create_pointcache.py b/openpype/hosts/houdini/plugins/create/create_pointcache.py index feb683edf6..6b6b277422 100644 --- a/openpype/hosts/houdini/plugins/create/create_pointcache.py +++ b/openpype/hosts/houdini/plugins/create/create_pointcache.py @@ -1,48 +1,51 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating pointcache alembics.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance -class CreatePointCache(plugin.Creator): +class CreatePointCache(plugin.HoudiniCreator): """Alembic ROP to pointcache""" - - name = "pointcache" + identifier = "io.openpype.creators.houdini.pointcache" label = "Point Cache" family = "pointcache" icon = "gears" - def __init__(self, *args, **kwargs): - super(CreatePointCache, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + import hou - # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_data.pop("active", None) + instance_data.update({"node_type": "alembic"}) - self.data.update({"node_type": "alembic"}) + instance = super(CreatePointCache, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - def _process(self, instance): - """Creator main entry point. - - Args: - instance (hou.Node): Created Houdini instance. 
- - """ + instance_node = hou.node(instance.get("instance_node")) parms = { - "use_sop_path": True, # Export single node from SOP Path - "build_from_path": True, # Direct path of primitive in output - "path_attrib": "path", # Pass path attribute for output + "use_sop_path": True, + "build_from_path": True, + "path_attrib": "path", "prim_to_detail_pattern": "cbId", - "format": 2, # Set format to Ogawa - "facesets": 0, # No face sets (by default exclude them) - "filename": "$HIP/pyblish/%s.abc" % self.name, + "format": 2, + "facesets": 0, + "filename": hou.text.expandString( + "$HIP/pyblish/{}.abc".format(subset_name)) } - if self.nodes: - node = self.nodes[0] - parms.update({"sop_path": node.path()}) + if self.selected_nodes: + parms["sop_path"] = self.selected_nodes[0].path() - instance.setParms(parms) - instance.parm("trange").set(1) + # try to find output node + for child in self.selected_nodes[0].children(): + if child.type().name() == "output": + parms["sop_path"] = child.path() + break + + instance_node.setParms(parms) + instance_node.parm("trange").set(1) # Lock any parameters in this list to_lock = ["prim_to_detail_pattern"] - for name in to_lock: - parm = instance.parm(name) - parm.lock(True) + self.lock_parameters(instance_node, to_lock) diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py b/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py index da4d80bf2b..8b6a68437b 100644 --- a/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py +++ b/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py @@ -1,18 +1,20 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating Redshift proxies.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance -class CreateRedshiftProxy(plugin.Creator): +class CreateRedshiftProxy(plugin.HoudiniCreator): """Redshift Proxy""" - + identifier = "io.openpype.creators.houdini.redshiftproxy" label = "Redshift Proxy" family = "redshiftproxy" icon = "magic" - def __init__(self, *args, **kwargs): - super(CreateRedshiftProxy, self).__init__(*args, **kwargs) - + def create(self, subset_name, instance_data, pre_create_data): + import hou # noqa # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_data.pop("active", None) # Redshift provides a `Redshift_Proxy_Output` node type which shows # a limited set of parameters by default and is set to extract a @@ -21,28 +23,24 @@ class CreateRedshiftProxy(plugin.Creator): # why this happens. # TODO: Somehow enforce so that it only shows the original limited # attributes of the Redshift_Proxy_Output node type - self.data.update({"node_type": "Redshift_Proxy_Output"}) + instance_data.update({"node_type": "Redshift_Proxy_Output"}) - def _process(self, instance): - """Creator main entry point. + instance = super(CreateRedshiftProxy, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - Args: - instance (hou.Node): Created Houdini instance. 
+ instance_node = hou.node(instance.get("instance_node")) - """ parms = { - "RS_archive_file": '$HIP/pyblish/`chs("subset")`.$F4.rs', + "RS_archive_file": '$HIP/pyblish/{}.$F4.rs'.format(subset_name), } - if self.nodes: - node = self.nodes[0] - path = node.path() - parms["RS_archive_sopPath"] = path + if self.selected_nodes: + parms["RS_archive_sopPath"] = self.selected_nodes[0].path() - instance.setParms(parms) + instance_node.setParms(parms) # Lock some Avalon attributes - to_lock = ["family", "id"] - for name in to_lock: - parm = instance.parm(name) - parm.lock(True) + to_lock = ["family", "id", "prim_to_detail_pattern"] + self.lock_parameters(instance_node, to_lock) diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py index 6949ca169b..2cbe9bfda1 100644 --- a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py +++ b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py @@ -1,41 +1,40 @@ -import hou +# -*- coding: utf-8 -*- +"""Creator plugin to create Redshift ROP.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance -class CreateRedshiftROP(plugin.Creator): +class CreateRedshiftROP(plugin.HoudiniCreator): """Redshift ROP""" - + identifier = "io.openpype.creators.houdini.redshift_rop" label = "Redshift ROP" family = "redshift_rop" icon = "magic" defaults = ["master"] - def __init__(self, *args, **kwargs): - super(CreateRedshiftROP, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + import hou # noqa + + instance_data.pop("active", None) + instance_data.update({"node_type": "Redshift_ROP"}) + # Add chunk size attribute + instance_data["chunkSize"] = 10 # Clear the family prefix from the subset - subset = self.data["subset"] + subset = subset_name subset_no_prefix = subset[len(self.family):] subset_no_prefix = subset_no_prefix[0].lower() + subset_no_prefix[1:] - self.data["subset"] = subset_no_prefix + subset_name = subset_no_prefix - # Add chunk size attribute - self.data["chunkSize"] = 10 + instance = super(CreateRedshiftROP, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_node = hou.node(instance.get("instance_node")) - self.data.update({"node_type": "Redshift_ROP"}) - - def _process(self, instance): - """Creator main entry point. - - Args: - instance (hou.Node): Created Houdini instance. - - """ - basename = instance.name() - instance.setName(basename + "_ROP", unique_name=True) + basename = instance_node.name() + instance_node.setName(basename + "_ROP", unique_name=True) # Also create the linked Redshift IPR Rop try: @@ -43,11 +42,12 @@ class CreateRedshiftROP(plugin.Creator): "Redshift_IPR", node_name=basename + "_IPR" ) except hou.OperationFailed: - raise Exception(("Cannot create Redshift node. Is Redshift " - "installed and enabled?")) + raise plugin.OpenPypeCreatorError( + ("Cannot create Redshift node. 
Is Redshift " + "installed and enabled?")) # Move it to directly under the Redshift ROP - ipr_rop.setPosition(instance.position() + hou.Vector2(0, -1)) + ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1)) # Set the linked rop to the Redshift ROP ipr_rop.parm("linked_rop").set(ipr_rop.relativePathTo(instance)) @@ -61,10 +61,8 @@ class CreateRedshiftROP(plugin.Creator): "RS_outputMultilayerMode": 0, # no multi-layered exr "RS_outputBeautyAOVSuffix": "beauty", } - instance.setParms(parms) + instance_node.setParms(parms) # Lock some Avalon attributes to_lock = ["family", "id"] - for name in to_lock: - parm = instance.parm(name) - parm.lock(True) + self.lock_parameters(instance_node, to_lock) diff --git a/openpype/hosts/houdini/plugins/create/create_usd.py b/openpype/hosts/houdini/plugins/create/create_usd.py index 5bcb7840c0..51ed8237c5 100644 --- a/openpype/hosts/houdini/plugins/create/create_usd.py +++ b/openpype/hosts/houdini/plugins/create/create_usd.py @@ -1,39 +1,39 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating USDs.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance -class CreateUSD(plugin.Creator): +class CreateUSD(plugin.HoudiniCreator): """Universal Scene Description""" - + identifier = "io.openpype.creators.houdini.usd" label = "USD (experimental)" family = "usd" icon = "gears" enabled = False - def __init__(self, *args, **kwargs): - super(CreateUSD, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + import hou # noqa - # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_data.pop("active", None) + instance_data.update({"node_type": "usd"}) - self.data.update({"node_type": "usd"}) + instance = super(CreateUSD, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - def _process(self, instance): - """Creator main entry point. + instance_node = hou.node(instance.get("instance_node")) - Args: - instance (hou.Node): Created Houdini instance. 
- - """ parms = { - "lopoutput": "$HIP/pyblish/%s.usd" % self.name, + "lopoutput": "$HIP/pyblish/{}.usd".format(subset_name), "enableoutputprocessor_simplerelativepaths": False, } - if self.nodes: - node = self.nodes[0] - parms.update({"loppath": node.path()}) + if self.selected_nodes: + parms["loppath"] = self.selected_nodes[0].path() - instance.setParms(parms) + instance_node.setParms(parms) # Lock any parameters in this list to_lock = [ @@ -42,6 +42,4 @@ class CreateUSD(plugin.Creator): "family", "id", ] - for name in to_lock: - parm = instance.parm(name) - parm.lock(True) + self.lock_parameters(instance_node, to_lock) diff --git a/openpype/hosts/houdini/plugins/create/create_usdrender.py b/openpype/hosts/houdini/plugins/create/create_usdrender.py index cb3fe3f02b..f78f0bed50 100644 --- a/openpype/hosts/houdini/plugins/create/create_usdrender.py +++ b/openpype/hosts/houdini/plugins/create/create_usdrender.py @@ -1,42 +1,41 @@ -import hou +# -*- coding: utf-8 -*- +"""Creator plugin for creating USD renders.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance -class CreateUSDRender(plugin.Creator): +class CreateUSDRender(plugin.HoudiniCreator): """USD Render ROP in /stage""" - + identifier = "io.openpype.creators.houdini.usdrender" label = "USD Render (experimental)" family = "usdrender" icon = "magic" - def __init__(self, *args, **kwargs): - super(CreateUSDRender, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + import hou # noqa - self.parent = hou.node("/stage") + instance_data["parent"] = hou.node("/stage") # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_data.pop("active", None) + instance_data.update({"node_type": "usdrender"}) - self.data.update({"node_type": "usdrender"}) + instance = super(CreateUSDRender, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - def _process(self, instance): - """Creator main entry point. + instance_node = hou.node(instance.get("instance_node")) - Args: - instance (hou.Node): Created Houdini instance. 
- """ parms = { # Render frame range "trange": 1 } - if self.nodes: - node = self.nodes[0] - parms.update({"loppath": node.path()}) - instance.setParms(parms) + if self.selected_nodes: + parms["loppath"] = self.selected_nodes[0].path() + instance_node.setParms(parms) # Lock some Avalon attributes to_lock = ["family", "id"] - for name in to_lock: - parm = instance.parm(name) - parm.lock(True) + self.lock_parameters(instance_node, to_lock) diff --git a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py index 242c21fc72..1a5011745f 100644 --- a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py +++ b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py @@ -1,38 +1,36 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating VDB Caches.""" from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance -class CreateVDBCache(plugin.Creator): +class CreateVDBCache(plugin.HoudiniCreator): """OpenVDB from Geometry ROP""" - + identifier = "io.openpype.creators.houdini.vdbcache" name = "vbdcache" label = "VDB Cache" family = "vdbcache" icon = "cloud" - def __init__(self, *args, **kwargs): - super(CreateVDBCache, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + import hou - # Remove the active, we are checking the bypass flag of the nodes - self.data.pop("active", None) + instance_data.pop("active", None) + instance_data.update({"node_type": "geometry"}) - # Set node type to create for output - self.data["node_type"] = "geometry" + instance = super(CreateVDBCache, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance - def _process(self, instance): - """Creator main entry point. - - Args: - instance (hou.Node): Created Houdini instance. 
- - """ + instance_node = hou.node(instance.get("instance_node")) parms = { - "sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name, + "sopoutput": "$HIP/pyblish/{}.$F4.vdb".format(subset_name), "initsim": True, "trange": 1 } - if self.nodes: - node = self.nodes[0] - parms.update({"soppath": node.path()}) + if self.selected_nodes: + parms["soppath"] = self.selected_nodes[0].path() - instance.setParms(parms) + instance_node.setParms(parms) diff --git a/openpype/hosts/houdini/plugins/create/create_workfile.py b/openpype/hosts/houdini/plugins/create/create_workfile.py new file mode 100644 index 0000000000..0c6d840810 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_workfile.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating workfiles.""" +from openpype.hosts.houdini.api import plugin +from openpype.hosts.houdini.api.lib import read, imprint +from openpype.hosts.houdini.api.pipeline import CONTEXT_CONTAINER +from openpype.pipeline import CreatedInstance, AutoCreator +from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_name +import hou + + +class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator): + """Workfile auto-creator.""" + identifier = "io.openpype.creators.houdini.workfile" + label = "Workfile" + family = "workfile" + icon = "document" + + default_variant = "Main" + + def create(self): + variant = self.default_variant + current_instance = next( + ( + instance for instance in self.create_context.instances + if instance.creator_identifier == self.identifier + ), None) + + project_name = self.project_name + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + + if current_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + data.update( + self.get_dynamic_data( + variant, task_name, asset_doc, + project_name, host_name, current_instance) + ) + self.log.info("Auto-creating workfile instance...") + current_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(current_instance) + elif ( + current_instance["asset"] != asset_name + or current_instance["task"] != task_name + ): + # Update instance context if is not the same + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + current_instance["asset"] = asset_name + current_instance["task"] = task_name + current_instance["subset"] = subset_name + + # write workfile information to context container. 
+ op_ctx = hou.node(CONTEXT_CONTAINER) + if not op_ctx: + op_ctx = self.create_context_node() + + workfile_data = {"workfile": current_instance.data_to_store()} + imprint(op_ctx, workfile_data) + + def collect_instances(self): + op_ctx = hou.node(CONTEXT_CONTAINER) + instance = read(op_ctx) + if not instance: + return + workfile = instance.get("workfile") + if not workfile: + return + created_instance = CreatedInstance.from_existing( + workfile, self + ) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + op_ctx = hou.node(CONTEXT_CONTAINER) + for created_inst, _changes in update_list: + if created_inst["creator_identifier"] == self.identifier: + workfile_data = {"workfile": created_inst.data_to_store()} + imprint(op_ctx, workfile_data, update=True) diff --git a/openpype/hosts/houdini/plugins/publish/collect_active_state.py b/openpype/hosts/houdini/plugins/publish/collect_active_state.py index 862d5720e1..cc3f2e7fae 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_active_state.py +++ b/openpype/hosts/houdini/plugins/publish/collect_active_state.py @@ -1,4 +1,5 @@ import pyblish.api +import hou class CollectInstanceActiveState(pyblish.api.InstancePlugin): @@ -24,7 +25,7 @@ class CollectInstanceActiveState(pyblish.api.InstancePlugin): # Check bypass state and reverse active = True - node = instance[0] + node = hou.node(instance.get("instance_node")) if hasattr(node, "isBypassed"): active = not node.isBypassed() diff --git a/openpype/hosts/houdini/plugins/publish/collect_current_file.py b/openpype/hosts/houdini/plugins/publish/collect_current_file.py index 1383c274a2..9cca07fdc7 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/collect_current_file.py @@ -5,19 +5,20 @@ from openpype.pipeline import legacy_io import pyblish.api -class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin): +class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin): """Inject the current working file into context""" order = pyblish.api.CollectorOrder - 0.01 label = "Houdini Current File" hosts = ["houdini"] + family = ["workfile"] - def process(self, context): + def process(self, instance): """Inject the current working file""" current_file = hou.hipFile.path() if not os.path.exists(current_file): - # By default Houdini will even point a new scene to a path. + # By default, Houdini will even point a new scene to a path. # However if the file is not saved at all and does not exist, # we assume the user never set it. filepath = "" @@ -34,43 +35,26 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin): "saved correctly." 
) - context.data["currentFile"] = current_file + instance.context.data["currentFile"] = current_file folder, file = os.path.split(current_file) filename, ext = os.path.splitext(file) - task = legacy_io.Session["AVALON_TASK"] - - data = {} - - # create instance - instance = context.create_instance(name=filename) - subset = 'workfile' + task.capitalize() - - data.update({ - "subset": subset, - "asset": os.getenv("AVALON_ASSET", None), - "label": subset, - "publish": True, - "family": 'workfile', - "families": ['workfile'], + instance.data.update({ "setMembers": [current_file], - "frameStart": context.data['frameStart'], - "frameEnd": context.data['frameEnd'], - "handleStart": context.data['handleStart'], - "handleEnd": context.data['handleEnd'] + "frameStart": instance.context.data['frameStart'], + "frameEnd": instance.context.data['frameEnd'], + "handleStart": instance.context.data['handleStart'], + "handleEnd": instance.context.data['handleEnd'] }) - data['representations'] = [{ + instance.data['representations'] = [{ 'name': ext.lstrip("."), 'ext': ext.lstrip("."), 'files': file, "stagingDir": folder, }] - instance.data.update(data) - self.log.info('Collected instance: {}'.format(file)) self.log.info('Scene path: {}'.format(current_file)) self.log.info('staging Dir: {}'.format(folder)) - self.log.info('subset: {}'.format(subset)) diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py index 9bd43d8a09..531cdf1249 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_frames.py +++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py @@ -1,19 +1,13 @@ +# -*- coding: utf-8 -*- +"""Collector plugin for frames data on ROP instances.""" import os import re -import hou +import hou # noqa import pyblish.api from openpype.hosts.houdini.api import lib -def splitext(name, allowed_multidot_extensions): - - for ext in allowed_multidot_extensions: - if name.endswith(ext): - return name[:-len(ext)], ext - - return os.path.splitext(name) - class CollectFrames(pyblish.api.InstancePlugin): """Collect all frames which would be saved from the ROP nodes""" @@ -24,7 +18,9 @@ class CollectFrames(pyblish.api.InstancePlugin): def process(self, instance): - ropnode = instance[0] + ropnode = hou.node(instance.data["instance_node"]) + frame_data = lib.get_frame_data(ropnode) + instance.data.update(frame_data) start_frame = instance.data.get("frameStart", None) end_frame = instance.data.get("frameEnd", None) @@ -38,13 +34,13 @@ class CollectFrames(pyblish.api.InstancePlugin): self.log.warning("Using current frame: {}".format(hou.frame())) output = output_parm.eval() - _, ext = splitext(output, + _, ext = lib.splitext(output, allowed_multidot_extensions=[".ass.gz"]) file_name = os.path.basename(output) result = file_name # Get the filename pattern match from the output - # path so we can compute all frames that would + # path, so we can compute all frames that would # come out from rendering the ROP node if there # is a frame pattern in the name pattern = r"\w+\.(\d+)" + re.escape(ext) @@ -63,8 +59,9 @@ class CollectFrames(pyblish.api.InstancePlugin): # for a custom frame list. So this should be refactored. 
instance.data.update({"frames": result}) - def create_file_list(self, match, start_frame, end_frame): - """Collect files based on frame range and regex.match + @staticmethod + def create_file_list(match, start_frame, end_frame): + """Collect files based on frame range and `regex.match` Args: match(re.match): match object diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py index d38927984a..bb85630552 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_instances.py +++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py @@ -47,6 +47,11 @@ class CollectInstances(pyblish.api.ContextPlugin): if node.evalParm("id") != "pyblish.avalon.instance": continue + # instance was created by new creator code, skip it as + # it is already collected. + if node.parm("creator_identifier"): + continue + has_family = node.evalParm("family") assert has_family, "'%s' is missing 'family'" % node.name() @@ -58,7 +63,8 @@ class CollectInstances(pyblish.api.ContextPlugin): data.update({"active": not node.isBypassed()}) # temporarily translation of `active` to `publish` till issue has - # been resolved, https://github.com/pyblish/pyblish-base/issues/307 + # been resolved. + # https://github.com/pyblish/pyblish-base/issues/307 if "active" in data: data["publish"] = data["active"] @@ -78,6 +84,7 @@ class CollectInstances(pyblish.api.ContextPlugin): instance.data["families"] = [instance.data["family"]] instance[:] = [node] + instance.data["instance_node"] = node.path() instance.data.update(data) def sort_by_family(instance): diff --git a/openpype/hosts/houdini/plugins/publish/collect_output_node.py b/openpype/hosts/houdini/plugins/publish/collect_output_node.py index 0130c0a8da..601ed17b39 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/collect_output_node.py @@ -22,7 +22,7 @@ class CollectOutputSOPPath(pyblish.api.InstancePlugin): import hou - node = instance[0] + node = hou.node(instance.data["instance_node"]) # Get sop path node_type = node.type().name() diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py index 72b554b567..346bdf3421 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py +++ b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py @@ -69,7 +69,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): def process(self, instance): - rop = instance[0] + rop = hou.node(instance.get("instance_node")) # Collect chunkSize chunk_size_parm = rop.parm("chunkSize") diff --git a/openpype/hosts/houdini/plugins/publish/collect_render_products.py b/openpype/hosts/houdini/plugins/publish/collect_render_products.py index d7163b43c0..fcd80e0082 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_render_products.py +++ b/openpype/hosts/houdini/plugins/publish/collect_render_products.py @@ -53,7 +53,7 @@ class CollectRenderProducts(pyblish.api.InstancePlugin): node = instance.data.get("output_node") if not node: - rop_path = instance[0].path() + rop_path = instance.data["instance_node"].path() raise RuntimeError( "No output node found. 
Make sure to connect an " "input to the USD ROP: %s" % rop_path diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py index cf8d61cda3..81274c670e 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py +++ b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py @@ -1,6 +1,6 @@ import pyblish.api -from openyppe.client import get_subset_by_name, get_asset_by_name +from openpype.client import get_subset_by_name, get_asset_by_name from openpype.pipeline import legacy_io import openpype.lib.usdlib as usdlib diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py b/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py index e3985e3c97..833add854b 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py +++ b/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py @@ -3,6 +3,8 @@ import os import pyblish.api import openpype.hosts.houdini.api.usd as usdlib +import hou + class CollectUsdLayers(pyblish.api.InstancePlugin): """Collect the USD Layers that have configured save paths.""" @@ -19,7 +21,7 @@ class CollectUsdLayers(pyblish.api.InstancePlugin): self.log.debug("No output node found..") return - rop_node = instance[0] + rop_node = hou.node(instance.get("instance_node")) save_layers = [] for layer in usdlib.get_configured_save_layers(rop_node): @@ -54,8 +56,10 @@ class CollectUsdLayers(pyblish.api.InstancePlugin): layer_inst.data["subset"] = "__stub__" layer_inst.data["label"] = label layer_inst.data["asset"] = instance.data["asset"] - layer_inst.append(instance[0]) # include same USD ROP - layer_inst.append((layer, save_path)) # include layer data + # include same USD ROP + layer_inst.append(rop_node) + # include layer data + layer_inst.append((layer, save_path)) # Allow this subset to be grouped into a USD Layer on creation layer_inst.data["subsetGroup"] = "USD Layer" diff --git a/openpype/hosts/houdini/plugins/publish/extract_alembic.py b/openpype/hosts/houdini/plugins/publish/extract_alembic.py index 758d4c560b..cb2d4ef424 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_alembic.py +++ b/openpype/hosts/houdini/plugins/publish/extract_alembic.py @@ -5,6 +5,8 @@ import pyblish.api from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop +import hou + class ExtractAlembic(publish.Extractor): @@ -15,7 +17,7 @@ class ExtractAlembic(publish.Extractor): def process(self, instance): - ropnode = instance[0] + ropnode = hou.node(instance.data["instance_node"]) # Get the filename from the filename parameter output = ropnode.evalParm("filename") diff --git a/openpype/hosts/houdini/plugins/publish/extract_ass.py b/openpype/hosts/houdini/plugins/publish/extract_ass.py index a302b451cb..0d246625ba 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_ass.py +++ b/openpype/hosts/houdini/plugins/publish/extract_ass.py @@ -5,6 +5,8 @@ import pyblish.api from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop +import hou + class ExtractAss(publish.Extractor): @@ -15,7 +17,7 @@ class ExtractAss(publish.Extractor): def process(self, instance): - ropnode = instance[0] + ropnode = hou.node(instance.data["instance_node"]) # Get the filename from the filename parameter # `.evalParm(parameter)` will make sure all tokens are resolved @@ -33,8 +35,12 @@ class ExtractAss(publish.Extractor): # error and thus still continues to the integrator. 
To capture that # we make sure all files exist files = instance.data["frames"] - missing = [fname for fname in files - if not os.path.exists(os.path.join(staging_dir, fname))] + missing = [] + for file_name in files: + full_path = os.path.normpath(os.path.join(staging_dir, file_name)) + if not os.path.exists(full_path): + missing.append(full_path) + if missing: raise RuntimeError("Failed to complete Arnold ass extraction. " "Missing output files: {}".format(missing)) diff --git a/openpype/hosts/houdini/plugins/publish/extract_composite.py b/openpype/hosts/houdini/plugins/publish/extract_composite.py index 23e875f107..7a1ab36b93 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_composite.py +++ b/openpype/hosts/houdini/plugins/publish/extract_composite.py @@ -1,9 +1,10 @@ import os - import pyblish.api from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop +from openpype.hosts.houdini.api.lib import render_rop, splitext + +import hou class ExtractComposite(publish.Extractor): @@ -15,7 +16,7 @@ class ExtractComposite(publish.Extractor): def process(self, instance): - ropnode = instance[0] + ropnode = hou.node(instance.data["instance_node"]) # Get the filename from the copoutput parameter # `.evalParm(parameter)` will make sure all tokens are resolved @@ -28,8 +29,24 @@ class ExtractComposite(publish.Extractor): render_rop(ropnode) - if "files" not in instance.data: - instance.data["files"] = [] + output = instance.data["frames"] + _, ext = splitext(output[0], []) + ext = ext.lstrip(".") - frames = instance.data["frames"] - instance.data["files"].append(frames) + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": ext, + "ext": ext, + "files": output, + "stagingDir": staging_dir, + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], + } + + from pprint import pformat + + self.log.info(pformat(representation)) + + instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_hda.py b/openpype/hosts/houdini/plugins/publish/extract_hda.py index 7dd03a92b7..8b97bf364f 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_hda.py +++ b/openpype/hosts/houdini/plugins/publish/extract_hda.py @@ -1,11 +1,9 @@ # -*- coding: utf-8 -*- import os - from pprint import pformat - import pyblish.api - from openpype.pipeline import publish +import hou class ExtractHDA(publish.Extractor): @@ -17,7 +15,7 @@ class ExtractHDA(publish.Extractor): def process(self, instance): self.log.info(pformat(instance.data)) - hda_node = instance[0] + hda_node = hou.node(instance.data.get("instance_node")) hda_def = hda_node.type().definition() hda_options = hda_def.options() hda_options.setSaveInitialParmsAndContents(True) diff --git a/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py index ca9be64a47..29ede98a52 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py +++ b/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py @@ -5,6 +5,8 @@ import pyblish.api from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop +import hou + class ExtractRedshiftProxy(publish.Extractor): @@ -15,7 +17,7 @@ class ExtractRedshiftProxy(publish.Extractor): def process(self, instance): - ropnode = instance[0] + ropnode = hou.node(instance.get("instance_node")) # Get the filename from the filename 
parameter # `.evalParm(parameter)` will make sure all tokens are resolved diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd.py b/openpype/hosts/houdini/plugins/publish/extract_usd.py index 78c32affb4..cbeb5add71 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd.py @@ -5,6 +5,7 @@ import pyblish.api from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop +import hou class ExtractUSD(publish.Extractor): @@ -17,7 +18,7 @@ class ExtractUSD(publish.Extractor): def process(self, instance): - ropnode = instance[0] + ropnode = hou.node(instance.get("instance_node")) # Get the filename from the filename parameter output = ropnode.evalParm("lopoutput") diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py index f686f712bb..0288b7363a 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -187,7 +187,7 @@ class ExtractUSDLayered(publish.Extractor): # Main ROP node, either a USD Rop or ROP network with # multiple USD ROPs - node = instance[0] + node = hou.node(instance.get("instance_node")) # Collect any output dependencies that have not been processed yet # during extraction of other instances diff --git a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py index 26ec423048..434d6a2160 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py +++ b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py @@ -5,6 +5,8 @@ import pyblish.api from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop +import hou + class ExtractVDBCache(publish.Extractor): @@ -15,7 +17,7 @@ class ExtractVDBCache(publish.Extractor): def process(self, instance): - ropnode = instance[0] + ropnode = hou.node(instance.get("instance_node")) # Get the filename from the filename parameter # `.evalParm(parameter)` will make sure all tokens are resolved diff --git a/openpype/hosts/houdini/plugins/publish/help/validate_vdb_input_node.xml b/openpype/hosts/houdini/plugins/publish/help/validate_vdb_input_node.xml new file mode 100644 index 0000000000..0f92560bf7 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/help/validate_vdb_input_node.xml @@ -0,0 +1,21 @@ + + + +Scene setting + +## Invalid input node + +VDB input must have the same number of VDBs, points, primitives and vertices as output. + + + +### __Detailed Info__ (optional) + +A VDB is an inherited type of Prim, holds the following data: + - Primitives: 1 + - Points: 1 + - Vertices: 1 + - VDBs: 1 + + + \ No newline at end of file diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py index c990f481d3..16d9ef9aec 100644 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py @@ -2,7 +2,7 @@ import pyblish.api from openpype.lib import version_up from openpype.pipeline import registered_host - +from openpype.hosts.houdini.api import HoudiniHost class IncrementCurrentFile(pyblish.api.ContextPlugin): """Increment the current file. 
@@ -20,11 +20,11 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin): def process(self, context): # Filename must not have changed since collecting - host = registered_host() + host = registered_host() # type: HoudiniHost current_file = host.current_file() assert ( context.data["currentFile"] == current_file ), "Collected filename from current scene name." new_filepath = version_up(current_file) - host.save(new_filepath) + host.save_workfile(new_filepath) diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py index 6128c7af77..d6e07ccab0 100644 --- a/openpype/hosts/houdini/plugins/publish/save_scene.py +++ b/openpype/hosts/houdini/plugins/publish/save_scene.py @@ -14,13 +14,13 @@ class SaveCurrentScene(pyblish.api.ContextPlugin): # Filename must not have changed since collecting host = registered_host() - current_file = host.current_file() + current_file = host.get_current_workfile() assert context.data['currentFile'] == current_file, ( "Collected filename from current scene name." ) if host.has_unsaved_changes(): - self.log.info("Saving current file..") - host.save_file(current_file) + self.log.info("Saving current file {}...".format(current_file)) + host.save_workfile(current_file) else: self.log.debug("No unsaved changes, skipping file save..") diff --git a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py deleted file mode 100644 index ac408bc842..0000000000 --- a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py +++ /dev/null @@ -1,47 +0,0 @@ -import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder - - -class ValidateVDBInputNode(pyblish.api.InstancePlugin): - """Validate that the node connected to the output node is of type VDB. - - Regardless of the amount of VDBs create the output will need to have an - equal amount of VDBs, points, primitives and vertices - - A VDB is an inherited type of Prim, holds the following data: - - Primitives: 1 - - Points: 1 - - Vertices: 1 - - VDBs: 1 - - """ - - order = ValidateContentsOrder + 0.1 - families = ["vdbcache"] - hosts = ["houdini"] - label = "Validate Input Node (VDB)" - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise RuntimeError( - "Node connected to the output node is not" "of type VDB!" 
- ) - - @classmethod - def get_invalid(cls, instance): - - node = instance.data["output_node"] - - prims = node.geometry().prims() - nr_of_prims = len(prims) - - nr_of_points = len(node.geometry().points()) - if nr_of_points != nr_of_prims: - cls.log.error("The number of primitives and points do not match") - return [instance] - - for prim in prims: - if prim.numVertices() != 1: - cls.log.error("Found primitive with more than 1 vertex!") - return [instance] diff --git a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py index ea800707fb..86e92a052f 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py +++ b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py @@ -1,8 +1,8 @@ +# -*- coding: utf-8 -*- import pyblish.api from collections import defaultdict - -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): @@ -16,7 +16,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): """ - order = ValidateContentsOrder + 0.1 + order = pyblish.api.ValidatorOrder + 0.1 families = ["pointcache"] hosts = ["houdini"] label = "Validate Primitive to Detail (Abc)" @@ -24,18 +24,26 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Primitives found with inconsistent primitive " - "to detail attributes. See log." + raise PublishValidationError( + ("Primitives found with inconsistent primitive " + "to detail attributes. See log."), + title=self.label ) @classmethod def get_invalid(cls, instance): + import hou # noqa + output_node = instance.data.get("output_node") + rop_node = hou.node(instance.data["instance_node"]) + if output_node is None: + cls.log.error( + "SOP Output node in '%s' does not exist. " + "Ensure a valid SOP output path is set." % rop_node.path() + ) - output = instance.data["output_node"] + return [rop_node.path()] - rop = instance[0] - pattern = rop.parm("prim_to_detail_pattern").eval().strip() + pattern = rop_node.parm("prim_to_detail_pattern").eval().strip() if not pattern: cls.log.debug( "Alembic ROP has no 'Primitive to Detail' pattern. " @@ -43,7 +51,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): ) return - build_from_path = rop.parm("build_from_path").eval() + build_from_path = rop_node.parm("build_from_path").eval() if not build_from_path: cls.log.debug( "Alembic ROP has 'Build from Path' disabled. " @@ -51,14 +59,14 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): ) return - path_attr = rop.parm("path_attrib").eval() + path_attr = rop_node.parm("path_attrib").eval() if not path_attr: cls.log.error( "The Alembic ROP node has no Path Attribute" "value set, but 'Build Hierarchy from Attribute'" "is enabled." ) - return [rop.path()] + return [rop_node.path()] # Let's assume each attribute is explicitly named for now and has no # wildcards for Primitive to Detail. This simplifies the check. @@ -67,7 +75,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): # Check if the primitive attribute exists frame = instance.data.get("frameStart", 0) - geo = output.geometryAtFrame(frame) + geo = output_node.geometryAtFrame(frame) # If there are no primitives on the start frame then it might be # something that is emitted over time. 
As such we can't actually @@ -86,7 +94,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): "Geometry Primitives are missing " "path attribute: `%s`" % path_attr ) - return [output.path()] + return [output_node.path()] # Ensure at least a single string value is present if not attrib.strings(): @@ -94,7 +102,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): "Primitive path attribute has no " "string values: %s" % path_attr ) - return [output.path()] + return [output_node.path()] paths = None for attr in pattern.split(" "): @@ -130,4 +138,4 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): "Path has multiple values: %s (path: %s)" % (list(values), path) ) - return [output.path()] + return [output_node.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py index cbed3ea235..44d58cfa36 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py +++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py @@ -1,7 +1,6 @@ +# -*- coding: utf-8 -*- import pyblish.api - -from openpype.pipeline.publish import ValidateContentsOrder - +import hou class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin): """Validate Face Sets are disabled for extraction to pointcache. @@ -18,14 +17,14 @@ class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin): """ - order = ValidateContentsOrder + 0.1 + order = pyblish.api.ValidatorOrder + 0.1 families = ["pointcache"] hosts = ["houdini"] label = "Validate Alembic ROP Face Sets" def process(self, instance): - rop = instance[0] + rop = hou.node(instance.data["instance_node"]) facesets = rop.parm("facesets").eval() # 0 = No Face Sets diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py index 2625ae5f83..bafb206bd3 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py @@ -1,6 +1,7 @@ +# -*- coding: utf-8 -*- import pyblish.api - -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError +import hou class ValidateAlembicInputNode(pyblish.api.InstancePlugin): @@ -12,7 +13,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin): """ - order = ValidateContentsOrder + 0.1 + order = pyblish.api.ValidatorOrder + 0.1 families = ["pointcache"] hosts = ["houdini"] label = "Validate Input Node (Abc)" @@ -20,18 +21,28 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Primitive types found that are not supported" - "for Alembic output." + raise PublishValidationError( + ("Primitive types found that are not supported" + "for Alembic output."), + title=self.label ) @classmethod def get_invalid(cls, instance): invalid_prim_types = ["VDB", "Volume"] - node = instance.data["output_node"] + output_node = instance.data.get("output_node") - if not hasattr(node, "geometry"): + if output_node is None: + node = hou.node(instance.data["instance_node"]) + cls.log.error( + "SOP Output node in '%s' does not exist. " + "Ensure a valid SOP output path is set." 
% node.path() + ) + + return [node.path()] + + if not hasattr(output_node, "geometry"): # In the case someone has explicitly set an Object # node instead of a SOP node in Geometry context # then for now we ignore - this allows us to also @@ -40,7 +51,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin): return frame = instance.data.get("frameStart", 0) - geo = node.geometryAtFrame(frame) + geo = output_node.geometryAtFrame(frame) invalid = False for prim_type in invalid_prim_types: diff --git a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py b/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py index 5eb8f93d03..f11f9c0c62 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py +++ b/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py @@ -1,6 +1,7 @@ import pyblish.api from openpype.hosts.houdini.api import lib +import hou class ValidateAnimationSettings(pyblish.api.InstancePlugin): @@ -36,7 +37,7 @@ class ValidateAnimationSettings(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance): - node = instance[0] + node = hou.node(instance.get("instance_node")) # Check trange parm, 0 means Render Current Frame frame_range = node.evalParm("trange") diff --git a/openpype/hosts/houdini/plugins/publish/validate_bypass.py b/openpype/hosts/houdini/plugins/publish/validate_bypass.py index 7cf8da69d6..1bf51a986c 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_bypass.py +++ b/openpype/hosts/houdini/plugins/publish/validate_bypass.py @@ -1,6 +1,8 @@ +# -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError +import hou class ValidateBypassed(pyblish.api.InstancePlugin): """Validate all primitives build hierarchy from attribute when enabled. @@ -11,7 +13,7 @@ class ValidateBypassed(pyblish.api.InstancePlugin): """ - order = ValidateContentsOrder - 0.1 + order = pyblish.api.ValidatorOrder - 0.1 families = ["*"] hosts = ["houdini"] label = "Validate ROP Bypass" @@ -26,14 +28,15 @@ class ValidateBypassed(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: rop = invalid[0] - raise RuntimeError( - "ROP node %s is set to bypass, publishing cannot continue.." 
- % rop.path() + raise PublishValidationError( + ("ROP node {} is set to bypass, publishing cannot " + "continue.".format(rop.path())), + title=self.label ) @classmethod def get_invalid(cls, instance): - rop = instance[0] + rop = hou.node(instance.get("instance_node")) if hasattr(rop, "isBypassed") and rop.isBypassed(): return [rop] diff --git a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py index d414920f8b..41b5273e6a 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py +++ b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py @@ -1,11 +1,13 @@ +# -*- coding: utf-8 -*- +"""Validator plugin for Houdini Camera ROP settings.""" import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError class ValidateCameraROP(pyblish.api.InstancePlugin): """Validate Camera ROP settings.""" - order = ValidateContentsOrder + order = pyblish.api.ValidatorOrder families = ["camera"] hosts = ["houdini"] label = "Camera ROP" @@ -14,30 +16,45 @@ class ValidateCameraROP(pyblish.api.InstancePlugin): import hou - node = instance[0] + node = hou.node(instance.data.get("instance_node")) if node.parm("use_sop_path").eval(): - raise RuntimeError( - "Alembic ROP for Camera export should not be " - "set to 'Use Sop Path'. Please disable." + raise PublishValidationError( + ("Alembic ROP for Camera export should not be " + "set to 'Use Sop Path'. Please disable."), + title=self.label ) # Get the root and objects parameter of the Alembic ROP node root = node.parm("root").eval() objects = node.parm("objects").eval() - assert root, "Root parameter must be set on Alembic ROP" - assert root.startswith("/"), "Root parameter must start with slash /" - assert objects, "Objects parameter must be set on Alembic ROP" - assert len(objects.split(" ")) == 1, "Must have only a single object." 
+ errors = [] + if not root: + errors.append("Root parameter must be set on Alembic ROP") + if not root.startswith("/"): + errors.append("Root parameter must start with slash /") + if not objects: + errors.append("Objects parameter must be set on Alembic ROP") + if len(objects.split(" ")) != 1: + errors.append("Must have only a single object.") + + if errors: + for error in errors: + self.log.error(error) + raise PublishValidationError( + "Some checks failed, see validator log.", + title=self.label) # Check if the object exists and is a camera path = root + "/" + objects camera = hou.node(path) if not camera: - raise ValueError("Camera path does not exist: %s" % path) + raise PublishValidationError( + "Camera path does not exist: %s" % path, + title=self.label) if camera.type().name() != "cam": - raise ValueError( - "Object set in Alembic ROP is not a camera: " - "%s (type: %s)" % (camera, camera.type().name()) - ) + raise PublishValidationError( + ("Object set in Alembic ROP is not a camera: " + "{} (type: {})").format(camera, camera.type().name()), + title=self.label) diff --git a/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py index 543539ffe3..1d0377c818 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py @@ -1,4 +1,9 @@ +# -*- coding: utf-8 -*- +import sys import pyblish.api +import six + +from openpype.pipeline import PublishValidationError class ValidateCopOutputNode(pyblish.api.InstancePlugin): @@ -20,9 +25,10 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Output node(s) `%s` are incorrect. " - "See plug-in log for details." % invalid + raise PublishValidationError( + ("Output node(s) `{}` are incorrect. " + "See plug-in log for details.").format(invalid), + title=self.label ) @classmethod @@ -30,10 +36,19 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): import hou - output_node = instance.data["output_node"] + try: + output_node = instance.data["output_node"] + except KeyError: + six.reraise( + PublishValidationError, + PublishValidationError( + "Can't determine COP output node.", + title=cls.__name__), + sys.exc_info()[2] + ) if output_node is None: - node = instance[0] + node = hou.node(instance.get("instance_node")) cls.log.error( "COP Output node in '%s' does not exist. " "Ensure a valid COP output path is set." % node.path() @@ -54,7 +69,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): # For the sake of completeness also assert the category type # is Cop2 to avoid potential edge case scenarios even though # the isinstance check above should be stricter than this category - assert output_node.type().category().name() == "Cop2", ( - "Output node %s is not of category Cop2. This is a bug.." - % output_node.path() - ) + if output_node.type().category().name() != "Cop2": + raise PublishValidationError( + ("Output node %s is not of category Cop2. 
" + "This is a bug...").format(output_node.path()), + title=cls.label) diff --git a/openpype/hosts/houdini/plugins/publish/validate_file_extension.py b/openpype/hosts/houdini/plugins/publish/validate_file_extension.py index b26d28a1e7..4584e78f4f 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_file_extension.py +++ b/openpype/hosts/houdini/plugins/publish/validate_file_extension.py @@ -1,7 +1,11 @@ +# -*- coding: utf-8 -*- import os import pyblish.api from openpype.hosts.houdini.api import lib +from openpype.pipeline import PublishValidationError + +import hou class ValidateFileExtension(pyblish.api.InstancePlugin): @@ -29,15 +33,16 @@ class ValidateFileExtension(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "ROP node has incorrect " "file extension: %s" % invalid + raise PublishValidationError( + "ROP node has incorrect file extension: {}".format(invalid), + title=self.label ) @classmethod def get_invalid(cls, instance): # Get ROP node from instance - node = instance[0] + node = hou.node(instance.data["instance_node"]) # Create lookup for current family in instance families = [] @@ -53,7 +58,9 @@ class ValidateFileExtension(pyblish.api.InstancePlugin): for family in families: extension = cls.family_extensions.get(family, None) if extension is None: - raise RuntimeError("Unsupported family: %s" % family) + raise PublishValidationError( + "Unsupported family: {}".format(family), + title=cls.label) if output_extension != extension: return [node.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_frame_token.py b/openpype/hosts/houdini/plugins/publish/validate_frame_token.py index 76b5910576..b5f6ba71e1 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_frame_token.py +++ b/openpype/hosts/houdini/plugins/publish/validate_frame_token.py @@ -1,6 +1,7 @@ import pyblish.api from openpype.hosts.houdini.api import lib +import hou class ValidateFrameToken(pyblish.api.InstancePlugin): @@ -36,7 +37,7 @@ class ValidateFrameToken(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance): - node = instance[0] + node = hou.node(instance.get("instance_node")) # Check trange parm, 0 means Render Current Frame frame_range = node.evalParm("trange") diff --git a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py index f5f03aa844..f1c52f22c1 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py +++ b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py @@ -1,4 +1,6 @@ +# -*- coding: utf-8 -*- import pyblish.api +from openpype.pipeline import PublishValidationError class ValidateHoudiniCommercialLicense(pyblish.api.InstancePlugin): @@ -24,7 +26,7 @@ class ValidateHoudiniCommercialLicense(pyblish.api.InstancePlugin): license = hou.licenseCategory() if license != hou.licenseCategoryType.Commercial: - raise RuntimeError( - "USD Publishing requires a full Commercial " - "license. You are on: %s" % license - ) + raise PublishValidationError( + ("USD Publishing requires a full Commercial " + "license. 
You are on: {}").format(license), + title=self.label) diff --git a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py index be6a798a95..9d1f92a101 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py +++ b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py @@ -1,11 +1,12 @@ +# -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin): """Validate Create Intermediate Directories is enabled on ROP node.""" - order = ValidateContentsOrder + order = pyblish.api.ValidatorOrder families = ["pointcache", "camera", "vdbcache"] hosts = ["houdini"] label = "Create Intermediate Directories Checked" @@ -14,10 +15,10 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Found ROP node with Create Intermediate " - "Directories turned off: %s" % invalid - ) + raise PublishValidationError( + ("Found ROP node with Create Intermediate " + "Directories turned off: {}".format(invalid)), + title=self.label) @classmethod def get_invalid(cls, instance): diff --git a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py index 76635d4ed5..f7c95aaf4e 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py +++ b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py @@ -1,6 +1,7 @@ +# -*- coding: utf-8 -*- import pyblish.api import hou -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError def cook_in_range(node, start, end): @@ -28,7 +29,7 @@ def get_errors(node): class ValidateNoErrors(pyblish.api.InstancePlugin): """Validate the Instance has no current cooking errors.""" - order = ValidateContentsOrder + order = pyblish.api.ValidatorOrder hosts = ["houdini"] label = "Validate no errors" @@ -37,7 +38,7 @@ class ValidateNoErrors(pyblish.api.InstancePlugin): validate_nodes = [] if len(instance) > 0: - validate_nodes.append(instance[0]) + validate_nodes.append(hou.node(instance.get("instance_node"))) output_node = instance.data.get("output_node") if output_node: validate_nodes.append(output_node) @@ -62,4 +63,6 @@ class ValidateNoErrors(pyblish.api.InstancePlugin): errors = get_errors(node) if errors: self.log.error(errors) - raise RuntimeError("Node has errors: %s" % node.path()) + raise PublishValidationError( + "Node has errors: {}".format(node.path()), + title=self.label) diff --git a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py index 7a8cd04f15..d3a4c0cfbf 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py +++ b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py @@ -1,5 +1,8 @@ +# -*- coding: utf-8 -*- import pyblish.api from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError +import hou class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): @@ -19,19 +22,26 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "See 
log for details. " "Invalid nodes: {0}".format(invalid) + raise PublishValidationError( + "See log for details. " "Invalid nodes: {0}".format(invalid), + title=self.label ) @classmethod def get_invalid(cls, instance): - import hou + output_node = instance.data.get("output_node") + rop_node = hou.node(instance.data["instance_node"]) - output = instance.data["output_node"] + if output_node is None: + cls.log.error( + "SOP Output node in '%s' does not exist. " + "Ensure a valid SOP output path is set." % rop_node.path() + ) - rop = instance[0] - build_from_path = rop.parm("build_from_path").eval() + return [rop_node.path()] + + build_from_path = rop_node.parm("build_from_path").eval() if not build_from_path: cls.log.debug( "Alembic ROP has 'Build from Path' disabled. " @@ -39,20 +49,20 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): ) return - path_attr = rop.parm("path_attrib").eval() + path_attr = rop_node.parm("path_attrib").eval() if not path_attr: cls.log.error( "The Alembic ROP node has no Path Attribute" "value set, but 'Build Hierarchy from Attribute'" "is enabled." ) - return [rop.path()] + return [rop_node.path()] cls.log.debug("Checking for attribute: %s" % path_attr) # Check if the primitive attribute exists frame = instance.data.get("frameStart", 0) - geo = output.geometryAtFrame(frame) + geo = output_node.geometryAtFrame(frame) # If there are no primitives on the current frame then we can't # check whether the path names are correct. So we'll just issue a @@ -73,7 +83,7 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): "Geometry Primitives are missing " "path attribute: `%s`" % path_attr ) - return [output.path()] + return [output_node.path()] # Ensure at least a single string value is present if not attrib.strings(): @@ -81,7 +91,7 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): "Primitive path attribute has no " "string values: %s" % path_attr ) - return [output.path()] + return [output_node.path()] paths = geo.primStringAttribValues(path_attr) # Ensure all primitives are set to a valid path @@ -93,4 +103,4 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): "Prims have no value for attribute `%s` " "(%s of %s prims)" % (path_attr, len(invalid_prims), num_prims) ) - return [output.path()] + return [output_node.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py index 0ab182c584..4e8e5fc0e8 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py +++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py @@ -1,7 +1,9 @@ +# -*-coding: utf-8 -*- import pyblish.api from openpype.hosts.houdini.api import lib from openpype.pipeline.publish import RepairContextAction +from openpype.pipeline import PublishValidationError import hou @@ -27,17 +29,24 @@ class ValidateRemotePublishOutNode(pyblish.api.ContextPlugin): # We ensure it's a shell node and that it has the pre-render script # set correctly. Plus the shell script it will trigger should be # completely empty (doing nothing) - assert node.type().name() == "shell", "Must be shell ROP node" - assert node.parm("command").eval() == "", "Must have no command" - assert not node.parm("shellexec").eval(), "Must not execute in shell" - assert ( - node.parm("prerender").eval() == cmd - ), "REMOTE_PUBLISH node does not have correct prerender script." 
- assert ( - node.parm("lprerender").eval() == "python" - ), "REMOTE_PUBLISH node prerender script type not set to 'python'" + if node.type().name() != "shell": + self.raise_error("Must be shell ROP node") + if node.parm("command").eval() != "": + self.raise_error("Must have no command") + if node.parm("shellexec").eval(): + self.raise_error("Must not execute in shell") + if node.parm("prerender").eval() != cmd: + self.raise_error(("REMOTE_PUBLISH node does not have " + "correct prerender script.")) + if node.parm("lprerender").eval() != "python": + self.raise_error(("REMOTE_PUBLISH node prerender script " + "type not set to 'python'")) @classmethod def repair(cls, context): """(Re)create the node if it fails to pass validation.""" lib.create_remote_publish_node(force=True) + + def raise_error(self, message): + self.log.error(message) + raise PublishValidationError(message, title=self.label) diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py index afc8df7528..8ec62f4e85 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py +++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py @@ -1,7 +1,9 @@ +# -*- coding: utf-8 -*- import pyblish.api import hou from openpype.pipeline.publish import RepairContextAction +from openpype.pipeline import PublishValidationError class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin): @@ -18,10 +20,12 @@ class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin): node = hou.node("/out/REMOTE_PUBLISH") if not node: - raise RuntimeError("Missing REMOTE_PUBLISH node.") + raise PublishValidationError( + "Missing REMOTE_PUBLISH node.", title=self.label) if node.isBypassed(): - raise RuntimeError("REMOTE_PUBLISH must not be bypassed.") + raise PublishValidationError( + "REMOTE_PUBLISH must not be bypassed.", title=self.label) @classmethod def repair(cls, context): @@ -29,7 +33,8 @@ class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin): node = hou.node("/out/REMOTE_PUBLISH") if not node: - raise RuntimeError("Missing REMOTE_PUBLISH node.") + raise PublishValidationError( + "Missing REMOTE_PUBLISH node.", title=cls.label) cls.log.info("Disabling bypass on /out/REMOTE_PUBLISH") node.bypass(False) diff --git a/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py index a5a07b1b1a..ed7f438729 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py @@ -1,4 +1,6 @@ +# -*- coding: utf-8 -*- import pyblish.api +from openpype.pipeline import PublishValidationError class ValidateSopOutputNode(pyblish.api.InstancePlugin): @@ -22,9 +24,9 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Output node(s) `%s` are incorrect. " - "See plug-in log for details." % invalid + raise PublishValidationError( + "Output node(s) are incorrect", + title="Invalid output node(s)" ) @classmethod @@ -32,10 +34,10 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin): import hou - output_node = instance.data["output_node"] + output_node = instance.data.get("output_node") if output_node is None: - node = instance[0] + node = hou.node(instance.data["instance_node"]) cls.log.error( "SOP Output node in '%s' does not exist. 
" "Ensure a valid SOP output path is set." % node.path() @@ -56,10 +58,11 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin): # For the sake of completeness also assert the category type # is Sop to avoid potential edge case scenarios even though # the isinstance check above should be stricter than this category - assert output_node.type().category().name() == "Sop", ( - "Output node %s is not of category Sop. This is a bug.." - % output_node.path() - ) + if output_node.type().category().name() != "Sop": + raise PublishValidationError( + ("Output node {} is not of category Sop. " + "This is a bug.").format(output_node.path()), + title=cls.label) # Ensure the node is cooked and succeeds to cook so we can correctly # check for its geometry data. diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py b/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py index ac0181aed2..a0e2302495 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py @@ -1,6 +1,10 @@ +# -*- coding: utf-8 -*- import pyblish.api import openpype.hosts.houdini.api.usd as hou_usdlib +from openpype.pipeline import PublishValidationError + +import hou class ValidateUSDLayerPathBackslashes(pyblish.api.InstancePlugin): @@ -24,7 +28,7 @@ class ValidateUSDLayerPathBackslashes(pyblish.api.InstancePlugin): def process(self, instance): - rop = instance[0] + rop = hou.node(instance.get("instance_node")) lop_path = hou_usdlib.get_usd_rop_loppath(rop) stage = lop_path.stage(apply_viewport_overrides=False) @@ -44,7 +48,7 @@ class ValidateUSDLayerPathBackslashes(pyblish.api.InstancePlugin): invalid.append(layer) if invalid: - raise RuntimeError( + raise PublishValidationError(( "Loaded layers have backslashes. " - "This is invalid for HUSK USD rendering." 
- ) + "This is invalid for HUSK USD rendering."), + title=self.label) diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py b/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py index 2fd2f5eb9f..a55eb70cb2 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py @@ -1,10 +1,13 @@ +# -*- coding: utf-8 -*- import pyblish.api import openpype.hosts.houdini.api.usd as hou_usdlib - +from openpype.pipeline import PublishValidationError from pxr import UsdShade, UsdRender, UsdLux +import hou + def fullname(o): """Get fully qualified class name""" @@ -37,7 +40,7 @@ class ValidateUsdModel(pyblish.api.InstancePlugin): def process(self, instance): - rop = instance[0] + rop = hou.node(instance.get("instance_node")) lop_path = hou_usdlib.get_usd_rop_loppath(rop) stage = lop_path.stage(apply_viewport_overrides=False) @@ -55,7 +58,8 @@ class ValidateUsdModel(pyblish.api.InstancePlugin): if invalid: prim_paths = sorted([str(prim.GetPath()) for prim in invalid]) - raise RuntimeError("Found invalid primitives: %s" % prim_paths) + raise PublishValidationError( + "Found invalid primitives: {}".format(prim_paths)) class ValidateUsdShade(ValidateUsdModel): diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py index 1f10fafdf4..af21efcafc 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py @@ -1,4 +1,6 @@ +# -*- coding: utf-8 -*- import pyblish.api +from openpype.pipeline import PublishValidationError class ValidateUSDOutputNode(pyblish.api.InstancePlugin): @@ -20,9 +22,10 @@ class ValidateUSDOutputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Output node(s) `%s` are incorrect. " - "See plug-in log for details." % invalid + raise PublishValidationError( + ("Output node(s) `{}` are incorrect. " + "See plug-in log for details.").format(invalid), + title=self.label ) @classmethod @@ -33,7 +36,7 @@ class ValidateUSDOutputNode(pyblish.api.InstancePlugin): output_node = instance.data["output_node"] if output_node is None: - node = instance[0] + node = hou.node(instance.get("instance_node")) cls.log.error( "USD node '%s' LOP path does not exist. " "Ensure a valid LOP path is set." 
% node.path() diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py b/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py index 36336a03ae..02c44ab94e 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py @@ -1,6 +1,8 @@ +# -*- coding: utf-8 -*- +import os import pyblish.api -import os +from openpype.pipeline import PublishValidationError class ValidateUSDRenderProductNames(pyblish.api.InstancePlugin): @@ -28,4 +30,5 @@ class ValidateUSDRenderProductNames(pyblish.api.InstancePlugin): if invalid: for message in invalid: self.log.error(message) - raise RuntimeError("USD Render Paths are invalid.") + raise PublishValidationError( + "USD Render Paths are invalid.", title=self.label) diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py b/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py index fb1094e6b5..01ebc0e828 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py @@ -1,6 +1,8 @@ +# -*- coding: utf-8 -*- import pyblish.api import openpype.hosts.houdini.api.usd as hou_usdlib +from openpype.pipeline import PublishValidationError class ValidateUsdSetDress(pyblish.api.InstancePlugin): @@ -20,8 +22,9 @@ class ValidateUsdSetDress(pyblish.api.InstancePlugin): def process(self, instance): from pxr import UsdGeom + import hou - rop = instance[0] + rop = hou.node(instance.get("instance_node")) lop_path = hou_usdlib.get_usd_rop_loppath(rop) stage = lop_path.stage(apply_viewport_overrides=False) @@ -47,8 +50,9 @@ class ValidateUsdSetDress(pyblish.api.InstancePlugin): invalid.append(node) if invalid: - raise RuntimeError( + raise PublishValidationError(( "SetDress contains local geometry. " "This is not allowed, it must be an assembly " - "of referenced assets." 
+ "of referenced assets."), + title=self.label ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py index f08c7c72c5..c4f118ac3b 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import re import pyblish.api @@ -5,6 +6,7 @@ import pyblish.api from openpype.client import get_subset_by_name from openpype.pipeline import legacy_io from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): @@ -32,7 +34,8 @@ class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): project_name, model_subset, asset_doc["_id"], fields=["_id"] ) if not subset_doc: - raise RuntimeError( - "USD Model subset not found: " - "%s (%s)" % (model_subset, asset_name) + raise PublishValidationError( + ("USD Model subset not found: " + "{} ({})").format(model_subset, asset_name), + title=self.label ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py index a4902b48a9..bd3366a424 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError import hou @@ -12,14 +13,14 @@ class ValidateUsdShadeWorkspace(pyblish.api.InstancePlugin): """ - order = ValidateContentsOrder + order = pyblish.api.ValidatorOrder hosts = ["houdini"] families = ["usdShade"] label = "USD Shade Workspace" def process(self, instance): - rop = instance[0] + rop = hou.node(instance.get("instance_node")) workspace = rop.parent() definition = workspace.type().definition() @@ -39,13 +40,14 @@ class ValidateUsdShadeWorkspace(pyblish.api.InstancePlugin): if node_type != other_node_type: continue - # Get highest version + # Get the highest version highest = max(highest, other_version) if version != highest: - raise RuntimeError( - "Shading Workspace is not the latest version." - " Found %s. Latest is %s." % (version, highest) + raise PublishValidationError( + ("Shading Workspace is not the latest version." + " Found {}. Latest is {}.").format(version, highest), + title=self.label ) # There were some issues with the editable node not having the right @@ -56,8 +58,9 @@ class ValidateUsdShadeWorkspace(pyblish.api.InstancePlugin): ) rop_value = rop.parm("lopoutput").rawValue() if rop_value != value: - raise RuntimeError( - "Shading Workspace has invalid 'lopoutput'" - " parameter value. The Shading Workspace" - " needs to be reset to its default values." + raise PublishValidationError( + ("Shading Workspace has invalid 'lopoutput'" + " parameter value. 
The Shading Workspace" + " needs to be reset to its default values."), + title=self.label ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py index ac408bc842..1f9ccc9c42 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py @@ -1,5 +1,8 @@ +# -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import ( + PublishValidationError +) class ValidateVDBInputNode(pyblish.api.InstancePlugin): @@ -16,7 +19,7 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin): """ - order = ValidateContentsOrder + 0.1 + order = pyblish.api.ValidatorOrder + 0.1 families = ["vdbcache"] hosts = ["houdini"] label = "Validate Input Node (VDB)" @@ -24,8 +27,10 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Node connected to the output node is not" "of type VDB!" + raise PublishValidationError( + self, + "Node connected to the output node is not of type VDB", + title=self.label ) @classmethod diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py index 55ed581d4c..61c1209fc9 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py @@ -1,6 +1,7 @@ +# -*- coding: utf-8 -*- import pyblish.api import hou -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline import PublishValidationError class ValidateVDBOutputNode(pyblish.api.InstancePlugin): @@ -17,7 +18,7 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin): """ - order = ValidateContentsOrder + 0.1 + order = pyblish.api.ValidatorOrder + 0.1 families = ["vdbcache"] hosts = ["houdini"] label = "Validate Output Node (VDB)" @@ -25,8 +26,9 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError( - "Node connected to the output node is not" " of type VDB!" + raise PublishValidationError( + "Node connected to the output node is not" " of type VDB!", + title=self.label ) @classmethod @@ -36,7 +38,7 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin): if node is None: cls.log.error( "SOP path is not correctly set on " - "ROP node '%s'." % instance[0].path() + "ROP node '%s'." 
% instance.get("instance_node") ) return [instance] diff --git a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py index 560b355e21..7707cc2dba 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py +++ b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py @@ -1,11 +1,17 @@ # -*- coding: utf-8 -*- import pyblish.api import hou +from openpype.pipeline import ( + PublishValidationError, + OptionalPyblishPluginMixin +) +from openpype.pipeline.publish import RepairAction from openpype.pipeline.publish import RepairAction -class ValidateWorkfilePaths(pyblish.api.InstancePlugin): +class ValidateWorkfilePaths( + pyblish.api.InstancePlugin, OptionalPyblishPluginMixin): """Validate workfile paths so they are absolute.""" order = pyblish.api.ValidatorOrder @@ -19,6 +25,8 @@ class ValidateWorkfilePaths(pyblish.api.InstancePlugin): prohibited_vars = ["$HIP", "$JOB"] def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid() self.log.info( "node types to check: {}".format(", ".join(self.node_types))) @@ -30,15 +38,16 @@ class ValidateWorkfilePaths(pyblish.api.InstancePlugin): self.log.error( "{}: {}".format(param.path(), param.unexpandedString())) - raise RuntimeError("Invalid paths found") + raise PublishValidationError( + "Invalid paths found", title=self.label) @classmethod def get_invalid(cls): invalid = [] for param, _ in hou.fileReferences(): - if param is None: + # it might return None for some reason + if not param: continue - # skip nodes we are not interested in if param.node().type().name() not in cls.node_types: continue diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.xml b/openpype/hosts/houdini/startup/MainMenuCommon.xml index abfa3f136e..c08114b71b 100644 --- a/openpype/hosts/houdini/startup/MainMenuCommon.xml +++ b/openpype/hosts/houdini/startup/MainMenuCommon.xml @@ -1,10 +1,10 @@ - + - + - + - + 1 + is_sequence = len(representation["files"]) > 1 - filepath = self.fname.replace("\\", "/") + if is_sequence: + representation = self._representation_with_hash_in_frame( + representation + ) + filepath = get_representation_path(representation).replace("\\", "/") + self.log.debug("_ filepath: {}".format(filepath)) start_at_workfile = options.get( "start_at_workfile", self.options_defaults["start_at_workfile"]) @@ -112,11 +109,10 @@ class LoadClip(plugin.NukeLoader): version = context['version'] version_data = version.get("data", {}) - repre_id = repre["_id"] + repre_id = representation["_id"] - repre_cont = repre["context"] - - self.log.info("version_data: {}\n".format(version_data)) + self.log.debug("_ version_data: {}\n".format( + pformat(version_data))) self.log.debug( "Representation id `{}` ".format(repre_id)) @@ -132,8 +128,6 @@ class LoadClip(plugin.NukeLoader): duration = last - first first = 1 last = first + duration - elif "#" not in filepath: - filepath = self._fix_path_for_knob(filepath, repre_cont) # Fallback to asset name when namespace is None if namespace is None: @@ -144,20 +138,23 @@ class LoadClip(plugin.NukeLoader): "Representation id `{}` is failing to load".format(repre_id)) return - read_name = self._get_node_name(repre) + read_name = self._get_node_name(representation) # Create the Loader with the filename path set read_node = nuke.createNode( "Read", "name {}".format(read_name)) + # hide property panel + read_node.hideControlPanel() + # to avoid multiple undo steps for rest of process # we 
will switch off undo-ing with viewer_update_and_undo_stop(): read_node["file"].setValue(filepath) used_colorspace = self._set_colorspace( - read_node, version_data, repre["data"]) + read_node, version_data, representation["data"]) self._set_range_to_node(read_node, first, last, start_at_workfile) @@ -179,7 +176,7 @@ class LoadClip(plugin.NukeLoader): data_imprint[k] = version elif k == 'colorspace': - colorspace = repre["data"].get(k) + colorspace = representation["data"].get(k) colorspace = colorspace or version_data.get(k) data_imprint["db_colorspace"] = colorspace if used_colorspace: @@ -213,6 +210,20 @@ class LoadClip(plugin.NukeLoader): def switch(self, container, representation): self.update(container, representation) + def _representation_with_hash_in_frame(self, representation): + """Convert frame key value to padded hash + + Args: + representation (dict): representation data + + Returns: + dict: altered representation data + """ + representation = deepcopy(representation) + frame = representation["context"]["frame"] + representation["context"]["frame"] = "#" * len(str(frame)) + return representation + def update(self, container, representation): """Update the Loader's path @@ -225,7 +236,13 @@ class LoadClip(plugin.NukeLoader): is_sequence = len(representation["files"]) > 1 read_node = nuke.toNode(container['objectName']) + + if is_sequence: + representation = self._representation_with_hash_in_frame( + representation + ) filepath = get_representation_path(representation).replace("\\", "/") + self.log.debug("_ filepath: {}".format(filepath)) start_at_workfile = "start at" in read_node['frame_mode'].value() @@ -240,8 +257,6 @@ class LoadClip(plugin.NukeLoader): version_data = version_doc.get("data", {}) repre_id = representation["_id"] - repre_cont = representation["context"] - # colorspace profile colorspace = representation["data"].get("colorspace") colorspace = colorspace or version_data.get("colorspace") @@ -258,8 +273,6 @@ class LoadClip(plugin.NukeLoader): duration = last - first first = 1 last = first + duration - elif "#" not in filepath: - filepath = self._fix_path_for_knob(filepath, repre_cont) if not filepath: self.log.warning( @@ -348,8 +361,10 @@ class LoadClip(plugin.NukeLoader): time_warp_nodes = version_data.get('timewarps', []) last_node = None source_id = self.get_container_id(parent_node) - self.log.info("__ source_id: {}".format(source_id)) - self.log.info("__ members: {}".format(self.get_members(parent_node))) + self.log.debug("__ source_id: {}".format(source_id)) + self.log.debug("__ members: {}".format( + self.get_members(parent_node))) + dependent_nodes = self.clear_members(parent_node) with maintained_selection(): diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index d164e0604c..cef4b0a5fc 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -89,6 +89,9 @@ class LoadEffects(load.LoaderPlugin): "Group", "name {}_1".format(object_name)) + # hide property panel + GN.hideControlPanel() + # adding content to the group node with GN: pre_node = nuke.createNode("Input") diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index 44565c139d..9bd40be816 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -90,6 +90,9 @@ class LoadEffectsInputProcess(load.LoaderPlugin): "Group", "name 
{}_1".format(object_name)) + # hide property panel + GN.hideControlPanel() + # adding content to the group node with GN: pre_node = nuke.createNode("Input") diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 3e81ef999b..49dc12f588 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ b/openpype/hosts/nuke/plugins/load/load_image.py @@ -62,7 +62,9 @@ class LoadImage(load.LoaderPlugin): def load(self, context, name, namespace, options): self.log.info("__ options: `{}`".format(options)) - frame_number = options.get("frame_number", 1) + frame_number = options.get( + "frame_number", int(nuke.root()["first_frame"].getValue()) + ) version = context['version'] version_data = version.get("data", {}) @@ -112,6 +114,10 @@ class LoadImage(load.LoaderPlugin): r = nuke.createNode( "Read", "name {}".format(read_name)) + + # hide property panel + r.hideControlPanel() + r["file"].setValue(file) # Set colorspace defined in version data diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index 151401bad3..ad985e83c6 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -63,6 +63,10 @@ class AlembicModelLoader(load.LoaderPlugin): object_name, file), inpanel=False ) + + # hide property panel + model_node.hideControlPanel() + model_node.forceValidate() # Ensure all items are imported and selected. diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index 21e384b538..f0972f85d2 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -71,6 +71,9 @@ class LinkAsGroup(load.LoaderPlugin): "Precomp", "file {}".format(file)) + # hide property panel + P.hideControlPanel() + # Set colorspace defined in version data colorspace = context["version"]["data"].get("colorspace", None) self.log.info("colorspace: {}\n".format(colorspace)) diff --git a/openpype/hosts/photoshop/addon.py b/openpype/hosts/photoshop/addon.py index a41d91554b..965a545ac5 100644 --- a/openpype/hosts/photoshop/addon.py +++ b/openpype/hosts/photoshop/addon.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon PHOTOSHOP_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py index 2792a775e0..7672458165 100644 --- a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py @@ -29,7 +29,8 @@ class CreateImage(create.LegacyCreator): if len(selection) > 1: # Ask user whether to create one image or image per selected # item. - msg_box = QtWidgets.QMessageBox() + active_window = QtWidgets.QApplication.activeWindow() + msg_box = QtWidgets.QMessageBox(parent=active_window) msg_box.setIcon(QtWidgets.QMessageBox.Warning) msg_box.setText( "Multiple layers selected." 
@@ -102,7 +103,7 @@ class CreateImage(create.LegacyCreator): if group.long_name: for directory in group.long_name[::-1]: name = directory.replace(stub.PUBLISH_ICON, '').\ - replace(stub.LOADED_ICON, '') + replace(stub.LOADED_ICON, '') long_names.append(name) self.data.update({"subset": subset_name}) diff --git a/openpype/hosts/resolve/addon.py b/openpype/hosts/resolve/addon.py index a31da52a6d..02c1d7957f 100644 --- a/openpype/hosts/resolve/addon.py +++ b/openpype/hosts/resolve/addon.py @@ -1,7 +1,6 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon from .utils import RESOLVE_ROOT_DIR diff --git a/openpype/hosts/standalonepublisher/addon.py b/openpype/hosts/standalonepublisher/addon.py index 98ec44d4e2..65a4226664 100644 --- a/openpype/hosts/standalonepublisher/addon.py +++ b/openpype/hosts/standalonepublisher/addon.py @@ -4,8 +4,7 @@ import click from openpype.lib import get_openpype_execute_args from openpype.lib.execute import run_detached_process -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import ITrayAction, IHostAddon +from openpype.modules import OpenPypeModule, ITrayAction, IHostAddon STANDALONEPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/traypublisher/addon.py b/openpype/hosts/traypublisher/addon.py index c86c835ed9..c157799898 100644 --- a/openpype/hosts/traypublisher/addon.py +++ b/openpype/hosts/traypublisher/addon.py @@ -4,8 +4,7 @@ import click from openpype.lib import get_openpype_execute_args from openpype.lib.execute import run_detached_process -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import ITrayAction, IHostAddon +from openpype.modules import OpenPypeModule, ITrayAction, IHostAddon TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py index 40877968e9..75930f0f31 100644 --- a/openpype/hosts/traypublisher/api/plugin.py +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -1,54 +1,32 @@ -import collections - from openpype.lib.attribute_definitions import FileDef +from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS from openpype.pipeline.create import ( Creator, HiddenCreator, CreatedInstance, + cache_and_get_instances, PRE_CREATE_THUMBNAIL_KEY, ) - from .pipeline import ( list_instances, update_instances, remove_instances, HostContext, ) -from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS - REVIEW_EXTENSIONS = set(IMAGE_EXTENSIONS) | set(VIDEO_EXTENSIONS) - - -def _cache_and_get_instances(creator): - """Cache instances in shared data. - - Args: - creator (Creator): Plugin which would like to get instances from host. - - Returns: - List[Dict[str, Any]]: Cached instances list from host implementation. 
- """ - - shared_key = "openpype.traypublisher.instances" - if shared_key not in creator.collection_shared_data: - instances_by_creator_id = collections.defaultdict(list) - for instance_data in list_instances(): - creator_id = instance_data.get("creator_identifier") - instances_by_creator_id[creator_id].append(instance_data) - creator.collection_shared_data[shared_key] = instances_by_creator_id - return creator.collection_shared_data[shared_key] +SHARED_DATA_KEY = "openpype.traypublisher.instances" class HiddenTrayPublishCreator(HiddenCreator): host_name = "traypublisher" def collect_instances(self): - instance_data_by_identifier = _cache_and_get_instances(self) - for instance_data in instance_data_by_identifier[self.identifier]: - instance = CreatedInstance.from_existing( - instance_data, self - ) + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) self._add_instance_to_context(instance) def update_instances(self, update_list): @@ -80,11 +58,11 @@ class TrayPublishCreator(Creator): host_name = "traypublisher" def collect_instances(self): - instance_data_by_identifier = _cache_and_get_instances(self) - for instance_data in instance_data_by_identifier[self.identifier]: - instance = CreatedInstance.from_existing( - instance_data, self - ) + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) self._add_instance_to_context(instance) def update_instances(self, update_list): diff --git a/openpype/hosts/traypublisher/plugins/create/create_online.py b/openpype/hosts/traypublisher/plugins/create/create_online.py new file mode 100644 index 0000000000..19f956a50e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_online.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +"""Creator of online files. + +Online file retain their original name and use it as subset name. To +avoid conflicts, this creator checks if subset with this name already +exists under selected asset. +""" +from pathlib import Path + +from openpype.client import get_subset_by_name, get_asset_by_name +from openpype.lib.attribute_definitions import FileDef +from openpype.pipeline import ( + CreatedInstance, + CreatorError +) +from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator + + +class OnlineCreator(TrayPublishCreator): + """Creates instance from file and retains its original name.""" + + identifier = "io.openpype.creators.traypublisher.online" + label = "Online" + family = "online" + description = "Publish file retaining its original file name" + extensions = [".mov", ".mp4", ".mxf", ".m4v", ".mpg"] + + def get_detail_description(self): + return """# Create file retaining its original file name. + + This will publish files using template helping to retain original + file name and that file name is used as subset name. 
+ + By default it tries to guard against multiple publishes of the same + file.""" + + def get_icon(self): + return "fa.file" + + def create(self, subset_name, instance_data, pre_create_data): + repr_file = pre_create_data.get("representation_file") + if not repr_file: + raise CreatorError("No files specified") + + files = repr_file.get("filenames") + if not files: + # this should never happen + raise CreatorError("Missing files from representation") + + origin_basename = Path(files[0]).stem + + asset = get_asset_by_name( + self.project_name, instance_data["asset"], fields=["_id"]) + if get_subset_by_name( + self.project_name, origin_basename, asset["_id"], + fields=["_id"]): + raise CreatorError(f"Subset with name {origin_basename} already " + "exists in the selected asset") + + instance_data["originalBasename"] = origin_basename + subset_name = origin_basename + + instance_data["creator_attributes"] = { + "path": (Path(repr_file["directory"]) / files[0]).as_posix() + } + + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, + instance_data, self) + self._store_new_instance(new_instance) + + def get_pre_create_attr_defs(self): + return [ + FileDef( + "representation_file", + folders=False, + extensions=self.extensions, + allow_sequences=False, + single_item=True, + label="Representation", + ) + ] + + def get_subset_name( + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None + ): + if instance is None: + return "{originalBasename}" + + return instance.data["subset"] diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py b/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py new file mode 100644 index 0000000000..a3f86afa13 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +import pyblish.api +from pathlib import Path + + +class CollectOnlineFile(pyblish.api.InstancePlugin): + """Collect online file and retain its file name.""" + label = "Collect Online File" + order = pyblish.api.CollectorOrder + families = ["online"] + hosts = ["traypublisher"] + + def process(self, instance): + file = Path(instance.data["creator_attributes"]["path"]) + + instance.data["representations"].append( + { + "name": file.suffix.lstrip("."), + "ext": file.suffix.lstrip("."), + "files": file.name, + "stagingDir": file.parent.as_posix() + } + ) diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py b/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py new file mode 100644 index 0000000000..12b2e72ced --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +import pyblish.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError, + OptionalPyblishPluginMixin, +) +from openpype.client import get_subset_by_name + + +class ValidateOnlineFile(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validate that subset doesn't exist yet.""" + label = "Validate Existing Online Files" + hosts = ["traypublisher"] + families = ["online"] + order = ValidateContentsOrder + + optional = True + + def process(self, instance): + project_name = instance.context.data["projectName"] + asset_id = instance.data["assetEntity"]["_id"] + subset = get_subset_by_name( + project_name, instance.data["subset"], asset_id) + + if subset: + raise PublishValidationError( + "Subset to be published already
exists.", + title=self.label + ) diff --git a/openpype/hosts/tvpaint/addon.py b/openpype/hosts/tvpaint/addon.py index d710e63f93..b695bf8ecc 100644 --- a/openpype/hosts/tvpaint/addon.py +++ b/openpype/hosts/tvpaint/addon.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/tvpaint/plugins/load/load_image.py b/openpype/hosts/tvpaint/plugins/load/load_image.py index 151db94135..5283d04355 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_image.py @@ -1,4 +1,4 @@ -import qargparse +from openpype.lib.attribute_definitions import BoolDef from openpype.hosts.tvpaint.api import plugin from openpype.hosts.tvpaint.api.lib import execute_george_through_file @@ -27,26 +27,28 @@ class ImportImage(plugin.Loader): "preload": True } - options = [ - qargparse.Boolean( - "stretch", - label="Stretch to project size", - default=True, - help="Stretch loaded image/s to project resolution?" - ), - qargparse.Boolean( - "timestretch", - label="Stretch to timeline length", - default=True, - help="Clip loaded image/s to timeline length?" - ), - qargparse.Boolean( - "preload", - label="Preload loaded image/s", - default=True, - help="Preload image/s?" - ) - ] + @classmethod + def get_options(cls, contexts): + return [ + BoolDef( + "stretch", + label="Stretch to project size", + default=cls.defaults["stretch"], + tooltip="Stretch loaded image/s to project resolution?" + ), + BoolDef( + "timestretch", + label="Stretch to timeline length", + default=cls.defaults["timestretch"], + tooltip="Clip loaded image/s to timeline length?" + ), + BoolDef( + "preload", + label="Preload loaded image/s", + default=cls.defaults["preload"], + tooltip="Preload image/s?" + ) + ] def load(self, context, name, namespace, options): stretch = options.get("stretch", self.defaults["stretch"]) diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py index 393236fba6..7f7a68cc41 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py @@ -1,7 +1,6 @@ import collections -import qargparse - +from openpype.lib.attribute_definitions import BoolDef from openpype.pipeline import ( get_representation_context, register_host, @@ -42,26 +41,28 @@ class LoadImage(plugin.Loader): "preload": True } - options = [ - qargparse.Boolean( - "stretch", - label="Stretch to project size", - default=True, - help="Stretch loaded image/s to project resolution?" - ), - qargparse.Boolean( - "timestretch", - label="Stretch to timeline length", - default=True, - help="Clip loaded image/s to timeline length?" - ), - qargparse.Boolean( - "preload", - label="Preload loaded image/s", - default=True, - help="Preload image/s?" - ) - ] + @classmethod + def get_options(cls, contexts): + return [ + BoolDef( + "stretch", + label="Stretch to project size", + default=cls.defaults["stretch"], + tooltip="Stretch loaded image/s to project resolution?" + ), + BoolDef( + "timestretch", + label="Stretch to timeline length", + default=cls.defaults["timestretch"], + tooltip="Clip loaded image/s to timeline length?" + ), + BoolDef( + "preload", + label="Preload loaded image/s", + default=cls.defaults["preload"], + tooltip="Preload image/s?" 
+ ) + ] def load(self, context, name, namespace, options): stretch = options.get("stretch", self.defaults["stretch"]) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py index f291c363b8..d5b79758ad 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -6,7 +6,7 @@ class CollectOutputFrameRange(pyblish.api.ContextPlugin): When instances are collected context does not contain `frameStart` and `frameEnd` keys yet. They are collected in global plugin - `CollectAvalonEntities`. + `CollectContextEntities`. """ label = "Collect output frame range" order = pyblish.api.CollectorOrder diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index 1ebaf1da64..78074f720c 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -25,6 +25,7 @@ class ExtractSequence(pyblish.api.Extractor): label = "Extract Sequence" hosts = ["tvpaint"] families = ["review", "renderPass", "renderLayer", "renderScene"] + families_to_review = ["review"] # Modifiable with settings review_bg = [255, 255, 255, 255] @@ -133,9 +134,9 @@ class ExtractSequence(pyblish.api.Extractor): output_frame_start ) - # Fill tags and new families + # Fill tags and new families from project settings tags = [] - if family_lowered in ("review", "renderlayer", "renderscene"): + if family_lowered in self.families_to_review: tags.append("review") # Sequence of one frame diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index 12d50e17ff..0030b0fd1c 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -39,7 +39,7 @@ class ValidateMarks(pyblish.api.ContextPlugin): def get_expected_data(context): scene_mark_in = context.data["sceneMarkIn"] - # Data collected in `CollectAvalonEntities` + # Data collected in `CollectContextEntities` frame_end = context.data["frameEnd"] frame_start = context.data["frameStart"] handle_start = context.data["handleStart"] diff --git a/openpype/hosts/unreal/addon.py b/openpype/hosts/unreal/addon.py index 16736214c5..e2c8484651 100644 --- a/openpype/hosts/unreal/addon.py +++ b/openpype/hosts/unreal/addon.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon UNREAL_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/webpublisher/addon.py b/openpype/hosts/webpublisher/addon.py index a64d74e62b..eb7fced2e6 100644 --- a/openpype/hosts/webpublisher/addon.py +++ b/openpype/hosts/webpublisher/addon.py @@ -2,8 +2,7 @@ import os import click -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IHostAddon +from openpype.modules import OpenPypeModule, IHostAddon WEBPUBLISHER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index dd4646f356..79ed499a20 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ 
b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -83,8 +83,10 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): self.log.info("task_data:: {}".format(task_data)) is_sequence = len(task_data["files"]) > 1 + first_file = task_data["files"][0] - _, extension = os.path.splitext(task_data["files"][0]) + _, extension = os.path.splitext(first_file) + extension = extension.lower() family, families, tags = self._get_family( self.task_type_to_family, task_type, @@ -149,10 +151,13 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): self.log.warning("Unable to count frames " "duration {}".format(no_of_frames)) - # raise ValueError("STOP") instance.data["handleStart"] = asset_doc["data"]["handleStart"] instance.data["handleEnd"] = asset_doc["data"]["handleEnd"] + if "review" in tags: + first_file_path = os.path.join(task_dir, first_file) + instance.data["thumbnailSource"] = first_file_path + instances.append(instance) self.log.info("instance.data:: {}".format(instance.data)) @@ -176,6 +181,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): def _get_single_repre(self, task_dir, files, tags): _, ext = os.path.splitext(files[0]) + ext = ext.lower() repre_data = { "name": ext[1:], "ext": ext[1:], @@ -195,6 +201,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): frame_start = list(collections[0].indexes)[0] frame_end = list(collections[0].indexes)[-1] ext = collections[0].tail + ext = ext.lower() repre_data = { "frameStart": frame_start, "frameEnd": frame_end, @@ -240,8 +247,17 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): for config in families_config: if is_sequence != config["is_sequence"]: continue - if (extension in config["extensions"] or - '' in config["extensions"]): # all extensions setting + extensions = config.get("extensions") or [] + lower_extensions = set() + for ext in extensions: + if ext: + ext = ext.lower() + if ext.startswith("."): + ext = ext[1:] + lower_extensions.add(ext) + + # all extensions setting + if not lower_extensions or extension in lower_extensions: found_family = config["result_family"] break diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py deleted file mode 100644 index a56521891b..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,137 +0,0 @@ -import os -import shutil - -import pyblish.api -from openpype.lib import ( - get_ffmpeg_tool_path, - - run_subprocess, - - get_transcode_temp_directory, - convert_input_paths_for_ffmpeg, - should_convert_for_ffmpeg -) - - -class ExtractThumbnail(pyblish.api.InstancePlugin): - """Create jpg thumbnail from input using ffmpeg.""" - - label = "Extract Thumbnail" - order = pyblish.api.ExtractorOrder - families = [ - "render", - "image" - ] - hosts = ["webpublisher"] - targets = ["filespublish"] - - def process(self, instance): - self.log.info("subset {}".format(instance.data['subset'])) - - filtered_repres = self._get_filtered_repres(instance) - for repre in filtered_repres: - repre_files = repre["files"] - if not isinstance(repre_files, (list, tuple)): - input_file = repre_files - else: - file_index = int(float(len(repre_files)) * 0.5) - input_file = repre_files[file_index] - - stagingdir = os.path.normpath(repre["stagingDir"]) - - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("Input filepath: {}".format(full_input_path)) - - do_convert = should_convert_for_ffmpeg(full_input_path) - # If result 
is None the requirement of conversion can't be - # determined - if do_convert is None: - self.log.info(( - "Can't determine if representation requires conversion." - " Skipped." - )) - continue - - # Do conversion if needed - # - change staging dir of source representation - # - must be set back after output definitions processing - convert_dir = None - if do_convert: - convert_dir = get_transcode_temp_directory() - filename = os.path.basename(full_input_path) - convert_input_paths_for_ffmpeg( - [full_input_path], - convert_dir, - self.log - ) - full_input_path = os.path.join(convert_dir, filename) - - filename = os.path.splitext(input_file)[0] - while filename.endswith("."): - filename = filename[:-1] - thumbnail_filename = filename + "_thumbnail.jpg" - full_output_path = os.path.join(stagingdir, thumbnail_filename) - - self.log.info("output {}".format(full_output_path)) - - ffmpeg_args = [ - get_ffmpeg_tool_path("ffmpeg"), - "-y", - "-i", full_input_path, - "-vframes", "1", - full_output_path - ] - - # run subprocess - self.log.debug("{}".format(" ".join(ffmpeg_args))) - try: # temporary until oiiotool is supported cross platform - run_subprocess( - ffmpeg_args, logger=self.log - ) - except RuntimeError as exp: - if "Compression" in str(exp): - self.log.debug( - "Unsupported compression on input files. Skipping!!!" - ) - return - self.log.warning("Conversion crashed", exc_info=True) - raise - - new_repre = { - "name": "thumbnail", - "ext": "jpg", - "files": thumbnail_filename, - "stagingDir": stagingdir, - "thumbnail": True, - "tags": ["thumbnail"] - } - - # adding representation - self.log.debug("Adding: {}".format(new_repre)) - instance.data["representations"].append(new_repre) - - # Cleanup temp folder - if convert_dir is not None and os.path.exists(convert_dir): - shutil.rmtree(convert_dir) - - def _get_filtered_repres(self, instance): - filtered_repres = [] - repres = instance.data.get("representations") or [] - for repre in repres: - self.log.debug(repre) - tags = repre.get("tags") or [] - # Skip instance if already has thumbnail representation - if "thumbnail" in tags: - return [] - - if "review" not in tags: - continue - - if not repre.get("files"): - self.log.info(( - "Representation \"{}\" don't have files. Skipping" - ).format(repre["name"])) - continue - - filtered_repres.append(repre) - return filtered_repres diff --git a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py index a5e4868411..d8b7bb9078 100644 --- a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py @@ -13,7 +13,7 @@ class ValidateWorkfileData(pyblish.api.ContextPlugin): targets = ["tvpaint_worker"] def process(self, context): - # Data collected in `CollectAvalonEntities` + # Data collected in `CollectContextEntities` frame_start = context.data["frameStart"] frame_end = context.data["frameEnd"] handle_start = context.data["handleStart"] diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index bb0b07948f..0df7b16e64 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -91,7 +91,7 @@ class AbstractAttrDefMeta(ABCMeta): @six.add_metaclass(AbstractAttrDefMeta) -class AbtractAttrDef: +class AbtractAttrDef(object): """Abstraction of attribute definiton. 
Each attribute definition must have implemented validation and @@ -105,11 +105,14 @@ class AbtractAttrDef: How to force to set `key` attribute? Args: - key(str): Under which key will be attribute value stored. - label(str): Attribute label. - tooltip(str): Attribute tooltip. - is_label_horizontal(bool): UI specific argument. Specify if label is + key (str): Under which key will be attribute value stored. + default (Any): Default value of an attribute. + label (str): Attribute label. + tooltip (str): Attribute tooltip. + is_label_horizontal (bool): UI specific argument. Specify if label is next to value input or ahead. + hidden (bool): Whether the item will be hidden (for UI purposes). + disabled (bool): Item will be visible but disabled (for UI purposes). """ type_attributes = [] @@ -117,16 +120,29 @@ class AbtractAttrDef: is_value_def = True def __init__( - self, key, default, label=None, tooltip=None, is_label_horizontal=None + self, + key, + default, + label=None, + tooltip=None, + is_label_horizontal=None, + hidden=False, + disabled=False ): if is_label_horizontal is None: is_label_horizontal = True + + if hidden is None: + hidden = False + self.key = key self.label = label self.tooltip = tooltip self.default = default self.is_label_horizontal = is_label_horizontal - self._id = uuid.uuid4() + self.hidden = hidden + self.disabled = disabled + self._id = uuid.uuid4().hex self.__init__class__ = AbtractAttrDef @@ -173,7 +189,9 @@ class AbtractAttrDef: "label": self.label, "tooltip": self.tooltip, "default": self.default, - "is_label_horizontal": self.is_label_horizontal + "is_label_horizontal": self.is_label_horizontal, + "hidden": self.hidden, + "disabled": self.disabled } for attr in self.type_attributes: data[attr] = getattr(self, attr) @@ -235,6 +253,26 @@ class UnknownDef(AbtractAttrDef): return value +class HiddenDef(AbtractAttrDef): + """Hidden value of Any type. + + This attribute can be used for UI purposes to pass values related + to other attributes (e.g. in multi-page UIs). + + Keep in mind the value should be possible to parse by json parser. + """ + + type = "hidden" + + def __init__(self, key, default=None, **kwargs): + kwargs["default"] = default + kwargs["hidden"] = True + super(HiddenDef, self).__init__(key, **kwargs) + + def convert_value(self, value): + return value + + class NumberDef(AbtractAttrDef): """Number definition. @@ -541,6 +579,13 @@ class FileDefItem(object): return ext return None + @property + def lower_ext(self): + ext = self.ext + if ext is not None: + return ext.lower() + return ext + @property def is_dir(self): if self.is_empty: diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py index 099f9a34ba..b6797dbba0 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/openpype/lib/vendor_bin_utils.py @@ -60,9 +60,10 @@ def find_executable(executable): path to file. Returns: - str: Full path to executable with extension (is file). - None: When the executable was not found. + Union[str, None]: Full path to executable with extension which was + found, otherwise None.
""" + # Skip if passed path is file if is_file_executable(executable): return executable @@ -70,24 +71,36 @@ def find_executable(executable): low_platform = platform.system().lower() _, ext = os.path.splitext(executable) - # Prepare variants for which it will be looked - variants = [executable] - # Add other extension variants only if passed executable does not have one - if not ext: - if low_platform == "windows": - exts = [".exe", ".ps1", ".bat"] - for ext in os.getenv("PATHEXT", "").split(os.pathsep): - ext = ext.lower() - if ext and ext not in exts: - exts.append(ext) - else: - exts = [".sh"] + # Prepare extensions to check + exts = set() + if ext: + exts.add(ext.lower()) - for ext in exts: - variant = executable + ext - if is_file_executable(variant): - return variant - variants.append(variant) + else: + # Add other possible extension variants only if passed executable + # does not have any + if low_platform == "windows": + exts |= {".exe", ".ps1", ".bat"} + for ext in os.getenv("PATHEXT", "").split(os.pathsep): + exts.add(ext.lower()) + + else: + exts |= {".sh"} + + # Executable is a path but there may be missing extension + # - this can happen primarily on windows where + # e.g. "ffmpeg" should be "ffmpeg.exe" + exe_dir, exe_filename = os.path.split(executable) + if exe_dir and os.path.isdir(exe_dir): + for filename in os.listdir(exe_dir): + filepath = os.path.join(exe_dir, filename) + basename, ext = os.path.splitext(filename) + if ( + basename == exe_filename + and ext.lower() in exts + and is_file_executable(filepath) + ): + return filepath # Get paths where to look for executable path_str = os.environ.get("PATH", None) @@ -97,13 +110,27 @@ def find_executable(executable): elif hasattr(os, "defpath"): path_str = os.defpath - if path_str: - paths = path_str.split(os.pathsep) - for path in paths: - for variant in variants: - filepath = os.path.abspath(os.path.join(path, variant)) - if is_file_executable(filepath): - return filepath + if not path_str: + return None + + paths = path_str.split(os.pathsep) + for path in paths: + if not os.path.isdir(path): + continue + for filename in os.listdir(path): + filepath = os.path.abspath(os.path.join(path, filename)) + # Filename matches executable exactly + if filename == executable and is_file_executable(filepath): + return filepath + + basename, ext = os.path.splitext(filename) + if ( + basename == executable + and ext.lower() in exts + and is_file_executable(filepath) + ): + return filepath + return None @@ -272,8 +299,8 @@ def get_oiio_tools_path(tool="oiiotool"): oiio_dir = get_vendor_bin_path("oiio") if platform.system().lower() == "linux": oiio_dir = os.path.join(oiio_dir, "bin") - default_path = os.path.join(oiio_dir, tool) - if _oiio_executable_validation(default_path): + default_path = find_executable(os.path.join(oiio_dir, tool)) + if default_path and _oiio_executable_validation(default_path): tool_executable_path = default_path # Look to PATH for the tool diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py index 02e7dc13ab..1f345feea9 100644 --- a/openpype/modules/__init__.py +++ b/openpype/modules/__init__.py @@ -1,4 +1,14 @@ # -*- coding: utf-8 -*- +from .interfaces import ( + ILaunchHookPaths, + IPluginPaths, + ITrayModule, + ITrayAction, + ITrayService, + ISettingsChangeListener, + IHostAddon, +) + from .base import ( OpenPypeModule, OpenPypeAddOn, @@ -17,6 +27,14 @@ from .base import ( __all__ = ( + "ILaunchHookPaths", + "IPluginPaths", + "ITrayModule", + "ITrayAction", + "ITrayService", + 
"ISettingsChangeListener", + "IHostAddon", + "OpenPypeModule", "OpenPypeAddOn", diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/avalon_apps/avalon_app.py index 1d21de129b..f9085522b0 100644 --- a/openpype/modules/avalon_apps/avalon_app.py +++ b/openpype/modules/avalon_apps/avalon_app.py @@ -1,7 +1,6 @@ import os -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule +from openpype.modules import OpenPypeModule, ITrayModule class AvalonModule(OpenPypeModule, ITrayModule): diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 09aea50424..4761462df0 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -9,6 +9,7 @@ import logging import platform import threading import collections +import traceback from uuid import uuid4 from abc import ABCMeta, abstractmethod import six @@ -139,6 +140,15 @@ class _InterfacesClass(_ModuleClass): "cannot import name '{}' from 'openpype_interfaces'" ).format(attr_name)) + if _LoadCache.interfaces_loaded and attr_name != "log": + stack = list(traceback.extract_stack()) + stack.pop(-1) + self.log.warning(( + "Using deprecated import of \"{}\" from 'openpype_interfaces'." + " Please switch to use import" + " from 'openpype.modules.interfaces'" + " (will be removed after 3.16.x).{}" + ).format(attr_name, "".join(traceback.format_list(stack)))) return self.__attributes__[attr_name] diff --git a/openpype/modules/clockify/clockify_module.py b/openpype/modules/clockify/clockify_module.py index 932ce87c36..14fcb01f67 100644 --- a/openpype/modules/clockify/clockify_module.py +++ b/openpype/modules/clockify/clockify_module.py @@ -2,16 +2,17 @@ import os import threading import time +from openpype.modules import ( + OpenPypeModule, + ITrayModule, + IPluginPaths +) + from .clockify_api import ClockifyAPI from .constants import ( CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH ) -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( - ITrayModule, - IPluginPaths -) class ClockifyModule( diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py index bbd0f74e8a..9855f8c1b1 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/openpype/modules/deadline/deadline_module.py @@ -4,8 +4,7 @@ import six import sys from openpype.lib import requests_get, Logger -from openpype.modules import OpenPypeModule -from openpype_interfaces import IPluginPaths +from openpype.modules import OpenPypeModule, IPluginPaths class DeadlineWebserviceError(Exception): diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index aba505b3c6..6362b4ca65 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -457,9 +457,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): cam = [c for c in cameras if c in col.head] if cam: - subset_name = '{}_{}_{}'.format(group_name, cam, aov) + if aov: + subset_name = '{}_{}_{}'.format(group_name, cam, aov) + else: + subset_name = '{}_{}'.format(group_name, cam) else: - subset_name = '{}_{}'.format(group_name, aov) + if aov: + subset_name = '{}_{}'.format(group_name, aov) + else: + subset_name = '{}'.format(group_name) if isinstance(col, (list, tuple)): staging = os.path.dirname(col[0]) @@ -488,12 +494,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: render_file_name = 
os.path.basename(col) aov_patterns = self.aov_filter - preview = match_aov_pattern(app, aov_patterns, render_file_name) + preview = match_aov_pattern(app, aov_patterns, render_file_name) # toggle preview on if multipart is on + if instance_data.get("multipartExr"): preview = True - + self.log.debug("preview:{}".format(preview)) new_instance = deepcopy(instance_data) new_instance["subset"] = subset_name new_instance["subsetGroup"] = group_name @@ -536,7 +543,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if new_instance.get("extendFrames", False): self._copy_extend_frames(new_instance, rep) instances.append(new_instance) - + self.log.debug("instances:{}".format(instances)) return instances def _get_representations(self, instance, exp_files): diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index 9b35c9502d..40193bac71 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -14,6 +14,137 @@ from Deadline.Scripting import ( ProcessUtils, ) +VERSION_REGEX = re.compile( + r"(?P<major>0|[1-9]\d*)" + r"\.(?P<minor>0|[1-9]\d*)" + r"\.(?P<patch>0|[1-9]\d*)" + r"(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?" + r"(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?" +) + + +class OpenPypeVersion: + """Fake semver version class for OpenPype version purposes. + + The version object implements only the comparisons needed to pick + a compatible OpenPype build for a job. + """ + def __init__(self, major, minor, patch, prerelease, origin=None): + self.major = major + self.minor = minor + self.patch = patch + self.prerelease = prerelease + + is_valid = True + if not major or not minor or not patch: + is_valid = False + self.is_valid = is_valid + + if origin is None: + base = "{}.{}.{}".format(str(major), str(minor), str(patch)) + if not prerelease: + origin = base + else: + origin = "{}-{}".format(base, str(prerelease)) + + self.origin = origin + + @classmethod + def from_string(cls, version): + """Create an object of version from string. + + Args: + version (str): Version as a string. + + Returns: + Union[OpenPypeVersion, None]: Version object if input is nonempty + string otherwise None. + """ + + if not version: + return None + valid_parts = VERSION_REGEX.findall(version) + if len(valid_parts) != 1: + # Return invalid version with filled 'origin' attribute + return cls(None, None, None, None, origin=str(version)) + + # Unpack found version + major, minor, patch, pre, post = valid_parts[0] + prerelease = pre + # Post release is not important anymore and should be considered as + # part of prerelease + # - comparison is implemented to find suitable build and builds should + # never contain prerelease part so "not proper" parsing is + # acceptable for this use case. + if post: + prerelease = "{}+{}".format(pre, post) + + return cls( + int(major), int(minor), int(patch), prerelease, origin=version + ) + + def has_compatible_release(self, other): + """Version has compatible release as other version. + + Both major and minor versions must be exactly the same. In that case + a build can be considered as release compatible with any version. + + Args: + other (OpenPypeVersion): Other version. + + Returns: + bool: Version is release compatible with other version.
+ """ + + if self.is_valid and other.is_valid: + return self.major == other.major and self.minor == other.minor + return False + + def __bool__(self): + return self.is_valid + + def __repr__(self): + return "<{} {}>".format(self.__class__.__name__, self.origin) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return self.origin == other + return self.origin == other.origin + + def __lt__(self, other): + if not isinstance(other, self.__class__): + return None + + if not self.is_valid: + return True + + if not other.is_valid: + return False + + if self.origin == other.origin: + return None + + same_major = self.major == other.major + if not same_major: + return self.major < other.major + + same_minor = self.minor == other.minor + if not same_minor: + return self.minor < other.minor + + same_patch = self.patch == other.patch + if not same_patch: + return self.patch < other.patch + + if not self.prerelease: + return False + + if not other.prerelease: + return True + + pres = [self.prerelease, other.prerelease] + pres.sort() + return pres[0] == self.prerelease + def get_openpype_version_from_path(path, build=True): """Get OpenPype version from provided path. @@ -21,9 +152,9 @@ def get_openpype_version_from_path(path, build=True): build (bool, optional): Get only builds, not sources Returns: - str or None: version of OpenPype if found. - + Union[OpenPypeVersion, None]: version of OpenPype if found. """ + # fix path for application bundle on macos if platform.system().lower() == "darwin": path = os.path.join(path, "Contents", "MacOS", "lib", "Python") @@ -46,8 +177,10 @@ def get_openpype_version_from_path(path, build=True): with open(version_file, "r") as vf: exec(vf.read(), version) - version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) - return version_match[1] + version_str = version.get("__version__") + if version_str: + return OpenPypeVersion.from_string(version_str) + return None def get_openpype_executable(): @@ -59,6 +192,91 @@ def get_openpype_executable(): return exe_list, dir_list +def get_openpype_versions(dir_list): + print(">>> Getting OpenPype executable ...") + openpype_versions = [] + + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + print("--- Looking for OpenPype at: {}".format(install_dir)) + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = get_openpype_version_from_path(subdir) + if not version: + continue + print(" - found: {} - {}".format(version, subdir)) + openpype_versions.append((version, subdir)) + return openpype_versions + + +def get_requested_openpype_executable( + exe, dir_list, requested_version +): + requested_version_obj = OpenPypeVersion.from_string(requested_version) + if not requested_version_obj: + print(( + ">>> Requested version does not match version regex \"{}\"" + ).format(VERSION_REGEX)) + return None + + print(( + ">>> Scanning for compatible requested version {}" + ).format(requested_version)) + openpype_versions = get_openpype_versions(dir_list) + if not openpype_versions: + return None + + # if looking for requested compatible version, + # add the implicitly specified to the list too. 
+ if exe: + exe_dir = os.path.dirname(exe) + print("Looking for OpenPype at: {}".format(exe_dir)) + version = get_openpype_version_from_path(exe_dir) + if version: + print(" - found: {} - {}".format(version, exe_dir)) + openpype_versions.append((version, exe_dir)) + + matching_item = None + compatible_versions = [] + for version_item in openpype_versions: + version, version_dir = version_item + if requested_version_obj.has_compatible_release(version): + compatible_versions.append(version_item) + if version == requested_version_obj: + # Store version item if version match exactly + # - break if is found matching version + matching_item = version_item + break + + if not compatible_versions: + return None + + compatible_versions.sort(key=lambda item: item[0]) + if matching_item: + version, version_dir = matching_item + print(( + "*** Found exact match build version {} in {}" + ).format(version_dir, version)) + + else: + version, version_dir = compatible_versions[-1] + + print(( + "*** Latest compatible version found is {} in {}" + ).format(version_dir, version)) + + # create list of executables for different platform and let + # Deadline decide. + exe_list = [ + os.path.join(version_dir, "openpype_console.exe"), + os.path.join(version_dir, "openpype_console") + ] + return FileUtils.SearchFileList(";".join(exe_list)) + + def inject_openpype_environment(deadlinePlugin): """ Pull env vars from OpenPype and push them to rendering process. @@ -68,93 +286,29 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Injecting OpenPype environments ...") try: - print(">>> Getting OpenPype executable ...") exe_list, dir_list = get_openpype_executable() - openpype_versions = [] - # if the job requires specific OpenPype version, - # lets go over all available and find compatible build. + exe = FileUtils.SearchFileList(exe_list) + requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") if requested_version: - print(( - ">>> Scanning for compatible requested version {}" - ).format(requested_version)) - install_dir = DirectoryUtils.SearchDirectoryList(dir_list) - if install_dir: - print("--- Looking for OpenPype at: {}".format(install_dir)) - sub_dirs = [ - f.path for f in os.scandir(install_dir) - if f.is_dir() - ] - for subdir in sub_dirs: - version = get_openpype_version_from_path(subdir) - if not version: - continue - print(" - found: {} - {}".format(version, subdir)) - openpype_versions.append((version, subdir)) + exe = get_requested_openpype_executable( + exe, dir_list, requested_version + ) + if exe is None: + raise RuntimeError(( + "Cannot find compatible version available for version {}" + " requested by the job. Please add it through plugin" + " configuration in Deadline or install it to configured" + " directory." + ).format(requested_version)) - exe = FileUtils.SearchFileList(exe_list) - if openpype_versions: - # if looking for requested compatible version, - # add the implicitly specified to the list too. 
- print("Looking for OpenPype at: {}".format(os.path.dirname(exe))) - version = get_openpype_version_from_path( - os.path.dirname(exe)) - if version: - print(" - found: {} - {}".format( - version, os.path.dirname(exe) - )) - openpype_versions.append((version, os.path.dirname(exe))) - - if requested_version: - # sort detected versions - if openpype_versions: - # use natural sorting - openpype_versions.sort( - key=lambda ver: [ - int(t) if t.isdigit() else t.lower() - for t in re.split(r"(\d+)", ver[0]) - ]) - print(( - "*** Latest available version found is {}" - ).format(openpype_versions[-1][0])) - requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501 - compatible_versions = [] - for version in openpype_versions: - v = version[0].split(".")[:3] - if v[0] == requested_major and v[1] == requested_minor: - compatible_versions.append(version) - if not compatible_versions: - raise RuntimeError( - ("Cannot find compatible version available " - "for version {} requested by the job. " - "Please add it through plugin configuration " - "in Deadline or install it to configured " - "directory.").format(requested_version)) - # sort compatible versions nad pick the last one - compatible_versions.sort( - key=lambda ver: [ - int(t) if t.isdigit() else t.lower() - for t in re.split(r"(\d+)", ver[0]) - ]) - print(( - "*** Latest compatible version found is {}" - ).format(compatible_versions[-1][0])) - # create list of executables for different platform and let - # Deadline decide. - exe_list = [ - os.path.join( - compatible_versions[-1][1], "openpype_console.exe"), - os.path.join( - compatible_versions[-1][1], "openpype_console") - ] - exe = FileUtils.SearchFileList(";".join(exe_list)) - if exe == "": - raise RuntimeError( - "OpenPype executable was not found " + - "in the semicolon separated list " + - "\"" + ";".join(exe_list) + "\". " + - "The path to the render executable can be configured " + - "from the Plugin Configuration in the Deadline Monitor.") + if not exe: + raise RuntimeError(( + "OpenPype executable was not found in the semicolon " + "separated list \"{}\"." + "The path to the render executable can be configured" + " from the Plugin Configuration in the Deadline Monitor." 
+ ).format(";".join(exe_list))) print("--- OpenPype executable: {}".format(exe)) @@ -172,22 +326,22 @@ def inject_openpype_environment(deadlinePlugin): export_url ] - add_args = {} - add_args['project'] = \ - job.GetJobEnvironmentKeyValue('AVALON_PROJECT') - add_args['asset'] = job.GetJobEnvironmentKeyValue('AVALON_ASSET') - add_args['task'] = job.GetJobEnvironmentKeyValue('AVALON_TASK') - add_args['app'] = job.GetJobEnvironmentKeyValue('AVALON_APP_NAME') - add_args["envgroup"] = "farm" + add_kwargs = { + "project": job.GetJobEnvironmentKeyValue("AVALON_PROJECT"), + "asset": job.GetJobEnvironmentKeyValue("AVALON_ASSET"), + "task": job.GetJobEnvironmentKeyValue("AVALON_TASK"), + "app": job.GetJobEnvironmentKeyValue("AVALON_APP_NAME"), + "envgroup": "farm" + } + if all(add_kwargs.values()): + for key, value in add_kwargs.items(): + args.extend(["--{}".format(key), value]) - if all(add_args.values()): - for key, value in add_args.items(): - args.append("--{}".format(key)) - args.append(value) else: - msg = "Required env vars: AVALON_PROJECT, AVALON_ASSET, " + \ - "AVALON_TASK, AVALON_APP_NAME" - raise RuntimeError(msg) + raise RuntimeError(( + "Missing required env vars: AVALON_PROJECT, AVALON_ASSET," + " AVALON_TASK, AVALON_APP_NAME" + )) if not os.environ.get("OPENPYPE_MONGO"): print(">>> Missing OPENPYPE_MONGO env var, process won't work") @@ -208,12 +362,12 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Loading file ...") with open(export_url) as fp: contents = json.load(fp) - for key, value in contents.items(): - deadlinePlugin.SetProcessEnvironmentVariable(key, value) + + for key, value in contents.items(): + deadlinePlugin.SetProcessEnvironmentVariable(key, value) script_url = job.GetJobPluginInfoKeyValue("ScriptFilename") if script_url: - script_url = script_url.format(**contents).replace("\\", "/") print(">>> Setting script path {}".format(script_url)) job.SetJobPluginInfoKeyValue("ScriptFilename", script_url) diff --git a/openpype/modules/example_addons/example_addon/addon.py b/openpype/modules/example_addons/example_addon/addon.py index 50554b1e43..ead647b41d 100644 --- a/openpype/modules/example_addons/example_addon/addon.py +++ b/openpype/modules/example_addons/example_addon/addon.py @@ -13,10 +13,7 @@ import click from openpype.modules import ( JsonFilesSettingsDef, OpenPypeAddOn, - ModulesManager -) -# Import interface defined by this addon to be able find other addons using it -from openpype_interfaces import ( + ModulesManager, IPluginPaths, ITrayAction ) diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py index 678af0e577..6f14f8428d 100644 --- a/openpype/modules/ftrack/ftrack_module.py +++ b/openpype/modules/ftrack/ftrack_module.py @@ -5,8 +5,8 @@ import platform import click -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import ( +from openpype.modules import ( + OpenPypeModule, ITrayModule, IPluginPaths, ISettingsChangeListener diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py index fa7a89050c..046dfd9ad8 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py @@ -7,10 +7,8 @@ import pyblish.api from openpype.client import get_asset_by_id from openpype.lib import filter_profiles +from openpype.pipeline import KnownPublishError - -# Copy of constant 
`openpype_modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC` -CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" CUST_ATTR_GROUP = "openpype" @@ -19,7 +17,6 @@ CUST_ATTR_GROUP = "openpype" def get_pype_attr(session, split_hierarchical=True): custom_attributes = [] hier_custom_attributes = [] - # TODO remove deprecated "avalon" group from query cust_attrs_query = ( "select id, entity_type, object_type_id, is_hierarchical, default" " from CustomAttributeConfiguration" @@ -79,120 +76,284 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): create_task_status_profiles = [] def process(self, context): - self.context = context - if "hierarchyContext" not in self.context.data: + if "hierarchyContext" not in context.data: return hierarchy_context = self._get_active_assets(context) self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) - session = self.context.data["ftrackSession"] - project_name = self.context.data["projectEntity"]["name"] - query = 'Project where full_name is "{}"'.format(project_name) - project = session.query(query).one() - auto_sync_state = project["custom_attributes"][CUST_ATTR_AUTO_SYNC] + session = context.data["ftrackSession"] + project_name = context.data["projectName"] + project = session.query( + 'select id, full_name from Project where full_name is "{}"'.format( + project_name + ) + ).first() + if not project: + raise KnownPublishError( + "Project \"{}\" was not found on ftrack.".format(project_name) + ) + self.context = context self.session = session self.ft_project = project self.task_types = self.get_all_task_types(project) self.task_statuses = self.get_task_statuses(project) - # disable termporarily ftrack project's autosyncing - if auto_sync_state: - self.auto_sync_off(project) + # import ftrack hierarchy + self.import_to_ftrack(project_name, hierarchy_context) - try: - # import ftrack hierarchy - self.import_to_ftrack(project_name, hierarchy_context) - except Exception: - raise - finally: - if auto_sync_state: - self.auto_sync_on(project) + def query_ftrack_entitites(self, session, ft_project): + project_id = ft_project["id"] + entities = session.query(( + "select id, name, parent_id" + " from TypedContext where project_id is \"{}\"" + ).format(project_id)).all() - def import_to_ftrack(self, project_name, input_data, parent=None): + entities_by_id = {} + entities_by_parent_id = collections.defaultdict(list) + for entity in entities: + entities_by_id[entity["id"]] = entity + parent_id = entity["parent_id"] + entities_by_parent_id[parent_id].append(entity) + + ftrack_hierarchy = [] + ftrack_id_queue = collections.deque() + ftrack_id_queue.append((project_id, ftrack_hierarchy)) + while ftrack_id_queue: + item = ftrack_id_queue.popleft() + ftrack_id, parent_list = item + if ftrack_id == project_id: + entity = ft_project + name = entity["full_name"] + else: + entity = entities_by_id[ftrack_id] + name = entity["name"] + + children = [] + parent_list.append({ + "name": name, + "low_name": name.lower(), + "entity": entity, + "children": children, + }) + for child in entities_by_parent_id[ftrack_id]: + ftrack_id_queue.append((child["id"], children)) + return ftrack_hierarchy + + def find_matching_ftrack_entities( + self, hierarchy_context, ftrack_hierarchy + ): + walk_queue = collections.deque() + for entity_name, entity_data in hierarchy_context.items(): + walk_queue.append( + (entity_name, entity_data, ftrack_hierarchy) + ) + + matching_ftrack_entities = [] + while walk_queue: + item = walk_queue.popleft() + entity_name, entity_data, ft_children = item + 
matching_ft_child = None + for ft_child in ft_children: + if ft_child["low_name"] == entity_name.lower(): + matching_ft_child = ft_child + break + + if matching_ft_child is None: + continue + + entity = matching_ft_child["entity"] + entity_data["ft_entity"] = entity + matching_ftrack_entities.append(entity) + + hierarchy_children = entity_data.get("childs") + if not hierarchy_children: + continue + + for child_name, child_data in hierarchy_children.items(): + walk_queue.append( + (child_name, child_data, matching_ft_child["children"]) + ) + return matching_ftrack_entities + + def query_custom_attribute_values(self, session, entities, hier_attrs): + attr_ids = { + attr["id"] + for attr in hier_attrs + } + entity_ids = { + entity["id"] + for entity in entities + } + output = { + entity_id: {} + for entity_id in entity_ids + } + if not attr_ids or not entity_ids: + return {} + + joined_attr_ids = ",".join( + ['"{}"'.format(attr_id) for attr_id in attr_ids] + ) + + # Query values in chunks + chunk_size = int(5000 / len(attr_ids)) + # Make sure entity_ids is `list` for chunk selection + entity_ids = list(entity_ids) + results = [] + for idx in range(0, len(entity_ids), chunk_size): + joined_entity_ids = ",".join([ + '"{}"'.format(entity_id) + for entity_id in entity_ids[idx:idx + chunk_size] + ]) + results.extend( + session.query( + ( + "select value, entity_id, configuration_id" + " from CustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" + ).format( + joined_entity_ids, + joined_attr_ids + ) + ).all() + ) + + for result in results: + attr_id = result["configuration_id"] + entity_id = result["entity_id"] + output[entity_id][attr_id] = result["value"] + + return output + + def import_to_ftrack(self, project_name, hierarchy_context): # Prequery hiearchical custom attributes - hier_custom_attributes = get_pype_attr(self.session)[1] + hier_attrs = get_pype_attr(self.session)[1] hier_attr_by_key = { attr["key"]: attr - for attr in hier_custom_attributes + for attr in hier_attrs } + # Query user entity (for comments) + user = self.session.query( + "User where username is \"{}\"".format(self.session.api_user) + ).first() + if not user: + self.log.warning( + "Was not able to query current User {}".format( + self.session.api_user + ) + ) + + # Query ftrack hierarchy with parenting + ftrack_hierarchy = self.query_ftrack_entitites( + self.session, self.ft_project) + + # Fill ftrack entities to hierarchy context + # - there is no need to query entities again + matching_entities = self.find_matching_ftrack_entities( + hierarchy_context, ftrack_hierarchy) + # Query custom attribute values of each entity + custom_attr_values_by_id = self.query_custom_attribute_values( + self.session, matching_entities, hier_attrs) + # Get ftrack api module (as they are different per python version) ftrack_api = self.context.data["ftrackPythonModule"] - for entity_name in input_data: - entity_data = input_data[entity_name] + # Use queue of hierarchy items to process + import_queue = collections.deque() + for entity_name, entity_data in hierarchy_context.items(): + import_queue.append( + (entity_name, entity_data, None) + ) + + while import_queue: + item = import_queue.popleft() + entity_name, entity_data, parent = item + entity_type = entity_data['entity_type'] self.log.debug(entity_data) - self.log.debug(entity_type) - if entity_type.lower() == 'project': - entity = self.ft_project - - elif self.ft_project is None or parent is None: + entity = entity_data.get("ft_entity") + if entity is None and 
entity_type.lower() == "project": raise AssertionError( "Collected items are not in right order!" ) - # try to find if entity already exists - else: - query = ( - 'TypedContext where name is "{0}" and ' - 'project_id is "{1}"' - ).format(entity_name, self.ft_project["id"]) - try: - entity = self.session.query(query).one() - except Exception: - entity = None - # Create entity if not exists if entity is None: - entity = self.create_entity( - name=entity_name, - type=entity_type, - parent=parent - ) + entity = self.session.create(entity_type, { + "name": entity_name, + "parent": parent + }) + entity_data["ft_entity"] = entity + # self.log.info('entity: {}'.format(dict(entity))) # CUSTOM ATTRIBUTES - custom_attributes = entity_data.get('custom_attributes', []) - instances = [ - instance - for instance in self.context - if instance.data.get("asset") == entity["name"] - ] + custom_attributes = entity_data.get('custom_attributes', {}) + instances = [] + for instance in self.context: + instance_asset_name = instance.data.get("asset") + if ( + instance_asset_name + and instance_asset_name.lower() == entity["name"].lower() + ): + instances.append(instance) for instance in instances: instance.data["ftrackEntity"] = entity - for key in custom_attributes: + for key, cust_attr_value in custom_attributes.items(): + if cust_attr_value is None: + continue + hier_attr = hier_attr_by_key.get(key) # Use simple method if key is not hierarchical if not hier_attr: - assert (key in entity['custom_attributes']), ( - 'Missing custom attribute key: `{0}` in attrs: ' - '`{1}`'.format(key, entity['custom_attributes'].keys()) + if key not in entity["custom_attributes"]: + raise KnownPublishError(( + "Missing custom attribute in ftrack with name '{}'" + ).format(key)) + + entity['custom_attributes'][key] = cust_attr_value + continue + + attr_id = hier_attr["id"] + entity_values = custom_attr_values_by_id.get(entity["id"], {}) + # New value is defined by having id in values + # - it can be set to 'None' (ftrack allows that using API) + is_new_value = attr_id not in entity_values + attr_value = entity_values.get(attr_id) + + # Use ftrack operations method to set hiearchical + # attribute value. + # - this is because there may be non hiearchical custom + # attributes with different properties + entity_key = collections.OrderedDict(( + ("configuration_id", hier_attr["id"]), + ("entity_id", entity["id"]) + )) + op = None + if is_new_value: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": cust_attr_value} ) - entity['custom_attributes'][key] = custom_attributes[key] - - else: - # Use ftrack operations method to set hiearchical - # attribute value. 
- # - this is because there may be non hiearchical custom - # attributes with different properties - entity_key = collections.OrderedDict() - entity_key["configuration_id"] = hier_attr["id"] - entity_key["entity_id"] = entity["id"] - self.session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - entity_key, - "value", - ftrack_api.symbol.NOT_SET, - custom_attributes[key] - ) + elif attr_value != cust_attr_value: + op = ftrack_api.operation.UpdateEntityOperation( + "CustomAttributeValue", + entity_key, + "value", + attr_value, + cust_attr_value ) + if op is not None: + self.session.recorded_operations.push(op) + + if self.session.recorded_operations: try: self.session.commit() except Exception: @@ -206,7 +367,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): for instance in instances: task_name = instance.data.get("task") if task_name: - instances_by_task_name[task_name].append(instance) + instances_by_task_name[task_name.lower()].append(instance) tasks = entity_data.get('tasks', []) existing_tasks = [] @@ -247,30 +408,28 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): six.reraise(tp, value, tb) # Create notes. - user = self.session.query( - "User where username is \"{}\"".format(self.session.api_user) - ).first() - if user: - for comment in entity_data.get("comments", []): + entity_comments = entity_data.get("comments") + if user and entity_comments: + for comment in entity_comments: entity.create_note(comment, user) - else: - self.log.warning( - "Was not able to query current User {}".format( - self.session.api_user - ) - ) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) + + try: + self.session.commit() + except Exception: + tp, value, tb = sys.exc_info() + self.session.rollback() + self.session._configure_locations() + six.reraise(tp, value, tb) # Import children. - if 'childs' in entity_data: - self.import_to_ftrack( - project_name, entity_data['childs'], entity) + children = entity_data.get("childs") + if not children: + continue + + for entity_name, entity_data in children.items(): + import_queue.append( + (entity_name, entity_data, entity) + ) def create_links(self, project_name, entity_data, entity): # Clear existing links. @@ -366,48 +525,6 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): return task - def create_entity(self, name, type, parent): - entity = self.session.create(type, { - 'name': name, - 'parent': parent - }) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) - - return entity - - def auto_sync_off(self, project): - project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = False - - self.log.info("Ftrack autosync swithed off") - - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) - - def auto_sync_on(self, project): - - project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = True - - self.log.info("Ftrack autosync swithed on") - - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) - def _get_active_assets(self, context): """ Returns only asset dictionary. 
Usually the last part of deep dictionary which @@ -429,19 +546,17 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): hierarchy_context = context.data["hierarchyContext"] - active_assets = [] + active_assets = set() # filter only the active publishing insatnces for instance in context: if instance.data.get("publish") is False: continue - if not instance.data.get("asset"): - continue - - active_assets.append(instance.data["asset"]) + asset_name = instance.data.get("asset") + if asset_name: + active_assets.add(asset_name) # remove duplicity in list - active_assets = list(set(active_assets)) - self.log.debug("__ active_assets: {}".format(active_assets)) + self.log.debug("__ active_assets: {}".format(list(active_assets))) return get_pure_hierarchy_data(hierarchy_context) diff --git a/openpype/modules/ftrack/scripts/sub_event_status.py b/openpype/modules/ftrack/scripts/sub_event_status.py index 6c7ecb8351..eb3f63c04b 100644 --- a/openpype/modules/ftrack/scripts/sub_event_status.py +++ b/openpype/modules/ftrack/scripts/sub_event_status.py @@ -7,6 +7,8 @@ import signal import socket import datetime +import appdirs + import ftrack_api from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( @@ -253,6 +255,15 @@ class StatusFactory: ) }) + items.append({ + "type": "label", + "value": ( + "Local versions dir: {}
Version repository path: {}" + ).format( + appdirs.user_data_dir("openpype", "pypeclub"), + os.environ.get("OPENPYPE_PATH") + ) + }) items.append({"type": "label", "value": "---"}) return items diff --git a/openpype/modules/kitsu/kitsu_module.py b/openpype/modules/kitsu/kitsu_module.py index 23c032715b..b91373af20 100644 --- a/openpype/modules/kitsu/kitsu_module.py +++ b/openpype/modules/kitsu/kitsu_module.py @@ -3,8 +3,11 @@ import click import os -from openpype.modules import OpenPypeModule -from openpype_interfaces import IPluginPaths, ITrayAction +from openpype.modules import ( + OpenPypeModule, + IPluginPaths, + ITrayAction, +) class KitsuModule(OpenPypeModule, IPluginPaths, ITrayAction): diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py new file mode 100644 index 0000000000..896050f7e2 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +import os +import re + +import pyblish.api + + +class CollectKitsuUsername(pyblish.api.ContextPlugin): + """Collect Kitsu username from the kitsu login""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Kitsu username" + + def process(self, context): + kitsu_login = os.environ.get('KITSU_LOGIN') + + if not kitsu_login: + return + + kitsu_username = kitsu_login.split("@")[0].replace('.', ' ') + new_username = re.sub('[^a-zA-Z]', ' ', kitsu_username).title() + + for instance in context: + # Don't override customData if it already exists + if 'customData' not in instance.data: + instance.data['customData'] = {} + + instance.data['customData']["kitsuUsername"] = new_username diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py index bf80095225..e5e6439439 100644 --- a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py @@ -31,7 +31,6 @@ class IntegrateKitsuReview(pyblish.api.InstancePlugin): continue review_path = representation.get("published_path") - self.log.debug("Found review at: {}".format(review_path)) gazu.task.add_preview( diff --git a/openpype/modules/launcher_action.py b/openpype/modules/launcher_action.py index e3252e3842..c4331b6094 100644 --- a/openpype/modules/launcher_action.py +++ b/openpype/modules/launcher_action.py @@ -1,5 +1,7 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import ( + OpenPypeModule, + ITrayAction, +) class LauncherAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/log_viewer/log_view_module.py b/openpype/modules/log_viewer/log_view_module.py index da1628b71f..31e954fadd 100644 --- a/openpype/modules/log_viewer/log_view_module.py +++ b/openpype/modules/log_viewer/log_view_module.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule +from openpype.modules import OpenPypeModule, ITrayModule class LogViewModule(OpenPypeModule, ITrayModule): diff --git a/openpype/modules/muster/muster.py b/openpype/modules/muster/muster.py index 6e26ad2d7b..8d395d16e8 100644 --- a/openpype/modules/muster/muster.py +++ b/openpype/modules/muster/muster.py @@ -2,8 +2,7 @@ import os import json import appdirs import requests -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule +from openpype.modules import 
OpenPypeModule, ITrayModule class MusterModule(OpenPypeModule, ITrayModule): diff --git a/openpype/modules/project_manager_action.py b/openpype/modules/project_manager_action.py index 251964a059..5f74dd9ee5 100644 --- a/openpype/modules/project_manager_action.py +++ b/openpype/modules/project_manager_action.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class ProjectManagerAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/python_console_interpreter/module.py b/openpype/modules/python_console_interpreter/module.py index 8c4a2fba73..cb99c05e37 100644 --- a/openpype/modules/python_console_interpreter/module.py +++ b/openpype/modules/python_console_interpreter/module.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class PythonInterpreterAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/royalrender/royal_render_module.py b/openpype/modules/royalrender/royal_render_module.py index 4f72860ad6..10d74d01d1 100644 --- a/openpype/modules/royalrender/royal_render_module.py +++ b/openpype/modules/royalrender/royal_render_module.py @@ -2,8 +2,7 @@ """Module providing support for Royal Render.""" import os import openpype.modules -from openpype.modules import OpenPypeModule -from openpype_interfaces import IPluginPaths +from openpype.modules import OpenPypeModule, IPluginPaths class RoyalRenderModule(OpenPypeModule, IPluginPaths): diff --git a/openpype/modules/settings_action.py b/openpype/modules/settings_action.py index 1e7eca4dec..1902caff1d 100644 --- a/openpype/modules/settings_action.py +++ b/openpype/modules/settings_action.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class SettingsAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/shotgrid/shotgrid_module.py b/openpype/modules/shotgrid/shotgrid_module.py index 281c6fdcad..d26647d06a 100644 --- a/openpype/modules/shotgrid/shotgrid_module.py +++ b/openpype/modules/shotgrid/shotgrid_module.py @@ -1,12 +1,11 @@ import os -from openpype_interfaces import ( +from openpype.modules import ( + OpenPypeModule, ITrayModule, IPluginPaths, ) -from openpype.modules import OpenPypeModule - SHOTGRID_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/modules/slack/plugins/publish/collect_slack_family.py b/openpype/modules/slack/plugins/publish/collect_slack_family.py index 39b05937dc..27e899d59a 100644 --- a/openpype/modules/slack/plugins/publish/collect_slack_family.py +++ b/openpype/modules/slack/plugins/publish/collect_slack_family.py @@ -18,15 +18,15 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin): profiles = None def process(self, instance): - task_name = legacy_io.Session.get("AVALON_TASK") + task_data = instance.data["anatomyData"].get("task", {}) family = self.main_family_from_instance(instance) key_values = { "families": family, - "tasks": task_name, + "tasks": task_data.get("name"), + "task_types": task_data.get("type"), "hosts": instance.data["anatomyData"]["app"], "subsets": instance.data["subset"] } - profile = filter_profiles(self.profiles, key_values, logger=self.log) diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py 
index 643e55915b..0cd5ec9de8 100644 --- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py +++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py @@ -112,7 +112,13 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): if review_path: fill_pairs.append(("review_filepath", review_path)) - task_data = fill_data.get("task") + task_data = ( + copy.deepcopy(instance.data.get("anatomyData", {})).get("task") + or fill_data.get("task") + ) + if not isinstance(task_data, dict): + # fallback for legacy - if task_data is only task name + task_data["name"] = task_data if task_data: if ( "{task}" in message_templ @@ -142,13 +148,17 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): def _get_thumbnail_path(self, instance): """Returns abs url for thumbnail if present in instance repres""" - published_path = None + thumbnail_path = None for repre in instance.data.get("representations", []): if repre.get('thumbnail') or "thumbnail" in repre.get('tags', []): - if os.path.exists(repre["published_path"]): - published_path = repre["published_path"] + repre_thumbnail_path = ( + repre.get("published_path") or + os.path.join(repre["stagingDir"], repre["files"]) + ) + if os.path.exists(repre_thumbnail_path): + thumbnail_path = repre_thumbnail_path break - return published_path + return thumbnail_path def _get_review_path(self, instance): """Returns abs url for review if present in instance repres""" @@ -178,10 +188,17 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): channel=channel, title=os.path.basename(p_file) ) - attachment_str += "\n<{}|{}>".format( - response["file"]["permalink"], - os.path.basename(p_file)) - file_ids.append(response["file"]["id"]) + if response.get("error"): + error_str = self._enrich_error( + str(response.get("error")), + channel) + self.log.warning( + "Error happened: {}".format(error_str)) + else: + attachment_str += "\n<{}|{}>".format( + response["file"]["permalink"], + os.path.basename(p_file)) + file_ids.append(response["file"]["id"]) if publish_files: message += attachment_str diff --git a/openpype/modules/slack/slack_module.py b/openpype/modules/slack/slack_module.py index 499c1c19ce..797ae19f4a 100644 --- a/openpype/modules/slack/slack_module.py +++ b/openpype/modules/slack/slack_module.py @@ -1,6 +1,5 @@ import os -from openpype.modules import OpenPypeModule -from openpype.modules.interfaces import IPluginPaths +from openpype.modules import OpenPypeModule, IPluginPaths SLACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/openpype/modules/sync_server/rest_api.py b/openpype/modules/sync_server/rest_api.py new file mode 100644 index 0000000000..a7d9dd80b7 --- /dev/null +++ b/openpype/modules/sync_server/rest_api.py @@ -0,0 +1,37 @@ +from aiohttp.web_response import Response +from openpype.lib import Logger + + +class SyncServerModuleRestApi: + """ + REST API endpoint used for calling from hosts when context change + happens in Workfile app. 
+ """ + + def __init__(self, user_module, server_manager): + self._log = None + self.module = user_module + self.server_manager = server_manager + + self.prefix = "/sync_server" + + self.register() + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + def register(self): + self.server_manager.add_route( + "POST", + self.prefix + "/reset_timer", + self.reset_timer, + ) + + async def reset_timer(self, _request): + """Force timer to run immediately.""" + self.module.reset_timer() + + return Response(status=200) diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py index 8b11055e65..d0a40a60ff 100644 --- a/openpype/modules/sync_server/sync_server.py +++ b/openpype/modules/sync_server/sync_server.py @@ -236,6 +236,7 @@ class SyncServerThread(threading.Thread): """ def __init__(self, module): self.log = Logger.get_logger(self.__class__.__name__) + super(SyncServerThread, self).__init__() self.module = module self.loop = None diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index a478faa9ef..653ee50541 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -11,9 +11,12 @@ from collections import deque, defaultdict import click from bson.objectid import ObjectId -from openpype.client import get_projects -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule +from openpype.client import ( + get_projects, + get_representations, + get_representation_by_id, +) +from openpype.modules import OpenPypeModule, ITrayModule from openpype.settings import ( get_project_settings, get_system_settings, @@ -30,9 +33,6 @@ from .providers import lib from .utils import time_function, SyncStatus, SiteAlreadyPresentError -from openpype.client import get_representations, get_representation_by_id - - log = Logger.get_logger("SyncServer") @@ -136,14 +136,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ Start of Public API """ def add_site(self, project_name, representation_id, site_name=None, - force=False): + force=False, priority=None, reset_timer=False): """ Adds new site to representation to be synced. 'project_name' must have synchronization enabled (globally or project only) - Used as a API endpoint from outside applications (Loader etc). + Used as an API endpoint from outside applications (Loader etc). Use 'force' to reset existing site. @@ -152,6 +152,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): representation_id (string): MongoDB _id value site_name (string): name of configured and active site force (bool): reset site if exists + priority (int): set priority + reset_timer (bool): if delay timer should be reset, eg. user mark + some representation to be synced manually Throws: SiteAlreadyPresentError - if adding already existing site and @@ -167,7 +170,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.reset_site_on_representation(project_name, representation_id, site_name=site_name, - force=force) + force=force, + priority=priority) + + if reset_timer: + self.reset_timer() def remove_site(self, project_name, representation_id, site_name, remove_local_files=False): @@ -911,7 +918,59 @@ class SyncServerModule(OpenPypeModule, ITrayModule): In case of user's involvement (reset site), start that right away. 
""" - self.sync_server_thread.reset_timer() + + if not self.enabled: + return + + if self.sync_server_thread is None: + self._reset_timer_with_rest_api() + else: + self.sync_server_thread.reset_timer() + + def is_representation_on_site( + self, project_name, representation_id, site_name + ): + """Checks if 'representation_id' has all files avail. on 'site_name'""" + representation = get_representation_by_id(project_name, + representation_id, + fields=["_id", "files"]) + if not representation: + return False + + on_site = False + for file_info in representation.get("files", []): + for site in file_info.get("sites", []): + if site["name"] != site_name: + continue + + if (site.get("progress") or site.get("error") or + not site.get("created_dt")): + return False + on_site = True + + return on_site + + def _reset_timer_with_rest_api(self): + # POST to webserver sites to add to representations + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + if not webserver_url: + self.log.warning("Couldn't find webserver url") + return + + rest_api_url = "{}/sync_server/reset_timer".format( + webserver_url + ) + + try: + import requests + except Exception: + self.log.warning( + "Couldn't add sites to representations " + "('requests' is not available)" + ) + return + + requests.post(rest_api_url) def get_enabled_projects(self): """Returns list of projects which have SyncServer enabled.""" @@ -1544,12 +1603,12 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Args: project_name (string): name of project - force to db connection as each file might come from different collection - new_file_id (string): + new_file_id (string): only present if file synced successfully file (dictionary): info about processed file (pulled from DB) representation (dictionary): parent repr of file (from DB) site (string): label ('gdrive', 'S3') error (string): exception message - progress (float): 0-1 of progress of upload/download + progress (float): 0-0.99 of progress of upload/download priority (int): 0-100 set priority Returns: @@ -1655,7 +1714,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): def reset_site_on_representation(self, project_name, representation_id, side=None, file_id=None, site_name=None, - remove=False, pause=None, force=False): + remove=False, pause=None, force=False, + priority=None): """ Reset information about synchronization for particular 'file_id' and provider. 
@@ -1678,6 +1738,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): remove (bool): if True remove site altogether pause (bool or None): if True - pause, False - unpause force (bool): hard reset - currently only for add_site + priority (int): set priority Raises: SiteAlreadyPresentError - if adding already existing site and @@ -1705,6 +1766,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): elem = {"name": site_name} + # Add priority + if priority: + elem["priority"] = priority + if file_id: # reset site for particular file self._reset_site_for_file(project_name, representation_id, elem, file_id, site_name) @@ -2089,6 +2154,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): def cli(self, click_group): click_group.add_command(cli_main) + # Webserver module implementation + def webserver_initialization(self, server_manager): + """Add routes for syncs.""" + if self.tray_initialized: + from .rest_api import SyncServerModuleRestApi + self.rest_api_obj = SyncServerModuleRestApi( + self, server_manager + ) + @click.group(SyncServerModule.name, help="SyncServer module related commands.") def cli_main(): diff --git a/openpype/modules/timers_manager/rest_api.py b/openpype/modules/timers_manager/rest_api.py index 4a2e9e6575..979db9075b 100644 --- a/openpype/modules/timers_manager/rest_api.py +++ b/openpype/modules/timers_manager/rest_api.py @@ -21,7 +21,7 @@ class TimersManagerModuleRestApi: @property def log(self): if self._log is None: - self._log = Logger.get_logger(self.__ckass__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log def register(self): diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index c168e9534d..0ba68285a4 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -3,8 +3,8 @@ import platform from openpype.client import get_asset_by_name -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( +from openpype.modules import ( + OpenPypeModule, ITrayService, IPluginPaths ) diff --git a/openpype/modules/webserver/host_console_listener.py b/openpype/modules/webserver/host_console_listener.py index 6138f9f097..fdfe1ba688 100644 --- a/openpype/modules/webserver/host_console_listener.py +++ b/openpype/modules/webserver/host_console_listener.py @@ -5,7 +5,7 @@ import logging from concurrent.futures import CancelledError from Qt import QtWidgets -from openpype_interfaces import ITrayService +from openpype.modules import ITrayService log = logging.getLogger(__name__) diff --git a/openpype/modules/webserver/webserver_module.py b/openpype/modules/webserver/webserver_module.py index 16861abd29..354ab1e4f9 100644 --- a/openpype/modules/webserver/webserver_module.py +++ b/openpype/modules/webserver/webserver_module.py @@ -24,8 +24,7 @@ import os import socket from openpype import resources -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayService +from openpype.modules import OpenPypeModule, ITrayService class WebServerModule(OpenPypeModule, ITrayService): diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 89b876e6de..b0877d0a29 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -25,6 +25,8 @@ from .creator_plugins import ( deregister_creator_plugin, register_creator_plugin_path, deregister_creator_plugin_path, + + cache_and_get_instances, ) from .context import ( diff 
--git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 4fd460ffea..9c468ae8fc 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -199,7 +199,7 @@ class InstanceMember: }) -class AttributeValues: +class AttributeValues(object): """Container which keep values of Attribute definitions. Goal is to have one object which hold values of attribute definitions for @@ -584,6 +584,7 @@ class CreatedInstance: if key in data: data.pop(key) + self._data["variant"] = self._data.get("variant") or "" # Stored creator specific attribute values # {key: value} creator_values = copy.deepcopy(orig_creator_attributes) diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index ef92b7ccc4..bb5ce00452 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -1,5 +1,6 @@ import os import copy +import collections from abc import ( ABCMeta, @@ -392,8 +393,9 @@ class BaseCreator: asset_doc(dict): Asset document for which subset is created. project_name(str): Project name. host_name(str): Which host creates subset. - instance(str|None): Object of 'CreatedInstance' for which is - subset name updated. Passed only on subset name update. + instance(CreatedInstance|None): Object of 'CreatedInstance' for + which is subset name updated. Passed only on subset name + update. """ dynamic_data = self.get_dynamic_data( @@ -674,3 +676,34 @@ def deregister_creator_plugin_path(path): deregister_plugin_path(BaseCreator, path) deregister_plugin_path(LegacyCreator, path) deregister_plugin_path(SubsetConvertorPlugin, path) + + +def cache_and_get_instances(creator, shared_key, list_instances_func): + """Common approach to cache instances in shared data. + + This is helper function which does not handle cases when a 'shared_key' is + used for different list instances functions. The same approach of caching + instances into 'collection_shared_data' is not required but is so common + we've decided to unify it to some degree. + + Function 'list_instances_func' is called only if 'shared_key' is not + available in 'collection_shared_data' on creator. + + Args: + creator (Creator): Plugin which would like to get instance data. + shared_key (str): Key under which output of function will be stored. + list_instances_func (Function): Function that will return instance data + if data were not yet stored under 'shared_key'. + + Returns: + Dict[str, Dict[str, Any]]: Cached instances by creator identifier from + result of passed function. 
+ """ + + if shared_key not in creator.collection_shared_data: + value = collections.defaultdict(list) + for instance in list_instances_func(): + identifier = instance.get("creator_identifier") + value[identifier].append(instance) + creator.collection_shared_data[shared_key] = value + return creator.collection_shared_data[shared_key] diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index 8433816908..55ce8e06f4 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -15,7 +15,6 @@ Provides: import json import pyblish.api -from openpype.pipeline import legacy_io from openpype.pipeline.template_data import get_template_data @@ -53,7 +52,7 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): asset_entity = context.data.get("assetEntity") task_name = None if asset_entity: - task_name = legacy_io.Session["AVALON_TASK"] + task_name = context.data["task"] anatomy_data = get_template_data( project_entity, asset_entity, task_name, host_name, system_settings diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index f67d3373d9..909b49a07d 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -188,7 +188,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): for subset_doc in subset_docs: subset_id = subset_doc["_id"] last_version_doc = last_version_docs_by_subset_id.get(subset_id) - if last_version_docs_by_subset_id is None: + if last_version_doc is None: continue asset_id = subset_doc["parent"] diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_context_entities.py similarity index 90% rename from openpype/plugins/publish/collect_avalon_entities.py rename to openpype/plugins/publish/collect_context_entities.py index 3b05b6ae98..31fbeb5dbd 100644 --- a/openpype/plugins/publish/collect_avalon_entities.py +++ b/openpype/plugins/publish/collect_context_entities.py @@ -3,6 +3,8 @@ Requires: session -> AVALON_ASSET context -> projectName + context -> asset + context -> task Provides: context -> projectEntity - Project document from database. 
@@ -13,20 +15,19 @@ Provides: import pyblish.api from openpype.client import get_project, get_asset_by_name -from openpype.pipeline import legacy_io, KnownPublishError +from openpype.pipeline import KnownPublishError -class CollectAvalonEntities(pyblish.api.ContextPlugin): - """Collect Anatomy into Context.""" +class CollectContextEntities(pyblish.api.ContextPlugin): + """Collect entities into Context.""" order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Avalon Entities" + label = "Collect Context Entities" def process(self, context): - legacy_io.install() project_name = context.data["projectName"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] + asset_name = context.data["asset"] + task_name = context.data["task"] project_entity = get_project(project_name) if not project_entity: diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index d457bdc988..f299d1c6e9 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -3,26 +3,26 @@ import re import copy import json import shutil - from abc import ABCMeta, abstractmethod + import six - import clique - +import speedcopy import pyblish.api from openpype.lib import ( get_ffmpeg_tool_path, - get_ffprobe_streams, path_to_subprocess_arg, run_subprocess, - +) +from openpype.lib.transcoding import ( + IMAGE_EXTENSIONS, + get_ffprobe_streams, should_convert_for_ffmpeg, convert_input_paths_for_ffmpeg, - get_transcode_temp_directory + get_transcode_temp_directory, ) -import speedcopy class ExtractReview(pyblish.api.InstancePlugin): @@ -175,6 +175,26 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs_per_representations.append((repre, outputs)) return outputs_per_representations + def _single_frame_filter(self, input_filepaths, output_defs): + single_frame_image = False + if len(input_filepaths) == 1: + ext = os.path.splitext(input_filepaths[0])[-1] + single_frame_image = ext.lower() in IMAGE_EXTENSIONS + + filtered_defs = [] + for output_def in output_defs: + output_filters = output_def.get("filter") or {} + frame_filter = output_filters.get("single_frame_filter") + if ( + (not single_frame_image and frame_filter == "single_frame") + or (single_frame_image and frame_filter == "multi_frame") + ): + continue + + filtered_defs.append(output_def) + + return filtered_defs + @staticmethod def get_instance_label(instance): return ( @@ -195,7 +215,7 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs_per_repres = self._get_outputs_per_representations( instance, profile_outputs ) - for repre, outpu_defs in outputs_per_repres: + for repre, output_defs in outputs_per_repres: # Check if input should be preconverted before processing # Store original staging dir (it's value may change) src_repre_staging_dir = repre["stagingDir"] @@ -216,6 +236,16 @@ class ExtractReview(pyblish.api.InstancePlugin): if first_input_path is None: first_input_path = filepath + filtered_output_defs = self._single_frame_filter( + input_filepaths, output_defs + ) + if not filtered_output_defs: + self.log.debug(( + "Repre: {} - All output definitions were filtered" + " out by single frame filter. 
Skipping" + ).format(repre["name"])) + continue + # Skip if file is not set if first_input_path is None: self.log.warning(( @@ -249,7 +279,10 @@ class ExtractReview(pyblish.api.InstancePlugin): try: self._render_output_definitions( - instance, repre, src_repre_staging_dir, outpu_defs + instance, + repre, + src_repre_staging_dir, + filtered_output_defs ) finally: @@ -263,10 +296,10 @@ class ExtractReview(pyblish.api.InstancePlugin): shutil.rmtree(new_staging_dir) def _render_output_definitions( - self, instance, repre, src_repre_staging_dir, outpu_defs + self, instance, repre, src_repre_staging_dir, output_defs ): fill_data = copy.deepcopy(instance.data["anatomyData"]) - for _output_def in outpu_defs: + for _output_def in output_defs: output_def = copy.deepcopy(_output_def) # Make sure output definition has "tags" key if "tags" not in output_def: @@ -468,7 +501,7 @@ class ExtractReview(pyblish.api.InstancePlugin): first_sequence_frame += handle_start ext = os.path.splitext(repre["files"][0])[1].replace(".", "") - if ext in self.alpha_exts: + if ext.lower() in self.alpha_exts: input_allow_bg = True return { @@ -901,6 +934,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if output_ext.startswith("."): output_ext = output_ext[1:] + output_ext = output_ext.lower() + # Store extension to representation new_repre["ext"] = output_ext @@ -1659,9 +1694,7 @@ class ExtractReview(pyblish.api.InstancePlugin): return True return False - def filter_output_defs( - self, profile, subset_name, families - ): + def filter_output_defs(self, profile, subset_name, families): """Return outputs matching input instance families. Output definitions without families filter are marked as valid. diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py index 0998e643e6..401270a788 100644 --- a/openpype/plugins/publish/integrate.py +++ b/openpype/plugins/publish/integrate.py @@ -129,7 +129,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): "mvUsd", "mvUsdComposition", "mvUsdOverride", - "simpleUnrealTexture" + "simpleUnrealTexture", + "online" ] default_template_name = "publish" diff --git a/openpype/plugins/publish/preintegrate_thumbnail_representation.py b/openpype/plugins/publish/preintegrate_thumbnail_representation.py index f9e23223e6..b88ccee9dc 100644 --- a/openpype/plugins/publish/preintegrate_thumbnail_representation.py +++ b/openpype/plugins/publish/preintegrate_thumbnail_representation.py @@ -21,9 +21,8 @@ class PreIntegrateThumbnails(pyblish.api.InstancePlugin): label = "Override Integrate Thumbnail Representations" order = pyblish.api.IntegratorOrder - 0.1 - families = ["review"] - integrate_profiles = {} + integrate_profiles = [] def process(self, instance): repres = instance.data.get("representations") diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index 3415c4451f..0ac56a4dad 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -48,10 +48,16 @@ "file": "{originalBasename}_{@version}.{ext}", "path": "{@folder}/{@file}" }, + "online": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", + "file": "{originalBasename}<.{@frame}><_{udim}>.{ext}", + "path": "{@folder}/{@file}" + }, "__dynamic_keys_labels__": { "maya2unreal": "Maya to Unreal", "simpleUnrealTextureHero": "Simple Unreal Texture - Hero", - "simpleUnrealTexture": "Simple Unreal Texture" + 
"simpleUnrealTexture": "Simple Unreal Texture", + "online": "online" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index 0f3080ad64..34baf9ba06 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -142,7 +142,7 @@ "exr16fpdwaa" ], "reel_name": "OP_LoadedReel", - "clip_name_template": "{asset}_{subset}<_{output}>" + "clip_name_template": "{batch}_{asset}_{subset}<_{output}>" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index b128564bc2..46b8b1b0c8 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -53,6 +53,62 @@ "families": [], "hosts": [], "outputs": { + "png": { + "ext": "png", + "tags": [ + "ftrackreview" + ], + "burnins": [], + "ffmpeg_args": { + "video_filters": [], + "audio_filters": [], + "input": [], + "output": [] + }, + "filter": { + "families": [ + "render", + "review", + "ftrack" + ], + "subsets": [], + "custom_tags": [], + "single_frame_filter": "single_frame" + }, + "overscan_crop": "", + "overscan_color": [ + 0, + 0, + 0, + 255 + ], + "width": 1920, + "height": 1080, + "scale_pixel_aspect": true, + "bg_color": [ + 0, + 0, + 0, + 0 + ], + "letter_box": { + "enabled": false, + "ratio": 0.0, + "fill_color": [ + 0, + 0, + 0, + 255 + ], + "line_thickness": 0, + "line_color": [ + 255, + 0, + 0, + 255 + ] + } + }, "h264": { "ext": "mp4", "tags": [ @@ -79,7 +135,8 @@ "ftrack" ], "subsets": [], - "custom_tags": [] + "custom_tags": [], + "single_frame_filter": "multi_frame" }, "overscan_crop": "", "overscan_color": [ @@ -231,6 +288,17 @@ "task_types": [], "tasks": [], "template_name": "maya2unreal" + }, + { + "families": [ + "online" + ], + "hosts": [ + "traypublisher" + ], + "task_types": [], + "tasks": [], + "template_name": "online" } ] }, @@ -401,7 +469,8 @@ "hosts": [], "task_types": [], "tasks": [], - "enabled": true + "enabled": true, + "use_last_published_workfile": false } ], "open_workfile_tool_on_startup": [ diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json index 5db2a79772..e99b96b8c4 100644 --- a/openpype/settings/defaults/project_settings/traypublisher.json +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -303,5 +303,12 @@ "extensions": [ ".mov" ] + }, + "publish": { + "ValidateFrameRange": { + "enabled": true, + "optional": true, + "active": true + } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json index 88b5a598cd..e03ce32030 100644 --- a/openpype/settings/defaults/project_settings/tvpaint.json +++ b/openpype/settings/defaults/project_settings/tvpaint.json @@ -11,6 +11,11 @@ 255, 255, 255 + ], + "families_to_review": [ + "review", + "renderlayer", + "renderscene" ] }, "ValidateProjectSettings": { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json index 7c61aeed50..faa5033d2a 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json +++ 
b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -311,6 +311,24 @@ "object_type": "text" } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "schema_template", + "name": "template_validate_plugin", + "template_data": [ + { + "key": "ValidateFrameRange", + "label": "Validate frame range" + } + ] + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json index 20fe5b0855..61342ef738 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json @@ -56,6 +56,18 @@ "key": "review_bg", "label": "Review BG color", "use_alpha": false + }, + { + "type": "enum", + "key": "families_to_review", + "label": "Families to review", + "multiselection": true, + "enum_items": [ + {"review": "review"}, + {"renderpass": "renderPass"}, + {"renderlayer": "renderLayer"}, + {"renderscene": "renderScene"} + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index a2a566da0e..3667c9d5d8 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -16,22 +16,26 @@ { "type": "number", "key": "frameStart", - "label": "Frame Start" + "label": "Frame Start", + "maximum": 999999999 }, { "type": "number", "key": "frameEnd", - "label": "Frame End" + "label": "Frame End", + "maximum": 999999999 }, { "type": "number", "key": "clipIn", - "label": "Clip In" + "label": "Clip In", + "maximum": 999999999 }, { "type": "number", "key": "clipOut", - "label": "Clip Out" + "label": "Clip Out", + "maximum": 999999999 }, { "type": "number", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 51fc8dedf3..742437fbde 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -304,6 +304,20 @@ "label": "Custom Tags", "type": "list", "object_type": "text" + }, + { + "type": "label", + "label": "Use output always / only if input is 1 frame image / only if has 2+ frames or is video" + }, + { + "type": "enum", + "key": "single_frame_filter", + "default": "everytime", + "enum_items": [ + {"everytime": "Always"}, + {"single_frame": "Only if input has 1 image frame"}, + {"multi_frame": "Only if input is video or sequence of frames"} + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json index ba446135e2..962008d476 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json @@ -149,6 +149,11 @@ "type": "boolean", "key": "enabled", "label": "Enabled" + }, + { + "type": "boolean", + "key": "use_last_published_workfile", + "label": "Use last published workfile" } ] } diff 
--git a/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json new file mode 100644 index 0000000000..b57cad6719 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json @@ -0,0 +1,26 @@ +[ + { + "type": "dict", + "collapsible": true, + "key": "{key}", + "label": "{label}", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + } + ] + } +] diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 5eaddf6e6e..288c587d03 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -138,8 +138,7 @@ def save_studio_settings(data): SaveWarningExc: If any module raises the exception. """ # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener old_data = get_system_settings() default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] @@ -186,8 +185,7 @@ def save_project_settings(project_name, overrides): SaveWarningExc: If any module raises the exception. """ # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener default_values = get_default_settings()[PROJECT_SETTINGS_KEY] if project_name: @@ -248,8 +246,7 @@ def save_project_anatomy(project_name, anatomy_data): SaveWarningExc: If any module raises the exception. 
""" # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener default_values = get_default_settings()[PROJECT_ANATOMY_KEY] if project_name: diff --git a/openpype/style/style.css b/openpype/style/style.css index 887c044dae..a7a48cdb9d 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -1126,6 +1126,10 @@ ValidationArtistMessage QLabel { background: transparent; } +CreateNextPageOverlay { + font-size: 32pt; +} + /* Settings - NOT USED YET - we need to define font family for settings UI */ diff --git a/openpype/widgets/attribute_defs/__init__.py b/openpype/tools/attribute_defs/__init__.py similarity index 65% rename from openpype/widgets/attribute_defs/__init__.py rename to openpype/tools/attribute_defs/__init__.py index ce6b80109e..f991fdec3d 100644 --- a/openpype/widgets/attribute_defs/__init__.py +++ b/openpype/tools/attribute_defs/__init__.py @@ -3,8 +3,14 @@ from .widgets import ( AttributeDefinitionsWidget, ) +from .dialog import ( + AttributeDefinitionsDialog, +) + __all__ = ( "create_widget_for_attr_def", "AttributeDefinitionsWidget", + + "AttributeDefinitionsDialog", ) diff --git a/openpype/tools/attribute_defs/dialog.py b/openpype/tools/attribute_defs/dialog.py new file mode 100644 index 0000000000..69923d54e5 --- /dev/null +++ b/openpype/tools/attribute_defs/dialog.py @@ -0,0 +1,33 @@ +from Qt import QtWidgets + +from .widgets import AttributeDefinitionsWidget + + +class AttributeDefinitionsDialog(QtWidgets.QDialog): + def __init__(self, attr_defs, parent=None): + super(AttributeDefinitionsDialog, self).__init__(parent) + + attrs_widget = AttributeDefinitionsWidget(attr_defs, self) + + btns_widget = QtWidgets.QWidget(self) + ok_btn = QtWidgets.QPushButton("OK", btns_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addStretch(1) + btns_layout.addWidget(ok_btn, 0) + btns_layout.addWidget(cancel_btn, 0) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(attrs_widget, 0) + main_layout.addStretch(1) + main_layout.addWidget(btns_widget, 0) + + ok_btn.clicked.connect(self.accept) + cancel_btn.clicked.connect(self.reject) + + self._attrs_widget = attrs_widget + + def get_values(self): + return self._attrs_widget.current_value() diff --git a/openpype/widgets/attribute_defs/files_widget.py b/openpype/tools/attribute_defs/files_widget.py similarity index 99% rename from openpype/widgets/attribute_defs/files_widget.py rename to openpype/tools/attribute_defs/files_widget.py index 3f1e6a34e1..738e50ba07 100644 --- a/openpype/widgets/attribute_defs/files_widget.py +++ b/openpype/tools/attribute_defs/files_widget.py @@ -349,7 +349,7 @@ class FilesModel(QtGui.QStandardItemModel): item.setData(file_item.filenames, FILENAMES_ROLE) item.setData(file_item.directory, DIRPATH_ROLE) item.setData(icon_pixmap, ITEM_ICON_ROLE) - item.setData(file_item.ext, EXT_ROLE) + item.setData(file_item.lower_ext, EXT_ROLE) item.setData(file_item.is_dir, IS_DIR_ROLE) item.setData(file_item.is_sequence, IS_SEQUENCE_ROLE) @@ -463,7 +463,7 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): for filepath in filepaths: if os.path.isfile(filepath): _, ext = os.path.splitext(filepath) - if ext in self._allowed_extensions: + if ext.lower() in self._allowed_extensions: return True elif self._allow_folders: @@ -475,7 
+475,7 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): for filepath in filepaths: if os.path.isfile(filepath): _, ext = os.path.splitext(filepath) - if ext in self._allowed_extensions: + if ext.lower() in self._allowed_extensions: filtered_paths.append(filepath) elif self._allow_folders: diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/tools/attribute_defs/widgets.py similarity index 93% rename from openpype/widgets/attribute_defs/widgets.py rename to openpype/tools/attribute_defs/widgets.py index dc697b08a6..1ffb3d3799 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/tools/attribute_defs/widgets.py @@ -6,6 +6,7 @@ from Qt import QtWidgets, QtCore from openpype.lib.attribute_definitions import ( AbtractAttrDef, UnknownDef, + HiddenDef, NumberDef, TextDef, EnumDef, @@ -22,6 +23,16 @@ from .files_widget import FilesWidget def create_widget_for_attr_def(attr_def, parent=None): + widget = _create_widget_for_attr_def(attr_def, parent) + if attr_def.hidden: + widget.setVisible(False) + + if attr_def.disabled: + widget.setEnabled(False) + return widget + + +def _create_widget_for_attr_def(attr_def, parent=None): if not isinstance(attr_def, AbtractAttrDef): raise TypeError("Unexpected type \"{}\" expected \"{}\"".format( str(type(attr_def)), AbtractAttrDef @@ -42,6 +53,9 @@ def create_widget_for_attr_def(attr_def, parent=None): if isinstance(attr_def, UnknownDef): return UnknownAttrWidget(attr_def, parent) + if isinstance(attr_def, HiddenDef): + return HiddenAttrWidget(attr_def, parent) + if isinstance(attr_def, FileDef): return FileAttrWidget(attr_def, parent) @@ -115,6 +129,10 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget): self._current_keys.add(attr_def.key) widget = create_widget_for_attr_def(attr_def, self) + self._widgets.append(widget) + + if attr_def.hidden: + continue expand_cols = 2 if attr_def.is_value_def and attr_def.is_label_horizontal: @@ -133,7 +151,6 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget): layout.addWidget( widget, row, col_num, 1, expand_cols ) - self._widgets.append(widget) row += 1 def set_value(self, value): @@ -459,6 +476,29 @@ class UnknownAttrWidget(_BaseAttrDefWidget): self._input_widget.setText(str_value) +class HiddenAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + self.setVisible(False) + self._value = None + self._multivalue = False + + def setVisible(self, visible): + if visible: + visible = False + super(HiddenAttrWidget, self).setVisible(visible) + + def current_value(self): + if self._multivalue: + raise ValueError("{} can't output for multivalue.".format( + self.__class__.__name__ + )) + return self._value + + def set_value(self, value, multivalue=False): + self._value = copy.deepcopy(value) + self._multivalue = multivalue + + class FileAttrWidget(_BaseAttrDefWidget): def _ui_init(self): input_widget = FilesWidget( diff --git a/openpype/tools/creator/model.py b/openpype/tools/creator/model.py index d3d60b96f2..307993103b 100644 --- a/openpype/tools/creator/model.py +++ b/openpype/tools/creator/model.py @@ -36,7 +36,7 @@ class CreatorsModel(QtGui.QStandardItemModel): if not items: item = QtGui.QStandardItem("No registered families") item.setEnabled(False) - item.setData(QtCore.Qt.ItemIsEnabled, False) + item.setData(False, QtCore.Qt.ItemIsEnabled) items.append(item) self.invisibleRootItem().appendRows(items) diff --git a/openpype/tools/loader/lib.py b/openpype/tools/loader/lib.py index 28e94237ec..78a25d8d85 100644 --- a/openpype/tools/loader/lib.py +++ b/openpype/tools/loader/lib.py @@ 
-2,6 +2,8 @@ import inspect from Qt import QtGui import qtawesome +from openpype.lib.attribute_definitions import AbtractAttrDef +from openpype.tools.attribute_defs import AttributeDefinitionsDialog from openpype.tools.utils.widgets import ( OptionalAction, OptionDialog @@ -34,21 +36,30 @@ def get_options(action, loader, parent, repre_contexts): None when dialog was closed or cancelled, in all other cases {} if no options """ + # Pop option dialog options = {} loader_options = loader.get_options(repre_contexts) - if getattr(action, "optioned", False) and loader_options: + if not getattr(action, "optioned", False) or not loader_options: + return options + + if isinstance(loader_options[0], AbtractAttrDef): + qargparse_options = False + dialog = AttributeDefinitionsDialog(loader_options, parent) + else: + qargparse_options = True dialog = OptionDialog(parent) - dialog.setWindowTitle(action.label + " Options") dialog.create(loader_options) - if not dialog.exec_(): - return None + dialog.setWindowTitle(action.label + " Options") - # Get option - options = dialog.parse() + if not dialog.exec_(): + return None - return options + # Get option + if qargparse_options: + return dialog.parse() + return dialog.get_values() def add_representation_loaders_to_menu(loaders, menu, repre_contexts): diff --git a/openpype/tools/project_manager/project_manager/view.py b/openpype/tools/project_manager/project_manager/view.py index cca892ef72..8d1fe54e83 100644 --- a/openpype/tools/project_manager/project_manager/view.py +++ b/openpype/tools/project_manager/project_manager/view.py @@ -28,7 +28,7 @@ class NameDef: class NumberDef: def __init__(self, minimum=None, maximum=None, decimals=None): self.minimum = 0 if minimum is None else minimum - self.maximum = 999999 if maximum is None else maximum + self.maximum = 999999999 if maximum is None else maximum self.decimals = 0 if decimals is None else decimals diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 10734a69f4..615f3eb8d9 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -90,9 +90,9 @@ class AssetDocsCache: return project_name = self._controller.project_name - asset_docs = get_assets( + asset_docs = list(get_assets( project_name, fields=self.projection.keys() - ) + )) asset_docs_by_name = {} task_names_by_asset_name = {} for asset_doc in asset_docs: diff --git a/openpype/tools/publisher/publish_report_viewer/widgets.py b/openpype/tools/publisher/publish_report_viewer/widgets.py index ff388fb277..0d35ac3512 100644 --- a/openpype/tools/publisher/publish_report_viewer/widgets.py +++ b/openpype/tools/publisher/publish_report_viewer/widgets.py @@ -27,6 +27,9 @@ class PluginLoadReportModel(QtGui.QStandardItemModel): parent = self.invisibleRootItem() parent.removeRows(0, parent.rowCount()) + if report is None: + return + new_items = [] new_items_by_filepath = {} for filepath in report.crashed_plugin_paths.keys(): diff --git a/openpype/tools/publisher/publish_report_viewer/window.py b/openpype/tools/publisher/publish_report_viewer/window.py index 2c249d058c..646ae69e7f 100644 --- a/openpype/tools/publisher/publish_report_viewer/window.py +++ b/openpype/tools/publisher/publish_report_viewer/window.py @@ -367,6 +367,7 @@ class LoadedFilesView(QtWidgets.QTreeView): def _on_rows_inserted(self): header = self.header() header.resizeSections(header.ResizeToContents) + self._update_remove_btn() def resizeEvent(self, event): super(LoadedFilesView, self).resizeEvent(event) diff --git 
a/openpype/tools/publisher/widgets/__init__.py b/openpype/tools/publisher/widgets/__init__.py index a02c69d5e0..042985b007 100644 --- a/openpype/tools/publisher/widgets/__init__.py +++ b/openpype/tools/publisher/widgets/__init__.py @@ -8,6 +8,7 @@ from .widgets import ( ResetBtn, ValidateBtn, PublishBtn, + CreateNextPageOverlay, ) from .help_widget import ( HelpButton, @@ -28,6 +29,7 @@ __all__ = ( "ResetBtn", "ValidateBtn", "PublishBtn", + "CreateNextPageOverlay", "HelpButton", "HelpDialog", diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py index 9fd2bf0824..09635d1a15 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/openpype/tools/publisher/widgets/card_view_widgets.py @@ -674,9 +674,16 @@ class InstanceCardView(AbstractInstanceView): instances_by_group[group_name] ) - self._update_ordered_group_nameS() + self._update_ordered_group_names() - def _update_ordered_group_nameS(self): + def has_items(self): + if self._convertor_items_group is not None: + return True + if self._widgets_by_group: + return True + return False + + def _update_ordered_group_names(self): ordered_group_names = [CONTEXT_GROUP] for idx in range(self._content_layout.count()): if idx > 0: diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index 32d84862f0..1cdb4cdcdb 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -912,6 +912,13 @@ class InstanceListView(AbstractInstanceView): if not self._instance_view.isExpanded(proxy_index): self._instance_view.expand(proxy_index) + def has_items(self): + if self._convertor_group_widget is not None: + return True + if self._group_items: + return True + return False + def get_selected_items(self): """Get selected instance ids and context selection. diff --git a/openpype/tools/publisher/widgets/overview_widget.py b/openpype/tools/publisher/widgets/overview_widget.py index be3839b90b..b1aeda9cd4 100644 --- a/openpype/tools/publisher/widgets/overview_widget.py +++ b/openpype/tools/publisher/widgets/overview_widget.py @@ -195,6 +195,20 @@ class OverviewWidget(QtWidgets.QFrame): self._subset_views_widget.setMaximumWidth(view_width) self._change_anim.start() + def get_subset_views_geo(self): + parent = self._subset_views_widget.parent() + global_pos = parent.mapToGlobal(self._subset_views_widget.pos()) + return QtCore.QRect( + global_pos.x(), + global_pos.y(), + self._subset_views_widget.width(), + self._subset_views_widget.height() + ) + + def has_items(self): + view = self._subset_views_layout.currentWidget() + return view.has_items() + def _on_create_clicked(self): """Pass signal to parent widget which should care about changing state. 
diff --git a/openpype/tools/publisher/widgets/precreate_widget.py b/openpype/tools/publisher/widgets/precreate_widget.py index ef34c9bcb5..b688a83053 100644 --- a/openpype/tools/publisher/widgets/precreate_widget.py +++ b/openpype/tools/publisher/widgets/precreate_widget.py @@ -1,6 +1,6 @@ from Qt import QtWidgets, QtCore -from openpype.widgets.attribute_defs import create_widget_for_attr_def +from openpype.tools.attribute_defs import create_widget_for_attr_def class PreCreateWidget(QtWidgets.QWidget): diff --git a/openpype/tools/publisher/widgets/tabs_widget.py b/openpype/tools/publisher/widgets/tabs_widget.py index 84638a002c..d8ad19cfc0 100644 --- a/openpype/tools/publisher/widgets/tabs_widget.py +++ b/openpype/tools/publisher/widgets/tabs_widget.py @@ -54,6 +54,9 @@ class PublisherTabsWidget(QtWidgets.QFrame): self._buttons_by_identifier = {} def is_current_tab(self, identifier): + if isinstance(identifier, int): + identifier = self.get_tab_by_index(identifier) + if isinstance(identifier, PublisherTabBtn): identifier = identifier.identifier return self._current_identifier == identifier @@ -68,7 +71,16 @@ class PublisherTabsWidget(QtWidgets.QFrame): self.set_current_tab(identifier) return button + def get_tab_by_index(self, index): + if 0 <= index < self._btns_layout.count(): + item = self._btns_layout.itemAt(index) + return item.widget() + return None + def set_current_tab(self, identifier): + if isinstance(identifier, int): + identifier = self.get_tab_by_index(identifier) + if isinstance(identifier, PublisherTabBtn): identifier = identifier.identifier diff --git a/openpype/tools/publisher/widgets/validations_widget.py b/openpype/tools/publisher/widgets/validations_widget.py index 8c483e8088..935a12bc73 100644 --- a/openpype/tools/publisher/widgets/validations_widget.py +++ b/openpype/tools/publisher/widgets/validations_widget.py @@ -511,7 +511,7 @@ class ValidationsWidget(QtWidgets.QFrame): ) # After success publishing publish_started_widget = ValidationArtistMessage( - "Publishing went smoothly", self + "So far so good", self ) # After success publishing publish_stop_ok_widget = ValidationArtistMessage( diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py index f0c1a6df80..4b9626154d 100644 --- a/openpype/tools/publisher/widgets/widgets.py +++ b/openpype/tools/publisher/widgets/widgets.py @@ -9,7 +9,8 @@ import collections from Qt import QtWidgets, QtCore, QtGui import qtawesome -from openpype.widgets.attribute_defs import create_widget_for_attr_def +from openpype.lib.attribute_definitions import UnknownDef +from openpype.tools.attribute_defs import create_widget_for_attr_def from openpype.tools import resources from openpype.tools.flickcharm import FlickCharm from openpype.tools.utils import ( @@ -305,6 +306,20 @@ class AbstractInstanceView(QtWidgets.QWidget): "{} Method 'refresh' is not implemented." ).format(self.__class__.__name__)) + def has_items(self): + """View has at least one item. + + This is more a question for the controller but is called from a widget + which probably should not use the controller. + + Returns: + bool: There is at least one instance or conversion item. + """ + + raise NotImplementedError(( + "{} Method 'has_items' is not implemented." + ).format(self.__class__.__name__)) + def get_selected_items(self): """Selected instances required for callbacks. 
@@ -578,6 +593,11 @@ class TasksCombobox(QtWidgets.QComboBox): self._text = None + # Make sure combobox is extended horizontally + size_policy = self.sizePolicy() + size_policy.setHorizontalPolicy(size_policy.MinimumExpanding) + self.setSizePolicy(size_policy) + def set_invalid_empty_task(self, invalid=True): self._proxy_model.set_filter_empty(invalid) if invalid: @@ -1180,7 +1200,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget): """Set currently selected instances. Args: - instances(list): List of selected instances. + instances(List[CreatedInstance]): List of selected instances. Empty instances tells that nothing or context is selected. """ self._set_btns_visible(False) @@ -1229,7 +1249,7 @@ class CreatorAttrsWidget(QtWidgets.QWidget): Attributes are defined on creator so are dynamic. Their look and type is based on attribute definitions that are defined in `~/openpype/pipeline/lib/attribute_definitions.py` and their widget - representation in `~/openpype/widgets/attribute_defs/*`. + representation in `~/openpype/tools/attribute_defs/*`. Widgets are disabled if context of instance is not valid. @@ -1303,6 +1323,13 @@ class CreatorAttrsWidget(QtWidgets.QWidget): else: widget.set_value(values, True) + widget.value_changed.connect(self._input_value_changed) + self._attr_def_id_to_instances[attr_def.id] = attr_instances + self._attr_def_id_to_attr_def[attr_def.id] = attr_def + + if attr_def.hidden: + continue + expand_cols = 2 if attr_def.is_value_def and attr_def.is_label_horizontal: expand_cols = 1 @@ -1321,13 +1348,8 @@ class CreatorAttrsWidget(QtWidgets.QWidget): content_layout.addWidget( widget, row, col_num, 1, expand_cols ) - row += 1 - widget.value_changed.connect(self._input_value_changed) - self._attr_def_id_to_instances[attr_def.id] = attr_instances - self._attr_def_id_to_attr_def[attr_def.id] = attr_def - self._scroll_area.setWidget(content_widget) self._content_widget = content_widget @@ -1353,7 +1375,7 @@ class PublishPluginAttrsWidget(QtWidgets.QWidget): Look and type of attributes is based on attribute definitions that are defined in `~/openpype/pipeline/lib/attribute_definitions.py` and their - widget representation in `~/openpype/widgets/attribute_defs/*`. + widget representation in `~/openpype/tools/attribute_defs/*`. Widgets are disabled if context of instance is not valid. @@ -1421,8 +1443,17 @@ class PublishPluginAttrsWidget(QtWidgets.QWidget): widget = create_widget_for_attr_def( attr_def, content_widget ) - label = attr_def.label or attr_def.key - content_layout.addRow(label, widget) + hidden_widget = attr_def.hidden + # Hide unknown values of publish plugins + # - The keys in most of cases does not represent what would + # label represent + if isinstance(attr_def, UnknownDef): + widget.setVisible(False) + hidden_widget = True + + if not hidden_widget: + label = attr_def.label or attr_def.key + content_layout.addRow(label, widget) widget.value_changed.connect(self._input_value_changed) @@ -1614,6 +1645,7 @@ class SubsetAttributesWidget(QtWidgets.QWidget): instances(List[CreatedInstance]): List of currently selected instances. context_selected(bool): Is context selected. + convertor_identifiers(List[str]): Identifiers of convert items. 
""" all_valid = True @@ -1708,3 +1740,159 @@ class SubsetAttributesWidget(QtWidgets.QWidget): self._thumbnail_widget.setVisible(True) self._thumbnail_widget.set_current_thumbnails(thumbnail_paths) + + +class CreateNextPageOverlay(QtWidgets.QWidget): + clicked = QtCore.Signal() + + def __init__(self, parent): + super(CreateNextPageOverlay, self).__init__(parent) + self.setCursor(QtCore.Qt.PointingHandCursor) + self._arrow_color = ( + get_objected_colors("font").get_qcolor() + ) + self._bg_color = ( + get_objected_colors("bg-buttons").get_qcolor() + ) + + change_anim = QtCore.QVariantAnimation() + change_anim.setStartValue(0.0) + change_anim.setEndValue(1.0) + change_anim.setDuration(200) + change_anim.setEasingCurve(QtCore.QEasingCurve.OutCubic) + + change_anim.valueChanged.connect(self._on_anim) + + self._change_anim = change_anim + self._is_visible = None + self._anim_value = 0.0 + self._increasing = False + self._under_mouse = None + self._handle_show_on_own = True + self._mouse_pressed = False + self.set_visible(True) + + def set_increasing(self, increasing): + if self._increasing is increasing: + return + self._increasing = increasing + if increasing: + self._change_anim.setDirection(self._change_anim.Forward) + else: + self._change_anim.setDirection(self._change_anim.Backward) + + if self._change_anim.state() != self._change_anim.Running: + self._change_anim.start() + + def set_visible(self, visible): + if self._is_visible is visible: + return + + self._is_visible = visible + if not visible: + self.set_increasing(False) + if not self._is_anim_finished(): + return + + self.setVisible(visible) + self._check_anim_timer() + + def _is_anim_finished(self): + if self._increasing: + return self._anim_value == 1.0 + return self._anim_value == 0.0 + + def _on_anim(self, value): + self._check_anim_timer() + + self._anim_value = value + + self.update() + + if not self._is_anim_finished(): + return + + if not self._is_visible: + self.setVisible(False) + + def set_under_mouse(self, under_mouse): + if self._under_mouse is under_mouse: + return + + self._under_mouse = under_mouse + self.set_increasing(under_mouse) + + def _is_under_mouse(self): + mouse_pos = self.mapFromGlobal(QtGui.QCursor.pos()) + under_mouse = self.rect().contains(mouse_pos) + return under_mouse + + def _check_anim_timer(self): + if not self.isVisible(): + return + + self.set_increasing(self._under_mouse) + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self._mouse_pressed = True + super(CreateNextPageOverlay, self).mousePressEvent(event) + + def mouseReleaseEvent(self, event): + if self._mouse_pressed: + self._mouse_pressed = False + if self.rect().contains(event.pos()): + self.clicked.emit() + + super(CreateNextPageOverlay, self).mouseReleaseEvent(event) + + def paintEvent(self, event): + painter = QtGui.QPainter() + painter.begin(self) + if self._anim_value == 0.0: + painter.end() + return + + painter.setClipRect(event.rect()) + painter.setRenderHints( + painter.Antialiasing + | painter.SmoothPixmapTransform + ) + + painter.setPen(QtCore.Qt.NoPen) + + rect = QtCore.QRect(self.rect()) + rect_width = rect.width() + rect_height = rect.height() + radius = rect_width * 0.2 + + x_offset = 0 + y_offset = 0 + if self._anim_value != 1.0: + x_offset += rect_width - (rect_width * self._anim_value) + + arrow_height = rect_height * 0.4 + arrow_half_height = arrow_height * 0.5 + arrow_x_start = x_offset + ((rect_width - arrow_half_height) * 0.5) + arrow_x_end = arrow_x_start + arrow_half_height + center_y 
= rect.center().y() + + painter.setBrush(self._bg_color) + painter.drawRoundedRect( + x_offset, y_offset, + rect_width + radius, rect_height, + radius, radius + ) + + src_arrow_path = QtGui.QPainterPath() + src_arrow_path.moveTo(arrow_x_start, center_y - arrow_half_height) + src_arrow_path.lineTo(arrow_x_end, center_y) + src_arrow_path.lineTo(arrow_x_start, center_y + arrow_half_height) + + arrow_stroker = QtGui.QPainterPathStroker() + arrow_stroker.setWidth(min(4, arrow_half_height * 0.2)) + arrow_path = arrow_stroker.createStroke(src_arrow_path) + + painter.fillPath(arrow_path, self._arrow_color) + + painter.end() diff --git a/openpype/tools/publisher/window.py b/openpype/tools/publisher/window.py index f07995acc6..0f7fd2c7e3 100644 --- a/openpype/tools/publisher/window.py +++ b/openpype/tools/publisher/window.py @@ -29,6 +29,8 @@ from .widgets import ( HelpButton, HelpDialog, + + CreateNextPageOverlay, ) @@ -154,7 +156,7 @@ class PublisherWindow(QtWidgets.QDialog): footer_layout.addWidget(footer_bottom_widget, 0) # Content - # - wrap stacked widget under one more widget to be able propagate + # - wrap stacked widget under one more widget to be able to propagate # margins (QStackedLayout can't have margins) content_widget = QtWidgets.QWidget(under_publish_widget) @@ -225,8 +227,8 @@ class PublisherWindow(QtWidgets.QDialog): # Floating publish frame publish_frame = PublishFrame(controller, self.footer_border, self) - # Timer started on show -> connected to timer counter - # - helps to deffer on show logic by 3 event loops + create_overlay_button = CreateNextPageOverlay(self) + show_timer = QtCore.QTimer() show_timer.setInterval(1) show_timer.timeout.connect(self._on_show_timer) @@ -255,6 +257,9 @@ class PublisherWindow(QtWidgets.QDialog): publish_btn.clicked.connect(self._on_publish_clicked) publish_frame.details_page_requested.connect(self._go_to_details_tab) + create_overlay_button.clicked.connect( + self._on_create_overlay_button_click + ) controller.event_system.add_callback( "instances.refresh.finished", self._on_instances_refresh @@ -262,6 +267,9 @@ class PublisherWindow(QtWidgets.QDialog): controller.event_system.add_callback( "publish.reset.finished", self._on_publish_reset ) + controller.event_system.add_callback( + "controller.reset.finished", self._on_controller_reset + ) controller.event_system.add_callback( "publish.process.started", self._on_publish_start ) @@ -310,6 +318,7 @@ class PublisherWindow(QtWidgets.QDialog): self._publish_overlay = publish_overlay self._publish_frame = publish_frame + self._content_widget = content_widget self._content_stacked_layout = content_stacked_layout self._overview_widget = overview_widget @@ -331,25 +340,39 @@ class PublisherWindow(QtWidgets.QDialog): self._controller = controller self._first_show = True + self._first_reset = True # This is a little bit confusing but 'reset_on_first_show' is too long - # forin init + # for init self._reset_on_first_show = reset_on_show self._reset_on_show = True self._publish_frame_visible = None + self._tab_on_reset = None self._error_messages_to_show = collections.deque() self._errors_dialog_message_timer = errors_dialog_message_timer self._set_publish_visibility(False) + self._create_overlay_button = create_overlay_button + self._app_event_listener_installed = False + self._show_timer = show_timer self._show_counter = 0 + self._window_is_visible = False @property def controller(self): return self._controller + def make_sure_is_visible(self): + if self._window_is_visible: + 
self.setWindowState(QtCore.Qt.ActiveWindow) + + else: + self.show() + def showEvent(self, event): + self._window_is_visible = True super(PublisherWindow, self).showEvent(event) if self._first_show: self._first_show = False @@ -360,6 +383,45 @@ class PublisherWindow(QtWidgets.QDialog): def resizeEvent(self, event): super(PublisherWindow, self).resizeEvent(event) self._update_publish_frame_rect() + self._update_create_overlay_size() + + def closeEvent(self, event): + self._window_is_visible = False + self._uninstall_app_event_listener() + self.save_changes() + self._reset_on_show = True + self._controller.clear_thumbnail_temp_dir_path() + super(PublisherWindow, self).closeEvent(event) + + def leaveEvent(self, event): + super(PublisherWindow, self).leaveEvent(event) + self._update_create_overlay_visibility() + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.MouseMove: + self._update_create_overlay_visibility(event.globalPos()) + return super(PublisherWindow, self).eventFilter(obj, event) + + def _install_app_event_listener(self): + if self._app_event_listener_installed: + return + self._app_event_listener_installed = True + app = QtWidgets.QApplication.instance() + app.installEventFilter(self) + + def _uninstall_app_event_listener(self): + if not self._app_event_listener_installed: + return + self._app_event_listener_installed = False + app = QtWidgets.QApplication.instance() + app.removeEventFilter(self) + + def keyPressEvent(self, event): + # Ignore escape key so it does not close the window + if event.key() == QtCore.Qt.Key_Escape: + event.accept() + return + super(PublisherWindow, self).keyPressEvent(event) def _on_overlay_message(self, event): self._overlay_object.add_message( @@ -383,17 +445,16 @@ class PublisherWindow(QtWidgets.QDialog): # Reset counter when done for next show event self._show_counter = 0 + self._update_create_overlay_size() + self._update_create_overlay_visibility() + if self._is_on_create_tab(): + self._install_app_event_listener() + # Reset if requested if self._reset_on_show: self._reset_on_show = False self.reset() - def closeEvent(self, event): - self.save_changes() - self._reset_on_show = True - self._controller.clear_thumbnail_temp_dir_path() - super(PublisherWindow, self).closeEvent(event) - def save_changes(self): self._controller.save_changes() @@ -403,8 +464,21 @@ class PublisherWindow(QtWidgets.QDialog): def set_context_label(self, label): self._context_label.setText(label) + def set_tab_on_reset(self, tab): + """Define tab that will be selected on window show. + + This is a single use method, when the publisher window is shown the value is + unset and not used on the next show. + + Args: + tab (Union[int, Literal["create", "publish", "details", "report"]]): Index + or name of tab which will be selected on show (after reset). 
+ """ + + self._tab_on_reset = tab + def _update_publish_details_widget(self, force=False): - if not force and self._tabs_widget.current_tab() != "details": + if not force and not self._is_on_details_tab(): return report_data = self.controller.get_publish_report() @@ -434,6 +508,10 @@ class PublisherWindow(QtWidgets.QDialog): self._help_dialog.width(), self._help_dialog.height() ) + def _on_create_overlay_button_click(self): + self._create_overlay_button.set_under_mouse(False) + self._go_to_publish_tab() + def _on_tab_change(self, old_tab, new_tab): if old_tab == "details": self._publish_details_widget.close_details_popup() @@ -458,20 +536,53 @@ class PublisherWindow(QtWidgets.QDialog): self._report_widget ) + is_create = new_tab == "create" + if is_create: + self._install_app_event_listener() + else: + self._uninstall_app_event_listener() + self._create_overlay_button.set_visible(is_create) + def _on_context_or_active_change(self): self._validate_create_instances() def _on_create_request(self): self._go_to_create_tab() + def _set_current_tab(self, identifier): + self._tabs_widget.set_current_tab(identifier) + + def set_current_tab(self, tab): + self._set_current_tab(tab) + if not self._window_is_visible: + self.set_tab_on_reset(tab) + + def _is_current_tab(self, identifier): + return self._tabs_widget.is_current_tab(identifier) + def _go_to_create_tab(self): - self._tabs_widget.set_current_tab("create") + self._set_current_tab("create") + + def _go_to_publish_tab(self): + self._set_current_tab("publish") def _go_to_details_tab(self): - self._tabs_widget.set_current_tab("details") + self._set_current_tab("details") def _go_to_report_tab(self): - self._tabs_widget.set_current_tab("report") + self._set_current_tab("report") + + def _is_on_create_tab(self): + return self._is_current_tab("create") + + def _is_on_publish_tab(self): + return self._is_current_tab("publish") + + def _is_on_details_tab(self): + return self._is_current_tab("details") + + def _is_on_report_tab(self): + return self._is_current_tab("report") def _set_publish_overlay_visibility(self, visible): if visible: @@ -523,11 +634,33 @@ class PublisherWindow(QtWidgets.QDialog): self._set_publish_visibility(False) self._set_footer_enabled(False) self._update_publish_details_widget() - if ( - not self._tabs_widget.is_current_tab("create") - and not self._tabs_widget.is_current_tab("publish") + + def _on_controller_reset(self): + self._first_reset, first_reset = False, self._first_reset + if self._tab_on_reset is not None: + self._tab_on_reset, new_tab = None, self._tab_on_reset + self._set_current_tab(new_tab) + return + + # On first reset change tab based on available items + # - if there is at least one instance the tab is changed to 'publish' + # otherwise 'create' is used + # - this happens only on first show + if first_reset: + if self._overview_widget.has_items(): + self._go_to_publish_tab() + else: + self._go_to_create_tab() + + elif ( + not self._is_on_create_tab() + and not self._is_on_publish_tab() ): - self._tabs_widget.set_current_tab("publish") + # If current tab is not 'Create' or 'Publish' go to 'Publish' + # - this can happen when publishing started and was reset + # at that moment it doesn't make sense to stay at publish + # specific tabs. 
+ self._go_to_publish_tab() def _on_publish_start(self): self._create_tab.setEnabled(False) @@ -543,8 +676,8 @@ class PublisherWindow(QtWidgets.QDialog): self._publish_details_widget.close_details_popup() - if self._tabs_widget.is_current_tab(self._create_tab): - self._tabs_widget.set_current_tab("publish") + if self._is_on_create_tab(): + self._go_to_publish_tab() def _on_publish_validated_change(self, event): if event["value"]: @@ -557,7 +690,7 @@ class PublisherWindow(QtWidgets.QDialog): publish_has_crashed = self._controller.publish_has_crashed validate_enabled = not publish_has_crashed publish_enabled = not publish_has_crashed - if self._tabs_widget.is_current_tab("publish"): + if self._is_on_publish_tab(): self._go_to_report_tab() if validate_enabled: @@ -661,6 +794,36 @@ class PublisherWindow(QtWidgets.QDialog): event["title"], new_failed_info, "Convertor:" ) + def _update_create_overlay_size(self): + metrics = self._create_overlay_button.fontMetrics() + height = int(metrics.height()) + width = int(height * 0.7) + end_pos_x = self.width() + start_pos_x = end_pos_x - width + + center = self._content_widget.parent().mapTo( + self, + self._content_widget.rect().center() + ) + pos_y = center.y() - (height * 0.5) + + self._create_overlay_button.setGeometry( + start_pos_x, pos_y, + width, height + ) + + def _update_create_overlay_visibility(self, global_pos=None): + if global_pos is None: + global_pos = QtGui.QCursor.pos() + + under_mouse = False + my_pos = self.mapFromGlobal(global_pos) + if self.rect().contains(my_pos): + widget_geo = self._overview_widget.get_subset_views_geo() + widget_x = widget_geo.left() + (widget_geo.width() * 0.5) + under_mouse = widget_x < global_pos.x() + self._create_overlay_button.set_under_mouse(under_mouse) + class ErrorsMessageBox(ErrorMessageBox): def __init__(self, error_title, failed_info, message_start, parent): diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py index f4b2c13a12..e1b3943317 100644 --- a/openpype/tools/settings/settings/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -892,6 +892,10 @@ class ProjectWidget(SettingsCategoryWidget): def __init__(self, *args, **kwargs): super(ProjectWidget, self).__init__(*args, **kwargs) + def set_edit_mode(self, enabled): + super(ProjectWidget, self).set_edit_mode(enabled) + self.project_list_widget.set_edit_mode(enabled) + def _check_last_saved_info(self): if self.is_modifying_defaults: return True diff --git a/openpype/tools/settings/settings/constants.py b/openpype/tools/settings/settings/constants.py index d98d18c8bf..23526e4de9 100644 --- a/openpype/tools/settings/settings/constants.py +++ b/openpype/tools/settings/settings/constants.py @@ -24,7 +24,6 @@ __all__ = ( "SETTINGS_PATH_KEY", "ROOT_KEY", - "SETTINGS_PATH_KEY", "VALUE_KEY", "SAVE_TIME_KEY", "PROJECT_NAME_KEY", diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index 722717df89..b8ad21e7e4 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -646,6 +646,9 @@ class UnsavedChangesDialog(QtWidgets.QDialog): def __init__(self, parent=None): super(UnsavedChangesDialog, self).__init__(parent) + + self.setWindowTitle("Unsaved changes") + message_label = QtWidgets.QLabel(self.message) btns_widget = QtWidgets.QWidget(self) @@ -1009,6 +1012,7 @@ class ProjectListWidget(QtWidgets.QWidget): self._entity = None self.current_project = None + self._edit_mode = True 
super(ProjectListWidget, self).__init__(parent) self.setObjectName("ProjectListWidget") @@ -1061,6 +1065,10 @@ class ProjectListWidget(QtWidgets.QWidget): self.project_model = project_model self.inactive_chk = inactive_chk + def set_edit_mode(self, enabled): + if self._edit_mode is not enabled: + self._edit_mode = enabled + def set_entity(self, entity): self._entity = entity @@ -1112,7 +1120,7 @@ class ProjectListWidget(QtWidgets.QWidget): save_changes = False change_project = False - if self.validate_context_change(): + if not self._edit_mode or self.validate_context_change(): change_project = True else: diff --git a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py index f8a8273b26..18c2b27678 100644 --- a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py +++ b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py @@ -178,7 +178,7 @@ class DropDataFrame(QtWidgets.QFrame): paths = self._get_all_paths(in_paths) collectionable_paths = [] non_collectionable_paths = [] - for path in in_paths: + for path in paths: ext = os.path.splitext(path)[1] if ext in self.image_extensions or ext in self.sequence_types: collectionable_paths.append(path) diff --git a/openpype/tools/tray/pype_tray.py b/openpype/tools/tray/pype_tray.py index 3842a4e216..d4189af4d8 100644 --- a/openpype/tools/tray/pype_tray.py +++ b/openpype/tools/tray/pype_tray.py @@ -401,7 +401,7 @@ class TrayManager: def initialize_modules(self): """Add modules to tray.""" - from openpype_interfaces import ( + from openpype.modules import ( ITrayAction, ITrayService ) diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index 046dcbdf6a..e8593a8ae2 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -285,14 +285,12 @@ class HostToolsHelper: return self._publisher_tool - def show_publisher_tool(self, parent=None, controller=None): + def show_publisher_tool(self, parent=None, controller=None, tab=None): with qt_app_context(): - dialog = self.get_publisher_tool(parent, controller) - - dialog.show() - dialog.raise_() - dialog.activateWindow() - dialog.showNormal() + window = self.get_publisher_tool(parent, controller) + if tab: + window.set_current_tab(tab) + window.make_sure_is_visible() def get_tool_by_name(self, tool_name, parent=None, *args, **kwargs): """Show tool by it's name. 
@@ -446,8 +444,8 @@ def show_publish(parent=None): _SingletonPoint.show_tool_by_name("publish", parent) -def show_publisher(parent=None): - _SingletonPoint.show_tool_by_name("publisher", parent) +def show_publisher(parent=None, **kwargs): + _SingletonPoint.show_tool_by_name("publisher", parent, **kwargs) def show_experimental_tools_dialog(parent=None): diff --git a/openpype/tools/utils/widgets.py b/openpype/tools/utils/widgets.py index 13225081ed..88893a57d5 100644 --- a/openpype/tools/utils/widgets.py +++ b/openpype/tools/utils/widgets.py @@ -3,10 +3,12 @@ import logging from Qt import QtWidgets, QtCore, QtGui import qargparse import qtawesome + from openpype.style import ( get_objected_colors, get_style_image_path ) +from openpype.lib.attribute_definitions import AbtractAttrDef log = logging.getLogger(__name__) @@ -401,8 +403,26 @@ class OptionalAction(QtWidgets.QWidgetAction): def set_option_tip(self, options): sep = "\n\n" - mak = (lambda opt: opt["name"] + " :\n " + opt["help"]) - self.option_tip = sep.join(mak(opt) for opt in options) + if not options or not isinstance(options[0], AbtractAttrDef): + mak = (lambda opt: opt["name"] + " :\n " + opt["help"]) + self.option_tip = sep.join(mak(opt) for opt in options) + return + + option_items = [] + for option in options: + option_lines = [] + if option.label: + option_lines.append( + "{} ({}) :".format(option.label, option.key) + ) + else: + option_lines.append("{} :".format(option.key)) + + if option.tooltip: + option_lines.append(" - {}".format(option.tooltip)) + option_items.append("\n".join(option_lines)) + + self.option_tip = sep.join(option_items) def on_option(self): self.optioned = True diff --git a/openpype/tools/workfile_template_build/window.py b/openpype/tools/workfile_template_build/window.py index ea4e2fec5a..22e26be451 100644 --- a/openpype/tools/workfile_template_build/window.py +++ b/openpype/tools/workfile_template_build/window.py @@ -3,7 +3,7 @@ from Qt import QtWidgets from openpype import style from openpype.lib import Logger from openpype.pipeline import legacy_io -from openpype.widgets.attribute_defs import AttributeDefinitionsWidget +from openpype.tools.attribute_defs import AttributeDefinitionsWidget class WorkfileBuildPlaceholderDialog(QtWidgets.QDialog): diff --git a/openpype/vendor/python/python_2/secrets/LICENSE b/openpype/vendor/python/python_2/secrets/LICENSE new file mode 100644 index 0000000000..d3211e4d9f --- /dev/null +++ b/openpype/vendor/python/python_2/secrets/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2019 Scaleway + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/openpype/vendor/python/python_2/secrets/__init__.py b/openpype/vendor/python/python_2/secrets/__init__.py new file mode 100644 index 0000000000..c29ee61be1 --- /dev/null +++ b/openpype/vendor/python/python_2/secrets/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + + +__version__ = "1.0.6" + +# Emulates __all__ for Python2 +from .secrets import ( + choice, + randbelow, + randbits, + SystemRandom, + token_bytes, + token_hex, + token_urlsafe, + compare_digest +) diff --git a/openpype/vendor/python/python_2/secrets/secrets.py b/openpype/vendor/python/python_2/secrets/secrets.py new file mode 100644 index 0000000000..967d2862d9 --- /dev/null +++ b/openpype/vendor/python/python_2/secrets/secrets.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +"""Generate cryptographically strong pseudo-random numbers suitable for + +managing secrets such as account authentication, tokens, and similar. + + +See PEP 506 for more information. + +https://www.python.org/dev/peps/pep-0506/ + + +""" + + +__all__ = ['choice', 'randbelow', 'randbits', 'SystemRandom', + + 'token_bytes', 'token_hex', 'token_urlsafe', + + 'compare_digest', + + ] + +import os +import sys +from random import SystemRandom + +import base64 + +import binascii + + +# hmac.compare_digest did appear in python 2.7.7 +if sys.version_info >= (2, 7, 7): + from hmac import compare_digest +else: + # If we use an older python version, we will define an equivalent method + def compare_digest(a, b): + """Compatibility compare_digest method for python < 2.7. + This method is NOT cryptographically secure and may be subject to + timing attacks, see https://docs.python.org/2/library/hmac.html + """ + return a == b + + +_sysrand = SystemRandom() + + +randbits = _sysrand.getrandbits + +choice = _sysrand.choice + + +def randbelow(exclusive_upper_bound): + + """Return a random int in the range [0, n).""" + + if exclusive_upper_bound <= 0: + + raise ValueError("Upper bound must be positive.") + + return _sysrand._randbelow(exclusive_upper_bound) + + +DEFAULT_ENTROPY = 32 # number of bytes to return by default + + +def token_bytes(nbytes=None): + + """Return a random byte string containing *nbytes* bytes. + + + If *nbytes* is ``None`` or not supplied, a reasonable + + default is used. + + + >>> token_bytes(16) #doctest:+SKIP + + b'\\xebr\\x17D*t\\xae\\xd4\\xe3S\\xb6\\xe2\\xebP1\\x8b' + + + """ + + if nbytes is None: + + nbytes = DEFAULT_ENTROPY + + return os.urandom(nbytes) + + +def token_hex(nbytes=None): + + """Return a random text string, in hexadecimal. + + + The string has *nbytes* random bytes, each byte converted to two + + hex digits. If *nbytes* is ``None`` or not supplied, a reasonable + + default is used. + + + >>> token_hex(16) #doctest:+SKIP + + 'f9bf78b9a18ce6d46a0cd2b0b86df9da' + + + """ + + return binascii.hexlify(token_bytes(nbytes)).decode('ascii') + + +def token_urlsafe(nbytes=None): + + """Return a random URL-safe text string, in Base64 encoding. + + + The string has *nbytes* random bytes. If *nbytes* is ``None`` + + or not supplied, a reasonable default is used. 
+ + + >>> token_urlsafe(16) #doctest:+SKIP + + 'Drmhze6EPcv0fN_81Bj-nA' + + + """ + + tok = token_bytes(nbytes) + + return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') diff --git a/openpype/version.py b/openpype/version.py index 46bb4b1cd0..9a34c85bf8 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.14.7-nightly.2" +__version__ = "3.14.8-nightly.2" diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md index 24ea09b6fb..9666c6568a 100644 --- a/website/docs/project_settings/settings_project_global.md +++ b/website/docs/project_settings/settings_project_global.md @@ -135,6 +135,12 @@ Profile may generate multiple outputs from a single input. Each output must defi - set alpha to `0` to not use this option at all (in most of cases background stays black) - other than `0` alpha will draw color as background +- **`Additional filtering`** + - Profile filtering defines which group of output definitions is used, but each output definition may require more specific filters of its own. + - They may filter by subset name (regex can be used) or by publish families. Publish families are more complex to use as they require knowledge of the code base. + - Filtering by custom tags -> this is used to target output definitions from other extractors via settings (at this moment only the Nuke bake extractor can target them using custom tags). + - Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags` + - Filtering by input length. Input may be a video, a sequence or a single image. For example, an `.mp4` may be wanted only when the input is a video or a sequence, and a review `.png` only when the input is a single frame. In some cases the output should be created regardless of whether the input is a single frame or multiple frames. 
### IntegrateAssetNew diff --git a/website/yarn.lock b/website/yarn.lock index 04b9dd658b..220a489dfa 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -1543,15 +1543,37 @@ dependencies: "@hapi/hoek" "^9.0.0" +"@jridgewell/gen-mapping@^0.3.0": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9" + integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A== + dependencies: + "@jridgewell/set-array" "^1.0.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.9" + "@jridgewell/resolve-uri@^3.0.3": - version "3.0.5" - resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.0.5.tgz#68eb521368db76d040a6315cdb24bf2483037b9c" - integrity sha512-VPeQ7+wH0itvQxnG+lIzWgkysKIr3L9sslimFW55rHMdGu/qCQ5z5h9zq4gI8uBtqkpHhsF4Z/OwExufUCThew== + version "3.1.0" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78" + integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w== + +"@jridgewell/set-array@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" + integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== + +"@jridgewell/source-map@^0.3.2": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb" + integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw== + dependencies: + "@jridgewell/gen-mapping" "^0.3.0" + "@jridgewell/trace-mapping" "^0.3.9" "@jridgewell/sourcemap-codec@^1.4.10": - version "1.4.11" - resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.11.tgz#771a1d8d744eeb71b6adb35808e1a6c7b9b8c8ec" - integrity sha512-Fg32GrJo61m+VqYSdRSjRXMjQ06j8YIYfcTqndLYVAaHmroZHLJZCydsWBOTDqXS2v+mjxohBWEMfg97GXmYQg== + version "1.4.14" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24" + integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw== "@jridgewell/trace-mapping@^0.3.0": version "0.3.4" @@ -1561,6 +1583,14 @@ "@jridgewell/resolve-uri" "^3.0.3" "@jridgewell/sourcemap-codec" "^1.4.10" +"@jridgewell/trace-mapping@^0.3.9": + version "0.3.14" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.14.tgz#b231a081d8f66796e475ad588a1ef473112701ed" + integrity sha512-bJWEfQ9lPTvm3SneWwRFVLzrh6nhjwqw7TUFFBEMzwvg7t7PCDenf2lDwqo4NQXzdpgBXyFgDWnQA+2vkruksQ== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@mdx-js/mdx@1.6.22", "@mdx-js/mdx@^1.6.21": version "1.6.22" resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-1.6.22.tgz#8a723157bf90e78f17dc0f27995398e6c731f1ba" @@ -2140,10 +2170,10 @@ acorn@^6.1.1: resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.4.2.tgz#35866fd710528e92de10cf06016498e47e39e1e6" integrity sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ== -acorn@^8.0.4, acorn@^8.4.1: - version "8.7.0" - resolved 
"https://registry.yarnpkg.com/acorn/-/acorn-8.7.0.tgz#90951fde0f8f09df93549481e5fc141445b791cf" - integrity sha512-V/LGr1APy+PXIwKebEWrkZPwoeoF+w1jiOBUmuxuiUIaOHtob8Qc9BTrYo7VuI5fR8tqsy+buA2WFooR5olqvQ== +acorn@^8.0.4, acorn@^8.4.1, acorn@^8.5.0: + version "8.7.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.7.1.tgz#0197122c843d1bf6d0a5e83220a788f278f63c30" + integrity sha512-Xx54uLJQZ19lKygFXOWsscKUbsBZW0CPykPhVQdhIeIwrbPmJzqeASDInc8nKBnp/JT6igTs82qPXz069H8I/A== address@^1.0.1, address@^1.1.2: version "1.1.2" @@ -4782,9 +4812,9 @@ loader-runner@^4.2.0: integrity sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw== loader-utils@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.4.0.tgz#c579b5e34cb34b1a74edc6c1fb36bfa371d5a613" - integrity sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA== + version "1.4.2" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.4.2.tgz#29a957f3a63973883eb684f10ffd3d151fec01a3" + integrity sha512-I5d00Pd/jwMD2QCduo657+YM/6L3KZu++pmX9VFncxaxvHcru9jx1lBaFft+r4Mt2jK0Yhp41XlRAihzPxHNCg== dependencies: big.js "^5.2.2" emojis-list "^3.0.0" @@ -5124,7 +5154,12 @@ minimatch@^3.0.4: dependencies: brace-expansion "^1.1.7" -minimist@^1.2.0, minimist@^1.2.5: +minimist@^1.2.0: + version "1.2.7" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18" + integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g== + +minimist@^1.2.5: version "1.2.6" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== @@ -6838,11 +6873,6 @@ source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1: resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== -source-map@~0.7.2: - version "0.7.3" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383" - integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ== - sourcemap-codec@^1.4.4: version "1.4.8" resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4" @@ -7048,12 +7078,13 @@ terser-webpack-plugin@^5.1.3, terser-webpack-plugin@^5.2.4: terser "^5.7.2" terser@^5.10.0, terser@^5.7.2: - version "5.10.0" - resolved "https://registry.yarnpkg.com/terser/-/terser-5.10.0.tgz#b86390809c0389105eb0a0b62397563096ddafcc" - integrity sha512-AMmF99DMfEDiRJfxfY5jj5wNH/bYO09cniSqhfoyxc8sFoYIgkJy86G04UoZU5VjlpnplVu0K6Tx6E9b5+DlHA== + version "5.14.2" + resolved "https://registry.yarnpkg.com/terser/-/terser-5.14.2.tgz#9ac9f22b06994d736174f4091aa368db896f1c10" + integrity sha512-oL0rGeM/WFQCUd0y2QrWxYnq7tfSuKBiqTjRPWrRgB46WD/kiwHwF8T23z78H6Q6kGCuuHcPB+KULHRdxvVGQA== dependencies: + "@jridgewell/source-map" "^0.3.2" + acorn "^8.5.0" commander "^2.20.0" - source-map "~0.7.2" source-map-support "~0.5.20" text-table@^0.2.0: