diff --git a/.gitignore b/.gitignore index 7eaef69873..ea5b20eb69 100644 --- a/.gitignore +++ b/.gitignore @@ -102,5 +102,8 @@ website/.docusaurus .poetry/ .python-version +.editorconfig +.pre-commit-config.yaml +mypy.ini tools/run_eventserver.* diff --git a/.gitmodules b/.gitmodules index dfd89cdb3c..bac3132b77 100644 --- a/.gitmodules +++ b/.gitmodules @@ -5,3 +5,6 @@ [submodule "tools/modules/powershell/PSWriteColor"] path = tools/modules/powershell/PSWriteColor url = https://github.com/EvotecIT/PSWriteColor.git +[submodule "vendor/configs/OpenColorIO-Configs"] + path = vendor/configs/OpenColorIO-Configs + url = https://github.com/imageworks/OpenColorIO-Configs diff --git a/CHANGELOG.md b/CHANGELOG.md index ec880b9c61..2adb4ac154 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,80 @@ # Changelog -## [3.12.2-nightly.3](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.13.1-nightly.2](https://github.com/pypeclub/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...HEAD) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...HEAD) + +**🐛 Bug fixes** + +- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) +- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) + +**🔀 Refactored code** + +- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) +- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) + +**Merged pull requests:** + +- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) + +## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.13.0-nightly.1...3.13.0) + +**🆕 New features** + +- Support for mutliple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) + +**🚀 Enhancements** + +- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) +- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) +- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612) +- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) +- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) +- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) +- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) +- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) +- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) + +**🐛 Bug fixes** + +- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) +- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) +- Ftrack: Sync hierarchical attributes can handle new created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) +- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) +- Maya: Fix types of default settings 
[\#3617](https://github.com/pypeclub/OpenPype/pull/3617) +- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) +- AfterEffects: refactored integrate doesnt work formulti frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) +- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) +- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) +- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. [\#3590](https://github.com/pypeclub/OpenPype/pull/3590) +- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) +- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) +- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) +- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) +- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) +- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) +- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) + +**🔀 Refactored code** + +- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) +- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) +- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) +- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) +- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) + +**Merged pull requests:** + +- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) +- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) +- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) + +## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.2-nightly.4...3.12.2) ### 📖 Documentation @@ -11,21 +83,15 @@ **🚀 Enhancements** +- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) - Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) - General: Interactive console in cli [\#3526](https://github.com/pypeclub/OpenPype/pull/3526) - Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) -- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) -- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506) -- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505) -- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) -- General: Event system 
[\#3499](https://github.com/pypeclub/OpenPype/pull/3499) -- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) -- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) -- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) -- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469) **🐛 Bug fixes** +- Maya: Fix animated attributes \(ie. overscan\) on loaded cameras breaking review publishing. [\#3562](https://github.com/pypeclub/OpenPype/pull/3562) +- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) - Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) - General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) - Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) @@ -38,93 +104,28 @@ - General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) - Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) - TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) -- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) -- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) -- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) **🔀 Refactored code** +- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) +- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) - Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) - General: Client docstrings cleanup [\#3529](https://github.com/pypeclub/OpenPype/pull/3529) +- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) - General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) -- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496) -- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495) -- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466) + +**Merged pull requests:** + +- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566) ## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.1-nightly.6...3.12.1) -### 📖 Documentation - -- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) - -**🆕 New features** - -- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433) - -**🚀 Enhancements** - -- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494) -- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487) -- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484) -- 
General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) -- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) -- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) -- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445) -- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426) -- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425) - -**🐛 Bug fixes** - -- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493) -- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491) -- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485) -- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479) -- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478) -- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474) -- Kitsu: bugfix with sync-service ans publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473) -- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470) -- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468) -- Resolve: removed few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464) -- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462) -- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455) -- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450) -- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447) -- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443) -- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427) - -**🔀 Refactored code** - -- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461) -- Maya: Re-use `maintained\_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460) -- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459) -- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458) -- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457) -- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454) -- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446) -- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442) -- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436) -- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435) - ## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) [Full 
Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.0-nightly.3...3.12.0) -**🚀 Enhancements** - -- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422) - -**🐛 Bug fixes** - -- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420) -- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418) - -**🔀 Refactored code** - -- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421) -- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419) - ## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.11.1-nightly.1...3.11.1) diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index 08333885c0..750b2f1bf7 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -122,7 +122,7 @@ class OpenPypeVersion(semver.VersionInfo): if self.staging: if kwargs.get("build"): if "staging" not in kwargs.get("build"): - kwargs["build"] = "{}-staging".format(kwargs.get("build")) + kwargs["build"] = f"{kwargs.get('build')}-staging" else: kwargs["build"] = "staging" @@ -136,8 +136,7 @@ class OpenPypeVersion(semver.VersionInfo): return bool(result and self.staging == other.staging) def __repr__(self): - return "<{}: {} - path={}>".format( - self.__class__.__name__, str(self), self.path) + return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>" def __lt__(self, other: OpenPypeVersion): result = super().__lt__(other) @@ -232,10 +231,7 @@ class OpenPypeVersion(semver.VersionInfo): return openpype_version def __hash__(self): - if self.path: - return hash(self.path) - else: - return hash(str(self)) + return hash(self.path) if self.path else hash(str(self)) @staticmethod def is_version_in_dir( @@ -384,7 +380,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_local_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None, compatible_with: OpenPypeVersion = None ) -> List: """Get all versions available on this machine. @@ -394,6 +391,8 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. + compatible_with (OpenPypeVersion): Return only those compatible + with specified version. 
""" # Return all local versions if arguments are set to None if production is None and staging is None: @@ -410,10 +409,19 @@ class OpenPypeVersion(semver.VersionInfo): if not production and not staging: return [] + # DEPRECATED: backwards compatible way to look for versions in root dir_to_search = Path(user_data_dir("openpype", "pypeclub")) versions = OpenPypeVersion.get_versions_from_directory( - dir_to_search + dir_to_search, compatible_with=compatible_with ) + if compatible_with: + dir_to_search = Path( + user_data_dir("openpype", "pypeclub")) / f"{compatible_with.major}.{compatible_with.minor}" # noqa + versions += OpenPypeVersion.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with + ) + + filtered_versions = [] for version in versions: if version.is_staging(): @@ -425,7 +433,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_remote_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None, compatible_with: OpenPypeVersion = None ) -> List: """Get all versions available in OpenPype Path. @@ -435,6 +444,8 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. + compatible_with (OpenPypeVersion): Return only those compatible + with specified version. """ # Return all local versions if arguments are set to None if production is None and staging is None: @@ -468,7 +479,14 @@ class OpenPypeVersion(semver.VersionInfo): if not dir_to_search: return [] - versions = cls.get_versions_from_directory(dir_to_search) + # DEPRECATED: look for version in root directory + versions = cls.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with) + if compatible_with: + dir_to_search = dir_to_search / f"{compatible_with.major}.{compatible_with.minor}" # noqa + versions += cls.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with) + filtered_versions = [] for version in versions: if version.is_staging(): @@ -479,11 +497,15 @@ class OpenPypeVersion(semver.VersionInfo): return list(sorted(set(filtered_versions))) @staticmethod - def get_versions_from_directory(openpype_dir: Path) -> List: + def get_versions_from_directory( + openpype_dir: Path, + compatible_with: OpenPypeVersion = None) -> List: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. + compatible_with (OpenPypeVersion): Return only versions compatible + with build version specified as OpenPypeVersion. Returns: list of OpenPypeVersion @@ -492,10 +514,10 @@ class OpenPypeVersion(semver.VersionInfo): ValueError: if invalid path is specified. """ - if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") - _openpype_versions = [] + if not openpype_dir.exists() and not openpype_dir.is_dir(): + return _openpype_versions + # iterate over directory in first level and find all that might # contain OpenPype. 
for item in openpype_dir.iterdir(): @@ -518,6 +540,10 @@ class OpenPypeVersion(semver.VersionInfo): )[0]: continue + if compatible_with and not detected_version.is_compatible( + compatible_with): + continue + detected_version.path = item _openpype_versions.append(detected_version) @@ -549,8 +575,9 @@ class OpenPypeVersion(semver.VersionInfo): def get_latest_version( staging: bool = False, local: bool = None, - remote: bool = None - ) -> OpenPypeVersion: + remote: bool = None, + compatible_with: OpenPypeVersion = None + ) -> Union[OpenPypeVersion, None]: """Get latest available version. The version does not contain information about path and source. @@ -568,6 +595,9 @@ class OpenPypeVersion(semver.VersionInfo): staging (bool, optional): List staging versions if True. local (bool, optional): List local versions if True. remote (bool, optional): List remote versions if True. + compatible_with (OpenPypeVersion, optional) Return only version + compatible with compatible_with. + """ if local is None and remote is None: local = True @@ -598,7 +628,12 @@ class OpenPypeVersion(semver.VersionInfo): return None all_versions.sort() - return all_versions[-1] + latest_version: OpenPypeVersion + latest_version = all_versions[-1] + if compatible_with and not latest_version.is_compatible( + compatible_with): + return None + return latest_version @classmethod def get_expected_studio_version(cls, staging=False, global_settings=None): @@ -621,6 +656,21 @@ class OpenPypeVersion(semver.VersionInfo): return None return OpenPypeVersion(version=result) + def is_compatible(self, version: OpenPypeVersion): + """Test build compatibility. + + This will simply compare major and minor versions (ignoring patch + and the rest). + + Args: + version (OpenPypeVersion): Version to check compatibility with. + + Returns: + bool: if the version is compatible + + """ + return self.major == version.major and self.minor == version.minor + class BootstrapRepos: """Class for bootstrapping local OpenPype installation. @@ -741,8 +791,9 @@ class BootstrapRepos: return # create destination directory - if not self.data_dir.exists(): - self.data_dir.mkdir(parents=True) + destination = self.data_dir / f"{installed_version.major}.{installed_version.minor}" # noqa + if not destination.exists(): + destination.mkdir(parents=True) # create zip inside temporary directory. with tempfile.TemporaryDirectory() as temp_dir: @@ -770,7 +821,9 @@ class BootstrapRepos: Path to moved zip on success. """ - destination = self.data_dir / zip_file.name + version = OpenPypeVersion.version_in_str(zip_file.name) + destination_dir = self.data_dir / f"{version.major}.{version.minor}" + destination = destination_dir / zip_file.name if destination.exists(): self._print( @@ -782,7 +835,7 @@ class BootstrapRepos: self._print(str(e), LOG_ERROR, exc_info=True) return None try: - shutil.move(zip_file.as_posix(), self.data_dir.as_posix()) + shutil.move(zip_file.as_posix(), destination_dir.as_posix()) except shutil.Error as e: self._print(str(e), LOG_ERROR, exc_info=True) return None @@ -995,6 +1048,16 @@ class BootstrapRepos: @staticmethod def _validate_dir(path: Path) -> tuple: + """Validate checksums in a given path. + + Args: + path (Path): path to folder to validate. + + Returns: + tuple(bool, str): returns status and reason as a bool + and str in a tuple. 
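For reference, the compatibility rule added by the new `is_compatible` method above is a plain major.minor match. A minimal self-contained sketch of that rule, using plain tuples in place of `OpenPypeVersion` (which is a `semver.VersionInfo` subclass):

```python
# Sketch of the build-compatibility rule: two versions are compatible
# when their major and minor components match; patch, prerelease and
# build parts are intentionally ignored.
def is_compatible(version, build):
    return version[:2] == build[:2]

build = (3, 13, 0)
candidates = [(3, 12, 2), (3, 13, 0), (3, 13, 1), (4, 0, 0)]
print([v for v in candidates if is_compatible(v, build)])
# -> [(3, 13, 0), (3, 13, 1)]
```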
+ + """ checksums_file = Path(path / "checksums") if not checksums_file.exists(): # FIXME: This should be set to False sometimes in the future @@ -1076,7 +1139,20 @@ class BootstrapRepos: sys.path.insert(0, directory.as_posix()) @staticmethod - def find_openpype_version(version, staging): + def find_openpype_version( + version: Union[str, OpenPypeVersion], + staging: bool, + compatible_with: OpenPypeVersion = None + ) -> Union[OpenPypeVersion, None]: + """Find location of specified OpenPype version. + + Args: + version (Union[str, OpenPypeVersion): Version to find. + staging (bool): Filter staging versions. + compatible_with (OpenPypeVersion, optional): Find only + versions compatible with specified one. + + """ if isinstance(version, str): version = OpenPypeVersion(version=version) @@ -1085,7 +1161,8 @@ class BootstrapRepos: return installed_version local_versions = OpenPypeVersion.get_local_versions( - staging=staging, production=not staging + staging=staging, production=not staging, + compatible_with=compatible_with ) zip_version = None for local_version in local_versions: @@ -1099,7 +1176,8 @@ class BootstrapRepos: return zip_version remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging, production=not staging + staging=staging, production=not staging, + compatible_with=compatible_with ) for remote_version in remote_versions: if remote_version == version: @@ -1107,13 +1185,14 @@ class BootstrapRepos: return None @staticmethod - def find_latest_openpype_version(staging): + def find_latest_openpype_version( + staging, compatible_with: OpenPypeVersion = None): installed_version = OpenPypeVersion.get_installed_version() local_versions = OpenPypeVersion.get_local_versions( - staging=staging + staging=staging, compatible_with=compatible_with ) remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging + staging=staging, compatible_with=compatible_with ) all_versions = local_versions + remote_versions if not staging: @@ -1138,7 +1217,9 @@ class BootstrapRepos: self, openpype_path: Union[Path, str] = None, staging: bool = False, - include_zips: bool = False) -> Union[List[OpenPypeVersion], None]: + include_zips: bool = False, + compatible_with: OpenPypeVersion = None + ) -> Union[List[OpenPypeVersion], None]: """Get ordered dict of detected OpenPype version. Resolution order for OpenPype is following: @@ -1154,6 +1235,8 @@ class BootstrapRepos: otherwise. include_zips (bool, optional): If set True it will try to find OpenPype in zip files in given directory. + compatible_with (OpenPypeVersion, optional): Find only those + versions compatible with the one specified. Returns: dict of Path: Dictionary of detected OpenPype version. @@ -1172,30 +1255,56 @@ class BootstrapRepos: ("Finding OpenPype in non-filesystem locations is" " not implemented yet.")) - dir_to_search = self.data_dir - user_versions = self.get_openpype_versions(self.data_dir, staging) - # if we have openpype_path specified, search only there. + version_dir = "" + if compatible_with: + version_dir = f"{compatible_with.major}.{compatible_with.minor}" + + # if checks bellow for OPENPYPE_PATH and registry fails, use data_dir + # DEPRECATED: lookup in root of this folder is deprecated in favour + # of major.minor sub-folders. 
+ dirs_to_search = [ + self.data_dir + ] + if compatible_with: + dirs_to_search.append(self.data_dir / version_dir) + if openpype_path: - dir_to_search = openpype_path + dirs_to_search = [openpype_path] + + if compatible_with: + dirs_to_search.append(openpype_path / version_dir) else: - if os.getenv("OPENPYPE_PATH"): - if Path(os.getenv("OPENPYPE_PATH")).exists(): - dir_to_search = Path(os.getenv("OPENPYPE_PATH")) + # first try OPENPYPE_PATH and if that is not available, + # try registry. + if os.getenv("OPENPYPE_PATH") \ + and Path(os.getenv("OPENPYPE_PATH")).exists(): + dirs_to_search = [Path(os.getenv("OPENPYPE_PATH"))] + + if compatible_with: + dirs_to_search.append( + Path(os.getenv("OPENPYPE_PATH")) / version_dir) else: try: registry_dir = Path( str(self.registry.get_item("openPypePath"))) if registry_dir.exists(): - dir_to_search = registry_dir + dirs_to_search = [registry_dir] + if compatible_with: + dirs_to_search.append(registry_dir / version_dir) except ValueError: # nothing found in registry, we'll use data dir pass - openpype_versions = self.get_openpype_versions(dir_to_search, staging) - openpype_versions += user_versions + openpype_versions = [] + for dir_to_search in dirs_to_search: + try: + openpype_versions += self.get_openpype_versions( + dir_to_search, staging, compatible_with=compatible_with) + except ValueError: + # location is invalid, skip it + pass - # remove zip file version if needed. if not include_zips: openpype_versions = [ v for v in openpype_versions if v.path.suffix != ".zip" @@ -1308,9 +1417,8 @@ class BootstrapRepos: raise ValueError( f"version {version} is not associated with any file") - destination = self.data_dir / version.path.stem - if destination.exists(): - assert destination.is_dir() + destination = self.data_dir / f"{version.major}.{version.minor}" / version.path.stem # noqa + if destination.exists() and destination.is_dir(): try: shutil.rmtree(destination) except OSError as e: @@ -1379,7 +1487,7 @@ class BootstrapRepos: else: dir_name = openpype_version.path.stem - destination = self.data_dir / dir_name + destination = self.data_dir / f"{openpype_version.major}.{openpype_version.minor}" / dir_name # noqa # test if destination directory already exist, if so lets delete it. if destination.exists() and force: @@ -1557,14 +1665,18 @@ class BootstrapRepos: return False return True - def get_openpype_versions(self, - openpype_dir: Path, - staging: bool = False) -> list: + def get_openpype_versions( + self, + openpype_dir: Path, + staging: bool = False, + compatible_with: OpenPypeVersion = None) -> list: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. staging (bool, optional): Find staging versions if True. + compatible_with (OpenPypeVersion, optional): Get only versions + compatible with the one specified. 
Returns: list of OpenPypeVersion @@ -1574,7 +1686,7 @@ class BootstrapRepos: """ if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") + raise ValueError(f"specified directory {openpype_dir} is invalid") _openpype_versions = [] # iterate over directory in first level and find all that might @@ -1599,6 +1711,10 @@ class BootstrapRepos: ): continue + if compatible_with and \ + not detected_version.is_compatible(compatible_with): + continue + detected_version.path = item if staging and detected_version.is_staging(): _openpype_versions.append(detected_version) diff --git a/igniter/tools.py b/igniter/tools.py index 57159b5e52..a9d592acf0 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -21,6 +21,11 @@ class OpenPypeVersionNotFound(Exception): pass +class OpenPypeVersionIncompatible(Exception): + """OpenPype version is not compatible with the installed one (build).""" + pass + + def should_add_certificate_path_to_mongo_url(mongo_url): """Check if should add ca certificate to mongo url. diff --git a/openpype/api.py b/openpype/api.py index fac2ae572b..c2227c1a52 100644 --- a/openpype/api.py +++ b/openpype/api.py @@ -9,6 +9,7 @@ from .settings import ( ) from .lib import ( PypeLogger, + Logger, Anatomy, config, execute, @@ -58,8 +59,6 @@ from .action import ( RepairContextAction ) -# for backward compatibility with Pype 2 -Logger = PypeLogger __all__ = [ "get_system_settings", diff --git a/openpype/cli.py b/openpype/cli.py index 9a2dfaa141..ffe288040e 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -443,3 +443,26 @@ def interactive(): __version__, sys.version, sys.platform ) code.interact(banner) + + +@main.command() +@click.option("--build", help="Print only build version", + is_flag=True, default=False) +def version(build): + """Print OpenPype version.""" + + from openpype.version import __version__ + from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion + from pathlib import Path + import os + + if getattr(sys, 'frozen', False): + local_version = BootstrapRepos.get_version( + Path(os.getenv("OPENPYPE_ROOT"))) + else: + local_version = OpenPypeVersion.get_installed_version_str() + + if build: + print(local_version) + return + print(f"{__version__} (booted: {local_version})") diff --git a/openpype/client/__init__.py b/openpype/client/__init__.py index 97e6755d09..64a82334d9 100644 --- a/openpype/client/__init__.py +++ b/openpype/client/__init__.py @@ -1,3 +1,7 @@ +from .mongo import ( + OpenPypeMongoConnection, +) + from .entities import ( get_projects, get_project, @@ -25,6 +29,8 @@ from .entities import ( get_last_version_by_subset_name, get_output_link_versions, + version_is_latest, + get_representation_by_id, get_representation_by_name, get_representations, @@ -40,6 +46,8 @@ from .entities import ( ) __all__ = ( + "OpenPypeMongoConnection", + "get_projects", "get_project", "get_whole_project", @@ -66,6 +74,8 @@ __all__ = ( "get_last_version_by_subset_name", "get_output_link_versions", + "version_is_latest", + "get_representation_by_id", "get_representation_by_name", "get_representations", diff --git a/openpype/client/entities.py b/openpype/client/entities.py index e7eeadcf48..7362f57d7f 100644 --- a/openpype/client/entities.py +++ b/openpype/client/entities.py @@ -6,24 +6,12 @@ that has project name as a context (e.g. on 'ProjectEntity'?). + We will need more specific functions doing wery specific queires really fast. 
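With the mongo plumbing moved into `openpype.client.mongo`, callers are expected to go through the re-exported query functions instead of touching the database directly. A hedged usage sketch (project and asset names are placeholders, a configured `OPENPYPE_MONGO` environment is required to actually run it, and the keyword names follow the signatures defined in this module):

```python
from openpype.client import get_project, get_asset_by_name, get_subsets

# "demo" and "sh010" are placeholder names for illustration only.
project = get_project("demo", fields=["name", "data.fps"])
asset = get_asset_by_name("demo", "sh010", fields=["_id"])
if asset is not None:
    for subset in get_subsets("demo", asset_ids=[asset["_id"]]):
        print(subset["name"])
```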
""" -import os import collections import six from bson.objectid import ObjectId -from openpype.lib.mongo import OpenPypeMongoConnection - - -def _get_project_database(): - db_name = os.environ.get("AVALON_DB") or "avalon" - return OpenPypeMongoConnection.get_mongo_client()[db_name] - - -def _get_project_connection(project_name): - if not project_name: - raise ValueError("Invalid project name {}".format(str(project_name))) - return _get_project_database()[project_name] +from .mongo import get_project_database, get_project_connection def _prepare_fields(fields, required_fields=None): @@ -58,7 +46,7 @@ def _convert_ids(in_ids): def get_projects(active=True, inactive=False, fields=None): - mongodb = _get_project_database() + mongodb = get_project_database() for project_name in mongodb.collection_names(): if project_name in ("system.indexes",): continue @@ -93,7 +81,7 @@ def get_project(project_name, active=True, inactive=False, fields=None): {"data.active": False}, ] - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -108,7 +96,7 @@ def get_whole_project(project_name): project collection. """ - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find({}) @@ -131,7 +119,7 @@ def get_asset_by_id(project_name, asset_id, fields=None): return None query_filter = {"type": "asset", "_id": asset_id} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -153,7 +141,7 @@ def get_asset_by_name(project_name, asset_name, fields=None): return None query_filter = {"type": "asset", "name": asset_name} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -223,7 +211,7 @@ def _get_assets( return [] query_filter["data.visualParent"] = {"$in": parent_ids} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -323,7 +311,7 @@ def get_asset_ids_with_subsets(project_name, asset_ids=None): return [] subset_query["parent"] = {"$in": asset_ids} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) result = conn.aggregate([ { "$match": subset_query @@ -363,7 +351,7 @@ def get_subset_by_id(project_name, subset_id, fields=None): return None query_filters = {"type": "subset", "_id": subset_id} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filters, _prepare_fields(fields)) @@ -394,7 +382,7 @@ def get_subset_by_name(project_name, subset_name, asset_id, fields=None): "name": subset_name, "parent": asset_id } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filters, _prepare_fields(fields)) @@ -467,7 +455,7 @@ def get_subsets( return [] query_filter["$or"] = or_query - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -491,7 +479,7 @@ def get_subset_families(project_name, subset_ids=None): return set() subset_filter["_id"] = {"$in": list(subset_ids)} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) result = list(conn.aggregate([ {"$match": subset_filter}, 
{"$project": { @@ -529,7 +517,7 @@ def get_version_by_id(project_name, version_id, fields=None): "type": {"$in": ["version", "hero_version"]}, "_id": version_id } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -552,7 +540,7 @@ def get_version_by_name(project_name, version, subset_id, fields=None): if not subset_id: return None - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) query_filter = { "type": "version", "parent": subset_id, @@ -561,6 +549,42 @@ def get_version_by_name(project_name, version, subset_id, fields=None): return conn.find_one(query_filter, _prepare_fields(fields)) +def version_is_latest(project_name, version_id): + """Is version the latest from it's subset. + + Note: + Hero versions are considered as latest. + + Todo: + Maybe raise exception when version was not found? + + Args: + project_name (str):Name of project where to look for queried entities. + version_id (Union[str, ObjectId]): Version id which is checked. + + Returns: + bool: True if is latest version from subset else False. + """ + + version_id = _convert_id(version_id) + if not version_id: + return False + version_doc = get_version_by_id( + project_name, version_id, fields=["_id", "type", "parent"] + ) + # What to do when version is not found? + if not version_doc: + return False + + if version_doc["type"] == "hero_version": + return True + + last_version = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + return last_version["_id"] == version_id + + def _get_versions( project_name, subset_ids=None, @@ -606,7 +630,7 @@ def _get_versions( else: query_filter["name"] = {"$in": versions} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -765,11 +789,11 @@ def get_output_link_versions(project_name, version_id, fields=None): if not version_id: return [] - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) # Does make sense to look for hero versions? 
query_filter = { "type": "version", - "data.inputLinks.input": version_id + "data.inputLinks.id": version_id } return conn.find(query_filter, _prepare_fields(fields)) @@ -830,7 +854,7 @@ def get_last_versions(project_name, subset_ids, fields=None): {"$group": group_item} ] - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) aggregate_result = conn.aggregate(aggregation_pipeline) if limit_query: output = {} @@ -948,7 +972,7 @@ def get_representation_by_id(project_name, representation_id, fields=None): if representation_id is not None: query_filter["_id"] = _convert_id(representation_id) - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -981,7 +1005,7 @@ def get_representation_by_name( "parent": version_id } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -1044,7 +1068,7 @@ def _get_representations( return [] query_filter["$or"] = or_query - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -1255,7 +1279,7 @@ def get_thumbnail_id_from_source(project_name, src_type, src_id): query_filter = {"_id": _convert_id(src_id)} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) src_doc = conn.find_one(query_filter, {"data.thumbnail_id"}) if src_doc: return src_doc.get("data", {}).get("thumbnail_id") @@ -1288,7 +1312,7 @@ def get_thumbnails(project_name, thumbnail_ids, fields=None): "type": "thumbnail", "_id": {"$in": thumbnail_ids} } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -1309,7 +1333,7 @@ def get_thumbnail(project_name, thumbnail_id, fields=None): if not thumbnail_id: return None query_filter = {"type": "thumbnail", "_id": _convert_id(thumbnail_id)} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -1340,7 +1364,7 @@ def get_workfile_info( "task_name": task_name, "filename": filename } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) diff --git a/openpype/client/mongo.py b/openpype/client/mongo.py new file mode 100644 index 0000000000..72acbc5476 --- /dev/null +++ b/openpype/client/mongo.py @@ -0,0 +1,235 @@ +import os +import sys +import time +import logging +import pymongo +import certifi + +if sys.version_info[0] == 2: + from urlparse import urlparse, parse_qs +else: + from urllib.parse import urlparse, parse_qs + + +class MongoEnvNotSet(Exception): + pass + + +def _decompose_url(url): + """Decompose mongo url to basic components. + + Used for creation of MongoHandler which expect mongo url components as + separated kwargs. Components are at the end not used as we're setting + connection directly this is just a dumb components for MongoHandler + validation pass. 
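The decomposition itself is plain `urllib` parsing; a self-contained sketch of what the helper produces for a typical URL (standard library only, so it runs as-is):

```python
from urllib.parse import urlparse, parse_qs

url = "mongodb://user:secret@db.example.com:27017/?authSource=admin"
# Only the first URL of a comma-separated replica-set list is parsed.
result = urlparse(url.split(",")[0])
components = {
    "scheme": result.scheme,      # "mongodb"
    "host": result.hostname,      # "db.example.com"
    "port": result.port,          # 27017
    "username": result.username,  # "user"
    "password": result.password,  # "secret"
    "auth_db": parse_qs(result.query).get("authSource", [None])[0],
}
print(components)
```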
+ """ + + # Use first url from passed url + # - this is because it is possible to pass multiple urls for multiple + # replica sets which would crash on urlparse otherwise + # - please don't use comma in username of password + url = url.split(",")[0] + components = { + "scheme": None, + "host": None, + "port": None, + "username": None, + "password": None, + "auth_db": None + } + + result = urlparse(url) + if result.scheme is None: + _url = "mongodb://{}".format(url) + result = urlparse(_url) + + components["scheme"] = result.scheme + components["host"] = result.hostname + try: + components["port"] = result.port + except ValueError: + raise RuntimeError("invalid port specified") + components["username"] = result.username + components["password"] = result.password + + try: + components["auth_db"] = parse_qs(result.query)['authSource'][0] + except KeyError: + # no auth db provided, mongo will use the one we are connecting to + pass + + return components + + +def get_default_components(): + mongo_url = os.environ.get("OPENPYPE_MONGO") + if mongo_url is None: + raise MongoEnvNotSet( + "URL for Mongo logging connection is not set." + ) + return _decompose_url(mongo_url) + + +def should_add_certificate_path_to_mongo_url(mongo_url): + """Check if should add ca certificate to mongo url. + + Since 30.9.2021 cloud mongo requires newer certificates that are not + available on most of workstation. This adds path to certifi certificate + which is valid for it. To add the certificate path url must have scheme + 'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query. + """ + + parsed = urlparse(mongo_url) + query = parse_qs(parsed.query) + lowered_query_keys = set(key.lower() for key in query.keys()) + add_certificate = False + # Check if url 'ssl' or 'tls' are set to 'true' + for key in ("ssl", "tls"): + if key in query and "true" in query["ssl"]: + add_certificate = True + break + + # Check if url contains 'mongodb+srv' + if not add_certificate and parsed.scheme == "mongodb+srv": + add_certificate = True + + # Check if url does already contain certificate path + if add_certificate and "tlscafile" in lowered_query_keys: + add_certificate = False + + return add_certificate + + +def validate_mongo_connection(mongo_uri): + """Check if provided mongodb URL is valid. + + Args: + mongo_uri (str): URL to validate. + + Raises: + ValueError: When port in mongo uri is not valid. + pymongo.errors.InvalidURI: If passed mongo is invalid. + pymongo.errors.ServerSelectionTimeoutError: If connection timeout + passed so probably couldn't connect to mongo server. + + """ + + client = OpenPypeMongoConnection.create_connection( + mongo_uri, retry_attempts=1 + ) + client.close() + + +class OpenPypeMongoConnection: + """Singleton MongoDB connection. + + Keeps MongoDB connections by url. 
+ """ + + mongo_clients = {} + log = logging.getLogger("OpenPypeMongoConnection") + + @staticmethod + def get_default_mongo_url(): + return os.environ["OPENPYPE_MONGO"] + + @classmethod + def get_mongo_client(cls, mongo_url=None): + if mongo_url is None: + mongo_url = cls.get_default_mongo_url() + + connection = cls.mongo_clients.get(mongo_url) + if connection: + # Naive validation of existing connection + try: + connection.server_info() + with connection.start_session(): + pass + except Exception: + connection = None + + if not connection: + cls.log.debug("Creating mongo connection to {}".format(mongo_url)) + connection = cls.create_connection(mongo_url) + cls.mongo_clients[mongo_url] = connection + + return connection + + @classmethod + def create_connection(cls, mongo_url, timeout=None, retry_attempts=None): + parsed = urlparse(mongo_url) + # Force validation of scheme + if parsed.scheme not in ["mongodb", "mongodb+srv"]: + raise pymongo.errors.InvalidURI(( + "Invalid URI scheme:" + " URI must begin with 'mongodb://' or 'mongodb+srv://'" + )) + + if timeout is None: + timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000) + + kwargs = { + "serverSelectionTimeoutMS": timeout + } + if should_add_certificate_path_to_mongo_url(mongo_url): + kwargs["ssl_ca_certs"] = certifi.where() + + mongo_client = pymongo.MongoClient(mongo_url, **kwargs) + + if retry_attempts is None: + retry_attempts = 3 + + elif not retry_attempts: + retry_attempts = 1 + + last_exc = None + valid = False + t1 = time.time() + for attempt in range(1, retry_attempts + 1): + try: + mongo_client.server_info() + with mongo_client.start_session(): + pass + valid = True + break + + except Exception as exc: + last_exc = exc + if attempt < retry_attempts: + cls.log.warning( + "Attempt {} failed. Retrying... ".format(attempt) + ) + time.sleep(1) + + if not valid: + raise last_exc + + cls.log.info("Connected to {}, delay {:.3f}s".format( + mongo_url, time.time() - t1 + )) + return mongo_client + + +def get_project_database(): + db_name = os.environ.get("AVALON_DB") or "avalon" + return OpenPypeMongoConnection.get_mongo_client()[db_name] + + +def get_project_connection(project_name): + """Direct access to mongo collection. + + We're trying to avoid using direct access to mongo. This should be used + only for Create, Update and Remove operations until there are implemented + api calls for that. + + Args: + project_name(str): Project name for which collection should be + returned. + + Returns: + pymongo.Collection: Collection realated to passed project. 
+ """ + + if not project_name: + raise ValueError("Invalid project name {}".format(str(project_name))) + return get_project_database()[project_name] diff --git a/openpype/client/operations.py b/openpype/client/operations.py new file mode 100644 index 0000000000..c4b95bf696 --- /dev/null +++ b/openpype/client/operations.py @@ -0,0 +1,634 @@ +import uuid +import copy +import collections +from abc import ABCMeta, abstractmethod, abstractproperty + +import six +from bson.objectid import ObjectId +from pymongo import DeleteOne, InsertOne, UpdateOne + +from .mongo import get_project_connection + +REMOVED_VALUE = object() + +CURRENT_PROJECT_SCHEMA = "openpype:project-3.0" +CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0" +CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0" +CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0" +CURRENT_VERSION_SCHEMA = "openpype:version-3.0" +CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0" +CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0" + + +def _create_or_convert_to_mongo_id(mongo_id): + if mongo_id is None: + return ObjectId() + return ObjectId(mongo_id) + + +def new_project_document( + project_name, project_code, config, data=None, entity_id=None +): + """Create skeleton data of project document. + + Args: + project_name (str): Name of project. Used as identifier of a project. + project_code (str): Shorter version of projet without spaces and + special characters (in most of cases). Should be also considered + as unique name across projects. + config (Dic[str, Any]): Project config consist of roots, templates, + applications and other project Anatomy related data. + data (Dict[str, Any]): Project data with information about it's + attributes (e.g. 'fps' etc.) or integration specific keys. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of project document. + """ + + if data is None: + data = {} + + data["code"] = project_code + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "name": project_name, + "type": CURRENT_PROJECT_SCHEMA, + "entity_data": data, + "config": config + } + + +def new_asset_document( + name, project_id, parent_id, parents, data=None, entity_id=None +): + """Create skeleton data of asset document. + + Args: + name (str): Is considered as unique identifier of asset in project. + project_id (Union[str, ObjectId]): Id of project doument. + parent_id (Union[str, ObjectId]): Id of parent asset. + parents (List[str]): List of parent assets names. + data (Dict[str, Any]): Asset document data. Empty dictionary is used + if not passed. Value of 'parent_id' is used to fill 'visualParent'. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of asset document. + """ + + if data is None: + data = {} + if parent_id is not None: + parent_id = ObjectId(parent_id) + data["visualParent"] = parent_id + data["parents"] = parents + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "asset", + "name": name, + "parent": ObjectId(project_id), + "data": data, + "schema": CURRENT_ASSET_DOC_SCHEMA + } + + +def new_subset_document(name, family, asset_id, data=None, entity_id=None): + """Create skeleton data of subset document. + + Args: + name (str): Is considered as unique identifier of subset under asset. + family (str): Subset's family. + asset_id (Union[str, ObjectId]): Id of parent asset. + data (Dict[str, Any]): Subset document data. 
Empty dictionary is used + if not passed. Value of 'family' is used to fill data's 'family' key. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of subset document. + """ + + if data is None: + data = {} + data["family"] = family + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_SUBSET_SCHEMA, + "type": "subset", + "name": name, + "data": data, + "parent": asset_id + } + + +def new_version_doc(version, subset_id, data=None, entity_id=None): + """Create skeleton data of version document. + + Args: + version (int): Is considered as unique identifier of version + under subset. + subset_id (Union[str, ObjectId]): Id of parent subset. + data (Dict[str, Any]): Version document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of version document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_VERSION_SCHEMA, + "type": "version", + "name": int(version), + "parent": subset_id, + "data": data + } + + +def new_representation_doc( + name, version_id, context, data=None, entity_id=None +): + """Create skeleton data of representation document. + + Args: + name (str): Is considered as unique identifier of representation + under version. + version_id (Union[str, ObjectId]): Id of parent version. + context (Dict[str, Any]): Representation context used to fill + templates and to query the representation. + data (Dict[str, Any]): Representation document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of representation document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_REPRESENTATION_SCHEMA, + "type": "representation", + "parent": version_id, + "name": name, + "data": data, + + # Imprint shortcut to context for performance reasons. + "context": context + } + + +def new_workfile_info_doc( + filename, asset_id, task_name, files, data=None, entity_id=None +): + """Create skeleton data of workfile info document. + + Workfile document is at this moment used primarily for artist notes. + + Args: + filename (str): Filename of workfile. + asset_id (Union[str, ObjectId]): Id of asset under which the workfile + lives. + task_name (str): Task under which the workfile was created. + files (List[str]): List of rootless filepaths related to workfile. + data (Dict[str, Any]): Additional metadata. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of workfile info document. + """ + + if not data: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "workfile", + "parent": ObjectId(asset_id), + "task_name": task_name, + "filename": filename, + "data": data, + "files": files + } + + +def _prepare_update_data(old_doc, new_doc, replace): + changes = {} + for key, value in new_doc.items(): + if key not in old_doc or value != old_doc[key]: + changes[key] = value + + if replace: + for key in old_doc.keys(): + if key not in new_doc: + changes[key] = REMOVED_VALUE + return changes + + +def prepare_subset_update_data(old_doc, new_doc, replace=True): + """Compare two subset documents and prepare update data. + + Based on compared values, creates update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document.
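Taken together, the factory and diff helpers above support a create-then-update flow. A hedged sketch (ids, names and families are placeholders, and only first-level keys are compared):

```python
from bson.objectid import ObjectId
from openpype.client.operations import (
    new_subset_document,
    prepare_subset_update_data,
)

asset_id = ObjectId()  # placeholder parent id
old_doc = new_subset_document(
    "modelMain", "model", asset_id, data={"tmp_flag": True}
)
new_doc = new_subset_document(
    "modelMain", "model", asset_id,
    data={"families": ["model"]}, entity_id=old_doc["_id"]
)

changes = prepare_subset_update_data(old_doc, new_doc)
# Only first-level keys are compared, so the whole "data" dict shows up
# as changed; keys present only in the old doc map to REMOVED_VALUE.
print(changes)  # {"data": {"families": ["model"], "family": "model"}}
```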
+ """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_version_update_data(old_doc, new_doc, replace=True): + """Compare two version documents and prepare update data. + + Based on compared values, creates update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_representation_update_data(old_doc, new_doc, replace=True): + """Compare two representation documents and prepare update data. + + Based on compared values, creates update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): + """Compare two workfile info documents and prepare update data. + + Based on compared values, creates update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +@six.add_metaclass(ABCMeta) +class AbstractOperation(object): + """Base operation class. + + Operation represents a call into the database. The call can create, + change or remove data. + + Args: + project_name (str): Project on which the operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + """ + + def __init__(self, project_name, entity_type): + self._project_name = project_name + self._entity_type = entity_type + self._id = str(uuid.uuid4()) + + @property + def project_name(self): + return self._project_name + + @property + def id(self): + """Identifier of operation.""" + + return self._id + + @property + def entity_type(self): + return self._entity_type + + @abstractproperty + def operation_name(self): + """Stringified type of operation.""" + + pass + + @abstractmethod + def to_mongo_operation(self): + """Convert operation to Mongo batch operation.""" + + pass + + def to_data(self): + """Convert operation to data that can be converted to json or others. + + Warning: + Current state returns ObjectId objects which cannot be serialized + to json. + + Returns: + Dict[str, Any]: Description of operation. + """ + + return { + "id": self._id, + "entity_type": self.entity_type, + "project_name": self.project_name, + "operation": self.operation_name + } + + +class CreateOperation(AbstractOperation): + """Operation to create an entity. + + Args: + project_name (str): Project on which the operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + data (Dict[str, Any]): Data of entity that will be created.
+ """ + + operation_name = "create" + + def __init__(self, project_name, entity_type, data): + super(CreateOperation, self).__init__(project_name, entity_type) + + if not data: + data = {} + else: + data = copy.deepcopy(dict(data)) + + if "_id" not in data: + data["_id"] = ObjectId() + else: + data["_id"] = ObjectId(data["_id"]) + + self._entity_id = data["_id"] + self._data = data + + def __setitem__(self, key, value): + self.set_value(key, value) + + def __getitem__(self, key): + return self.data[key] + + def set_value(self, key, value): + self.data[key] = value + + def get(self, key, *args, **kwargs): + return self.data.get(key, *args, **kwargs) + + @property + def entity_id(self): + return self._entity_id + + @property + def data(self): + return self._data + + def to_mongo_operation(self): + return InsertOne(copy.deepcopy(self._data)) + + def to_data(self): + output = super(CreateOperation, self).to_data() + output["data"] = copy.deepcopy(self.data) + return output + + +class UpdateOperation(AbstractOperation): + """Opeartion to update an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Identifier of an entity. + update_data (Dict[str, Any]): Key -> value changes that will be set in + database. If value is set to 'REMOVED_VALUE' the key will be + removed. Only first level of dictionary is checked (on purpose). + """ + + operation_name = "update" + + def __init__(self, project_name, entity_type, entity_id, update_data): + super(UpdateOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + self._update_data = update_data + + @property + def entity_id(self): + return self._entity_id + + @property + def update_data(self): + return self._update_data + + def to_mongo_operation(self): + unset_data = {} + set_data = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + unset_data[key] = value + else: + set_data[key] = value + + op_data = {} + if unset_data: + op_data["$unset"] = unset_data + if set_data: + op_data["$set"] = set_data + + if not op_data: + return None + + return UpdateOne( + {"_id": self.entity_id}, + op_data + ) + + def to_data(self): + changes = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + value = None + changes[key] = value + + output = super(UpdateOperation, self).to_data() + output.update({ + "entity_id": self.entity_id, + "changes": changes + }) + return output + + +class DeleteOperation(AbstractOperation): + """Opeartion to delete an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Entity id that will be removed. + """ + + operation_name = "delete" + + def __init__(self, project_name, entity_type, entity_id): + super(DeleteOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + + @property + def entity_id(self): + return self._entity_id + + def to_mongo_operation(self): + return DeleteOne({"_id": self.entity_id}) + + def to_data(self): + output = super(DeleteOperation, self).to_data() + output["entity_id"] = self.entity_id + return output + + +class OperationsSession(object): + """Session storing operations that should happen in an order. 
+
+
+class OperationsSession(object):
+    """Session storing operations that should happen in an order.
+
+    At this moment the session does not handle anything special and can be
+    considered a plain list of operations that are processed one after
+    another. If the same entity is created multiple times, that is not
+    handled in any way, and document values are not validated.
+
+    Operations may span multiple projects; on commit they are grouped and
+    written per project.
+    """
+
+    def __init__(self):
+        self._operations = []
+
+    def add(self, operation):
+        """Add operation to be processed.
+
+        Args:
+            operation (AbstractOperation): Operation that should be processed.
+        """
+        if not isinstance(
+            operation,
+            (CreateOperation, UpdateOperation, DeleteOperation)
+        ):
+            raise TypeError("Expected Operation object got {}".format(
+                str(type(operation))
+            ))
+
+        self._operations.append(operation)
+
+    def append(self, operation):
+        """Add operation to be processed.
+
+        Args:
+            operation (AbstractOperation): Operation that should be processed.
+        """
+
+        self.add(operation)
+
+    def extend(self, operations):
+        """Add operations to be processed.
+
+        Args:
+            operations (List[AbstractOperation]): Operations that should be
+                processed.
+        """
+
+        for operation in operations:
+            self.add(operation)
+
+    def remove(self, operation):
+        """Remove operation."""
+
+        self._operations.remove(operation)
+
+    def clear(self):
+        """Clear all registered operations."""
+
+        self._operations = []
+
+    def to_data(self):
+        return [
+            operation.to_data()
+            for operation in self._operations
+        ]
+
+    def commit(self):
+        """Commit session operations."""
+
+        operations, self._operations = self._operations, []
+        if not operations:
+            return
+
+        operations_by_project = collections.defaultdict(list)
+        for operation in operations:
+            operations_by_project[operation.project_name].append(operation)
+
+        for project_name, operations in operations_by_project.items():
+            bulk_writes = []
+            for operation in operations:
+                mongo_op = operation.to_mongo_operation()
+                if mongo_op is not None:
+                    bulk_writes.append(mongo_op)
+
+            if bulk_writes:
+                collection = get_project_connection(project_name)
+                collection.bulk_write(bulk_writes)
+
+    def create_entity(self, project_name, entity_type, data):
+        """Fast access to 'CreateOperation'.
+
+        Returns:
+            CreateOperation: Object of create operation.
+        """
+
+        operation = CreateOperation(project_name, entity_type, data)
+        self.add(operation)
+        return operation
+
+    def update_entity(self, project_name, entity_type, entity_id, update_data):
+        """Fast access to 'UpdateOperation'.
+
+        Returns:
+            UpdateOperation: Object of update operation.
+        """
+
+        operation = UpdateOperation(
+            project_name, entity_type, entity_id, update_data
+        )
+        self.add(operation)
+        return operation
+
+    def delete_entity(self, project_name, entity_type, entity_id):
+        """Fast access to 'DeleteOperation'.
+
+        Returns:
+            DeleteOperation: Object of delete operation.
+        """
+
+        operation = DeleteOperation(project_name, entity_type, entity_id)
+        self.add(operation)
+        return operation
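An end-to-end sketch of the session, continuing the illustrative names from the sketches above; operations only queue up and nothing touches the database until commit():

session = OperationsSession()
session.create_entity("my_project", "subset", subset_doc)
session.update_entity(
    "my_project", "version", version_doc["_id"], {"data.comment": "approved"}
)
session.commit()  # groups operations per project and issues one bulk_write()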
+ """ + + operation = DeleteOperation(project_name, entity_type, entity_id) + self.add(operation) + return operation diff --git a/openpype/hooks/pre_copy_template_workfile.py b/openpype/hooks/pre_copy_template_workfile.py index dffac22ee2..70c549919f 100644 --- a/openpype/hooks/pre_copy_template_workfile.py +++ b/openpype/hooks/pre_copy_template_workfile.py @@ -1,11 +1,11 @@ import os import shutil -from openpype.lib import ( - PreLaunchHook, - get_custom_workfile_template_by_context, +from openpype.lib import PreLaunchHook +from openpype.settings import get_project_settings +from openpype.pipeline.workfile import ( + get_custom_workfile_template, get_custom_workfile_template_by_string_context ) -from openpype.settings import get_project_settings class CopyTemplateWorkfile(PreLaunchHook): @@ -54,41 +54,22 @@ class CopyTemplateWorkfile(PreLaunchHook): project_name = self.data["project_name"] asset_name = self.data["asset_name"] task_name = self.data["task_name"] + host_name = self.application.host_name project_settings = get_project_settings(project_name) - host_settings = project_settings[self.application.host_name] - - workfile_builder_settings = host_settings.get("workfile_builder") - if not workfile_builder_settings: - # TODO remove warning when deprecated - self.log.warning(( - "Seems like old version of settings is used." - " Can't access custom templates in host \"{}\"." - ).format(self.application.full_label)) - return - - if not workfile_builder_settings["create_first_version"]: - self.log.info(( - "Project \"{}\" has turned off to create first workfile for" - " application \"{}\"" - ).format(project_name, self.application.full_label)) - return - - # Backwards compatibility - template_profiles = workfile_builder_settings.get("custom_templates") - if not template_profiles: - self.log.info( - "Custom templates are not filled. Skipping template copy." - ) - return project_doc = self.data.get("project_doc") asset_doc = self.data.get("asset_doc") anatomy = self.data.get("anatomy") if project_doc and asset_doc: self.log.debug("Started filtering of custom template paths.") - template_path = get_custom_workfile_template_by_context( - template_profiles, project_doc, asset_doc, task_name, anatomy + template_path = get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + project_settings ) else: @@ -96,10 +77,13 @@ class CopyTemplateWorkfile(PreLaunchHook): "Global data collection probably did not execute." " Using backup solution." 
)) - dbcon = self.data.get("dbcon") template_path = get_custom_workfile_template_by_string_context( - template_profiles, project_name, asset_name, task_name, - dbcon, anatomy + project_name, + asset_name, + task_name, + host_name, + anatomy, + project_settings ) if not template_path: diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py index 6577e37cbe..8a178915fb 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/openpype/hooks/pre_global_host_data.py @@ -1,3 +1,4 @@ +from openpype.client import get_project, get_asset_by_name from openpype.lib import ( PreLaunchHook, EnvironmentPrepData, @@ -69,7 +70,7 @@ class GlobalHostDataHook(PreLaunchHook): self.data["dbcon"] = dbcon # Project document - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) self.data["project_doc"] = project_doc asset_name = self.data.get("asset_name") @@ -79,8 +80,5 @@ class GlobalHostDataHook(PreLaunchHook): ) return - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) self.data["asset_doc"] = asset_doc diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index 0bc47665b0..c13c22ced5 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -1,5 +1,4 @@ import os -import sys from Qt import QtWidgets @@ -15,6 +14,7 @@ from openpype.pipeline import ( AVALON_CONTAINER_ID, legacy_io, ) +from openpype.pipeline.load import any_outdated_containers import openpype.hosts.aftereffects from openpype.lib import register_event_callback @@ -136,7 +136,7 @@ def ls(): def check_inventory(): """Checks loaded containers if they are of highest version""" - if not lib.any_outdated(): + if not any_outdated_containers(): return # Warn about outdated containers. 
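The same lookup pattern replaces direct `dbcon.find_one` queries in the hooks above; a sketch with illustrative names:

from openpype.client import get_project, get_asset_by_name

project_doc = get_project("my_project")
asset_doc = get_asset_by_name("my_project", "sh010")
# Same documents the removed dbcon.find_one() calls returned, resolved
# through openpype.client instead of the legacy connection object.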
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index bb199a61f7..d444ead6dc 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -102,7 +102,6 @@ class CollectAERender(publish.AbstractCollectRender): attachTo=False, setMembers='', publish=True, - renderer='aerender', name=subset_name, resolutionWidth=render_q.width, resolutionHeight=render_q.height, @@ -113,7 +112,6 @@ class CollectAERender(publish.AbstractCollectRender): frameStart=frame_start, frameEnd=frame_end, frameStep=1, - toBeRenderedOn='deadline', fps=fps, app_version=app_version, publish_attributes=inst.data.get("publish_attributes", {}), @@ -138,6 +136,9 @@ class CollectAERender(publish.AbstractCollectRender): fam = "render.farm" if fam not in instance.families: instance.families.append(fam) + instance.toBeRenderedOn = "deadline" + instance.renderer = "aerender" + instance.farm = True # to skip integrate instances.append(instance) instances_to_remove.append(inst) diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index c1b5add518..4f8410da74 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -220,12 +220,9 @@ class LaunchQtApp(bpy.types.Operator): self._app.store_window(self.bl_idname, window) self._window = window - if not isinstance( - self._window, - (QtWidgets.QMainWindow, QtWidgets.QDialog, ModuleType) - ): + if not isinstance(self._window, (QtWidgets.QWidget, ModuleType)): raise AttributeError( - "`window` should be a `QDialog or module`. Got: {}".format( + "`window` should be a `QWidget or module`. Got: {}".format( str(type(window)) ) ) @@ -249,9 +246,9 @@ class LaunchQtApp(bpy.types.Operator): self._window.setWindowFlags(on_top_flags) self._window.show() - if on_top_flags != origin_flags: - self._window.setWindowFlags(origin_flags) - self._window.show() + # if on_top_flags != origin_flags: + # self._window.setWindowFlags(origin_flags) + # self._window.show() return {'FINISHED'} diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 5db89a0ab9..992db62c75 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -136,7 +136,8 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): "tasks": { task["name"]: {"type": task["type"]} for task in self.add_tasks}, - "representations": [] + "representations": [], + "newAssetPublishing": True }) self.log.debug("__ inst_data: {}".format(pformat(inst_data))) diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py index b59107f155..4d45f67ded 100644 --- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -3,9 +3,9 @@ import copy from collections import OrderedDict from pprint import pformat import pyblish -from openpype.lib import get_workdir import openpype.hosts.flame.api as opfapi import openpype.pipeline as op_pipeline +from openpype.pipeline.workfile import get_workdir class IntegrateBatchGroup(pyblish.api.InstancePlugin): @@ -324,7 +324,13 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin): project_doc = instance.data["projectEntity"] asset_entity = 
instance.data["assetEntity"] anatomy = instance.context.data["anatomy"] + project_settings = instance.context.data["project_settings"] return get_workdir( - project_doc, asset_entity, task_data["name"], "flame", anatomy + project_doc, + asset_entity, + task_data["name"], + "flame", + anatomy, + project_settings=project_settings ) diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index 52a157c56e..49ef340679 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -3,9 +3,7 @@ import re import sys import logging -# Pipeline imports from openpype.client import ( - get_project, get_asset_by_name, get_versions, ) @@ -17,13 +15,10 @@ from openpype.pipeline import ( from openpype.lib import version_up from openpype.hosts.fusion import api from openpype.hosts.fusion.api import lib -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Update Slap Comp") -self = sys.modules[__name__] -self._project = None - def _format_version_folder(folder): """Format a version folder based on the filepath @@ -212,9 +207,6 @@ def switch(asset_name, filepath=None, new=True): asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find '%s' in the database" % asset_name - # Get current project - self._project = get_project(project_name) - # Go to comp if not filepath: current_comp = api.get_current_comp() diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index 01d55db647..93f775b24b 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -14,7 +14,7 @@ from openpype.pipeline import ( legacy_io, ) from openpype.hosts.fusion import api -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Fusion Switch Shot") diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 94ca134205..4d71b9380d 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -4,17 +4,15 @@ import logging import pyblish.api -from openpype import lib -from openpype.client import get_representation_by_id from openpype.lib import register_event_callback from openpype.pipeline import ( - legacy_io, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.load import get_outdated_containers from openpype.pipeline.context_tools import get_current_project_asset import openpype.hosts.harmony import openpype.hosts.harmony.api as harmony @@ -108,16 +106,7 @@ def check_inventory(): in Harmony. 
""" - project_name = legacy_io.active_project() - outdated_containers = [] - for container in ls(): - representation_id = container['representation'] - representation_doc = get_representation_by_id( - project_name, representation_id, fields=["parent"] - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) - + outdated_containers = get_outdated_containers() if not outdated_containers: return diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py index 9c01fe3cd8..c28a87791e 100644 --- a/openpype/hosts/harmony/plugins/load/load_background.py +++ b/openpype/hosts/harmony/plugins/load/load_background.py @@ -5,8 +5,8 @@ from openpype.pipeline import ( load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib copy_files = """function copyFile(srcFilename, dstFilename) @@ -280,9 +280,7 @@ class BackgroundLoader(load.LoaderPlugin): ) def update(self, container, representation): - path = get_representation_path(representation) - with open(path) as json_file: data = json.load(json_file) @@ -300,10 +298,9 @@ class BackgroundLoader(load.LoaderPlugin): bg_folder = os.path.dirname(path) - path = get_representation_path(representation) - print(container) + is_latest = is_representation_from_latest(representation) for layer in sorted(layers): file_to_import = [ os.path.join(bg_folder, layer).replace("\\", "/") @@ -347,7 +344,7 @@ class BackgroundLoader(load.LoaderPlugin): } %s """ % (sig, sig) - if openpype.lib.is_latest(representation): + if is_latest: harmony.send({"function": func, "args": [node, "green"]}) else: harmony.send({"function": func, "args": [node, "red"]}) diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/openpype/hosts/harmony/plugins/load/load_imagesequence.py index 18695438d5..1b64aff595 100644 --- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py +++ b/openpype/hosts/harmony/plugins/load/load_imagesequence.py @@ -10,8 +10,8 @@ from openpype.pipeline import ( load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib class ImageSequenceLoader(load.LoaderPlugin): @@ -109,7 +109,7 @@ class ImageSequenceLoader(load.LoaderPlugin): ) # Colour node. 
-        if openpype.lib.is_latest(representation):
+        if is_representation_from_latest(representation):
             harmony.send(
                 {
                     "function": "PypeHarmony.setColor",
diff --git a/openpype/hosts/harmony/plugins/load/load_template.py b/openpype/hosts/harmony/plugins/load/load_template.py
index c6dc9d913b..f3c69a9104 100644
--- a/openpype/hosts/harmony/plugins/load/load_template.py
+++ b/openpype/hosts/harmony/plugins/load/load_template.py
@@ -10,8 +10,8 @@ from openpype.pipeline import (
     load,
     get_representation_path,
 )
+from openpype.pipeline.context_tools import is_representation_from_latest
 import openpype.hosts.harmony.api as harmony
-import openpype.lib
 
 
 class TemplateLoader(load.LoaderPlugin):
@@ -83,7 +83,7 @@ class TemplateLoader(load.LoaderPlugin):
         self_name = self.__class__.__name__
         update_and_replace = False
-        if openpype.lib.is_latest(representation):
+        if is_representation_from_latest(representation):
             self._set_green(node)
         else:
             self._set_red(node)
diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py
index 2d0ec6fc99..0c7dbc1f22 100644
--- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py
+++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py
@@ -109,7 +109,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
                 "clipAnnotations": annotations,
 
                 # add all additional tags
-                "tags": phiero.get_track_item_tags(track_item)
+                "tags": phiero.get_track_item_tags(track_item),
+                "newAssetPublishing": True
             })
 
             # otio clip data
diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py
index 7048accceb..b5f5459392 100644
--- a/openpype/hosts/houdini/api/pipeline.py
+++ b/openpype/hosts/houdini/api/pipeline.py
@@ -12,13 +12,13 @@ from openpype.pipeline import (
     register_loader_plugin_path,
     AVALON_CONTAINER_ID,
 )
+from openpype.pipeline.load import any_outdated_containers
 import openpype.hosts.houdini
 from openpype.hosts.houdini.api import lib
 from openpype.lib import (
     register_event_callback,
     emit_event,
-    any_outdated,
 )
 from .lib import get_asset_fps
@@ -245,7 +245,7 @@ def on_open():
     # ensure it is using correct FPS for the asset
     lib.validate_fps()
 
-    if any_outdated():
+    if any_outdated_containers():
         from openpype.widgets import popup
 
         log.warning("Scene has outdated content.")
diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py
index 123b934428..1e883ea43f 100644
--- a/openpype/hosts/maya/api/lib_renderproducts.py
+++ b/openpype/hosts/maya/api/lib_renderproducts.py
@@ -82,6 +82,14 @@ IMAGE_PREFIXES = {
 
 RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"
 
+
+def has_tokens(string, tokens):
+    """Return whether any of tokens is in input string (case-insensitive)"""
+    pattern = "({})".format("|".join(re.escape(token) for token in tokens))
+    match = re.search(pattern, string, re.IGNORECASE)
+    return bool(match)
+
+
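A quick sketch of what `has_tokens` accepts (token strings follow Maya's file-name prefix convention used throughout this module):

has_tokens("maya/<Scene>/<RenderLayer>", ["<renderlayer>", "<layer>"])  # True
has_tokens("maya/<Scene>/beauty", ["<aov>", "<renderpass>"])            # False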
 @attr.s
 class LayerMetadata(object):
     """Data class for Render Layer metadata."""
@@ -99,6 +107,12 @@ class LayerMetadata(object):
     # Render Products
     products = attr.ib(init=False, default=attr.Factory(list))
 
+    # The AOV separator token. Note that not all renderers define an explicit
+    # render separator but allow to put the AOV/RenderPass token anywhere in
+    # the file path prefix. For those renderers we'll fall back to whatever
+    # is between the last occurrences of <RenderLayer> and <RenderPass> tokens.
+    aov_separator = attr.ib(default="_")
+
 
 @attr.s
 class RenderProduct(object):
@@ -183,7 +197,6 @@ class ARenderProducts:
         self.layer = layer
         self.render_instance = render_instance
         self.multipart = False
-        self.aov_separator = render_instance.data.get("aovSeparator", "_")
 
         # Initialize
         self.layer_data = self._get_layer_data()
@@ -296,6 +309,42 @@ class ARenderProducts:
 
         return lib.get_attr_in_layer(plug, layer=self.layer)
 
+    @staticmethod
+    def extract_separator(file_prefix):
+        """Extract AOV separator character from the prefix.
+
+        Default behavior extracts the part between the last occurrences of
+        <RenderLayer> and <RenderPass> tokens.
+
+        Todo:
+            This code also triggers for V-Ray which overrides it explicitly
+            so this code will invalidly debug log it couldn't extract the
+            AOV separator even though it does set it in RenderProductsVray.
+
+        Args:
+            file_prefix (str): File prefix with tokens.
+
+        Returns:
+            str or None: prefix character if it can be extracted.
+        """
+        layer_tokens = ["<renderlayer>", "<layer>"]
+        aov_tokens = ["<aov>", "<renderpass>"]
+
+        def match_last(tokens, text):
+            """regex match the last occurrence from a list of tokens"""
+            pattern = "(?:.*)({})".format("|".join(tokens))
+            return re.search(pattern, text, re.IGNORECASE)
+
+        layer_match = match_last(layer_tokens, file_prefix)
+        aov_match = match_last(aov_tokens, file_prefix)
+        separator = None
+        if layer_match and aov_match:
+            matches = sorted((layer_match, aov_match),
+                             key=lambda match: match.end(1))
+            separator = file_prefix[matches[0].end(1):matches[1].start(1)]
+        return separator
+
     def _get_layer_data(self):
         # type: () -> LayerMetadata
         # ______________________________________________
@@ -304,7 +353,7 @@ class ARenderProducts:
         #  ____________________/
         _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
         scene_name, _ = os.path.splitext(scene_basename)
-
+        kwargs = {}
         file_prefix = self.get_renderer_prefix()
 
         # If the Render Layer belongs to a Render Setup layer then the
@@ -319,6 +368,13 @@ class ARenderProducts:
             # defaultRenderLayer renders as masterLayer
             layer_name = "masterLayer"
 
+        separator = self.extract_separator(file_prefix)
+        if separator:
+            kwargs["aov_separator"] = separator
+        else:
+            log.debug("Couldn't extract aov separator from "
+                      "file prefix: {}".format(file_prefix))
+
         # todo: Support Custom Frames sequences 0,5-10,100-120
         #   Deadline allows submitting renders with a custom frame list
         #   to support those cases we might want to allow 'custom frames'
@@ -335,7 +391,8 @@ class ARenderProducts:
             layerName=layer_name,
             renderer=self.renderer,
             defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"),
-            filePrefix=file_prefix
+            filePrefix=file_prefix,
+            **kwargs
         )
 
     def _generate_file_sequence(
@@ -680,9 +737,17 @@ class RenderProductsVray(ARenderProducts):
         """
         prefix = super(RenderProductsVray, self).get_renderer_prefix()
-        prefix = "{}{}".format(prefix, self.aov_separator)
+        aov_separator = self._get_aov_separator()
+        prefix = "{}{}".format(prefix, aov_separator)
         return prefix
 
+    def _get_aov_separator(self):
+        # type: () -> str
+        """Return the V-Ray AOV/Render Elements separator"""
+        return self._get_attr(
+            "vraySettings.fileNameRenderElementSeparator"
+        )
+
     def _get_layer_data(self):
         # type: () -> LayerMetadata
         """Override to get vray specific extension."""
@@ -694,6 +759,8 @@ class RenderProductsVray(ARenderProducts):
         layer_data.defaultExt = default_ext
         layer_data.padding = self._get_attr("vraySettings.fileNamePadding")
 
+        layer_data.aov_separator = self._get_aov_separator()
+
         return layer_data
 
     def get_render_products(self):
@@ -913,8 +980,9 @@ class RenderProductsRedshift(ARenderProducts):
 
        :func:`ARenderProducts.get_renderer_prefix()`
 
        """
-        prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
-        prefix = "{}{}".format(prefix, self.aov_separator)
+        file_prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
+        separator = self.extract_separator(file_prefix)
+        prefix = "{}{}".format(file_prefix, separator or "_")
         return prefix
 
     def get_render_products(self):
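A sketch of what `extract_separator` recovers from a typical prefix (the prefix string is illustrative):

prefix = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
ARenderProducts.extract_separator(prefix)  # -> "_", the text between the
                                           # last layer and AOV tokens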
diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py
new file mode 100644
index 0000000000..9aea55a03b
--- /dev/null
+++ b/openpype/hosts/maya/api/lib_rendersettings.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+"""Class for handling Render Settings."""
+from maya import cmds  # noqa
+import maya.mel as mel
+import six
+import sys
+
+from openpype.api import (
+    get_project_settings,
+    get_current_project_settings
+)
+
+from openpype.pipeline import legacy_io
+from openpype.pipeline import CreatorError
+from openpype.pipeline.context_tools import get_current_project_asset
+from openpype.hosts.maya.api.commands import reset_frame_range
+
+
+class RenderSettings(object):
+
+    _image_prefix_nodes = {
+        'vray': 'vraySettings.fileNamePrefix',
+        'arnold': 'defaultRenderGlobals.imageFilePrefix',
+        'renderman': 'defaultRenderGlobals.imageFilePrefix',
+        'redshift': 'defaultRenderGlobals.imageFilePrefix'
+    }
+
+    _image_prefixes = {
+        'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"],  # noqa
+        'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"],  # noqa
+        'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
+        'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"]  # noqa
+    }
+
+    _aov_chars = {
+        "dot": ".",
+        "dash": "-",
+        "underscore": "_"
+    }
+
+    @classmethod
+    def get_image_prefix_attr(cls, renderer):
+        return cls._image_prefix_nodes[renderer]
+
+    def __init__(self, project_settings=None):
+        self._project_settings = project_settings
+        if not self._project_settings:
+            self._project_settings = get_project_settings(
+                legacy_io.Session["AVALON_PROJECT"]
+            )
+
+    def set_default_renderer_settings(self, renderer=None):
+        """Set basic settings based on renderer."""
+        if not renderer:
+            renderer = cmds.getAttr(
+                'defaultRenderGlobals.currentRenderer').lower()
+
+        asset_doc = get_current_project_asset()
+        # project_settings/maya/create/CreateRender/aov_separator
+        try:
+            aov_separator = self._aov_chars[(
+                self._project_settings["maya"]
+                ["create"]
+                ["CreateRender"]
+                ["aov_separator"]
+            )]
+        except KeyError:
+            aov_separator = "_"
+        reset_frame = self._project_settings["maya"]["RenderSettings"]["reset_current_frame"]  # noqa
+
+        if reset_frame:
+            start_frame = cmds.getAttr("defaultRenderGlobals.startFrame")
+            cmds.currentTime(start_frame, edit=True)
+
+        if renderer in self._image_prefix_nodes:
+            prefix = self._image_prefixes[renderer]
+            prefix = prefix.replace("{aov_separator}", aov_separator)
+            cmds.setAttr(self._image_prefix_nodes[renderer],
+                         prefix, type="string")  # noqa
+        else:
+            print("{0} isn't a supported renderer to autoset settings.".format(renderer))  # noqa
+
+        # TODO: handle not having res values in the doc
+        width = asset_doc["data"].get("resolutionWidth")
+        height = asset_doc["data"].get("resolutionHeight")
+
+        if renderer == "arnold":
+            # set renderer settings for Arnold from project settings
+            self._set_arnold_settings(width, height)
+
+        if renderer == "vray":
self._set_vray_settings(aov_separator, width, height) + + if renderer == "redshift": + self._set_redshift_settings(width, height) + + def _set_arnold_settings(self, width, height): + """Sets settings for Arnold.""" + from mtoa.core import createOptions # noqa + from mtoa.aovs import AOVInterface # noqa + createOptions() + arnold_render_presets = self._project_settings["maya"]["RenderSettings"]["arnold_renderer"] # noqa + # Force resetting settings and AOV list to avoid having to deal with + # AOV checking logic, for now. + # This is a work around because the standard + # function to revert render settings does not reset AOVs list in MtoA + # Fetch current aovs in case there's any. + current_aovs = AOVInterface().getAOVs() + # Remove fetched AOVs + AOVInterface().removeAOVs(current_aovs) + mel.eval("unifiedRenderGlobalsRevertToDefault") + img_ext = arnold_render_presets["image_format"] + img_prefix = arnold_render_presets["image_prefix"] + aovs = arnold_render_presets["aov_list"] + img_tiled = arnold_render_presets["tiled"] + multi_exr = arnold_render_presets["multilayer_exr"] + additional_options = arnold_render_presets["additional_options"] + for aov in aovs: + AOVInterface('defaultArnoldRenderOptions').addAOV(aov) + + cmds.setAttr("defaultResolution.width", width) + cmds.setAttr("defaultResolution.height", height) + + self._set_global_output_settings() + + cmds.setAttr( + "defaultRenderGlobals.imageFilePrefix", img_prefix, type="string") + + cmds.setAttr( + "defaultArnoldDriver.ai_translator", img_ext, type="string") + + cmds.setAttr( + "defaultArnoldDriver.exrTiled", img_tiled) + + cmds.setAttr( + "defaultArnoldDriver.mergeAOVs", multi_exr) + # Passes additional options in from the schema as a list + # but converts it to a dictionary because ftrack doesn't + # allow fullstops in custom attributes. Then checks for + # type of MtoA attribute passed to adjust the `setAttr` + # command accordingly. 
+ self._additional_attribs_setter(additional_options) + for item in additional_options: + attribute, value = item + if (cmds.getAttr(str(attribute), type=True)) == "long": + cmds.setAttr(str(attribute), int(value)) + elif (cmds.getAttr(str(attribute), type=True)) == "bool": + cmds.setAttr(str(attribute), int(value), type = "Boolean") # noqa + elif (cmds.getAttr(str(attribute), type=True)) == "string": + cmds.setAttr(str(attribute), str(value), type = "string") # noqa + reset_frame_range() + + def _set_redshift_settings(self, width, height): + """Sets settings for Redshift.""" + redshift_render_presets = ( + self._project_settings + ["maya"] + ["RenderSettings"] + ["redshift_renderer"] + ) + additional_options = redshift_render_presets["additional_options"] + ext = redshift_render_presets["image_format"] + img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"] + img_ext = img_exts.index(ext) + + self._set_global_output_settings() + cmds.setAttr("redshiftOptions.imageFormat", img_ext) + cmds.setAttr("defaultResolution.width", width) + cmds.setAttr("defaultResolution.height", height) + self._additional_attribs_setter(additional_options) + + def _set_vray_settings(self, aov_separator, width, height): + # type: (str, int, int) -> None + """Sets important settings for Vray.""" + settings = cmds.ls(type="VRaySettingsNode") + node = settings[0] if settings else cmds.createNode("VRaySettingsNode") + vray_render_presets = ( + self._project_settings + ["maya"] + ["RenderSettings"] + ["vray_renderer"] + ) + # Set aov separator + # First we need to explicitly set the UI items in Render Settings + # because that is also what V-Ray updates to when that Render Settings + # UI did initialize before and refreshes again. + MENU = "vrayRenderElementSeparator" + if cmds.optionMenuGrp(MENU, query=True, exists=True): + items = cmds.optionMenuGrp(MENU, query=True, ill=True) + separators = [cmds.menuItem(i, query=True, label=True) for i in items] # noqa: E501 + try: + sep_idx = separators.index(aov_separator) + except ValueError as e: + six.reraise( + CreatorError, + CreatorError( + "AOV character {} not in {}".format( + aov_separator, separators)), + sys.exc_info()[2]) + + cmds.optionMenuGrp(MENU, edit=True, select=sep_idx + 1) + + # Set the render element attribute as string. 
This is also what V-Ray + # sets whenever the `vrayRenderElementSeparator` menu items switch + cmds.setAttr( + "{}.fileNameRenderElementSeparator".format(node), + aov_separator, + type="string" + ) + + # Set render file format to exr + cmds.setAttr("{}.imageFormatStr".format(node), "exr", type="string") + + # animType + cmds.setAttr("{}.animType".format(node), 1) + + # resolution + cmds.setAttr("{}.width".format(node), width) + cmds.setAttr("{}.height".format(node), height) + + additional_options = vray_render_presets["additional_options"] + + self._additional_attribs_setter(additional_options) + + @staticmethod + def _set_global_output_settings(): + # enable animation + cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) + cmds.setAttr("defaultRenderGlobals.animation", 1) + cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) + cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + + def _additional_attribs_setter(self, additional_attribs): + print(additional_attribs) + for item in additional_attribs: + attribute, value = item + if (cmds.getAttr(str(attribute), type=True)) == "long": + cmds.setAttr(str(attribute), int(value)) + elif (cmds.getAttr(str(attribute), type=True)) == "bool": + cmds.setAttr(str(attribute), int(value)) # noqa + elif (cmds.getAttr(str(attribute), type=True)) == "string": + cmds.setAttr(str(attribute), str(value), type = "string") # noqa diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 97f06c43af..b7ab529a55 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -6,11 +6,11 @@ from Qt import QtWidgets, QtGui import maya.utils import maya.cmds as cmds -from openpype.api import BuildWorkfile from openpype.settings import get_project_settings from openpype.pipeline import legacy_io +from openpype.pipeline.workfile import BuildWorkfile from openpype.tools.utils import host_tools -from openpype.hosts.maya.api import lib +from openpype.hosts.maya.api import lib, lib_rendersettings from .lib import get_main_window, IS_HEADLESS from .commands import reset_frame_range @@ -44,6 +44,7 @@ def install(): parent="MayaWindow" ) + renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() # Create context menu context_label = "{}, {}".format( legacy_io.Session["AVALON_ASSET"], @@ -98,6 +99,13 @@ def install(): cmds.menuItem(divider=True) + cmds.menuItem( + "Set Render Settings", + command=lambda *args: lib_rendersettings.RenderSettings().set_default_renderer_settings() # noqa + ) + + cmds.menuItem(divider=True) + cmds.menuItem( "Work Files...", command=lambda *args: host_tools.show_workfiles( diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index d08e8d1926..f565f6a308 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -13,7 +13,6 @@ from openpype.host import HostBase, IWorkfileHost, ILoadHost import openpype.hosts.maya from openpype.tools.utils import host_tools from openpype.lib import ( - any_outdated, register_event_callback, emit_event ) @@ -28,6 +27,7 @@ from openpype.pipeline import ( deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.load import any_outdated_containers from openpype.hosts.maya.lib import copy_workspace_mel from . 
import menu, lib from .workio import ( @@ -470,7 +470,7 @@ def on_open(): lib.validate_fps() lib.fix_incompatible_containers() - if any_outdated(): + if any_outdated_containers(): log.warning("Scene has outdated content.") # Find maya main window diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 9280805945..e50ebfccad 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -208,7 +208,8 @@ class ReferenceLoader(Loader): file_type = { "ma": "mayaAscii", "mb": "mayaBinary", - "abc": "Alembic" + "abc": "Alembic", + "fbx": "FBX" }.get(representation["name"]) assert file_type, "Unsupported representation: %s" % representation @@ -234,7 +235,7 @@ class ReferenceLoader(Loader): path = self.prepare_root_value(path, representation["context"] ["project"] - ["code"]) + ["name"]) content = cmds.file(path, loadReference=reference_node, type=file_type, diff --git a/openpype/hosts/maya/api/shader_definition_editor.py b/openpype/hosts/maya/api/shader_definition_editor.py index 911db48ac2..6ea5e1a127 100644 --- a/openpype/hosts/maya/api/shader_definition_editor.py +++ b/openpype/hosts/maya/api/shader_definition_editor.py @@ -6,7 +6,7 @@ Shader names are stored as simple text file over GridFS in mongodb. """ import os from Qt import QtWidgets, QtCore, QtGui -from openpype.lib.mongo import OpenPypeMongoConnection +from openpype.client.mongo import OpenPypeMongoConnection from openpype import resources import gridfs diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py index 5cd1f7090a..e47d4e5b5a 100644 --- a/openpype/hosts/maya/plugins/create/create_animation.py +++ b/openpype/hosts/maya/plugins/create/create_animation.py @@ -11,6 +11,7 @@ class CreateAnimation(plugin.Creator): label = "Animation" family = "animation" icon = "male" + write_color_sets = False def __init__(self, *args, **kwargs): super(CreateAnimation, self).__init__(*args, **kwargs) @@ -22,7 +23,7 @@ class CreateAnimation(plugin.Creator): self.data[key] = value # Write vertex colors with the geometry. - self.data["writeColorSets"] = False + self.data["writeColorSets"] = self.write_color_sets self.data["writeFaceSets"] = False # Include only renderable visible shapes. diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index e876015adb..5516445de8 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -11,6 +11,7 @@ class CreatePointCache(plugin.Creator): label = "Point Cache" family = "pointcache" icon = "gears" + write_color_sets = False def __init__(self, *args, **kwargs): super(CreatePointCache, self).__init__(*args, **kwargs) @@ -18,7 +19,8 @@ class CreatePointCache(plugin.Creator): # Add animation data self.data.update(lib.collect_animation_data()) - self.data["writeColorSets"] = False # Vertex colors with the geometry. + # Vertex colors with the geometry. + self.data["writeColorSets"] = self.write_color_sets self.data["writeFaceSets"] = False # Vertex colors with the geometry. 
self.data["renderableOnly"] = False # Only renderable visible shapes self.data["visibleOnly"] = False # only nodes that are visible diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index de07a0b23d..fbe670b1ea 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -1,15 +1,21 @@ # -*- coding: utf-8 -*- """Create ``Render`` instance in Maya.""" -import os import json +import os + import appdirs import requests from maya import cmds -import maya.app.renderSetup.model.renderSetup as renderSetup +from maya.app.renderSetup.model import renderSetup +from openpype.api import ( + get_system_settings, + get_project_settings, +) from openpype.hosts.maya.api import ( lib, + lib_rendersettings, plugin ) from openpype.lib import requests_get @@ -17,6 +23,7 @@ from openpype.api import ( get_system_settings, get_project_settings) from openpype.modules import ModulesManager +from openpype.pipeline import legacy_io from openpype.pipeline import ( CreatorError, legacy_io, @@ -69,35 +76,6 @@ class CreateRender(plugin.Creator): _user = None _password = None - # renderSetup instance - _rs = None - - _image_prefix_nodes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'rmanGlobals.imageFileFormat', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', - } - - _image_prefixes = { - 'mentalray': 'maya///{aov_separator}', # noqa - 'vray': 'maya///', - 'arnold': 'maya///{aov_separator}', # noqa - # this needs `imageOutputDir` - # (/renders/maya/) set separately - 'renderman': '_..', - 'redshift': 'maya///', # noqa - 'mayahardware2': 'maya///', # noqa - } - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - _project_settings = None def __init__(self, *args, **kwargs): @@ -109,18 +87,8 @@ class CreateRender(plugin.Creator): return self._project_settings = get_project_settings( legacy_io.Session["AVALON_PROJECT"]) - - # project_settings/maya/create/CreateRender/aov_separator - try: - self.aov_separator = self._aov_chars[( - self._project_settings["maya"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - self.aov_separator = "_" - + if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]: # noqa + lib_rendersettings.RenderSettings().set_default_renderer_settings() manager = ModulesManager() self.deadline_module = manager.modules_by_name["deadline"] try: @@ -177,13 +145,13 @@ class CreateRender(plugin.Creator): ]) cmds.setAttr("{}.machineList".format(self.instance), lock=True) - self._rs = renderSetup.instance() - layers = self._rs.getRenderLayers() + rs = renderSetup.instance() + layers = rs.getRenderLayers() if use_selection: - print(">>> processing existing layers") + self.log.info("Processing existing layers") sets = [] for layer in layers: - print(" - creating set for {}:{}".format( + self.log.info(" - creating set for {}:{}".format( namespace, layer.name())) render_set = cmds.sets( n="{}:{}".format(namespace, layer.name())) @@ -193,17 +161,10 @@ class CreateRender(plugin.Creator): # if no render layers are present, create default one with # asterisk selector if not layers: - render_layer = self._rs.createRenderLayer('Main') + render_layer = rs.createRenderLayer('Main') collection = render_layer.createCollection("defaultCollection") 
collection.getSelector().setPattern('*') - renderer = cmds.getAttr( - 'defaultRenderGlobals.currentRenderer').lower() - # handle various renderman names - if renderer.startswith('renderman'): - renderer = 'renderman' - - self._set_default_renderer_settings(renderer) return self.instance def _deadline_webservice_changed(self): @@ -237,7 +198,7 @@ class CreateRender(plugin.Creator): def _create_render_settings(self): """Create instance settings.""" - # get pools + # get pools (slave machines of the render farm) pool_names = [] default_priority = 50 @@ -281,7 +242,8 @@ class CreateRender(plugin.Creator): # if 'default' server is not between selected, # use first one for initial list of pools. deadline_url = next(iter(self.deadline_servers.values())) - + # Uses function to get pool machines from the assigned deadline + # url in settings pool_names = self.deadline_module.get_deadline_pools(deadline_url, self.log) maya_submit_dl = self._project_settings.get( @@ -400,102 +362,36 @@ class CreateRender(plugin.Creator): self.log.error("Cannot show login form to Muster") raise Exception("Cannot show login form to Muster") - def _set_default_renderer_settings(self, renderer): - """Set basic settings based on renderer. + def _requests_post(self, *args, **kwargs): + """Wrap request post method. - Args: - renderer (str): Renderer name. + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. """ - prefix = self._image_prefixes[renderer] - prefix = prefix.replace("{aov_separator}", self.aov_separator) - cmds.setAttr(self._image_prefix_nodes[renderer], - prefix, - type="string") + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.post(*args, **kwargs) - asset = get_current_project_asset() + def _requests_get(self, *args, **kwargs): + """Wrap request get method. - if renderer == "arnold": - # set format to exr + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. - cmds.setAttr( - "defaultArnoldDriver.ai_translator", "exr", type="string") - self._set_global_output_settings() - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. 
- if renderer == "vray": - self._set_vray_settings(asset) - if renderer == "redshift": - cmds.setAttr("redshiftOptions.imageFormat", 1) - - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) - - self._set_global_output_settings() - - if renderer == "renderman": - cmds.setAttr("rmanGlobals.imageOutputDir", - "maya//", type="string") - - def _set_vray_settings(self, asset): - # type: (dict) -> None - """Sets important settings for Vray.""" - settings = cmds.ls(type="VRaySettingsNode") - node = settings[0] if settings else cmds.createNode("VRaySettingsNode") - - # set separator - # set it in vray menu - if cmds.optionMenuGrp("vrayRenderElementSeparator", exists=True, - q=True): - items = cmds.optionMenuGrp( - "vrayRenderElementSeparator", ill=True, query=True) - - separators = [cmds.menuItem(i, label=True, query=True) for i in items] # noqa: E501 - try: - sep_idx = separators.index(self.aov_separator) - except ValueError: - raise CreatorError( - "AOV character {} not in {}".format( - self.aov_separator, separators)) - - cmds.optionMenuGrp( - "vrayRenderElementSeparator", sl=sep_idx + 1, edit=True) - cmds.setAttr( - "{}.fileNameRenderElementSeparator".format(node), - self.aov_separator, - type="string" - ) - # set format to exr - cmds.setAttr( - "{}.imageFormatStr".format(node), "exr", type="string") - - # animType - cmds.setAttr( - "{}.animType".format(node), 1) - - # resolution - cmds.setAttr( - "{}.width".format(node), - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "{}.height".format(node), - asset["data"].get("resolutionHeight")) - - @staticmethod - def _set_global_output_settings(): - # enable animation - cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) - cmds.setAttr("defaultRenderGlobals.animation", 1) - cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) - cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index ec583bcce7..157be5717b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -551,7 +551,9 @@ class CollectLook(pyblish.api.InstancePlugin): if cmds.getAttr(attribute, type=True) == "message": continue node_attributes[attr] = cmds.getAttr(attribute) - + # Only include if there are any properties we care about + if not node_attributes: + continue attributes.append({"name": node, "uuid": lib.get_id(node), "attributes": node_attributes}) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 8b911a867d..e132cffe53 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -72,7 +72,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): def process(self, context): """Entry point to collector.""" render_instance = None - deadline_url = None for instance in context: if "rendering" in instance.data["families"]: @@ -96,23 +95,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] - deadline_settings = ( - context.data - ["system_settings"] - ["modules"] - ["deadline"] 
- ) - - if deadline_settings["enabled"]: - deadline_url = render_instance.data.get("deadlineUrl") - self._rs = renderSetup.instance() - current_layer = self._rs.getVisibleRenderLayer() + # Retrieve render setup layers + rs = renderSetup.instance() maya_render_layers = { - layer.name(): layer for layer in self._rs.getRenderLayers() + layer.name(): layer for layer in rs.getRenderLayers() } - self.maya_layers = maya_render_layers - for layer in collected_render_layers: try: if layer.startswith("LAYER_"): @@ -147,49 +135,34 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.warning(msg) continue - # test if there are sets (subsets) to attach render to + # detect if there are sets (subsets) to attach render to sets = cmds.sets(layer, query=True) or [] attach_to = [] - if sets: - for s in sets: - if "family" not in cmds.listAttr(s): - continue + for s in sets: + if not cmds.attributeQuery("family", node=s, exists=True): + continue - attach_to.append( - { - "version": None, # we need integrator for that - "subset": s, - "family": cmds.getAttr("{}.family".format(s)), - } - ) - self.log.info(" -> attach render to: {}".format(s)) + attach_to.append( + { + "version": None, # we need integrator for that + "subset": s, + "family": cmds.getAttr("{}.family".format(s)), + } + ) + self.log.info(" -> attach render to: {}".format(s)) layer_name = "rs_{}".format(expected_layer_name) # collect all frames we are expecting to be rendered - renderer = cmds.getAttr( - "defaultRenderGlobals.currentRenderer" - ).lower() + renderer = self.get_render_attribute("currentRenderer", + layer=layer_name) # handle various renderman names if renderer.startswith("renderman"): renderer = "renderman" - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - - render_instance.data["aovSeparator"] = aov_separator - # return all expected files for all cameras and aovs in given # frame range - layer_render_products = get_layer_render_products( - layer_name, render_instance) + layer_render_products = get_layer_render_products(layer_name) render_products = layer_render_products.layer_data.products assert render_products, "no render products generated" exp_files = [] @@ -226,13 +199,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): ) # append full path - full_exp_files = [] aov_dict = {} default_render_file = context.data.get('project_settings')\ .get('maya')\ .get('create')\ .get('CreateRender')\ - .get('default_render_image_folder') + .get('default_render_image_folder') or "" # replace relative paths with absolute. Render products are # returned as list of dictionaries. 
publish_meta_path = None @@ -246,6 +218,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): full_paths.append(full_path) publish_meta_path = os.path.dirname(full_path) aov_dict[aov_first_key] = full_paths + full_exp_files = [aov_dict] frame_start_render = int(self.get_render_attribute( "startFrame", layer=layer_name)) @@ -269,8 +242,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): frame_start_handle = frame_start_render frame_end_handle = frame_end_render - full_exp_files.append(aov_dict) - # find common path to store metadata # so if image prefix is branching to many directories # metadata file will be located in top-most common @@ -299,16 +270,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - data = { "subset": expected_layer_name, "attachTo": attach_to, @@ -357,11 +318,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "useReferencedAovs": render_instance.data.get( "useReferencedAovs") or render_instance.data.get( "vrayUseReferencedAovs") or False, - "aovSeparator": aov_separator + "aovSeparator": layer_render_products.layer_data.aov_separator # noqa: E501 } - if deadline_url: - data["deadlineUrl"] = deadline_url + # Collect Deadline url if Deadline module is enabled + deadline_settings = ( + context.data["system_settings"]["modules"]["deadline"] + ) + if deadline_settings["enabled"]: + data["deadlineUrl"] = render_instance.data.get("deadlineUrl") if self.sync_workfile_version: data["version"] = context.data["version"] @@ -370,19 +335,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): if instance.data['family'] == "workfile": instance.data["version"] = context.data["version"] - # Apply each user defined attribute as data - for attr in cmds.listAttr(layer, userDefined=True) or list(): - try: - value = cmds.getAttr("{}.{}".format(layer, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. - value = None - - data[attr] = value - # handle standalone renderers if render_instance.data.get("vrayScene") is True: data["families"].append("vrayscene_render") @@ -490,10 +442,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): return pool_a, pool_b - def _get_overrides(self, layer): - rset = self.maya_layers[layer].renderSettingsCollectionInstance() - return rset.getOverrides() - @staticmethod def get_render_attribute(attr, layer): """Get attribute from render options. diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index d35b529c76..8be0c7aae5 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -27,6 +27,29 @@ def escape_space(path): return '"{}"'.format(path) if " " in path else path +def get_ocio_config_path(profile_folder): + """Path to OpenPype vendorized OCIO. + + Vendorized OCIO config file path is grabbed from the specific path + hierarchy specified below. + + "{OPENPYPE_ROOT}/vendor/OpenColorIO-Configs/{profile_folder}/config.ocio" + Args: + profile_folder (str): Name of folder to grab config file from. + + Returns: + str: Path to vendorized config file. 
+ """ + return os.path.join( + os.environ["OPENPYPE_ROOT"], + "vendor", + "configs", + "OpenColorIO-Configs", + profile_folder, + "config.ocio" + ) + + def find_paths_by_hash(texture_hash): """Find the texture hash key in the dictionary. @@ -79,10 +102,11 @@ def maketx(source, destination, *args): # use oiio-optimized settings for tile-size, planarconfig, metadata "--oiio", "--filter lanczos3", + escape_space(source) ] cmd.extend(args) - cmd.extend(["-o", escape_space(destination), escape_space(source)]) + cmd.extend(["-o", escape_space(destination)]) cmd = " ".join(cmd) @@ -405,7 +429,19 @@ class ExtractLook(openpype.api.Extractor): # node doesn't have color space attribute color_space = "Raw" else: - if files_metadata[source]["color_space"] == "Raw": + # get the resolved files + metadata = files_metadata.get(source) + # if the files are unresolved from `source` + # assume color space from the first file of + # the resource + if not metadata: + first_file = next(iter(resource.get( + "files", [])), None) + if not first_file: + continue + first_filepath = os.path.normpath(first_file) + metadata = files_metadata[first_filepath] + if metadata["color_space"] == "Raw": # set color space to raw if we linearized it color_space = "Raw" # Remap file node filename to destination @@ -493,6 +529,8 @@ class ExtractLook(openpype.api.Extractor): else: colorconvert = "" + config_path = get_ocio_config_path("nuke-default") + color_config = "--colorconfig {0}".format(config_path) # Ensure folder exists if not os.path.exists(os.path.dirname(converted)): os.makedirs(os.path.dirname(converted)) @@ -502,10 +540,11 @@ class ExtractLook(openpype.api.Extractor): filepath, converted, # Include `source-hash` as string metadata - "-sattrib", + "--sattrib", "sourceHash", escape_space(texture_hash), colorconvert, + color_config ) return converted, COPY, texture_hash diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 233a0b60c2..54ef09e060 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -128,8 +128,10 @@ class ExtractPlayblast(openpype.api.Extractor): # Update preset with current panel setting # if override_viewport_options is turned off if not override_viewport_options: + panel = cmds.getPanel(with_focus=True) panel_preset = capture.parse_active_view() preset.update(panel_preset) + cmds.setFocus(panel) path = capture.capture(**preset) diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 4f28aa167c..01980578cf 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -100,6 +100,13 @@ class ExtractThumbnail(openpype.api.Extractor): # camera. 
if preset.pop("isolate_view", False) and instance.data.get("isolate"): preset["isolate"] = instance.data["setMembers"] + + # Show or Hide Image Plane + image_plane = instance.data.get("imagePlane", True) + if "viewport_options" in preset: + preset["viewport_options"]["imagePlane"] = image_plane + else: + preset["viewport_options"] = {"imagePlane": image_plane} with lib.maintained_time(): # Force viewer to False in call to capture because we have our own @@ -110,14 +117,17 @@ class ExtractThumbnail(openpype.api.Extractor): # Update preset with current panel setting # if override_viewport_options is turned off if not override_viewport_options: + panel = cmds.getPanel(with_focus=True) panel_preset = capture.parse_active_view() preset.update(panel_preset) + cmds.setFocus(panel) path = capture.capture(**preset) playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) + self.log.info("file list {}".format(thumbnail)) if "representations" not in instance.data: diff --git a/openpype/hosts/maya/plugins/publish/validate_look_contents.py b/openpype/hosts/maya/plugins/publish/validate_look_contents.py index 443a0ad719..b1e1d5416b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_contents.py @@ -78,14 +78,13 @@ class ValidateLookContents(pyblish.api.InstancePlugin): # Check if attributes are on a node with an ID, crucial for rebuild! for attr_changes in lookdata["attributes"]: - if not attr_changes["uuid"]: + if not attr_changes["uuid"] and not attr_changes["attributes"]: cls.log.error("Node '%s' has no cbId, please set the " "attributes to its children if it has any" % attr_changes["name"]) invalid.add(instance.name) return list(invalid) - @classmethod def validate_looks(cls, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/openpype/hosts/maya/plugins/publish/validate_model_name.py index 50acf2b8b7..02107d5732 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_name.py @@ -10,7 +10,7 @@ from openpype.pipeline import legacy_io import openpype.hosts.maya.api.action from openpype.hosts.maya.api.shader_definition_editor import ( DEFINITION_FILENAME) -from openpype.lib.mongo import OpenPypeMongoConnection +from openpype.client.mongo import OpenPypeMongoConnection import gridfs diff --git a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py index e6c6ef6c9e..35b87fd0ab 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py @@ -1,20 +1,11 @@ import re import pyblish.api -import openpype.api -import openpype.hosts.maya.api.action - from maya import cmds - -ImagePrefixes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'defaultRenderGlobals.imageFilePrefix', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', -} +import openpype.api +import openpype.hosts.maya.api.action +from openpype.hosts.maya.api.render_settings import RenderSettings class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): @@ -47,7 +38,11 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): # handle various renderman names if 
renderer.startswith('renderman'): renderer = 'renderman' - file_prefix = cmds.getAttr(ImagePrefixes[renderer]) + + file_prefix = cmds.getAttr( + RenderSettings.get_image_prefix_attr(renderer) + ) + if len(cameras) > 1: if re.search(cls.R_CAMERA_TOKEN, file_prefix): diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index a806680caa..6bcc95fcfc 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -21,9 +21,7 @@ from openpype.client import ( ) from openpype.api import ( Logger, - BuildWorkfile, get_version_from_path, - get_workdir_data, get_current_project_settings, ) from openpype.tools.utils import host_tools @@ -34,12 +32,17 @@ from openpype.settings import ( get_anatomy_settings, ) from openpype.modules import ModulesManager +from openpype.pipeline.template_data import get_template_data_with_names from openpype.pipeline import ( discover_legacy_creator_plugins, legacy_io, Anatomy, ) -from openpype.pipeline.context_tools import get_current_project_asset +from openpype.pipeline.context_tools import ( + get_current_project_asset, + get_custom_workfile_template_from_session +) +from openpype.pipeline.workfile import BuildWorkfile from . import gizmo_menu @@ -905,6 +908,27 @@ def check_subsetname_exists(nodes, subset_name): if subset_name in read_avalon_data(n).get("subset", "")), False) + +def get_render_path(node): + ''' Generate Render path from presets regarding avalon knob data + ''' + avalon_knob_data = read_avalon_data(node) + + nuke_imageio_writes = get_imageio_node_setting( + node_class=avalon_knob_data["families"], + plugin_name=avalon_knob_data["creator"], + subset=avalon_knob_data["subset"] + ) + + data = { + "avalon": avalon_knob_data, + "nuke_imageio_writes": nuke_imageio_writes + } + + anatomy_filled = format_anatomy(data) + return anatomy_filled["render"]["path"].replace("\\", "/") + + def format_anatomy(data): ''' Helping function for formatting of anatomy paths @@ -942,12 +966,11 @@ def format_anatomy(data): data["version"] = get_version_from_path(file) project_name = anatomy.project_name - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, data["avalon"]["asset"]) + asset_name = data["avalon"]["asset"] task_name = os.environ["AVALON_TASK"] host_name = os.environ["AVALON_APP"] - context_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name + context_data = get_template_data_with_names( + project_name, asset_name, task_name, host_name ) data.update(context_data) data.update({ @@ -1105,10 +1128,8 @@ def create_write_node( if knob["name"] == "file_type": representation = knob["value"] - host_name = os.environ.get("AVALON_APP") try: data.update({ - "app": host_name, "imageio_writes": imageio_writes, "representation": representation, }) @@ -1909,7 +1930,7 @@ class WorkfileSettings(object): families.append(avalon_knob_data.get("families")) nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) @@ -2432,15 +2453,12 @@ def _launch_workfile_app(): def process_workfile_builder(): - from openpype.lib import ( - env_value_to_bool, - get_custom_workfile_template - ) # to avoid looping of the callback, remove it! 
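The validator above now resolves the image prefix attribute through `RenderSettings.get_image_prefix_attr` instead of a module-level dictionary. The removed mapping shows what that lookup has to return per renderer (V-Ray being the only one with its own settings node); a sketch of an equivalent resolver, not the actual `RenderSettings` implementation:

```python
# mapping recovered from the removed ImagePrefixes dictionary
IMAGE_PREFIX_ATTRS = {
    "mentalray": "defaultRenderGlobals.imageFilePrefix",
    "vray": "vraySettings.fileNamePrefix",
    "arnold": "defaultRenderGlobals.imageFilePrefix",
    "renderman": "defaultRenderGlobals.imageFilePrefix",
    "redshift": "defaultRenderGlobals.imageFilePrefix",
    "mayahardware2": "defaultRenderGlobals.imageFilePrefix",
}

def get_image_prefix_attr(renderer):
    # sketch only; the real method lives on RenderSettings
    return IMAGE_PREFIX_ATTRS.get(
        renderer, "defaultRenderGlobals.imageFilePrefix")
```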
nuke.removeOnCreate(process_workfile_builder, nodeClass="Root") # get state from settings - workfile_builder = get_current_project_settings()["nuke"].get( + project_settings = get_current_project_settings() + workfile_builder = project_settings["nuke"].get( "workfile_builder", {}) # get all imortant settings @@ -2450,7 +2468,6 @@ def process_workfile_builder(): # get settings createfv_on = workfile_builder.get("create_first_version") or None - custom_templates = workfile_builder.get("custom_templates") or None builder_on = workfile_builder.get("builder_on_start") or None last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE") @@ -2458,8 +2475,8 @@ def process_workfile_builder(): # generate first version in file not existing and feature is enabled if createfv_on and not os.path.exists(last_workfile_path): # get custom template path if any - custom_template_path = get_custom_workfile_template( - custom_templates + custom_template_path = get_custom_workfile_template_from_session( + project_settings=project_settings ) # if custom template is defined diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index 0afc56d2f7..c1cd8f771a 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -9,7 +9,6 @@ import pyblish.api import openpype from openpype.api import ( Logger, - BuildWorkfile, get_current_project_settings ) from openpype.lib import register_event_callback @@ -22,6 +21,7 @@ from openpype.pipeline import ( deregister_inventory_action_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.workfile import BuildWorkfile from openpype.tools.utils import host_tools from .command import viewer_update_and_undo_stop diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index 925cab0bef..37ce03dc55 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -181,8 +181,6 @@ class ExporterReview(object): # get first and last frame self.first_frame = min(self.collection.indexes) self.last_frame = max(self.collection.indexes) - if "slate" in self.instance.data["families"]: - self.first_frame += 1 else: self.fname = os.path.basename(self.path_in) self.fhead = os.path.splitext(self.fname)[0] + "." 
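The `ExporterReview` hunk above drops the ±1 slate-frame juggling; in the following hunks the collector flags the instance with `instance.data["slate"] = True` and the extractors re-pad `frameStart` to the width of the sequence's last frame instead. The padding idiom the render-local and slate extractors now share, shown in isolation:

```python
def padded_frame(frame, last_frame):
    # zero-pad to the width of the last frame number, e.g. 997 -> "0997"
    return "{{:0>{}}}".format(len(str(last_frame))).format(frame)

assert padded_frame(997, 1125) == "0997"
assert padded_frame(1001, 1125) == "1001"
```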
diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py index 4257ed3131..bfe32d8fd1 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py +++ b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py @@ -33,6 +33,7 @@ class CollectSlate(pyblish.api.InstancePlugin): if slate_node: instance.data["slateNode"] = slate_node + instance.data["slate"] = True instance.data["families"].append("slate") instance.data["versionData"]["families"].append("slate") self.log.info( diff --git a/openpype/hosts/nuke/plugins/publish/extract_render_local.py b/openpype/hosts/nuke/plugins/publish/extract_render_local.py index 1595fe03fb..8879f0c999 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_render_local.py +++ b/openpype/hosts/nuke/plugins/publish/extract_render_local.py @@ -31,10 +31,6 @@ class NukeRenderLocal(openpype.api.Extractor): first_frame = instance.data.get("frameStartHandle", None) - # exception for slate workflow - if "slate" in families: - first_frame -= 1 - last_frame = instance.data.get("frameEndHandle", None) node_subset_name = instance.data.get("name", None) @@ -68,10 +64,6 @@ class NukeRenderLocal(openpype.api.Extractor): int(last_frame) ) - # exception for slate workflow - if "slate" in families: - first_frame += 1 - ext = node["file_type"].value() if "representations" not in instance.data: @@ -88,8 +80,11 @@ class NukeRenderLocal(openpype.api.Extractor): repre = { 'name': ext, 'ext': ext, - 'frameStart': "%0{}d".format( - len(str(last_frame))) % first_frame, + 'frameStart': ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(first_frame) + ), 'files': filenames, "stagingDir": out_dir } @@ -105,13 +100,16 @@ class NukeRenderLocal(openpype.api.Extractor): instance.data['family'] = 'render' families.remove('render.local') families.insert(0, "render2d") + instance.data["anatomyData"]["family"] = "render" elif "prerender.local" in families: instance.data['family'] = 'prerender' families.remove('prerender.local') families.insert(0, "prerender") + instance.data["anatomyData"]["family"] = "prerender" elif "still.local" in families: instance.data['family'] = 'image' families.remove('still.local') + instance.data["anatomyData"]["family"] = "image" instance.data["families"] = families collections, remainder = clique.assemble(filenames) @@ -123,4 +121,4 @@ class NukeRenderLocal(openpype.api.Extractor): self.log.info('Finished render') - self.log.debug("instance extracted: {}".format(instance.data)) + self.log.debug("_ instance.data: {}".format(instance.data)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index 99ade4cf9b..b5cad143db 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -13,6 +13,7 @@ from openpype.hosts.nuke.api import ( get_view_process_node ) + class ExtractSlateFrame(openpype.api.Extractor): """Extracts movie and thumbnail with baked in luts @@ -236,6 +237,7 @@ class ExtractSlateFrame(openpype.api.Extractor): def _render_slate_to_sequence(self, instance): # set slate frame first_frame = instance.data["frameStartHandle"] + last_frame = instance.data["frameEndHandle"] slate_first_frame = first_frame - 1 # render slate as sequence frame @@ -284,6 +286,13 @@ class ExtractSlateFrame(openpype.api.Extractor): matching_repre["files"] = [first_filename, slate_filename] elif slate_filename not in 
matching_repre["files"]: matching_repre["files"].insert(0, slate_filename) + matching_repre["frameStart"] = ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(slate_first_frame) + ) + self.log.debug( + "__ matching_repre: {}".format(pformat(matching_repre))) self.log.warning("Added slate frame to representation files") diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 4b3b70fa12..b396056eb9 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -50,7 +50,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # establish families family = avalon_knob_data["family"] families_ak = avalon_knob_data.get("families", []) - families = list() + families = [] # except disabled nodes but exclude backdrops in test if ("nukenodes" not in family) and (node["disable"].value()): @@ -94,6 +94,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # Farm rendering self.log.info("flagged for farm render") instance.data["transfer"] = False + instance.data["farm"] = True families.append("{}.farm".format(family)) family = families_ak.lower() @@ -110,10 +111,10 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ families: `{}`".format(families)) # Get format - format = root['format'].value() - resolution_width = format.width() - resolution_height = format.height() - pixel_aspect = format.pixelAspect() + format_ = root['format'].value() + resolution_width = format_.width() + resolution_height = format_.height() + pixel_aspect = format_.pixelAspect() # get publish knob value if "publish" not in node.knobs(): @@ -124,8 +125,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ _families_test: `{}`".format(_families_test)) for family_test in _families_test: if family_test in self.sync_workfile_version_on_families: - self.log.debug("Syncing version with workfile for '{}'" - .format(family_test)) + self.log.debug( + "Syncing version with workfile for '{}'".format( + family_test + ) + ) # get version to instance for integration instance.data['version'] = instance.context.data['version'] diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index a97f34b370..e37cc8a80a 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -144,8 +144,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): self.log.debug("colorspace: `{}`".format(colorspace)) version_data = { - "families": [f.replace(".local", "").replace(".farm", "") - for f in _families_test if "write" not in f], + "families": [ + _f.replace(".local", "").replace(".farm", "") + for _f in _families_test if "write" != _f + ], "colorspace": colorspace } diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index 20a6e3169f..ee150d1808 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,6 +1,5 @@ import os from Qt import QtWidgets -from bson.objectid import ObjectId import pyblish.api @@ -13,8 +12,8 @@ from openpype.pipeline import ( deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, - registered_host, ) +from openpype.pipeline.load import any_outdated_containers import openpype.hosts.photoshop from . 
import lib @@ -30,7 +29,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") def check_inventory(): - if not lib.any_outdated(): + if not any_outdated_containers(): return # Warn about outdated containers. diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index f15068b031..2cfbfa8778 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,3 +1,5 @@ +import re + from openpype.hosts.photoshop import api from openpype.lib import BoolDef from openpype.pipeline import ( @@ -5,6 +7,8 @@ from openpype.pipeline import ( CreatedInstance, legacy_io ) +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class ImageCreator(Creator): @@ -38,17 +42,24 @@ class ImageCreator(Creator): top_level_selected_items = stub.get_selected_layers() if pre_create_data.get("use_selection"): only_single_item_selected = len(top_level_selected_items) == 1 - for selected_item in top_level_selected_items: - if ( - only_single_item_selected or - pre_create_data.get("create_multiple")): + if ( + only_single_item_selected or + pre_create_data.get("create_multiple")): + for selected_item in top_level_selected_items: if selected_item.group: groups_to_create.append(selected_item) else: top_layers_to_wrap.append(selected_item) - else: - group = stub.group_selected_layers(subset_name_from_ui) - groups_to_create.append(group) + else: + group = stub.group_selected_layers(subset_name_from_ui) + groups_to_create.append(group) + else: + stub.select_layers(stub.get_layers()) + try: + group = stub.group_selected_layers(subset_name_from_ui) + except: + raise ValueError("Cannot group locked Bakcground layer!") + groups_to_create.append(group) if not groups_to_create and not top_layers_to_wrap: group = stub.create_group(subset_name_from_ui) @@ -60,6 +71,7 @@ class ImageCreator(Creator): group = stub.group_selected_layers(layer.name) groups_to_create.append(group) + layer_name = '' creating_multiple_groups = len(groups_to_create) > 1 for group in groups_to_create: subset_name = subset_name_from_ui # reset to name from creator UI @@ -67,8 +79,16 @@ class ImageCreator(Creator): created_group_name = self._clean_highlights(stub, group.name) if creating_multiple_groups: - # concatenate with layer name to differentiate subsets - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill = prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -143,3 +163,6 @@ class ImageCreator(Creator): def _clean_highlights(self, stub, item): return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, '') + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py index 9736471a26..2792a775e0 100644 --- a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py @@ -1,7 +1,12 @@ +import re + from Qt import QtWidgets from openpype.pipeline import create from openpype.hosts.photoshop import api as photoshop +from 
openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS + class CreateImage(create.LegacyCreator): """Image folder for publish.""" @@ -75,6 +80,7 @@ class CreateImage(create.LegacyCreator): groups.append(group) creator_subset_name = self.data["subset"] + layer_name = '' for group in groups: long_names = [] group.name = group.name.replace(stub.PUBLISH_ICON, ''). \ @@ -82,7 +88,16 @@ class CreateImage(create.LegacyCreator): subset_name = creator_subset_name if len(groups) > 1: - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill = prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -98,3 +113,7 @@ class CreateImage(create.LegacyCreator): # reusing existing group, need to rename afterwards if not create_group: stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) + + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py index b53f4e8198..8106d6ff16 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py @@ -4,6 +4,7 @@ import pyblish.api import openpype.api from openpype.pipeline import PublishXmlValidationError from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class ValidateNamingRepair(pyblish.api.Action): @@ -50,6 +51,13 @@ class ValidateNamingRepair(pyblish.api.Action): subset_name = re.sub(invalid_chars, replace_char, instance.data["subset"]) + # format from Tool Creator + subset_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + subset_name + ) + layer_meta["subset"] = subset_name stub.imprint(instance_id, layer_meta) diff --git a/openpype/hosts/resolve/plugins/publish/precollect_instances.py b/openpype/hosts/resolve/plugins/publish/precollect_instances.py index 8f1a13a4e5..ee51998c0d 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_instances.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_instances.py @@ -70,7 +70,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin): "publish": resolve.get_publish_attribute(timeline_item), "fps": context.data["fps"], "handleStart": handle_start, - "handleEnd": handle_end + "handleEnd": handle_end, + "newAssetPublishing": True }) # otio clip data diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py index 3237fbbe12..75c260bad7 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py @@ -170,7 +170,8 @@ class CollectInstances(pyblish.api.InstancePlugin): "frameStart": frame_start, "frameEnd": frame_end, "frameStartH": frame_start - handle_start, - "frameEndH": frame_end + handle_end + "frameEndH": frame_end + handle_end, + "newAssetPublishing": True } for data_key in instance_data_filter: diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py index 
285fe8f8d6..1e05f336fb 100644 --- a/openpype/hosts/testhost/api/pipeline.py +++ b/openpype/hosts/testhost/api/pipeline.py @@ -1,6 +1,6 @@ import os import json -from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_name class HostContext: @@ -17,10 +17,10 @@ class HostContext: if not asset_name: return project_name - asset_doc = legacy_io.find_one( - {"type": "asset", "name": asset_name}, - {"data.parents": 1} + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.parents"] ) + parents = asset_doc.get("data", {}).get("parents") or [] hierarchy = [project_name] diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py index 06b95375b1..8d59fc3242 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/testhost/plugins/create/auto_creator.py @@ -1,10 +1,11 @@ from openpype.lib import NumberDef -from openpype.hosts.testhost.api import pipeline +from openpype.client import get_asset_by_name from openpype.pipeline import ( legacy_io, AutoCreator, CreatedInstance, ) +from openpype.hosts.testhost.api import pipeline class MyAutoCreator(AutoCreator): @@ -44,10 +45,7 @@ class MyAutoCreator(AutoCreator): host_name = legacy_io.Session["AVALON_APP"] if existing_instance is None: - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) @@ -69,10 +67,7 @@ class MyAutoCreator(AutoCreator): existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py new file mode 100644 index 0000000000..7c392ef508 --- /dev/null +++ b/openpype/hosts/traypublisher/api/editorial.py @@ -0,0 +1,331 @@ +import re +from copy import deepcopy + +from openpype.client import get_asset_by_id +from openpype.pipeline.create import CreatorError + + +class ShotMetadataSolver: + """ Solving hierarchical metadata + + Used during editorial publishing. Works with input + clip name and settings defining a Python-formattable + template. Settings also define searching patterns + and their token keys used for formatting in templates.
+ """ + + NO_DECOR_PATTERN = re.compile(r"\{([a-z]*?)\}") + + # presets + clip_name_tokenizer = None + shot_rename = True + shot_hierarchy = None + shot_add_tasks = None + + def __init__( + self, + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + logger + ): + self.clip_name_tokenizer = clip_name_tokenizer + self.shot_rename = shot_rename + self.shot_hierarchy = shot_hierarchy + self.shot_add_tasks = shot_add_tasks + self.log = logger + + def _rename_template(self, data): + """Shot renaming function + + Args: + data (dict): formatting data + + Raises: + CreatorError: If missing keys + + Returns: + str: formatted new name + """ + shot_rename_template = self.shot_rename[ + "shot_rename_template"] + try: + # format to new shot name + return shot_rename_template.format(**data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n\n" + f"From template string {shot_rename_template} > " + f"`{_E}` has no equivalent in \n" + f"{list(data.keys())} input formatting keys!" + )) + + def _generate_tokens(self, clip_name, source_data): + """Token generator + + Settings define token pairs of key and regex expression. + + Args: + clip_name (str): name of clip in editorial + source_data (dict): data for formatting + + Raises: + CreatorError: if missing key + + Returns: + dict: updated source_data + """ + output_data = deepcopy(source_data["anatomy_data"]) + output_data["clip_name"] = clip_name + + if not self.clip_name_tokenizer: + return output_data + + parent_name = source_data["selected_asset_doc"]["name"] + + search_text = parent_name + clip_name + + for token_key, pattern in self.clip_name_tokenizer.items(): + p = re.compile(pattern) + match = p.findall(search_text) + if not match: + raise CreatorError(( + "Make sure the regex expression works with your data: \n\n" + f"'{token_key}' with regex '{pattern}' in your settings\n" + "can't find any match in your clip name " + f"'{search_text}'!\n\nLook to: " + "'project_settings/traypublisher/editorial_creators" + "/editorial_simple/clip_name_tokenizer'\n" + "in your project settings..." + )) + + # QUESTION: how to refactor `match[-1]` to some better way? + output_data[token_key] = match[-1] + + return output_data + + def _create_parents_from_settings(self, parents, data): + """Formatting parent components.
+ + Args: + parents (list): list of dict parent components + data (dict): formatting data + + Raises: + CreatorError: missing formatting key + CreatorError: missing token key + KeyError: missing parent token + + Returns: + list: list of dict of parent components + """ + # fill the parents parts from presets + shot_hierarchy = deepcopy(self.shot_hierarchy) + hierarchy_parents = shot_hierarchy["parents"] + + # fill parent keys data template from anatomy data + try: + _parent_tokens_formatting_data = { + parent_token["name"]: parent_token["value"].format(**data) + for parent_token in hierarchy_parents + } + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n" + f"`{_E}` has no equivalent in \n{list(data.keys())}" + )) + + _parent_tokens_type = { + parent_token["name"]: parent_token["type"] + for parent_token in hierarchy_parents + } + for _index, _parent in enumerate( + shot_hierarchy["parents_path"].split("/") + ): + # format parent token with value which is formatted + try: + parent_name = _parent.format( + **_parent_tokens_formatting_data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n\n" + f"`{_E}` from template string " + f"{shot_hierarchy['parents_path']}, " + f" has no equivalent in \n" + f"{list(_parent_tokens_formatting_data.keys())} parents" + )) + + parent_token_name = ( + self.NO_DECOR_PATTERN.findall(_parent).pop()) + + if not parent_token_name: + raise KeyError( + f"Parent token is not found in: `{_parent}`") + + # find parent type + parent_token_type = _parent_tokens_type[parent_token_name] + + # in case selected context is set to the same asset + if ( + _index == 0 + and parents[-1]["entity_name"] == parent_name + ): + self.log.debug(f" skipping: {parent_name}") + continue + + # in case first parent is project then start parents from start + if ( + _index == 0 + and parent_token_type == "Project" + ): + self.log.debug("rebuilding parents from scratch") + project_parent = parents[0] + parents = [project_parent] + continue + + parents.append({ + "entity_type": parent_token_type, + "entity_name": parent_name + }) + + self.log.debug(f"__ parents: {parents}") + + return parents + + def _create_hierarchy_path(self, parents): + """Converting hierarchy path from parents + + Args: + parents (list): list of dict parent components + + Returns: + str: hierarchy path + """ + return "/".join( + [ + p["entity_name"] for p in parents + if p["entity_type"] != "Project" + ] + ) if parents else "" + + def _get_parents_from_selected_asset( + self, + asset_doc, + project_doc + ): + """Returning parents from context on selected asset. + + The context is defined in the Traypublisher project tree.
+ + Args: + asset_doc (db obj): selected asset doc + project_doc (db obj): actual project doc + + Returns: + list: list of dict parent components + """ + project_name = project_doc["name"] + visual_hierarchy = [asset_doc] + current_doc = asset_doc + + # looping through all available visual parents; + # if they are not available anymore, then it breaks + while True: + visual_parent_id = current_doc["data"]["visualParent"] + visual_parent = None + if visual_parent_id: + visual_parent = get_asset_by_id(project_name, visual_parent_id) + + if not visual_parent: + visual_hierarchy.append(project_doc) + break + visual_hierarchy.append(visual_parent) + current_doc = visual_parent + + # add current selection context hierarchy + return [ + { + "entity_type": entity["data"]["entityType"], + "entity_name": entity["name"] + } + for entity in reversed(visual_hierarchy) + ] + + def _generate_tasks_from_settings(self, project_doc): + """Convert settings inputs to task data. + + Args: + project_doc (db obj): actual project doc + + Raises: + KeyError: Missing task type in project doc + + Returns: + dict: tasks data + """ + tasks_to_add = {} + + project_tasks = project_doc["config"]["tasks"] + for task_name, task_data in self.shot_add_tasks.items(): + _task_data = deepcopy(task_data) + + # check if task type in project task types + if _task_data["type"] in project_tasks.keys(): + tasks_to_add[task_name] = _task_data + else: + raise KeyError( + "Task type `{}` for `{}` does not" + " exist in `{}`".format( + _task_data["type"], + task_name, + list(project_tasks.keys()) + ) + ) + + return tasks_to_add + + def generate_data(self, clip_name, source_data): + """Metadata generator. + + Converts input data to hierarchy metadata. + + Args: + clip_name (str): clip name + source_data (dict): formatting data + + Returns: + (str, dict): shot name and hierarchy data + """ + self.log.info(f"_ source_data: {source_data}") + + tasks = {} + asset_doc = source_data["selected_asset_doc"] + project_doc = source_data["project_doc"] + + # match clip to shot name at start + shot_name = clip_name + + # parse all tokens and generate formatting data + formatting_data = self._generate_tokens(shot_name, source_data) + + # generate parents from selected asset + parents = self._get_parents_from_selected_asset(asset_doc, project_doc) + + if self.shot_rename["enabled"]: + shot_name = self._rename_template(formatting_data) + self.log.info(f"Renamed shot name: {shot_name}") + + if self.shot_hierarchy["enabled"]: + parents = self._create_parents_from_settings( + parents, formatting_data) + + if self.shot_add_tasks: + tasks = self._generate_tasks_from_settings( + project_doc) + + return shot_name, { + "hierarchy": self._create_hierarchy_path(parents), + "parents": parents, + "tasks": tasks + } diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py index 9b9425855e..a3eead51c8 100644 --- a/openpype/hosts/traypublisher/api/plugin.py +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -1,6 +1,7 @@ from openpype.lib.attribute_definitions import FileDef -from openpype.pipeline import ( +from openpype.pipeline.create import ( Creator, + HiddenCreator, CreatedInstance ) @@ -11,7 +12,6 @@ from .pipeline import ( HostContext, ) - IMAGE_EXTENSIONS = [ ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal", ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits", @@ -35,6 +35,42 @@ VIDEO_EXTENSIONS = [ REVIEW_EXTENSIONS = IMAGE_EXTENSIONS + VIDEO_EXTENSIONS +class
HiddenTrayPublishCreator(HiddenCreator): + host_name = "traypublisher" + + def collect_instances(self): + for instance_data in list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + update_instances(update_list) + + def remove_instances(self, instances): + remove_instances(instances) + for instance in instances: + self._remove_instance_from_context(instance) + + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into "workfile" of traypublisher and also add it + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. + """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + # Add instance to current context + self._add_instance_to_context(new_instance) + + class TrayPublishCreator(Creator): create_allow_context_change = True host_name = "traypublisher" @@ -56,10 +92,6 @@ class TrayPublishCreator(Creator): for instance in instances: self._remove_instance_from_context(instance) - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attrobites - return self.get_instance_attr_defs() - def _store_new_instance(self, new_instance): """Tray publisher specific method to store instance. @@ -81,15 +113,6 @@ class SettingsCreator(TrayPublishCreator): extensions = [] - def collect_instances(self): - for instance_data in list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - def create(self, subset_name, data, pre_create_data): # Pass precreate data to creator attributes data["creator_attributes"] = pre_create_data @@ -120,6 +143,10 @@ class SettingsCreator(TrayPublishCreator): ) ] + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attrobites + return self.get_instance_attr_defs() + @classmethod def from_settings(cls, item_data): identifier = item_data["identifier"] diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py new file mode 100644 index 0000000000..28a115629e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py @@ -0,0 +1,869 @@ +import os +from copy import deepcopy +from pprint import pformat +import opentimelineio as otio +from openpype.client import ( + get_asset_by_name, + get_project +) +from openpype.hosts.traypublisher.api.plugin import ( + TrayPublishCreator, + HiddenTrayPublishCreator +) +from openpype.hosts.traypublisher.api.editorial import ( + ShotMetadataSolver +) + +from openpype.pipeline import CreatedInstance + +from openpype.lib import ( + get_ffprobe_data, + convert_ffprobe_fps_value, + + FileDef, + TextDef, + NumberDef, + EnumDef, + BoolDef, + UISeparatorDef, + UILabelDef +) + + +CLIP_ATTR_DEFS = [ + EnumDef( + "fps", + items={ + "from_selection": "From selection", + 23.997: "23.976", + 24: "24", + 25: "25", + 29.97: "29.97", + 30: "30" + }, + label="FPS" + ), + NumberDef( + "workfile_start_frame", + default=1001, + label="Workfile start frame" + ), + NumberDef( + "handle_start", + default=0, + label="Handle start" + ), + NumberDef( + "handle_end", + default=0, 
+ label="Handle end" + ) +] + + +class EditorialClipInstanceCreatorBase(HiddenTrayPublishCreator): + """ Wrapper class for clip family creators + + Args: + HiddenTrayPublishCreator (BaseCreator): hidden supporting class + """ + host_name = "traypublisher" + + def create(self, instance_data, source_data=None): + self.log.info(f"instance_data: {instance_data}") + subset_name = instance_data["subset"] + + # Create new instance + new_instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + self.log.info(f"instance_data: {pformat(new_instance.data)}") + + self._store_new_instance(new_instance) + + return new_instance + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + +class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase): + """ Shot family class + + The shot metadata instance carrier. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_shot" + family = "shot" + label = "Editorial Shot" + + def get_instance_attr_defs(self): + attr_defs = [ + TextDef( + "asset_name", + label="Asset name", + ) + ] + attr_defs.extend(CLIP_ATTR_DEFS) + return attr_defs + + +class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase): + """ Plate family class + + Plate representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_plate" + family = "plate" + label = "Editorial Plate" + + +class EditorialAudioInstanceCreator(EditorialClipInstanceCreatorBase): + """ Audio family class + + Audio representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_audio" + family = "audio" + label = "Editorial Audio" + + +class EditorialReviewInstanceCreator(EditorialClipInstanceCreatorBase): + """ Review family class + + Review representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_review" + family = "review" + label = "Editorial Review" + + +class EditorialSimpleCreator(TrayPublishCreator): + """ Editorial creator class + + Simple workflow creator. This creator dissects the input + video file into clip chunks and then converts each to + the format defined in Settings for each subset preset. + + Args: + TrayPublishCreator (Creator): Tray publisher plugin class + """ + + label = "Editorial Simple" + family = "editorial" + identifier = "editorial_simple" + default_variants = [ + "main" + ] + description = "Editorial files to generate shots." + detailed_description = """ +Supports publishing new shots to a project +or updating already created ones. Publishing will create an OTIO file.
+""" + icon = "fa.file" + + def __init__( + self, project_settings, *args, **kwargs + ): + super(EditorialSimpleCreator, self).__init__( + project_settings, *args, **kwargs + ) + editorial_creators = deepcopy( + project_settings["traypublisher"]["editorial_creators"] + ) + # get this creator settings by identifier + self._creator_settings = editorial_creators.get(self.identifier) + + clip_name_tokenizer = self._creator_settings["clip_name_tokenizer"] + shot_rename = self._creator_settings["shot_rename"] + shot_hierarchy = self._creator_settings["shot_hierarchy"] + shot_add_tasks = self._creator_settings["shot_add_tasks"] + + self._shot_metadata_solver = ShotMetadataSolver( + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + self.log + ) + + # try to set main attributes from settings + if self._creator_settings.get("default_variants"): + self.default_variants = self._creator_settings["default_variants"] + + def create(self, subset_name, instance_data, pre_create_data): + allowed_family_presets = self._get_allowed_family_presets( + pre_create_data) + + clip_instance_properties = { + k: v for k, v in pre_create_data.items() + if k != "sequence_filepath_data" + if k not in [ + i["family"] for i in self._creator_settings["family_presets"] + ] + } + # Create otio editorial instance + asset_name = instance_data["asset"] + asset_doc = get_asset_by_name(self.project_name, asset_name) + + self.log.info(pre_create_data["fps"]) + + if pre_create_data["fps"] == "from_selection": + # get asset doc data attributes + fps = asset_doc["data"]["fps"] + else: + fps = float(pre_create_data["fps"]) + + instance_data.update({ + "fps": fps + }) + + # get path of sequence + sequence_path_data = pre_create_data["sequence_filepath_data"] + media_path_data = pre_create_data["media_filepaths_data"] + + sequence_path = self._get_path_from_file_data(sequence_path_data) + media_path = self._get_path_from_file_data(media_path_data) + + # get otio timeline + otio_timeline = self._create_otio_timeline( + sequence_path, fps) + + # Create all clip instances + clip_instance_properties.update({ + "fps": fps, + "parent_asset_name": asset_name, + "variant": instance_data["variant"] + }) + + # create clip instances + self._get_clip_instances( + otio_timeline, + media_path, + clip_instance_properties, + family_presets=allowed_family_presets + + ) + + # create otio editorial instance + self._create_otio_instance( + subset_name, instance_data, + sequence_path, media_path, + otio_timeline + ) + + def _create_otio_instance( + self, + subset_name, + data, + sequence_path, + media_path, + otio_timeline + ): + """Otio instance creating function + + Args: + subset_name (str): name of subset + data (dict): instnance data + sequence_path (str): path to sequence file + media_path (str): path to media file + otio_timeline (otio.Timeline): otio timeline object + """ + # Pass precreate data to creator attributes + data.update({ + "sequenceFilePath": sequence_path, + "editorialSourcePath": media_path, + "otioTimeline": otio.adapters.write_to_string(otio_timeline) + }) + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._store_new_instance(new_instance) + + def _create_otio_timeline(self, sequence_path, fps): + """Creating otio timeline from sequence path + + Args: + sequence_path (str): path to sequence file + fps (float): frame per second + + Returns: + otio.Timeline: otio timeline object + """ + # get editorial sequence file into otio timeline object + extension = 
os.path.splitext(sequence_path)[1] + + kwargs = {} + if extension == ".edl": + # EDL has no frame rate embedded so needs explicit + # frame rate else 24 is assumed. + kwargs["rate"] = fps + kwargs["ignore_timecode_mismatch"] = True + + self.log.info(f"kwargs: {kwargs}") + return otio.adapters.read_from_file(sequence_path, **kwargs) + + def _get_path_from_file_data(self, file_path_data): + """Converting creator path data to single path string + + Args: + file_path_data (FileDefItem): creator path data inputs + + Raises: + FileExistsError: in case nothing had been set + + Returns: + str: path string + """ + # TODO: just temporarily solving only one media file + if isinstance(file_path_data, list): + file_path_data = file_path_data.pop() + + if len(file_path_data["filenames"]) == 0: + raise FileExistsError( + f"File path was not added: {file_path_data}") + + return os.path.join( + file_path_data["directory"], file_path_data["filenames"][0]) + + def _get_clip_instances( + self, + otio_timeline, + media_path, + instance_data, + family_presets + ): + """Helper function for creating clip instances + + Args: + otio_timeline (otio.Timeline): otio timeline object + media_path (str): media file path string + instance_data (dict): clip instance data + family_presets (list): list of dict settings subset presets + """ + self.asset_name_check = [] + + tracks = otio_timeline.each_child( + descended_from_type=otio.schema.Track + ) + + # media data for audio stream and reference solving + media_data = self._get_media_source_metadata(media_path) + + for track in tracks: + self.log.debug(f"track.name: {track.name}") + try: + track_start_frame = ( + abs(track.source_range.start_time.value) + ) + self.log.debug(f"track_start_frame: {track_start_frame}") + track_start_frame -= self.timeline_frame_start + except AttributeError: + track_start_frame = 0 + + self.log.debug(f"track_start_frame: {track_start_frame}") + + for clip in track.each_child(): + if not self._validate_clip_for_processing(clip): + continue + + # get available frames info to clip data + self._create_otio_reference(clip, media_path, media_data) + + # convert timeline range to source range + self._restore_otio_source_range(clip) + + base_instance_data = self._get_base_instance_data( + clip, + instance_data, + track_start_frame + ) + + parenting_data = { + "instance_label": None, + "instance_id": None + } + self.log.info(( + "Creating subsets from presets: \n" + f"{pformat(family_presets)}" + )) + + for _fpreset in family_presets: + # exclude audio family if no audio stream + if ( + _fpreset["family"] == "audio" + and not media_data.get("audio") + ): + continue + + instance = self._make_subset_instance( + clip, + _fpreset, + deepcopy(base_instance_data), + parenting_data + ) + self.log.debug(f"{pformat(dict(instance.data))}") + + def _restore_otio_source_range(self, otio_clip): + """Restoring source range. + + The otio clip is missing a proper source range, so + we add it from the parent timeline frame range. + + Args: + otio_clip (otio.Clip): otio clip object + """ + otio_clip.source_range = otio_clip.range_in_parent() + + def _create_otio_reference( + self, + otio_clip, + media_path, + media_data + ): + """Creating otio reference at otio clip.
+ + Args: + otio_clip (otio.Clip): otio clip object + media_path (str): media file path string + media_data (dict): media metadata + """ + start_frame = media_data["start_frame"] + frame_duration = media_data["duration"] + fps = media_data["fps"] + + available_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime( + start_frame, fps), + duration=otio.opentime.RationalTime( + frame_duration, fps) + ) + # in case old OTIO or video file create `ExternalReference` + media_reference = otio.schema.ExternalReference( + target_url=media_path, + available_range=available_range + ) + + otio_clip.media_reference = media_reference + + def _get_media_source_metadata(self, path): + """Get all available metadata from file + + Args: + path (str): media file path string + + Raises: + AssertionError: ffprobe couldn't read metadata + + Returns: + dict: media file metadata + """ + return_data = {} + + try: + media_data = get_ffprobe_data( + path, self.log + ) + self.log.debug(f"__ media_data: {pformat(media_data)}") + + # get video stream data + video_stream = media_data["streams"][0] + return_data = { + "video": True, + "start_frame": 0, + "duration": int(video_stream["nb_frames"]), + "fps": float( + convert_ffprobe_fps_value( + video_stream["r_frame_rate"] + ) + ) + } + + # get audio streams data + audio_stream = [ + stream for stream in media_data["streams"] + if stream["codec_type"] == "audio" + ] + + if audio_stream: + return_data["audio"] = True + + except Exception as exc: + raise AssertionError(( + "FFprobe couldn't read information about input file: " + f"\"{path}\". Error message: {exc}" + )) + + return return_data + + def _make_subset_instance( + self, + otio_clip, + preset, + instance_data, + parenting_data + ): + """Making subset instance from input preset + + Args: + otio_clip (otio.Clip): otio clip object + preset (dict): sigle family preset + instance_data (dict): instance data + parenting_data (dict): shot instance parent data + + Returns: + CreatedInstance: creator instance object + """ + family = preset["family"] + label = self._make_subset_naming( + preset, + instance_data + ) + instance_data["label"] = label + + # add file extension filter only if it is not shot family + if family == "shot": + instance_data["otioClip"] = ( + otio.adapters.write_to_string(otio_clip)) + c_instance = self.create_context.creators[ + "editorial_shot"].create( + instance_data) + parenting_data.update({ + "instance_label": label, + "instance_id": c_instance.data["instance_id"] + }) + else: + # add review family if defined + instance_data.update({ + "outputFileType": preset["output_file_type"], + "parent_instance_id": parenting_data["instance_id"], + "creator_attributes": { + "parent_instance": parenting_data["instance_label"], + "add_review_family": preset.get("review") + } + }) + + creator_identifier = f"editorial_{family}" + editorial_clip_creator = self.create_context.creators[ + creator_identifier] + c_instance = editorial_clip_creator.create( + instance_data) + + return c_instance + + def _make_subset_naming( + self, + preset, + instance_data + ): + """ Subset name maker + + Args: + preset (dict): single preset item + instance_data (dict): instance data + + Returns: + str: label string + """ + shot_name = instance_data["shotName"] + variant_name = instance_data["variant"] + family = preset["family"] + + # get variant name from preset or from inharitance + _variant_name = preset.get("variant") or variant_name + + self.log.debug(f"__ family: {family}") + self.log.debug(f"__ preset: 
{preset}") + + # subset name + subset_name = "{}{}".format( + family, _variant_name.capitalize() + ) + label = "{}_{}".format( + shot_name, + subset_name + ) + + instance_data.update({ + "family": family, + "label": label, + "variant": _variant_name, + "subset": subset_name, + }) + + return label + + def _get_base_instance_data( + self, + otio_clip, + instance_data, + track_start_frame, + ): + """ Factoring basic set of instance data. + + Args: + otio_clip (otio.Clip): otio clip object + instance_data (dict): precreate instance data + track_start_frame (int): track start frame + + Returns: + dict: instance data + """ + # get clip instance properties + parent_asset_name = instance_data["parent_asset_name"] + handle_start = instance_data["handle_start"] + handle_end = instance_data["handle_end"] + timeline_offset = instance_data["timeline_offset"] + workfile_start_frame = instance_data["workfile_start_frame"] + fps = instance_data["fps"] + variant_name = instance_data["variant"] + + # basic unique asset name + clip_name = os.path.splitext(otio_clip.name)[0].lower() + project_doc = get_project(self.project_name) + + shot_name, shot_metadata = self._shot_metadata_solver.generate_data( + clip_name, + { + "anatomy_data": { + "project": { + "name": self.project_name, + "code": project_doc["data"]["code"] + }, + "parent": parent_asset_name, + "app": self.host_name + }, + "selected_asset_doc": get_asset_by_name( + self.project_name, parent_asset_name), + "project_doc": project_doc + } + ) + + self._validate_name_uniqueness(shot_name) + + timing_data = self._get_timing_data( + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ) + + # create creator attributes + creator_attributes = { + "asset_name": shot_name, + "Parent hierarchy path": shot_metadata["hierarchy"], + "workfile_start_frame": workfile_start_frame, + "fps": fps, + "handle_start": int(handle_start), + "handle_end": int(handle_end) + } + creator_attributes.update(timing_data) + + # create shared new instance data + base_instance_data = { + "shotName": shot_name, + "variant": variant_name, + + # HACK: just for temporal bug workaround + # TODO: should loockup shot name for update + "asset": parent_asset_name, + "task": "", + + "newAssetPublishing": True, + + # parent time properties + "trackStartFrame": track_start_frame, + "timelineOffset": timeline_offset, + # creator_attributes + "creator_attributes": creator_attributes + } + # add hierarchy shot metadata + base_instance_data.update(shot_metadata) + + return base_instance_data + + def _get_timing_data( + self, + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ): + """Returning available timing data + + Args: + otio_clip (otio.Clip): otio clip object + timeline_offset (int): offset value + track_start_frame (int): starting frame input + workfile_start_frame (int): start frame for shot's workfiles + + Returns: + dict: timing metadata + """ + # frame ranges data + clip_in = otio_clip.range_in_parent().start_time.value + clip_in += track_start_frame + clip_out = otio_clip.range_in_parent().end_time_inclusive().value + clip_out += track_start_frame + self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}") + + # add offset in case there is any + self.log.debug(f"__ timeline_offset: {timeline_offset}") + if timeline_offset: + clip_in += timeline_offset + clip_out += timeline_offset + + clip_duration = otio_clip.duration().value + self.log.info(f"clip duration: {clip_duration}") + + source_in = otio_clip.trimmed_range().start_time.value + 
+
+    def _get_allowed_family_presets(self, pre_create_data):
+        """Filter out allowed family presets.
+
+        Args:
+            pre_create_data (dict): precreate attributes inputs
+
+        Returns:
+            list: list of dicts with preset items
+        """
+        self.log.debug(f"__ pre_create_data: {pre_create_data}")
+        return [
+            {"family": "shot"},
+            *[
+                preset for preset in self._creator_settings["family_presets"]
+                if pre_create_data[preset["family"]]
+            ]
+        ]
+
+    def _validate_clip_for_processing(self, otio_clip):
+        """Validate otio clip attributes.
+
+        Args:
+            otio_clip (otio.Clip): otio clip object
+
+        Returns:
+            bool: True if all conditions pass
+        """
+        if otio_clip.name is None:
+            return False
+
+        if isinstance(otio_clip, otio.schema.Gap):
+            return False
+
+        # skip all generators like black empty
+        if isinstance(
+                otio_clip.media_reference,
+                otio.schema.GeneratorReference):
+            return False
+
+        # Transitions are ignored, because Clips have the full frame
+        # range.
+        if isinstance(otio_clip, otio.schema.Transition):
+            return False
+
+        return True
+
+    def _validate_name_uniqueness(self, name):
+        """Validate name uniqueness.
+
+        In context of other clip names in the sequence file.
+
+        Args:
+            name (str): shot name string
+        """
+        if name not in self.asset_name_check:
+            self.asset_name_check.append(name)
+        else:
+            self.log.warning(
+                f"Duplicate shot name: {name}! "
+                "Please check names in the input sequence files."
+            )
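The preset filtering in `_get_allowed_family_presets` is plain list handling; a self-contained sketch with made-up settings and pre-create values:

    # Sketch of the family-preset filter above; all data is illustrative.
    creator_settings = {
        "family_presets": [
            {"family": "plate"},
            {"family": "review"},
            {"family": "audio"},
        ]
    }
    # booleans come from the BoolDef switchers in pre-create attributes
    pre_create_data = {"plate": True, "review": False, "audio": True}

    allowed = [
        {"family": "shot"},
        *[
            preset for preset in creator_settings["family_presets"]
            if pre_create_data[preset["family"]]
        ]
    ]
    print(allowed)
    # [{'family': 'shot'}, {'family': 'plate'}, {'family': 'audio'}]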
+
+    def get_pre_create_attr_defs(self):
+        """Create pre-create attributes for the creator plugin.
+
+        Returns:
+            list: list of attribute object instances
+        """
+        # Use same attributes as for instance attributes
+        attr_defs = [
+            FileDef(
+                "sequence_filepath_data",
+                folders=False,
+                extensions=[
+                    ".edl",
+                    ".xml",
+                    ".aaf",
+                    ".fcpxml"
+                ],
+                allow_sequences=False,
+                single_item=True,
+                label="Sequence file",
+            ),
+            FileDef(
+                "media_filepaths_data",
+                folders=False,
+                extensions=[
+                    ".mov",
+                    ".mp4",
+                    ".wav"
+                ],
+                allow_sequences=False,
+                single_item=False,
+                label="Media files",
+            ),
+            # TODO: perhaps timecode and fps inputs would be better
+            NumberDef(
+                "timeline_offset",
+                default=0,
+                label="Timeline offset"
+            ),
+            UISeparatorDef(),
+            UILabelDef("Clip instance attributes"),
+            UISeparatorDef()
+        ]
+        # add variant switchers
+        attr_defs.extend(
+            BoolDef(_var["family"], label=_var["family"])
+            for _var in self._creator_settings["family_presets"]
+        )
+        attr_defs.append(UISeparatorDef())
+
+        attr_defs.extend(CLIP_ATTR_DEFS)
+        return attr_defs
diff --git a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py
index baca274ea6..41c1c29bb0 100644
--- a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py
+++ b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py
@@ -1,6 +1,7 @@
 import os
+from openpype.api import get_project_settings, Logger
 
-from openpype.api import get_project_settings
+log = Logger.get_logger(__name__)
 
 
 def initialize():
@@ -13,6 +14,7 @@ def initialize():
     global_variables = globals()
 
     for item in simple_creators:
+
         dynamic_plugin = SettingsCreator.from_settings(item)
         global_variables[dynamic_plugin.__name__] = dynamic_plugin
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py
new file mode 100644
index 0000000000..bdf7c05f3d
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py
@@ -0,0 +1,36 @@
+from pprint import pformat
+import pyblish.api
+
+
+class CollectClipInstance(pyblish.api.InstancePlugin):
+    """Collect clip instances and resolve their parent."""
+
+    label = "Collect Clip Instances"
+    order = pyblish.api.CollectorOrder - 0.081
+
+    hosts = ["traypublisher"]
+    families = ["plate", "review", "audio"]
+
+    def process(self, instance):
+        creator_identifier = instance.data["creator_identifier"]
+        if creator_identifier not in [
+            "editorial_plate",
+            "editorial_audio",
+            "editorial_review"
+        ]:
+            return
+
+        instance.data["families"].append("clip")
+
+        parent_instance_id = instance.data["parent_instance_id"]
+        edit_shared_data = instance.context.data["editorialSharedData"]
+        instance.data.update(
+            edit_shared_data[parent_instance_id]
+        )
+
+        if "editorialSourcePath" in instance.context.data.keys():
+            instance.data["editorialSourcePath"] = (
+                instance.context.data["editorialSourcePath"])
+            instance.data["families"].append("trimming")
+
+        self.log.debug(pformat(instance.data))
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py
new file mode 100644
index 0000000000..e181d0abe5
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py
@@ -0,0 +1,48 @@
+import os
+from pprint import pformat
+import pyblish.api
+import opentimelineio as otio
+
+
+class CollectEditorialInstance(pyblish.api.InstancePlugin):
+    """Collect data for instances created by settings creators."""
+ + label = "Collect Editorial Instances" + order = pyblish.api.CollectorOrder - 0.1 + + hosts = ["traypublisher"] + families = ["editorial"] + + def process(self, instance): + + if "families" not in instance.data: + instance.data["families"] = [] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + fpath = instance.data["sequenceFilePath"] + otio_timeline_string = instance.data.pop("otioTimeline") + otio_timeline = otio.adapters.read_from_string( + otio_timeline_string) + + instance.context.data["otioTimeline"] = otio_timeline + instance.context.data["editorialSourcePath"] = ( + instance.data["editorialSourcePath"]) + + self.log.info(fpath) + + instance.data["stagingDir"] = os.path.dirname(fpath) + + _, ext = os.path.splitext(fpath) + + instance.data["representations"].append({ + "ext": ext[1:], + "name": ext[1:], + "stagingDir": instance.data["stagingDir"], + "files": os.path.basename(fpath) + }) + + self.log.debug("Created Editorial Instance {}".format( + pformat(instance.data) + )) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py new file mode 100644 index 0000000000..4af4fb94e9 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py @@ -0,0 +1,30 @@ +import pyblish.api + + +class CollectEditorialReviewable(pyblish.api.InstancePlugin): + """ Collect review input from user. + + Adds the input to instance data. + """ + + label = "Collect Editorial Reviewable" + order = pyblish.api.CollectorOrder + + families = ["plate", "review", "audio"] + hosts = ["traypublisher"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + creator_attributes = instance.data["creator_attributes"] + + if creator_attributes["add_review_family"]: + instance.data["families"].append("review") + + self.log.debug("instance.data {}".format(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py new file mode 100644 index 0000000000..716f73022e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py @@ -0,0 +1,213 @@ +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectShotInstance(pyblish.api.InstancePlugin): + """ Collect shot instances + + Resolving its user inputs from creator attributes + to instance data. 
+ """ + + label = "Collect Shot Instances" + order = pyblish.api.CollectorOrder - 0.09 + + hosts = ["traypublisher"] + families = ["shot"] + + SHARED_KEYS = [ + "asset", + "fps", + "handleStart", + "handleEnd", + "frameStart", + "frameEnd", + "clipIn", + "clipOut", + "clipDuration", + "sourceIn", + "sourceOut", + "otioClip", + "workfileFrameStart" + ] + + def process(self, instance): + self.log.debug(pformat(instance.data)) + + creator_identifier = instance.data["creator_identifier"] + if "editorial" not in creator_identifier: + return + + # get otio clip object + otio_clip = self._get_otio_clip(instance) + instance.data["otioClip"] = otio_clip + + # first solve the inputs from creator attr + data = self._solve_inputs_to_data(instance) + instance.data.update(data) + + # distribute all shared keys to clips instances + self._distribute_shared_data(instance) + self._solve_hierarchy_context(instance) + + self.log.debug(pformat(instance.data)) + + def _get_otio_clip(self, instance): + """ Converts otio string data. + + Convert them to proper otio object + and finds its equivalent at otio timeline. + This process is a hack to support also + resolving parent range. + + Args: + instance (obj): publishing instance + + Returns: + otio.Clip: otio clip object + """ + context = instance.context + # convert otio clip from string to object + otio_clip_string = instance.data.pop("otioClip") + otio_clip = otio.adapters.read_from_string( + otio_clip_string) + + otio_timeline = context.data["otioTimeline"] + + clips = [ + clip for clip in otio_timeline.each_child( + descended_from_type=otio.schema.Clip) + if clip.name == otio_clip.name + ] + + otio_clip = clips.pop() + self.log.debug(f"__ otioclip.parent: {otio_clip.parent}") + + return otio_clip + + def _distribute_shared_data(self, instance): + """ Distribute all defined keys. + + All data are shared between all related + instances in context. + + Args: + instance (obj): publishing instance + """ + context = instance.context + + instance_id = instance.data["instance_id"] + + if not context.data.get("editorialSharedData"): + context.data["editorialSharedData"] = {} + + context.data["editorialSharedData"][instance_id] = { + _k: _v for _k, _v in instance.data.items() + if _k in self.SHARED_KEYS + } + + def _solve_inputs_to_data(self, instance): + """ Resolve all user inputs into instance data. + + Args: + instance (obj): publishing instance + + Returns: + dict: instance data updating data + """ + _cr_attrs = instance.data["creator_attributes"] + workfile_start_frame = _cr_attrs["workfile_start_frame"] + frame_start = _cr_attrs["frameStart"] + frame_end = _cr_attrs["frameEnd"] + frame_dur = frame_end - frame_start + + return { + "asset": _cr_attrs["asset_name"], + "fps": float(_cr_attrs["fps"]), + "handleStart": _cr_attrs["handle_start"], + "handleEnd": _cr_attrs["handle_end"], + "frameStart": workfile_start_frame, + "frameEnd": workfile_start_frame + frame_dur, + "clipIn": _cr_attrs["clipIn"], + "clipOut": _cr_attrs["clipOut"], + "clipDuration": _cr_attrs["clipDuration"], + "sourceIn": _cr_attrs["sourceIn"], + "sourceOut": _cr_attrs["sourceOut"], + "workfileFrameStart": workfile_start_frame + } + + def _solve_hierarchy_context(self, instance): + """ Adding hierarchy data to context shared data. 
+
+    def _solve_hierarchy_context(self, instance):
+        """Add hierarchy data to context shared data.
+
+        Args:
+            instance (obj): publishing instance
+        """
+        context = instance.context
+
+        final_context = (
+            context.data["hierarchyContext"]
+            if context.data.get("hierarchyContext")
+            else {}
+        )
+
+        name = instance.data["asset"]
+
+        # get handles
+        handle_start = int(instance.data["handleStart"])
+        handle_end = int(instance.data["handleEnd"])
+
+        in_info = {
+            "entity_type": "Shot",
+            "custom_attributes": {
+                "handleStart": handle_start,
+                "handleEnd": handle_end,
+                "frameStart": instance.data["frameStart"],
+                "frameEnd": instance.data["frameEnd"],
+                "clipIn": instance.data["clipIn"],
+                "clipOut": instance.data["clipOut"],
+                "fps": instance.data["fps"]
+            },
+            "tasks": instance.data["tasks"]
+        }
+
+        parents = instance.data.get('parents', [])
+        self.log.debug(f"parents: {pformat(parents)}")
+
+        actual = {name: in_info}
+
+        for parent in reversed(parents):
+            parent_name = parent["entity_name"]
+            next_dict = {
+                parent_name: {
+                    "entity_type": parent["entity_type"],
+                    "childs": actual
+                }
+            }
+            actual = next_dict
+
+        final_context = self._update_dict(final_context, actual)
+
+        # adding hierarchy context to instance
+        context.data["hierarchyContext"] = final_context
+        self.log.debug(pformat(final_context))
+
+    def _update_dict(self, ex_dict, new_dict):
+        """Recursively update nested data.
+
+        Updates nested data with another nested dictionary.
+
+        Args:
+            ex_dict (dict): nested data
+            new_dict (dict): nested data
+
+        Returns:
+            dict: updated nested data
+        """
+        for key in ex_dict:
+            if key in new_dict and isinstance(ex_dict[key], dict):
+                new_dict[key] = self._update_dict(ex_dict[key], new_dict[key])
+            elif not ex_dict.get(key) or not new_dict.get(key):
+                new_dict[key] = ex_dict[key]
+
+        return new_dict
diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py
index c6dc765a27..a99b300730 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py
+++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py
@@ -1,17 +1,16 @@
 import os
 
-from openpype.client import get_project, get_asset_by_name
-from openpype.lib import (
-    StringTemplate,
-    get_workfile_template_key_from_context,
-    get_workdir_data,
-    get_last_workfile_with_version,
-)
+from openpype.lib import StringTemplate
 from openpype.pipeline import (
     registered_host,
     legacy_io,
     Anatomy,
 )
+from openpype.pipeline.workfile import (
+    get_workfile_template_key_from_context,
+    get_last_workfile_with_version,
+)
+from openpype.pipeline.template_data import get_template_data_with_names
 from openpype.hosts.tvpaint.api import lib, pipeline, plugin
@@ -54,19 +53,17 @@ class LoadWorkfile(plugin.Loader):
         asset_name = legacy_io.Session["AVALON_ASSET"]
         task_name = legacy_io.Session["AVALON_TASK"]
 
-        project_doc = get_project(project_name)
-        asset_doc = get_asset_by_name(project_name, asset_name)
-
         template_key = get_workfile_template_key_from_context(
             asset_name,
             task_name,
             host_name,
-            project_name=project_name,
-            dbcon=legacy_io
+            project_name=project_name
         )
         anatomy = Anatomy(project_name)
 
-        data = get_workdir_data(project_doc, asset_doc, task_name, host_name)
+        data = get_template_data_with_names(
+            project_name, asset_name, task_name, host_name
+        )
         data["root"] = anatomy.roots
 
         file_template = anatomy.templates[template_key]["file"]
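The tvpaint loader now fills its file template from names-based template data. A rough sketch of how such data fills a workfile template, assuming an OpenPype environment; the template and values here are illustrative, not the project's actual anatomy:

    # Sketch of template filling with openpype.lib.StringTemplate.
    from openpype.lib import StringTemplate

    data = {
        "project": {"name": "demo", "code": "dm"},
        "asset": "sh010",
        "task": {"name": "animation"},
        "version": 7,
        "ext": "tvpp",
    }
    filename = StringTemplate.format_strict_template(
        "{project[code]}_{asset}_{task[name]}_v{version:0>3}.{ext}", data
    )
    print(filename)  # dm_sh010_animation_v007.tvpp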
diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py
index 5be04fc841..50b34bd573 100644
--- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py
+++ b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py
@@ -1,13 +1,13 @@
 # -*- coding: utf-8 -*-
 """Hook to launch Unreal and prepare projects."""
 import os
+import copy
 from pathlib import Path
 
 from openpype.lib import (
     PreLaunchHook,
     ApplicationLaunchFailed,
     ApplicationNotFound,
-    get_workdir_data,
     get_workfile_template_key
 )
 import openpype.hosts.unreal.lib as unreal_lib
@@ -35,18 +35,13 @@ class UnrealPrelaunchHook(PreLaunchHook):
             return last_workfile.name
 
         # Prepare data for fill data and for getting workfile template key
-        task_name = self.data["task_name"]
         anatomy = self.data["anatomy"]
-        asset_doc = self.data["asset_doc"]
         project_doc = self.data["project_doc"]
-        asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
-        task_info = asset_tasks.get(task_name) or {}
-        task_type = task_info.get("type")
+        # Use already prepared workdir data
+        workdir_data = copy.deepcopy(self.data["workdir_data"])
+        task_type = workdir_data.get("task", {}).get("type")
 
-        workdir_data = get_workdir_data(
-            project_doc, asset_doc, task_name, self.host_name
-        )
         # QUESTION raise exception if version is part of filename template?
         workdir_data["version"] = 1
         workdir_data["ext"] = "uproject"
diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
index 4cb3cee8e1..6444a5191d 100644
--- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
+++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
@@ -10,9 +10,9 @@ from aiohttp.web_response import Response
 from openpype.client import (
     get_projects,
     get_assets,
+    OpenPypeMongoConnection,
 )
 from openpype.lib import (
-    OpenPypeMongoConnection,
     PypeLogger,
 )
 from openpype.lib.remote_publish import (
diff --git a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py
index 1ed8f22b2c..6620e5d5cf 100644
--- a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py
+++ b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py
@@ -6,6 +6,7 @@ import requests
 import json
 import subprocess
 
+from openpype.client import OpenPypeMongoConnection
 from openpype.lib import PypeLogger
 
 from .webpublish_routes import (
@@ -121,8 +122,6 @@ def run_webserver(*args, **kwargs):
 
 def reprocess_failed(upload_dir, webserver_url):
     # log.info("check_reprocesable_records")
-    from openpype.lib import OpenPypeMongoConnection
-
     mongo_client = OpenPypeMongoConnection.get_mongo_client()
     database_name = os.environ["OPENPYPE_DATABASE_NAME"]
     dbcon = mongo_client[database_name]["webpublishes"]
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index fb52a9aca7..3d3e425a86 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -63,7 +63,10 @@ from .execute import (
     path_to_subprocess_arg,
     CREATE_NO_WINDOW
 )
-from .log import PypeLogger, timeit
+from .log import (
+    Logger,
+    PypeLogger,
+)
 
 from .path_templates import (
     merge_dict,
@@ -83,8 +86,9 @@ from .anatomy import (
     Anatomy
 )
 
-from .config import (
+from .dateutils import (
     get_datetime_data,
+    get_timestamp,
     get_formatted_current_time
 )
@@ -111,6 +115,7 @@ from .transcoding import (
     get_ffmpeg_codec_args,
     get_ffmpeg_format_args,
     convert_ffprobe_fps_value,
+    convert_ffprobe_fps_to_float,
 )
 from .avalon_context import (
     CURRENT_DOC_SCHEMAS,
@@ -283,6 +288,7 @@ __all__ = [
     "get_ffmpeg_codec_args",
     "get_ffmpeg_format_args",
     "convert_ffprobe_fps_value",
+    "convert_ffprobe_fps_to_float",
"CURRENT_DOC_SCHEMAS", "PROJECT_NAME_ALLOWED_SYMBOLS", @@ -370,13 +376,13 @@ __all__ = [ "get_datetime_data", "get_formatted_current_time", + "Logger", "PypeLogger", + "get_default_components", "validate_mongo_connection", "OpenPypeMongoConnection", - "timeit", - "is_overlapping_otio_ranges", "otio_range_with_handles", "convert_to_padded_path", diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index f46197e15f..8c92665366 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -27,12 +27,6 @@ from openpype.settings.constants import ( from . import PypeLogger from .profiles_filtering import filter_profiles from .local_settings import get_openpype_username -from .avalon_context import ( - get_workdir_data, - get_workdir_with_workdir_data, - get_workfile_template_key, - get_last_workfile -) from .python_module_tools import ( modules_from_path, @@ -1576,6 +1570,9 @@ def prepare_context_environments(data, env_group=None): data (EnvironmentPrepData): Dictionary where result and intermediate result will be stored. """ + + from openpype.pipeline.template_data import get_template_data + # Context environments log = data["log"] @@ -1596,7 +1593,9 @@ def prepare_context_environments(data, env_group=None): # Load project specific environments project_name = project_doc["name"] project_settings = get_project_settings(project_name) + system_settings = get_system_settings() data["project_settings"] = project_settings + data["system_settings"] = system_settings # Apply project specific environments on current env value apply_project_environments_value( project_name, data["env"], project_settings, env_group @@ -1619,8 +1618,8 @@ def prepare_context_environments(data, env_group=None): if not app.is_host: return - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, app.host_name + workdir_data = get_template_data( + project_doc, asset_doc, task_name, app.host_name, system_settings ) data["workdir_data"] = workdir_data @@ -1631,7 +1630,14 @@ def prepare_context_environments(data, env_group=None): data["task_type"] = task_type try: - workdir = get_workdir_with_workdir_data(workdir_data, anatomy) + from openpype.pipeline.workfile import get_workdir_with_workdir_data + + workdir = get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + project_settings=project_settings + ) except Exception as exc: raise ApplicationLaunchFailed( @@ -1721,11 +1727,19 @@ def _prepare_last_workfile(data, workdir): if not last_workfile_path: extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) if extensions: + from openpype.pipeline.workfile import ( + get_workfile_template_key, + get_last_workfile + ) + anatomy = data["anatomy"] project_settings = data["project_settings"] task_type = workdir_data["task"]["type"] template_key = get_workfile_template_key( - task_type, app.host_name, project_settings=project_settings + task_type, + app.host_name, + project_name, + project_settings=project_settings ) # Find last workfile file_template = str(anatomy.templates[template_key]["file"]) diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 2e177f195d..3c7e9b6289 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -13,23 +13,16 @@ from openpype.client import ( get_project, get_assets, get_asset_by_name, - get_subset_by_name, get_subsets, - get_version_by_id, get_last_versions, - get_last_version_by_subset_id, + get_last_version_by_subset_name, get_representations, - 
diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py
index 2e177f195d..3c7e9b6289 100644
--- a/openpype/lib/avalon_context.py
+++ b/openpype/lib/avalon_context.py
@@ -13,23 +13,16 @@ from openpype.client import (
     get_project,
     get_assets,
     get_asset_by_name,
-    get_subset_by_name,
     get_subsets,
-    get_version_by_id,
     get_last_versions,
-    get_last_version_by_subset_id,
+    get_last_version_by_subset_name,
     get_representations,
-    get_representation_by_id,
     get_workfile_info,
 )
-from openpype.settings import (
-    get_project_settings,
-    get_system_settings
-)
+from openpype.settings import get_project_settings
 from .profiles_filtering import filter_profiles
 from .events import emit_event
 from .path_templates import StringTemplate
-from .local_settings import get_openpype_username
 
 legacy_io = None
@@ -180,7 +173,7 @@ def with_pipeline_io(func):
     return wrapped
 
 
-@with_pipeline_io
+@deprecated("openpype.pipeline.context_tools.is_representation_from_latest")
 def is_latest(representation):
     """Return whether the representation is from latest version
 
@@ -189,51 +182,27 @@ def is_latest(representation):
     Returns:
         bool: Whether the representation is of latest version.
+
+    Deprecated:
+        Function will be removed after release version 3.14.*
     """
-    project_name = legacy_io.active_project()
-    version = get_version_by_id(
-        project_name,
-        representation["parent"],
-        fields=["_id", "type", "parent"]
-    )
-    if version["type"] == "hero_version":
-        return True
+    from openpype.pipeline.context_tools import is_representation_from_latest
 
-    # Get highest version under the parent
-    last_version = get_last_version_by_subset_id(
-        project_name, version["parent"], fields=["_id"]
-    )
-
-    return version["_id"] == last_version["_id"]
+    return is_representation_from_latest(representation)
 
 
-@with_pipeline_io
+@deprecated("openpype.pipeline.load.any_outdated_containers")
 def any_outdated():
-    """Return whether the current scene has any outdated content"""
-    from openpype.pipeline import registered_host
+    """Return whether the current scene has any outdated content.
 
-    project_name = legacy_io.active_project()
-    checked = set()
-    host = registered_host()
-    for container in host.ls():
-        representation = container['representation']
-        if representation in checked:
-            continue
+    Deprecated:
+        Function will be removed after release version 3.14.*
+    """
 
-        representation_doc = get_representation_by_id(
-            project_name, representation, fields=["parent"]
-        )
-        if representation_doc and not is_latest(representation_doc):
-            return True
-        elif not representation_doc:
-            log.debug("Container '{objectName}' has an invalid "
-                      "representation, it is missing in the "
-                      "database".format(**container))
+    from openpype.pipeline.load import any_outdated_containers
 
-        checked.add(representation)
-
-    return False
+    return any_outdated_containers()
 
 
 @deprecated("openpype.pipeline.context_tools.get_current_project_asset")
@@ -247,6 +216,9 @@ def get_asset(asset_name=None):
 
     Returns:
         (MongoDB document)
+
+    Deprecated:
+        Function will be removed after release version 3.14.*
     """
     from openpype.pipeline.context_tools import get_current_project_asset
 
@@ -254,17 +226,15 @@ def get_asset(asset_name=None):
     return get_current_project_asset(asset_name=asset_name)
 
 
+@deprecated("openpype.pipeline.template_data.get_general_template_data")
 def get_system_general_anatomy_data(system_settings=None):
-    if not system_settings:
-        system_settings = get_system_settings()
-    studio_name = system_settings["general"]["studio_name"]
-    studio_code = system_settings["general"]["studio_code"]
-    return {
-        "studio": {
-            "name": studio_name,
-            "code": studio_code
-        }
-    }
+    """
+    Deprecated:
+        Function will be removed after release version 3.14.*
+    """
+    from openpype.pipeline.template_data import get_general_template_data
+
+    return get_general_template_data(system_settings)
 
 
 def get_linked_asset_ids(asset_doc):
@@ -313,7 +283,7 @@ def get_linked_assets(asset_doc):
     return list(get_assets(project_name, link_ids))
 
 
-@with_pipeline_io
+@deprecated("openpype.client.get_last_version_by_subset_name")
 def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
     """Retrieve latest version from `asset_name`, and `subset_name`.
 
@@ -329,11 +299,16 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
 
     Returns:
         None: If asset, subset or version were not found.
-        dict: Last version document for entered .
+        dict: Last version document for entered.
+
+    Deprecated:
+        Function will be removed after release version 3.14.*
     """
     if not project_name:
         if not dbcon:
+            from openpype.pipeline import legacy_io
+
             log.debug("Using `legacy_io` for query.")
             dbcon = legacy_io
 
         # Make sure is installed
         project_name = dbcon.active_project()
 
-    log.debug((
-        "Getting latest version for Project: \"{}\" Asset: \"{}\""
-        " and Subset: \"{}\""
-    ).format(project_name, asset_name, subset_name))
-
-    # Query asset document id by asset name
-    asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
-    if not asset_doc:
-        log.info(
-            "Asset \"{}\" was not found in Database.".format(asset_name)
-        )
-        return None
-
-    subset_doc = get_subset_by_name(
-        project_name, subset_name, asset_doc["_id"]
+    return get_last_version_by_subset_name(
+        project_name, subset_name, asset_name=asset_name
     )
-    if not subset_doc:
-        log.info(
-            "Subset \"{}\" was not found in Database.".format(subset_name)
-        )
-        return None
-
-    version_doc = get_last_version_by_subset_id(
-        project_name, subset_doc["_id"]
-    )
-    if not version_doc:
-        log.info(
-            "Subset \"{}\" does not have any version yet.".format(subset_name)
-        )
-        return None
-    return version_doc
 
 
+@deprecated(
+    "openpype.pipeline.workfile.get_workfile_template_key_from_context")
 def get_workfile_template_key_from_context(
     asset_name, task_name, host_name, project_name=None, dbcon=None,
     project_settings=None
 ):
@@ -402,27 +351,26 @@ def get_workfile_template_key_from_context(
         ValueError: When both 'dbcon' and 'project_name' were not passed.
     """
+
+    from openpype.pipeline.workfile import (
+        get_workfile_template_key_from_context
+    )
+
     if not project_name:
         if not dbcon:
             raise ValueError((
                 "`get_workfile_template_key_from_context` requires to pass"
                 " one of 'dbcon' or 'project_name' arguments."
             ))
 
         project_name = dbcon.active_project()
 
-    asset_doc = get_asset_by_name(
-        project_name, asset_name, fields=["data.tasks"]
-    )
-    asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
-    task_info = asset_tasks.get(task_name) or {}
-    task_type = task_info.get("type")
-
-    return get_workfile_template_key(
-        task_type, host_name, project_name, project_settings
+    return get_workfile_template_key_from_context(
+        asset_name, task_name, host_name, project_name, project_settings
     )
 
 
+@deprecated(
+    "openpype.pipeline.workfile.get_workfile_template_key")
 def get_workfile_template_key(
     task_type, host_name, project_name=None, project_settings=None
 ):
@@ -446,43 +394,15 @@ def get_workfile_template_key(
         ValueError: When both 'project_name' and 'project_settings' were
             not passed.
     """
-    default = "work"
-    if not task_type or not host_name:
-        return default
 
-    if not project_settings:
-        if not project_name:
-            raise ValueError((
-                "`get_workfile_template_key` requires to pass"
-                " one of 'project_name' or 'project_settings' arguments."
-            ))
-        project_settings = get_project_settings(project_name)
+    from openpype.pipeline.workfile import get_workfile_template_key
 
-    try:
-        profiles = (
-            project_settings
-            ["global"]
-            ["tools"]
-            ["Workfiles"]
-            ["workfile_template_profiles"]
-        )
-    except Exception:
-        profiles = []
-
-    if not profiles:
-        return default
-
-    profile_filter = {
-        "task_types": task_type,
-        "hosts": host_name
-    }
-    profile = filter_profiles(profiles, profile_filter)
-    if profile:
-        return profile["workfile_template"] or default
-    return default
+    return get_workfile_template_key(
+        task_type, host_name, project_name, project_settings
+    )
 
 
-# TODO rename function as is not just "work" specific
+@deprecated("openpype.pipeline.template_data.get_template_data")
 def get_workdir_data(project_doc, asset_doc, task_name, host_name):
     """Prepare data for workdir template filling from entered information.
 
@@ -495,42 +415,19 @@ def get_workdir_data(project_doc, asset_doc, task_name, host_name):
     Returns:
         dict: Data prepared for filling workdir template.
+
+    Deprecated:
+        Function will be removed after release version 3.14.*
     """
-    task_type = asset_doc['data']['tasks'].get(task_name, {}).get('type')
-    project_task_types = project_doc["config"]["tasks"]
-    task_code = project_task_types.get(task_type, {}).get("short_name")
+    from openpype.pipeline.template_data import get_template_data
 
-    asset_parents = asset_doc["data"]["parents"]
-    hierarchy = "/".join(asset_parents)
-
-    parent_name = project_doc["name"]
-    if asset_parents:
-        parent_name = asset_parents[-1]
-
-    data = {
-        "project": {
-            "name": project_doc["name"],
-            "code": project_doc["data"].get("code")
-        },
-        "task": {
-            "name": task_name,
-            "type": task_type,
-            "short": task_code,
-        },
-        "asset": asset_doc["name"],
-        "parent": parent_name,
-        "app": host_name,
-        "user": get_openpype_username(),
-        "hierarchy": hierarchy,
-    }
-
-    system_general_data = get_system_general_anatomy_data()
-    data.update(system_general_data)
-
-    return data
+    return get_template_data(
+        project_doc, asset_doc, task_name, host_name
+    )
 
 
+@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
 def get_workdir_with_workdir_data(
     workdir_data, anatomy=None, project_name=None, template_key=None
 ):
@@ -557,31 +454,24 @@ def get_workdir_with_workdir_data(
     Raises:
         ValueError: When both `anatomy` and `project_name` are set to None.
     """
+
     if not anatomy and not project_name:
         raise ValueError((
             "Missing required arguments one of `project_name` or `anatomy`"
             " must be entered."
        ))
 
-    if not anatomy:
-        from openpype.pipeline import Anatomy
-        anatomy = Anatomy(project_name)
+    if not project_name:
+        project_name = anatomy.project_name
 
-    if not template_key:
-        template_key = get_workfile_template_key(
-            workdir_data["task"]["type"],
-            workdir_data["app"],
-            project_name=workdir_data["project"]["name"]
-        )
+    from openpype.pipeline.workfile import get_workdir_with_workdir_data
 
-    anatomy_filled = anatomy.format(workdir_data)
-    # Output is TemplateResult object which contain useful data
-    output = anatomy_filled[template_key]["folder"]
-    if output:
-        return output.normalized()
-    return output
+    return get_workdir_with_workdir_data(
+        workdir_data, project_name, anatomy, template_key
+    )
 
 
+@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
 def get_workdir(
     project_doc,
     asset_doc,
@@ -610,40 +500,36 @@ def get_workdir(
 
         TemplateResult: Workdir path.
""" - if not anatomy: - from openpype.pipeline import Anatomy - anatomy = Anatomy(project_doc["name"]) - - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name - ) + from openpype.pipeline.workfile import get_workdir # Output is TemplateResult object which contain useful data - return get_workdir_with_workdir_data( - workdir_data, anatomy, template_key=template_key + return get_workdir( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + template_key ) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_template_data_from_session") def template_data_from_session(session=None): """ Return dictionary with template from session keys. Args: session (dict, Optional): The Session to use. If not provided use the currently active global Session. + Returns: dict: All available data from session. + + Deprecated: + Function will be removed after release version 3.14.* """ - if session is None: - session = legacy_io.Session + from openpype.pipeline.context_tools import get_template_data_from_session - project_name = session["AVALON_PROJECT"] - asset_name = session["AVALON_ASSET"] - task_name = session["AVALON_TASK"] - host_name = session["AVALON_APP"] - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, asset_name) - return get_workdir_data(project_doc, asset_doc, task_name, host_name) + return get_template_data_from_session(session) @with_pipeline_io @@ -669,6 +555,8 @@ def compute_session_changes( dict: The required changes in the Session dictionary. """ + from openpype.pipeline.context_tools import get_workdir_from_session + changes = dict() # If no changes, return directly @@ -715,29 +603,11 @@ def compute_session_changes( return changes -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_workdir_from_session") def get_workdir_from_session(session=None, template_key=None): - from openpype.pipeline import Anatomy + from openpype.pipeline.context_tools import get_workdir_from_session - if session is None: - session = legacy_io.Session - project_name = session["AVALON_PROJECT"] - host_name = session["AVALON_APP"] - anatomy = Anatomy(project_name) - template_data = template_data_from_session(session) - anatomy_filled = anatomy.format(template_data) - - if not template_key: - task_type = template_data["task"]["type"] - template_key = get_workfile_template_key( - task_type, - host_name, - project_name=project_name - ) - path = anatomy_filled[template_key]["folder"] - if path: - path = os.path.normpath(path) - return path + return get_workdir_from_session(session, template_key) @with_pipeline_io @@ -753,8 +623,8 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): Returns: dict: The changed key, values in the current Session. - """ + changes = compute_session_changes( legacy_io.Session, task=task, @@ -784,7 +654,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return changes -@with_pipeline_io @deprecated("openpype.client.get_workfile_info") def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. 
@@ -805,13 +674,14 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None):
 
     # Use legacy_io if dbcon is not entered
     if not dbcon:
+        from openpype.pipeline import legacy_io
         dbcon = legacy_io
 
     project_name = dbcon.active_project()
     return get_workfile_info(project_name, asset_id, task_name, filename)
 
 
-@with_pipeline_io
+@deprecated
 def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
     """Creates or replace workfile document in mongo.
 
@@ -826,10 +696,13 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
         dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object
             and `legacy_io` is used if not entered.
     """
+    from openpype.pipeline import Anatomy
+    from openpype.pipeline.template_data import get_template_data
+
     # Use legacy_io if dbcon is not entered
     if not dbcon:
+        from openpype.pipeline import legacy_io
         dbcon = legacy_io
 
     # Filter of workfile document
@@ -845,7 +718,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
     # Prepare project for workdir data
     project_name = dbcon.active_project()
     project_doc = get_project(project_name)
-    workdir_data = get_workdir_data(
+    workdir_data = get_template_data(
         project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"]
     )
     # Prepare anatomy
@@ -876,7 +749,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
     )
 
 
-@with_pipeline_io
+@deprecated
 def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
     if not workfile_doc:
         # TODO add log message
         return
@@ -887,6 +760,7 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
 
     # Use legacy_io if dbcon is not entered
     if not dbcon:
+        from openpype.pipeline import legacy_io
         dbcon = legacy_io
 
     # Convert data to mongo modification keys/values
@@ -904,661 +778,11 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
     )
 
 
-class BuildWorkfile:
-    """Wrapper for build workfile process.
+@deprecated("openpype.pipeline.workfile.BuildWorkfile")
+def BuildWorkfile():
+    from openpype.pipeline.workfile import BuildWorkfile
 
-    Load representations for current context by build presets. Build presets
-    are host related, since each host has it's loaders.
- }] - """ - from openpype.pipeline import discover_loader_plugins - - # Get current asset name and entity - project_name = legacy_io.active_project() - current_asset_name = legacy_io.Session["AVALON_ASSET"] - current_asset_entity = get_asset_by_name( - project_name, current_asset_name - ) - # Skip if asset was not found - if not current_asset_entity: - print("Asset entity with name `{}` was not found".format( - current_asset_name - )) - return - - # Prepare available loaders - loaders_by_name = {} - for loader in discover_loader_plugins(): - loader_name = loader.__name__ - if loader_name in loaders_by_name: - raise KeyError( - "Duplicated loader name {0}!".format(loader_name) - ) - loaders_by_name[loader_name] = loader - - # Skip if there are any loaders - if not loaders_by_name: - self.log.warning("There are no registered loaders.") - return - - # Get current task name - current_task_name = legacy_io.Session["AVALON_TASK"] - - # Load workfile presets for task - self.build_presets = self.get_build_presets( - current_task_name, current_asset_entity - ) - - # Skip if there are any presets for task - if not self.build_presets: - self.log.warning( - "Current task `{}` does not have any loading preset.".format( - current_task_name - ) - ) - return - - # Get presets for loading current asset - current_context_profiles = self.build_presets.get("current_context") - # Get presets for loading linked assets - link_context_profiles = self.build_presets.get("linked_assets") - # Skip if both are missing - if not current_context_profiles and not link_context_profiles: - self.log.warning( - "Current task `{}` has empty loading preset.".format( - current_task_name - ) - ) - return - - elif not current_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any loading" - " preset for it's context." - ).format(current_task_name)) - - elif not link_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any" - "loading preset for it's linked assets." - ).format(current_task_name)) - - # Prepare assets to process by workfile presets - assets = [] - current_asset_id = None - if current_context_profiles: - # Add current asset entity if preset has current context set - assets.append(current_asset_entity) - current_asset_id = current_asset_entity["_id"] - - if link_context_profiles: - # Find and append linked assets if preset has set linked mapping - link_assets = get_linked_assets(current_asset_entity) - if link_assets: - assets.extend(link_assets) - - # Skip if there are no assets. This can happen if only linked mapping - # is set and there are no links for his asset. - if not assets: - self.log.warning( - "Asset does not have linked assets. Nothing to process." 
-            )
-            return
-
-        # Prepare entities from database for assets
-        prepared_entities = self._collect_last_version_repres(assets)
-
-        # Load containers by prepared entities and presets
-        loaded_containers = []
-        # - Current asset containers
-        if current_asset_id and current_asset_id in prepared_entities:
-            current_context_data = prepared_entities.pop(current_asset_id)
-            loaded_data = self.load_containers_by_asset_data(
-                current_context_data, current_context_profiles, loaders_by_name
-            )
-            if loaded_data:
-                loaded_containers.append(loaded_data)
-
-        # - Linked assets container
-        for linked_asset_data in prepared_entities.values():
-            loaded_data = self.load_containers_by_asset_data(
-                linked_asset_data, link_context_profiles, loaders_by_name
-            )
-            if loaded_data:
-                loaded_containers.append(loaded_data)
-
-        # Return list of loaded containers
-        return loaded_containers
-
-    @with_pipeline_io
-    def get_build_presets(self, task_name, asset_doc):
-        """ Returns presets to build workfile for task name.
-
-        Presets are loaded for current project set in
-        io.Session["AVALON_PROJECT"], filtered by registered host
-        and entered task name.
-
-        Args:
-            task_name (str): Task name used for filtering build presets.
-
-        Returns:
-            (dict): preset per entered task name
-        """
-        host_name = os.environ["AVALON_APP"]
-        project_settings = get_project_settings(
-            legacy_io.Session["AVALON_PROJECT"]
-        )
-
-        host_settings = project_settings.get(host_name) or {}
-        # Get presets for host
-        wb_settings = host_settings.get("workfile_builder")
-        if not wb_settings:
-            # backward compatibility
-            wb_settings = host_settings.get("workfile_build") or {}
-
-        builder_profiles = wb_settings.get("profiles")
-        if not builder_profiles:
-            return None
-
-        task_type = (
-            asset_doc
-            .get("data", {})
-            .get("tasks", {})
-            .get(task_name, {})
-            .get("type")
-        )
-        filter_data = {
-            "task_types": task_type,
-            "tasks": task_name
-        }
-        return filter_profiles(builder_profiles, filter_data)
-
-    def _filter_build_profiles(self, build_profiles, loaders_by_name):
-        """ Filter build profiles by loaders and prepare process data.
-
-        Valid profile must have "loaders", "families" and "repre_names" keys
-        with valid values.
-        - "loaders" expects list of strings representing possible loaders.
-        - "families" expects list of strings for filtering
-          by main subset family.
-        - "repre_names" expects list of strings for filtering by
-          representation name.
-
-        Lowered "families" and "repre_names" are prepared for each profile
-        with all required keys.
-
-        Args:
-            build_profiles (dict): Profiles for building workfile.
-            loaders_by_name (dict): Available loaders per name.
-
-        Returns:
-            (list): Filtered and prepared profiles.
- """ - valid_profiles = [] - for profile in build_profiles: - # Check loaders - profile_loaders = profile.get("loaders") - if not profile_loaders: - self.log.warning(( - "Build profile has missing loaders configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check if any loader is available - loaders_match = False - for loader_name in profile_loaders: - if loader_name in loaders_by_name: - loaders_match = True - break - - if not loaders_match: - self.log.warning(( - "All loaders from Build profile are not available: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check families - profile_families = profile.get("families") - if not profile_families: - self.log.warning(( - "Build profile is missing families configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check representation names - profile_repre_names = profile.get("repre_names") - if not profile_repre_names: - self.log.warning(( - "Build profile is missing" - " representation names filtering: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Prepare lowered families and representation names - profile["families_lowered"] = [ - fam.lower() for fam in profile_families - ] - profile["repre_names_lowered"] = [ - name.lower() for name in profile_repre_names - ] - - valid_profiles.append(profile) - - return valid_profiles - - def _prepare_profile_for_subsets(self, subsets, profiles): - """Select profile for each subset by it's data. - - Profiles are filtered for each subset individually. - Profile is filtered by subset's family, optionally by name regex and - representation names set in profile. - It is possible to not find matching profile for subset, in that case - subset is skipped and it is possible that none of subsets have - matching profile. - - Args: - subsets (list): Subset documents. - profiles (dict): Build profiles. - - Returns: - (dict) Profile by subset's id. - """ - # Prepare subsets - subsets_by_family = self.map_subsets_by_family(subsets) - - profiles_per_subset_id = {} - for family, subsets in subsets_by_family.items(): - family_low = family.lower() - for profile in profiles: - # Skip profile if does not contain family - if family_low not in profile["families_lowered"]: - continue - - # Precompile name filters as regexes - profile_regexes = profile.get("subset_name_filters") - if profile_regexes: - _profile_regexes = [] - for regex in profile_regexes: - _profile_regexes.append(re.compile(regex)) - profile_regexes = _profile_regexes - - # TODO prepare regex compilation - for subset in subsets: - # Verify regex filtering (optional) - if profile_regexes: - valid = False - for pattern in profile_regexes: - if re.match(pattern, subset["name"]): - valid = True - break - - if not valid: - continue - - profiles_per_subset_id[subset["_id"]] = profile - - # break profiles loop on finding the first matching profile - break - return profiles_per_subset_id - - def load_containers_by_asset_data( - self, asset_entity_data, build_profiles, loaders_by_name - ): - """Load containers for entered asset entity by Build profiles. - - Args: - asset_entity_data (dict): Prepared data with subsets, last version - and representations for specific asset. - build_profiles (dict): Build profiles. - loaders_by_name (dict): Available loaders per name. - - Returns: - (dict) Output contains asset document and loaded containers. 
- """ - - # Make sure all data are not empty - if not asset_entity_data or not build_profiles or not loaders_by_name: - return - - asset_entity = asset_entity_data["asset_entity"] - - valid_profiles = self._filter_build_profiles( - build_profiles, loaders_by_name - ) - if not valid_profiles: - self.log.warning( - "There are not valid Workfile profiles. Skipping process." - ) - return - - self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) - - subsets_by_id = {} - version_by_subset_id = {} - repres_by_version_id = {} - for subset_id, in_data in asset_entity_data["subsets"].items(): - subset_entity = in_data["subset_entity"] - subsets_by_id[subset_entity["_id"]] = subset_entity - - version_data = in_data["version"] - version_entity = version_data["version_entity"] - version_by_subset_id[subset_id] = version_entity - repres_by_version_id[version_entity["_id"]] = ( - version_data["repres"] - ) - - if not subsets_by_id: - self.log.warning("There are not subsets for asset {0}".format( - asset_entity["name"] - )) - return - - profiles_per_subset_id = self._prepare_profile_for_subsets( - subsets_by_id.values(), valid_profiles - ) - if not profiles_per_subset_id: - self.log.warning("There are not valid subsets.") - return - - valid_repres_by_subset_id = collections.defaultdict(list) - for subset_id, profile in profiles_per_subset_id.items(): - profile_repre_names = profile["repre_names_lowered"] - - version_entity = version_by_subset_id[subset_id] - version_id = version_entity["_id"] - repres = repres_by_version_id[version_id] - for repre in repres: - repre_name_low = repre["name"].lower() - if repre_name_low in profile_repre_names: - valid_repres_by_subset_id[subset_id].append(repre) - - # DEBUG message - msg = "Valid representations for Asset: `{}`".format( - asset_entity["name"] - ) - for subset_id, repres in valid_repres_by_subset_id.items(): - subset = subsets_by_id[subset_id] - msg += "\n# Subset Name/ID: `{}`/{}".format( - subset["name"], subset_id - ) - for repre in repres: - msg += "\n## Repre name: `{}`".format(repre["name"]) - - self.log.debug(msg) - - containers = self._load_containers( - valid_repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ) - - return { - "asset_entity": asset_entity, - "containers": containers - } - - @with_pipeline_io - def _load_containers( - self, repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ): - """Real load by collected data happens here. - - Loading of representations per subset happens here. Each subset can - loads one representation. Loading is tried in specific order. - Representations are tried to load by names defined in configuration. - If subset has representation matching representation name each loader - is tried to load it until any is successful. If none of them was - successful then next representation name is tried. - Subset process loop ends when any representation is loaded or - all matching representations were already tried. - - Args: - repres_by_subset_id (dict): Available representations mapped - by their parent (subset) id. - subsets_by_id (dict): Subset documents mapped by their id. - profiles_per_subset_id (dict): Build profiles mapped by subset id. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list) Objects of loaded containers. - """ - from openpype.pipeline import ( - IncompatibleLoaderError, - load_container, - ) - - loaded_containers = [] - - # Get subset id order from build presets. 
-        build_presets = self.build_presets.get("current_context", [])
-        build_presets += self.build_presets.get("linked_assets", [])
-        subset_ids_ordered = []
-        for preset in build_presets:
-            for preset_family in preset["families"]:
-                for id, subset in subsets_by_id.items():
-                    if preset_family not in subset["data"].get("families", []):
-                        continue
-
-                    subset_ids_ordered.append(id)
-
-        # Order representations from subsets.
-        print("repres_by_subset_id", repres_by_subset_id)
-        representations_ordered = []
-        representations = []
-        for id in subset_ids_ordered:
-            for subset_id, repres in repres_by_subset_id.items():
-                if repres in representations:
-                    continue
-
-                if id == subset_id:
-                    representations_ordered.append((subset_id, repres))
-                    representations.append(repres)
-
-        print("representations", representations)
-
-        # Load ordered representations.
-        for subset_id, repres in representations_ordered:
-            subset_name = subsets_by_id[subset_id]["name"]
-
-            profile = profiles_per_subset_id[subset_id]
-            loaders_last_idx = len(profile["loaders"]) - 1
-            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
-
-            repre_by_low_name = {
-                repre["name"].lower(): repre for repre in repres
-            }
-
-            is_loaded = False
-            for repre_name_idx, profile_repre_name in enumerate(
-                profile["repre_names_lowered"]
-            ):
-                # Break iteration if representation was already loaded
-                if is_loaded:
-                    break
-
-                repre = repre_by_low_name.get(profile_repre_name)
-                if not repre:
-                    continue
-
-                for loader_idx, loader_name in enumerate(profile["loaders"]):
-                    if is_loaded:
-                        break
-
-                    loader = loaders_by_name.get(loader_name)
-                    if not loader:
-                        continue
-                    try:
-                        container = load_container(
-                            loader,
-                            repre["_id"],
-                            name=subset_name
-                        )
-                        loaded_containers.append(container)
-                        is_loaded = True
-
-                    except Exception as exc:
-                        if exc == IncompatibleLoaderError:
-                            self.log.info((
-                                "Loader `{}` is not compatible with"
-                                " representation `{}`"
-                            ).format(loader_name, repre["name"]))
-
-                        else:
-                            self.log.error(
-                                "Unexpected error happened during loading",
-                                exc_info=True
-                            )
-
-                        msg = "Loading failed."
-                        if loader_idx < loaders_last_idx:
-                            msg += " Trying next loader."
-                        elif repre_name_idx < repre_names_last_idx:
-                            msg += (
-                                " Loading of subset `{}` was not successful."
-                            ).format(subset_name)
-                        else:
-                            msg += " Trying next representation."
-                        self.log.info(msg)
-
-        return loaded_containers
-
-    @with_pipeline_io
-    def _collect_last_version_repres(self, asset_docs):
-        """Collect subsets, versions and representations for asset_entities.
-
-        Args:
-            asset_entities (list): Asset entities for which want to find data
-
-        Returns:
-            (dict): collected entities
-
-        Example output:
-        ```
-        {
-            {Asset ID}: {
-                "asset_entity": <AssetEntity>,
-                "subsets": {
-                    {Subset ID}: {
-                        "subset_entity": <SubsetEntity>,
-                        "version": {
-                            "version_entity": <VersionEntity>,
-                            "repres": [
-                                <RepreEntity1>, <RepreEntity2>, ...
-                            ]
-                        }
-                    },
-                    ...
-                }
-            },
-            ...
-        }
-        output[asset_id]["subsets"][subset_id]["version"]["repres"]
-        ```
-        """
-
-        output = {}
-        if not asset_docs:
-            return output
-
-        asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs}
-
-        project_name = legacy_io.active_project()
-        subsets = list(get_subsets(
-            project_name, asset_ids=asset_docs_by_ids.keys()
-        ))
-        subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
-
-        last_version_by_subset_id = get_last_versions(
-            project_name, subset_entity_by_ids.keys()
-        )
-        last_version_docs_by_id = {
-            version["_id"]: version
-            for version in last_version_by_subset_id.values()
-        }
-        repre_docs = get_representations(
-            project_name, version_ids=last_version_docs_by_id.keys()
-        )
-
-        for repre_doc in repre_docs:
-            version_id = repre_doc["parent"]
-            version_doc = last_version_docs_by_id[version_id]
-
-            subset_id = version_doc["parent"]
-            subset_doc = subset_entity_by_ids[subset_id]
-
-            asset_id = subset_doc["parent"]
-            asset_doc = asset_docs_by_ids[asset_id]
-
-            if asset_id not in output:
-                output[asset_id] = {
-                    "asset_entity": asset_doc,
-                    "subsets": {}
-                }
-
-            if subset_id not in output[asset_id]["subsets"]:
-                output[asset_id]["subsets"][subset_id] = {
-                    "subset_entity": subset_doc,
-                    "version": {
-                        "version_entity": version_doc,
-                        "repres": []
-                    }
-                }
-
-            output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
-                repre_doc
-            )
-
-        return output
+    return BuildWorkfile()
 
 
 @with_pipeline_io
@@ -1591,13 +815,21 @@ def get_creator_by_name(creator_name, case_sensitive=False):
     return None
 
 
-@with_pipeline_io
+@deprecated
 def change_timer_to_current_context():
     """Called after context change to change timers.
 
+    Deprecated:
+        This method is specific for TimersManager module so please use the
+        functionality from there. Function will be removed after release
+        version 3.14.*
+
     TODO:
    - use TimersManager's static method instead of reimplementing it here
     """
+
+    from openpype.pipeline import legacy_io
+
     webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
     if not webserver_url:
         log.warning("Couldn't find webserver url")
@@ -1685,6 +917,8 @@ def _get_task_context_data_for_anatomy(
     return data
 
 
+@deprecated(
+    "openpype.pipeline.workfile.get_custom_workfile_template_by_context")
 def get_custom_workfile_template_by_context(
     template_profiles, project_doc, asset_doc, task_name, anatomy=None
 ):
@@ -1738,6 +972,9 @@ def get_custom_workfile_template_by_context(
     return None
 
 
+@deprecated(
+    "openpype.pipeline.workfile.get_custom_workfile_template_by_string_context"
+)
 def get_custom_workfile_template_by_string_context(
     template_profiles, project_name, asset_name, task_name,
     dbcon=None, anatomy=None
 ):
@@ -1782,7 +1019,7 @@ def get_custom_workfile_template_by_string_context(
     )
 
 
-@with_pipeline_io
+@deprecated("openpype.pipeline.context_tools.get_custom_workfile_template")
 def get_custom_workfile_template(template_profiles):
     """Filter and fill workfile template profiles by current context.
 
@@ -1797,6 +1034,8 @@ def get_custom_workfile_template(template_profiles):
         context. (Existence of formatted path is not validated.)
""" + from openpype.pipeline import legacy_io + return get_custom_workfile_template_by_string_context( template_profiles, legacy_io.Session["AVALON_PROJECT"], @@ -1806,6 +1045,7 @@ def get_custom_workfile_template(template_profiles): ) +@deprecated("openpype.pipeline.workfile.get_last_workfile_with_version") def get_last_workfile_with_version( workdir, file_template, fill_data, extensions ): @@ -1821,78 +1061,15 @@ def get_last_workfile_with_version( tuple: Last workfile with version if there is any otherwise returns (None, None). """ - if not os.path.exists(workdir): - return None, None - # Fast match on extension - filenames = [ - filename - for filename in os.listdir(workdir) - if os.path.splitext(filename)[1] in extensions - ] + from openpype.pipeline.workfile import get_last_workfile_with_version - # Build template without optionals, version to digits only regex - # and comment to any definable value. - _ext = [] - for ext in extensions: - if not ext.startswith("."): - ext = "." + ext - # Escape dot for regex - ext = "\\" + ext - _ext.append(ext) - ext_expression = "(?:" + "|".join(_ext) + ")" - - # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end - file_template = re.sub(r"\.?{ext}", ext_expression, file_template) - # Replace optional keys with optional content regex - file_template = re.sub(r"<.*?>", r".*?", file_template) - # Replace `{version}` with group regex - file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) - file_template = re.sub(r"{comment.*?}", r".+?", file_template) - file_template = StringTemplate.format_strict_template( - file_template, fill_data + return get_last_workfile_with_version( + workdir, file_template, fill_data, extensions ) - # Match with ignore case on Windows due to the Windows - # OS not being case-sensitive. This avoids later running - # into the error that the file did exist if it existed - # with a different upper/lower-case. - kwargs = {} - if platform.system().lower() == "windows": - kwargs["flags"] = re.IGNORECASE - - # Get highest version among existing matching files - version = None - output_filenames = [] - for filename in sorted(filenames): - match = re.match(file_template, filename, **kwargs) - if not match: - continue - - file_version = int(match.group(1)) - if version is None or file_version > version: - output_filenames[:] = [] - version = file_version - - if file_version == version: - output_filenames.append(filename) - - output_filename = None - if output_filenames: - if len(output_filenames) == 1: - output_filename = output_filenames[0] - else: - last_time = None - for _output_filename in output_filenames: - full_path = os.path.join(workdir, _output_filename) - mod_time = os.path.getmtime(full_path) - if last_time is None or last_time < mod_time: - output_filename = _output_filename - last_time = mod_time - - return output_filename, version - +@deprecated("openpype.pipeline.workfile.get_last_workfile") def get_last_workfile( workdir, file_template, fill_data, extensions, full_path=False ): @@ -1910,22 +1087,12 @@ def get_last_workfile( Returns: str: Last or first workfile as filename of full path to filename. 
""" - filename, version = get_last_workfile_with_version( - workdir, file_template, fill_data, extensions + + from openpype.pipeline.workfile import get_last_workfile + + return get_last_workfile( + workdir, file_template, fill_data, extensions, full_path ) - if filename is None: - data = copy.deepcopy(fill_data) - data["version"] = 1 - data.pop("comment", None) - if not data.get("ext"): - data["ext"] = extensions[0] - data["ext"] = data["ext"].replace('.', '') - filename = StringTemplate.format_strict_template(file_template, data) - - if full_path: - return os.path.normpath(os.path.join(workdir, filename)) - - return filename @with_pipeline_io diff --git a/openpype/lib/config.py b/openpype/lib/config.py index 57e8efa57d..26822649e4 100644 --- a/openpype/lib/config.py +++ b/openpype/lib/config.py @@ -1,82 +1,41 @@ -# -*- coding: utf-8 -*- -"""Get configuration data.""" -import datetime +import warnings +import functools -def get_datetime_data(datetime_obj=None): - """Returns current datetime data as dictionary. +class ConfigDeprecatedWarning(DeprecationWarning): + pass - Args: - datetime_obj (datetime): Specific datetime object - Returns: - dict: prepared date & time data +def deprecated(func): + """Mark functions as deprecated. - Available keys: - "d" - in shortest possible way. - "dd" - with 2 digits. - "ddd" - shortened week day. e.g.: `Mon`, ... - "dddd" - full name of week day. e.g.: `Monday`, ... - "m" - in shortest possible way. e.g.: `1` if January - "mm" - with 2 digits. - "mmm" - shortened month name. e.g.: `Jan`, ... - "mmmm" - full month name. e.g.: `January`, ... - "yy" - shortened year. e.g.: `19`, `20`, ... - "yyyy" - full year. e.g.: `2019`, `2020`, ... - "H" - shortened hours. - "HH" - with 2 digits. - "h" - shortened hours. - "hh" - with 2 digits. - "ht" - AM or PM. - "M" - shortened minutes. - "MM" - with 2 digits. - "S" - shortened seconds. - "SS" - with 2 digits. + It will result in a warning being emitted when the function is used. """ - if not datetime_obj: - datetime_obj = datetime.datetime.now() - - year = datetime_obj.strftime("%Y") - - month = datetime_obj.strftime("%m") - month_name_full = datetime_obj.strftime("%B") - month_name_short = datetime_obj.strftime("%b") - day = datetime_obj.strftime("%d") - - weekday_full = datetime_obj.strftime("%A") - weekday_short = datetime_obj.strftime("%a") - - hours = datetime_obj.strftime("%H") - hours_midday = datetime_obj.strftime("%I") - hour_midday_type = datetime_obj.strftime("%p") - minutes = datetime_obj.strftime("%M") - seconds = datetime_obj.strftime("%S") - - return { - "d": str(int(day)), - "dd": str(day), - "ddd": weekday_short, - "dddd": weekday_full, - "m": str(int(month)), - "mm": str(month), - "mmm": month_name_short, - "mmmm": month_name_full, - "yy": str(year[2:]), - "yyyy": str(year), - "H": str(int(hours)), - "HH": str(hours), - "h": str(int(hours_midday)), - "hh": str(hours_midday), - "ht": hour_midday_type, - "M": str(int(minutes)), - "MM": str(minutes), - "S": str(int(seconds)), - "SS": str(seconds), - } + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", ConfigDeprecatedWarning) + warnings.warn( + ( + "Deprecated import of function '{}'." + " Class was moved to 'openpype.lib.dateutils.{}'." + " Please change your imports." 
+ ).format(func.__name__), + category=ConfigDeprecatedWarning + ) + return func(*args, **kwargs) + return new_func +@deprecated +def get_datetime_data(datetime_obj=None): + from .dateutils import get_datetime_data + + return get_datetime_data(datetime_obj) + + +@deprecated def get_formatted_current_time(): - return datetime.datetime.now().strftime( - "%Y%m%dT%H%M%SZ" - ) + from .dateutils import get_formatted_current_time + + return get_formatted_current_time() diff --git a/openpype/lib/dateutils.py b/openpype/lib/dateutils.py new file mode 100644 index 0000000000..68cd1d1c5b --- /dev/null +++ b/openpype/lib/dateutils.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +"""Get configuration data.""" +import datetime + + +def get_datetime_data(datetime_obj=None): + """Returns current datetime data as dictionary. + + Args: + datetime_obj (datetime): Specific datetime object + + Returns: + dict: prepared date & time data + + Available keys: + "d" - in shortest possible way. + "dd" - with 2 digits. + "ddd" - shortened week day. e.g.: `Mon`, ... + "dddd" - full name of week day. e.g.: `Monday`, ... + "m" - in shortest possible way. e.g.: `1` if January + "mm" - with 2 digits. + "mmm" - shortened month name. e.g.: `Jan`, ... + "mmmm" - full month name. e.g.: `January`, ... + "yy" - shortened year. e.g.: `19`, `20`, ... + "yyyy" - full year. e.g.: `2019`, `2020`, ... + "H" - shortened hours. + "HH" - with 2 digits. + "h" - shortened hours. + "hh" - with 2 digits. + "ht" - AM or PM. + "M" - shortened minutes. + "MM" - with 2 digits. + "S" - shortened seconds. + "SS" - with 2 digits. + """ + + if not datetime_obj: + datetime_obj = datetime.datetime.now() + + year = datetime_obj.strftime("%Y") + + month = datetime_obj.strftime("%m") + month_name_full = datetime_obj.strftime("%B") + month_name_short = datetime_obj.strftime("%b") + day = datetime_obj.strftime("%d") + + weekday_full = datetime_obj.strftime("%A") + weekday_short = datetime_obj.strftime("%a") + + hours = datetime_obj.strftime("%H") + hours_midday = datetime_obj.strftime("%I") + hour_midday_type = datetime_obj.strftime("%p") + minutes = datetime_obj.strftime("%M") + seconds = datetime_obj.strftime("%S") + + return { + "d": str(int(day)), + "dd": str(day), + "ddd": weekday_short, + "dddd": weekday_full, + "m": str(int(month)), + "mm": str(month), + "mmm": month_name_short, + "mmmm": month_name_full, + "yy": str(year[2:]), + "yyyy": str(year), + "H": str(int(hours)), + "HH": str(hours), + "h": str(int(hours_midday)), + "hh": str(hours_midday), + "ht": hour_midday_type, + "M": str(int(minutes)), + "MM": str(minutes), + "S": str(int(seconds)), + "SS": str(seconds), + } + + +def get_timestamp(datetime_obj=None): + """Get standardized timestamp from datetime object. + + Args: + datetime_obj (datetime.datetime): Object of datetime. Current time + is used if not passed. + """ + + if datetime_obj is None: + datetime_obj = datetime.datetime.now() + return datetime_obj.strftime( + "%Y%m%dT%H%M%SZ" + ) + + +def get_formatted_current_time(): + return get_timestamp() diff --git a/openpype/lib/git_progress.py b/openpype/lib/git_progress.py deleted file mode 100644 index 331b7b6745..0000000000 --- a/openpype/lib/git_progress.py +++ /dev/null @@ -1,86 +0,0 @@ -import git -from tqdm import tqdm - - -class _GitProgress(git.remote.RemoteProgress): - """ Class handling displaying progress during git operations. - - This is using **tqdm** for showing progress bars. 
As **GitPython** - is parsing progress directly from git command, it is somehow unreliable - as in some operations it is difficult to get total count of iterations - to display meaningful progress bar. - - """ - _t = None - _code = 0 - _current_status = '' - _current_max = '' - - _description = { - 256: "Checking out files", - 4: "Counting objects", - 128: "Finding sources", - 32: "Receiving objects", - 64: "Resolving deltas", - 16: "Writing objects" - } - - def __init__(self): - super().__init__() - - def __del__(self): - if self._t is not None: - self._t.close() - - def _detroy_tqdm(self): - """ Used to close tqdm when operation ended. - - """ - if self._t is not None: - self._t.close() - self._t = None - - def _check_mask(self, opcode: int) -> bool: - """" Add meaningful description to **GitPython** opcodes. - - :param opcode: OP_MASK opcode - :type opcode: int - :return: String description of opcode - :rtype: str - - .. seealso:: For opcodes look at :class:`git.RemoteProgress` - - """ - if opcode & self.COUNTING: - return self._description.get(self.COUNTING) - elif opcode & self.CHECKING_OUT: - return self._description.get(self.CHECKING_OUT) - elif opcode & self.WRITING: - return self._description.get(self.WRITING) - elif opcode & self.RECEIVING: - return self._description.get(self.RECEIVING) - elif opcode & self.RESOLVING: - return self._description.get(self.RESOLVING) - elif opcode & self.FINDING_SOURCES: - return self._description.get(self.FINDING_SOURCES) - else: - return "Processing" - - def update(self, op_code, cur_count, max_count=None, message=''): - """ Called when git operation update progress. - - .. seealso:: For more details see - :func:`git.objects.submodule.base.Submodule.update` - `Documentation `_ - - """ - code = self._check_mask(op_code) - if self._current_status != code or self._current_max != max_count: - self._current_max = max_count - self._current_status = code - self._detroy_tqdm() - self._t = tqdm(total=max_count) - self._t.set_description(" . {}".format(code)) - - self._t.update(cur_count) diff --git a/openpype/lib/local_settings.py b/openpype/lib/local_settings.py index 97e99b4b5a..c6c9699240 100644 --- a/openpype/lib/local_settings.py +++ b/openpype/lib/local_settings.py @@ -34,7 +34,7 @@ from openpype.settings import ( get_system_settings ) -from .import validate_mongo_connection +from openpype.client.mongo import validate_mongo_connection _PLACEHOLDER = object() diff --git a/openpype/lib/log.py b/openpype/lib/log.py index 2cdb7ec8e4..e77edea0e9 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -24,12 +24,13 @@ import traceback import threading import copy -from . import Terminal -from .mongo import ( +from openpype.client.mongo import ( MongoEnvNotSet, get_default_components, - OpenPypeMongoConnection + OpenPypeMongoConnection, ) +from . import Terminal + try: import log4mongo from log4mongo.handlers import MongoHandler @@ -41,13 +42,13 @@ except ImportError: USE_UNICODE = hasattr(__builtins__, "unicode") -class PypeStreamHandler(logging.StreamHandler): +class LogStreamHandler(logging.StreamHandler): """ StreamHandler class designed to handle utf errors in python 2.x hosts. 
""" def __init__(self, stream=None): - super(PypeStreamHandler, self).__init__(stream) + super(LogStreamHandler, self).__init__(stream) self.enabled = True def enable(self): @@ -56,7 +57,6 @@ class PypeStreamHandler(logging.StreamHandler): Used to silence output """ self.enabled = True - pass def disable(self): """ Disable StreamHandler @@ -107,13 +107,13 @@ class PypeStreamHandler(logging.StreamHandler): self.handleError(record) -class PypeFormatter(logging.Formatter): +class LogFormatter(logging.Formatter): DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ]' default_formatter = logging.Formatter(DFT) def __init__(self, formats): - super(PypeFormatter, self).__init__() + super(LogFormatter, self).__init__() self.formatters = {} for loglevel in formats: self.formatters[loglevel] = logging.Formatter(formats[loglevel]) @@ -141,7 +141,7 @@ class PypeFormatter(logging.Formatter): return out -class PypeMongoFormatter(logging.Formatter): +class MongoFormatter(logging.Formatter): DEFAULT_PROPERTIES = logging.LogRecord( '', '', '', '', '', '', '', '').__dict__.keys() @@ -161,7 +161,7 @@ class PypeMongoFormatter(logging.Formatter): 'method': record.funcName, 'lineNumber': record.lineno } - document.update(PypeLogger.get_process_data()) + document.update(Logger.get_process_data()) # Standard document decorated with exception info if record.exc_info is not None: @@ -181,7 +181,7 @@ class PypeMongoFormatter(logging.Formatter): return document -class PypeLogger: +class Logger: DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] ' DBG = " - { %(name)s }: [ %(message)s ] " INF = ">>> [ %(message)s ] " @@ -239,7 +239,7 @@ class PypeLogger: for handler in logger.handlers: if isinstance(handler, MongoHandler): add_mongo_handler = False - elif isinstance(handler, PypeStreamHandler): + elif isinstance(handler, LogStreamHandler): add_console_handler = False if add_console_handler: @@ -292,7 +292,7 @@ class PypeLogger: "username": components["username"], "password": components["password"], "capped": True, - "formatter": PypeMongoFormatter() + "formatter": MongoFormatter() } if components["port"] is not None: kwargs["port"] = int(components["port"]) @@ -303,10 +303,10 @@ class PypeLogger: @classmethod def _get_console_handler(cls): - formatter = PypeFormatter(cls.FORMAT_FILE) - console_handler = PypeStreamHandler() + formatter = LogFormatter(cls.FORMAT_FILE) + console_handler = LogStreamHandler() - console_handler.set_name("PypeStreamHandler") + console_handler.set_name("LogStreamHandler") console_handler.setFormatter(formatter) return console_handler @@ -417,9 +417,9 @@ class PypeLogger: def get_process_name(cls): """Process name that is like "label" of a process. - Pype's logging can be used from pype itseld of from hosts. Even in Pype - it's good to know if logs are from Pype tray or from pype's event - server. This should help to identify that information. + OpenPype's logging can be used from OpenPyppe itself of from hosts. + Even in OpenPype process it's good to know if logs are from tray or + from other cli commands. This should help to identify that information. """ if cls._process_name is not None: return cls._process_name @@ -485,23 +485,13 @@ class PypeLogger: return OpenPypeMongoConnection.get_mongo_client() -def timeit(method): - """Print time in function. - - For debugging. 
- - """ - log = logging.getLogger() - - def timed(*args, **kw): - ts = time.time() - result = method(*args, **kw) - te = time.time() - if 'log_time' in kw: - name = kw.get('log_name', method.__name__.upper()) - kw['log_time'][name] = int((te - ts) * 1000) - else: - log.debug('%r %2.2f ms' % (method.__name__, (te - ts) * 1000)) - print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000)) - return result - return timed +class PypeLogger(Logger): + @classmethod + def get_logger(cls, *args, **kwargs): + logger = Logger.get_logger(*args, **kwargs) + # TODO uncomment when replaced most of places + # logger.warning(( + # "'openpype.lib.PypeLogger' is deprecated class." + # " Please use 'openpype.lib.Logger' instead." + # )) + return logger diff --git a/openpype/lib/mongo.py b/openpype/lib/mongo.py index c08e76c75c..bb2ee6016a 100644 --- a/openpype/lib/mongo.py +++ b/openpype/lib/mongo.py @@ -1,206 +1,61 @@ -import os -import sys -import time -import logging -import pymongo -import certifi - -if sys.version_info[0] == 2: - from urlparse import urlparse, parse_qs -else: - from urllib.parse import urlparse, parse_qs +import warnings +import functools +from openpype.client.mongo import ( + MongoEnvNotSet, + OpenPypeMongoConnection, +) -class MongoEnvNotSet(Exception): +class MongoDeprecatedWarning(DeprecationWarning): pass -def _decompose_url(url): - """Decompose mongo url to basic components. +def mongo_deprecated(func): + """Mark functions as deprecated. - Used for creation of MongoHandler which expect mongo url components as - separated kwargs. Components are at the end not used as we're setting - connection directly this is just a dumb components for MongoHandler - validation pass. + It will result in a warning being emitted when the function is used. """ - # Use first url from passed url - # - this is because it is possible to pass multiple urls for multiple - # replica sets which would crash on urlparse otherwise - # - please don't use comma in username of password - url = url.split(",")[0] - components = { - "scheme": None, - "host": None, - "port": None, - "username": None, - "password": None, - "auth_db": None - } - result = urlparse(url) - if result.scheme is None: - _url = "mongodb://{}".format(url) - result = urlparse(_url) - - components["scheme"] = result.scheme - components["host"] = result.hostname - try: - components["port"] = result.port - except ValueError: - raise RuntimeError("invalid port specified") - components["username"] = result.username - components["password"] = result.password - - try: - components["auth_db"] = parse_qs(result.query)['authSource'][0] - except KeyError: - # no auth db provided, mongo will use the one we are connecting to - pass - - return components - - -def get_default_components(): - mongo_url = os.environ.get("OPENPYPE_MONGO") - if mongo_url is None: - raise MongoEnvNotSet( - "URL for Mongo logging connection is not set." + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", MongoDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'." + " Function was moved to 'openpype.client.mongo'." 
+ ).format(func.__name__), + category=MongoDeprecatedWarning, + stacklevel=2 ) - return _decompose_url(mongo_url) + return func(*args, **kwargs) + return new_func +@mongo_deprecated +def get_default_components(): + from openpype.client.mongo import get_default_components + + return get_default_components() + + +@mongo_deprecated def should_add_certificate_path_to_mongo_url(mongo_url): - """Check if should add ca certificate to mongo url. + from openpype.client.mongo import should_add_certificate_path_to_mongo_url - Since 30.9.2021 cloud mongo requires newer certificates that are not - available on most of workstation. This adds path to certifi certificate - which is valid for it. To add the certificate path url must have scheme - 'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query. - """ - parsed = urlparse(mongo_url) - query = parse_qs(parsed.query) - lowered_query_keys = set(key.lower() for key in query.keys()) - add_certificate = False - # Check if url 'ssl' or 'tls' are set to 'true' - for key in ("ssl", "tls"): - if key in query and "true" in query["ssl"]: - add_certificate = True - break - - # Check if url contains 'mongodb+srv' - if not add_certificate and parsed.scheme == "mongodb+srv": - add_certificate = True - - # Check if url does already contain certificate path - if add_certificate and "tlscafile" in lowered_query_keys: - add_certificate = False - - return add_certificate + return should_add_certificate_path_to_mongo_url(mongo_url) +@mongo_deprecated def validate_mongo_connection(mongo_uri): - """Check if provided mongodb URL is valid. + from openpype.client.mongo import validate_mongo_connection - Args: - mongo_uri (str): URL to validate. - - Raises: - ValueError: When port in mongo uri is not valid. - pymongo.errors.InvalidURI: If passed mongo is invalid. - pymongo.errors.ServerSelectionTimeoutError: If connection timeout - passed so probably couldn't connect to mongo server. - - """ - client = OpenPypeMongoConnection.create_connection( - mongo_uri, retry_attempts=1 - ) - client.close() + return validate_mongo_connection(mongo_uri) -class OpenPypeMongoConnection: - """Singleton MongoDB connection. - - Keeps MongoDB connections by url. 
- """ - mongo_clients = {} - log = logging.getLogger("OpenPypeMongoConnection") - - @staticmethod - def get_default_mongo_url(): - return os.environ["OPENPYPE_MONGO"] - - @classmethod - def get_mongo_client(cls, mongo_url=None): - if mongo_url is None: - mongo_url = cls.get_default_mongo_url() - - connection = cls.mongo_clients.get(mongo_url) - if connection: - # Naive validation of existing connection - try: - connection.server_info() - with connection.start_session(): - pass - except Exception: - connection = None - - if not connection: - cls.log.debug("Creating mongo connection to {}".format(mongo_url)) - connection = cls.create_connection(mongo_url) - cls.mongo_clients[mongo_url] = connection - - return connection - - @classmethod - def create_connection(cls, mongo_url, timeout=None, retry_attempts=None): - parsed = urlparse(mongo_url) - # Force validation of scheme - if parsed.scheme not in ["mongodb", "mongodb+srv"]: - raise pymongo.errors.InvalidURI(( - "Invalid URI scheme:" - " URI must begin with 'mongodb://' or 'mongodb+srv://'" - )) - - if timeout is None: - timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000) - - kwargs = { - "serverSelectionTimeoutMS": timeout - } - if should_add_certificate_path_to_mongo_url(mongo_url): - kwargs["ssl_ca_certs"] = certifi.where() - - mongo_client = pymongo.MongoClient(mongo_url, **kwargs) - - if retry_attempts is None: - retry_attempts = 3 - - elif not retry_attempts: - retry_attempts = 1 - - last_exc = None - valid = False - t1 = time.time() - for attempt in range(1, retry_attempts + 1): - try: - mongo_client.server_info() - with mongo_client.start_session(): - pass - valid = True - break - - except Exception as exc: - last_exc = exc - if attempt < retry_attempts: - cls.log.warning( - "Attempt {} failed. Retrying... ".format(attempt) - ) - time.sleep(1) - - if not valid: - raise last_exc - - cls.log.info("Connected to {}, delay {:.3f}s".format( - mongo_url, time.time() - t1 - )) - return mongo_client +__all__ = ( + "MongoEnvNotSet", + "OpenPypeMongoConnection", + "get_default_components", + "should_add_certificate_path_to_mongo_url", + "validate_mongo_connection", +) diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index c1282016ef..e4b18ec258 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -211,15 +211,28 @@ class StringTemplate(object): if counted_symb > -1: parts = tmp_parts.pop(counted_symb) counted_symb -= 1 + # If part contains only single string keep value + # unchanged if parts: # Remove optional start char parts.pop(0) - if counted_symb < 0: - out_parts = new_parts - else: - out_parts = tmp_parts[counted_symb] - # Store temp parts - out_parts.append(OptionalPart(parts)) + + if not parts: + value = "<>" + elif ( + len(parts) == 1 + and isinstance(parts[0], six.string_types) + ): + value = "<{}>".format(parts[0]) + else: + value = OptionalPart(parts) + + if counted_symb < 0: + out_parts = new_parts + else: + out_parts = tmp_parts[counted_symb] + # Store value + out_parts.append(value) continue if counted_symb < 0: @@ -793,6 +806,7 @@ class OptionalPart: parts(list): Parts of template. Can contain 'str', 'OptionalPart' or 'FormattingPart'. 
""" + def __init__(self, parts): self._parts = parts diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 1d3c1eec6b..060db94ae0 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- """Avalon/Pyblish plugin tools.""" import os -import inspect import logging import re import json +import warnings +import functools + from openpype.client import get_asset_by_id from openpype.settings import get_project_settings @@ -17,6 +19,51 @@ log = logging.getLogger(__name__) DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" +class PluginToolsDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", PluginToolsDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=PluginToolsDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + class TaskNotSetError(KeyError): def __init__(self, msg=None): if not msg: @@ -197,6 +244,7 @@ def prepare_template_data(fill_pairs): return fill_data +@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins") def filter_pyblish_plugins(plugins): """Filter pyblish plugins by presets. @@ -206,57 +254,14 @@ def filter_pyblish_plugins(plugins): Args: plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` `discover()` method. 
- """ - from pyblish import api - host = api.current_host() + from openpype.pipeline.publish.lib import filter_pyblish_plugins - presets = get_project_settings(os.environ['AVALON_PROJECT']) or {} - # skip if there are no presets to process - if not presets: - return - - # iterate over plugins - for plugin in plugins[:]: - - try: - config_data = presets[host]["publish"][plugin.__name__] - except KeyError: - # host determined from path - file = os.path.normpath(inspect.getsourcefile(plugin)) - file = os.path.normpath(file) - - split_path = file.split(os.path.sep) - if len(split_path) < 4: - log.warning( - 'plugin path too short to extract host {}'.format(file) - ) - continue - - host_from_file = split_path[-4] - plugin_kind = split_path[-2] - - # TODO: change after all plugins are moved one level up - if host_from_file == "openpype": - host_from_file = "global" - - try: - config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501 - except KeyError: - continue - - for option, value in config_data.items(): - if option == "enabled" and value is False: - log.info('removing plugin {}'.format(plugin.__name__)) - plugins.remove(plugin) - else: - log.info('setting {}:{} on plugin {}'.format( - option, value, plugin.__name__)) - - setattr(plugin, option, value) + filter_pyblish_plugins(plugins) +@deprecated def set_plugin_attributes_from_settings( plugins, superclass, host_name=None, project_name=None ): @@ -273,6 +278,8 @@ def set_plugin_attributes_from_settings( project_name (str): Name of project for which settings will be loaded. Value from environment `AVALON_PROJECT` is used if not entered. """ + + # Function is not used anymore from openpype.pipeline import LegacyCreator, LoaderPlugin # determine host application to use for finding presets diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index d7884d0200..b4b05c053b 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -7,8 +7,10 @@ from bson.objectid import ObjectId import pyblish.util import pyblish.api -from openpype.lib.mongo import OpenPypeMongoConnection +from openpype.client.mongo import OpenPypeMongoConnection from openpype.lib.plugin_tools import parse_json +from openpype.lib.profiles_filtering import filter_profiles +from openpype.api import get_project_settings ERROR_STATUS = "error" IN_PROGRESS_STATUS = "in_progress" @@ -175,14 +177,12 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): ) -def fail_batch(_id, batches_in_progress, dbcon): - """Set current batch as failed as there are some stuck batches.""" - running_batches = [str(batch["_id"]) - for batch in batches_in_progress - if batch["_id"] != _id] - msg = "There are still running batches {}\n". \ - format("\n".join(running_batches)) - msg += "Ask admin to check them and reprocess current batch" +def fail_batch(_id, dbcon, msg): + """Set current batch as failed as there is some problem. 
+ + Raises: + ValueError + """ dbcon.update_one( {"_id": _id}, {"$set": @@ -259,3 +259,19 @@ def get_task_data(batch_dir): "Cannot parse batch meta in {} folder".format(task_data)) return task_data + + +def get_timeout(project_name, host_name, task_type): + """Returns timeout(seconds) from Setting profile.""" + filter_data = { + "task_types": task_type, + "hosts": host_name + } + timeout_profiles = (get_project_settings(project_name)["webpublisher"] + ["timeout_profiles"]) + matching_item = filter_profiles(timeout_profiles, filter_data) + timeout = 3600 + if matching_item: + timeout = matching_item["timeout"] + + return timeout diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index ee9a0f08de..60d5d3ed4a 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -938,3 +938,40 @@ def convert_ffprobe_fps_value(str_value): fps = int(fps) return str(fps) + + +def convert_ffprobe_fps_to_float(value): + """Convert string value of frame rate to float. + + Copy of 'convert_ffprobe_fps_value' which raises exceptions on invalid + value, does not convert value to string and does not return "Unknown" + string. + + Args: + value (str): Value to be converted. + + Returns: + Float: Converted frame rate in float. If divisor in value is '0' then + '0.0' is returned. + + Raises: + ValueError: Passed value is invalid for conversion. + """ + + if not value: + raise ValueError("Got empty value.") + + items = value.split("/") + if len(items) == 1: + return float(items[0]) + + if len(items) > 2: + raise ValueError(( + "FPS expression contains multiple dividers \"{}\"." + ).format(value)) + + dividend = float(items.pop(0)) + divisor = float(items.pop(0)) + if divisor == 0.0: + return 0.0 + return dividend / divisor diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index de8df3dd9e..c55f85c8da 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -80,7 +80,8 @@ class AfterEffectsSubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index a1ee5e0957..3f9c09b592 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -274,7 +274,8 @@ class HarmonySubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index fdf67b51bc..95856137e2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -130,6 +130,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. 
"houdini17.5;pluginx2.3" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index aca88c7440..beda753723 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -101,6 +101,7 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 3707c5709f..f253ceb21a 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -519,12 +519,14 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", + "OPENPYPE_SG_USER", "AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index 57572fcb24..38ae5d2f7f 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -5,7 +5,6 @@ from maya import cmds from openpype.pipeline import legacy_io, PublishXmlValidationError from openpype.settings import get_project_settings -import openpype.api import pyblish.api @@ -34,7 +33,9 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): targets = ["local"] def process(self, instance): - settings = get_project_settings(os.getenv("AVALON_PROJECT")) + project_name = instance.context.data["projectName"] + # TODO settings can be received from 'context.data["project_settings"]' + settings = get_project_settings(project_name) # use setting for publish job on farm, no reason to have it separately deadline_publish_job_sett = (settings["deadline"] ["publish"] @@ -53,9 +54,6 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): scene = instance.context.data["currentFile"] scenename = os.path.basename(scene) - # Get project code - project_name = legacy_io.Session["AVALON_PROJECT"] - job_name = "{scene} [PUBLISH]".format(scene=scenename) batch_name = "{code} - {scene}".format(code=project_name, scene=scenename) @@ -102,13 +100,14 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", - "FTRACK_SERVER" + "FTRACK_SERVER", + "OPENPYPE_VERSION" ] environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) - # TODO replace legacy_io with context.data ? 
- environment["AVALON_PROJECT"] = legacy_io.Session["AVALON_PROJECT"] + # TODO replace legacy_io with context.data + environment["AVALON_PROJECT"] = project_name environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 93fb511a34..336a56ec45 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -80,10 +80,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Using published scene for render {}".format(script_path) ) - # exception for slate workflow - if "slate" in instance.data["families"]: - submit_frame_start -= 1 - response = self.payload_submit( instance, script_path, @@ -99,10 +95,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["publishJobState"] = "Suspended" if instance.data.get("bakingNukeScripts"): - # exception for slate workflow - if "slate" in instance.data["families"]: - submit_frame_start += 1 - for baking_script in instance.data["bakingNukeScripts"]: render_path = baking_script["bakeRenderPath"] script_path = baking_script["bakeScriptPath"] @@ -261,7 +253,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "PYBLISHPLUGINPATH", "NUKE_PATH", "TOOL_ENV", - "FOUNDRY_LICENSE" + "FOUNDRY_LICENSE", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): @@ -365,7 +358,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): if not instance.data.get("expectedFiles"): instance.data["expectedFiles"] = [] - dir = os.path.dirname(path) + dirname = os.path.dirname(path) file = os.path.basename(path) if "#" in file: @@ -377,9 +370,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["expectedFiles"].append(path) return + if instance.data.get("slate"): + start_frame -= 1 + for i in range(start_frame, (end_frame + 1)): instance.data["expectedFiles"].append( - os.path.join(dir, (file % i)).replace("\\", "/")) + os.path.join(dirname, (file % i)).replace("\\", "/")) def get_limit_groups(self): """Search for limit group nodes and return group name. 
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index b098eaba8e..379953c9e4 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -10,8 +10,10 @@ import clique import pyblish.api -import openpype.api -from openpype.client import get_representations +from openpype.client import ( + get_last_version_by_subset_name, + get_representations, +) from openpype.pipeline import ( get_representation_path, legacy_io, @@ -139,7 +141,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_USERNAME", "OPENPYPE_RENDER_JOB", "OPENPYPE_PUBLISH_JOB", - "OPENPYPE_MONGO" + "OPENPYPE_MONGO", + "OPENPYPE_VERSION" ] # custom deadline attributes @@ -156,7 +159,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # mapping of instance properties to be transfered to new instance for every # specified family instance_transfer = { - "slate": ["slateFrames"], + "slate": ["slateFrames", "slate"], "review": ["lutPath"], "render2d": ["bakingNukeScripts", "version"], "renderlayer": ["convertToScanline"] @@ -343,8 +346,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # get latest version of subset # this will stop if subset wasn't published yet - version = openpype.api.get_latest_version(instance.data.get("asset"), - instance.data.get("subset")) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + instance.data.get("subset"), + asset_name=instance.data.get("asset") + ) + # get its files based on extension subset_resources = get_resources( project_name, version, representation.get("ext") @@ -578,11 +586,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues on farm." 
).format(staging)) + frame_start = int(instance.get("frameStartHandle")) + if instance.get("slate"): + frame_start -= 1 + rep = { "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(collection)], - "frameStart": int(instance.get("frameStartHandle")), + "frameStart": frame_start, "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, @@ -1025,9 +1037,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): prev_start = None prev_end = None - version = openpype.api.get_latest_version(asset_name=asset, - subset_name=subset - ) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) # Set prev start / end frames for comparison if not prev_start and not prev_end: @@ -1072,7 +1087,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): based on 'publish' template """ if not version: - version = openpype.api.get_latest_version(asset, subset) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) if version: version = int(version["name"]) + 1 else: diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index bcd853f374..172649c951 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -6,13 +6,52 @@ import subprocess import json import platform import uuid -from Deadline.Scripting import RepositoryUtils, FileUtils +import re +from Deadline.Scripting import RepositoryUtils, FileUtils, DirectoryUtils + + +def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. + + """ + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(f" ! 
path is not a build: {path}") + return None + + version = {} + with open(version_file, "r") as vf: + exec(vf.read(), version) + + version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) + return version_match[1] def get_openpype_executable(): """Return OpenPype Executable from Event Plug-in Settings""" config = RepositoryUtils.GetPluginConfig("OpenPype") - return config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + dir_list = config.GetConfigEntryWithDefault( + "OpenPypeInstallationDirs", "") + return exe_list, dir_list def inject_openpype_environment(deadlinePlugin): @@ -25,16 +64,89 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Injecting OpenPype environments ...") try: print(">>> Getting OpenPype executable ...") - exe_list = get_openpype_executable() - openpype_app = FileUtils.SearchFileList(exe_list) - if openpype_app == "": + exe_list, dir_list = get_openpype_executable() + openpype_versions = [] + # if the job requires specific OpenPype version, + # lets go over all available and find compatible build. + requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") + if requested_version: + print((">>> Scanning for compatible requested " + f"version {requested_version}")) + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + print(f"--- Looking for OpenPype at: {install_dir}") + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = get_openpype_version_from_path(subdir) + if not version: + continue + print(f" - found: {version} - {subdir}") + openpype_versions.append((version, subdir)) + + exe = FileUtils.SearchFileList(exe_list) + if openpype_versions: + # if looking for requested compatible version, + # add the implicitly specified to the list too. + print(f"Looking for OpenPype at: {os.path.dirname(exe)}") + version = get_openpype_version_from_path( + os.path.dirname(exe)) + if version: + print(f" - found: {version} - {os.path.dirname(exe)}") + openpype_versions.append((version, os.path.dirname(exe))) + + if requested_version: + # sort detected versions + if openpype_versions: + # use natural sorting + openpype_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + print(("*** Latest available version found is " + f"{openpype_versions[-1][0]}")) + requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501 + compatible_versions = [] + for version in openpype_versions: + v = version[0].split(".")[:3] + if v[0] == requested_major and v[1] == requested_minor: + compatible_versions.append(version) + if not compatible_versions: + raise RuntimeError( + ("Cannot find compatible version available " + "for version {} requested by the job. " + "Please add it through plugin configuration " + "in Deadline or install it to configured " + "directory.").format(requested_version)) + # sort compatible versions and pick the last one + compatible_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + print(("*** Latest compatible version found is " + f"{compatible_versions[-1][0]}")) + # create list of executables for different platform and let + # Deadline decide.
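# Aside (illustrative, not part of the patch): the sort key used above is a
# "natural" sort, so digit runs compare as numbers and "3.10.0" orders
# after "3.9.5" (a plain string sort would put "3.10.0" first):

import re

def natural_sort_key(version_str):
    # split out digit runs; compare them as ints, everything else as
    # lowercased text
    return [
        int(t) if t.isdigit() else t.lower()
        for t in re.split(r"(\d+)", version_str)
    ]

# sorted(["3.10.0", "3.9.5"], key=natural_sort_key)[-1] == "3.10.0"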
+ exe_list = [ + os.path.join( + compatible_versions[-1][1], "openpype_console.exe"), + os.path.join( + compatible_versions[-1][1], "openpype_console") + ] + exe = FileUtils.SearchFileList(";".join(exe_list)) + if exe == "": raise RuntimeError( "OpenPype executable was not found " + - "in the semicolon separated list \"" + exe_list + "\". " + + "in the semicolon separated list " + + "\"" + (";".join(exe_list) if isinstance(exe_list, list) else exe_list) + "\". " + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") - print("--- OpenPype executable: {}".format(openpype_app)) + print("--- OpenPype executable: {}".format(exe)) # tempfile.TemporaryFile cannot be used because of locking temp_file_name = "{}_{}.json".format( @@ -45,7 +157,7 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Temporary path: {}".format(export_url)) args = [ - openpype_app, + exe, "--headless", 'extractenvironments', export_url @@ -75,9 +187,9 @@ def inject_openpype_environment(deadlinePlugin): env["OPENPYPE_HEADLESS_MODE"] = "1" env["AVALON_TIMEOUT"] = "5000" - print(">>> Executing: {}".format(args)) + print(">>> Executing: {}".format(" ".join(args))) std_output = subprocess.check_output(args, - cwd=os.path.dirname(openpype_app), + cwd=os.path.dirname(exe), env=env) print(">>> Process result {}".format(std_output)) diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param index 8bd6dce12d..b3ac18e20c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param @@ -7,11 +7,20 @@ Index=0 Default=OpenPype Plugin for Deadline Description=Not configurable +[OpenPypeInstallationDirs] +Type=multilinemultifolder +Label=Directories where OpenPype versions are installed +Category=OpenPype Installation Directories +CategoryOrder=0 +Index=0 +Default=C:\Program Files (x86)\OpenPype +Description=Path or paths to directories where multiple versions of OpenPype might be installed. Enter every such path on separate lines. + [OpenPypeExecutable] Type=multilinemultifilename Label=OpenPype Executable Category=OpenPype Executables -CategoryOrder=0 +CategoryOrder=1 Index=0 Default= Description=The path to the OpenPype executable. Enter alternative paths on separate lines. diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index 451d71fb63..6b0f69d98f 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -1,10 +1,19 @@ +#!/usr/bin/env python3 + from System.IO import Path from System.Text.RegularExpressions import Regex from Deadline.Plugins import PluginType, DeadlinePlugin -from Deadline.Scripting import StringUtils, FileUtils, RepositoryUtils +from Deadline.Scripting import ( + StringUtils, + FileUtils, + DirectoryUtils, + RepositoryUtils +) import re +import os +import platform ###################################################################### @@ -52,13 +61,115 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): self.AddStdoutHandlerCallback( ".*Progress: (\d+)%.*").HandleCallback += self.HandleProgress + @staticmethod + def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan.
+ build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. + + """ + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(f" ! path is not a build: {path}") + return None + + version = {} + with open(version_file, "r") as vf: + exec(vf.read(), version) + + version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) + return version_match[1] + def RenderExecutable(self): - exeList = self.GetConfigEntry("OpenPypeExecutable") - exe = FileUtils.SearchFileList(exeList) + job = self.GetJob() + openpype_versions = [] + # if the job requires specific OpenPype version, + # lets go over all available and find compatible build. + requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") + if requested_version: + self.LogInfo(( + "Scanning for compatible requested " + f"version {requested_version}")) + dir_list = self.GetConfigEntry("OpenPypeInstallationDirs") + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = self.get_openpype_version_from_path(subdir) + if not version: + continue + openpype_versions.append((version, subdir)) + + exe_list = self.GetConfigEntry("OpenPypeExecutable") + exe = FileUtils.SearchFileList(exe_list) + if openpype_versions: + # if looking for requested compatible version, + # add the implicitly specified to the list too. + version = self.get_openpype_version_from_path( + os.path.dirname(exe)) + if version: + openpype_versions.append((version, os.path.dirname(exe))) + + if requested_version: + # sort detected versions + if openpype_versions: + openpype_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501 + compatible_versions = [] + for version in openpype_versions: + v = version[0].split(".")[:3] + if v[0] == requested_major and v[1] == requested_minor: + compatible_versions.append(version) + if not compatible_versions: + self.FailRender(("Cannot find compatible version available " + "for version {} requested by the job. " + "Please add it through plugin configuration " + "in Deadline or install it to configured " + "directory.").format(requested_version)) + # sort compatible versions and pick the last one + compatible_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + # create list of executables for different platform and let + # Deadline decide. + exe_list = [ + os.path.join( + compatible_versions[-1][1], "openpype_console.exe"), + os.path.join( + compatible_versions[-1][1], "openpype_console") + ] + exe = FileUtils.SearchFileList(";".join(exe_list)) + if exe == "": self.FailRender( "OpenPype executable was not found " + - "in the semicolon separated list \"" + exeList + "\". " + + "in the semicolon separated list " + + "\"" + (";".join(exe_list) if isinstance(exe_list, list) else exe_list) + "\". 
" + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") return exe diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py index 0914933de4..dc76920a57 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py @@ -1,10 +1,11 @@ import collections import datetime +import copy import ftrack_api from openpype_modules.ftrack.lib import ( BaseEvent, - query_custom_attributes + query_custom_attributes, ) @@ -124,10 +125,15 @@ class PushFrameValuesToTaskEvent(BaseEvent): # Separate value changes and task parent changes _entities_info = [] + added_entities = [] + added_entity_ids = set() task_parent_changes = [] for entity_info in entities_info: if entity_info["entity_type"].lower() == "task": task_parent_changes.append(entity_info) + elif entity_info.get("action") == "add": + added_entities.append(entity_info) + added_entity_ids.add(entity_info["entityId"]) else: _entities_info.append(entity_info) entities_info = _entities_info @@ -136,6 +142,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data, changed_keys_by_object_id = self.filter_changes( session, event, entities_info, interest_attributes ) + self.interesting_data_for_added( + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ) if not interesting_data and not task_parent_changes: return @@ -151,9 +164,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): # - it is a complex way how to find out if interesting_data: self.process_attribute_changes( - session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ) if task_parent_changes: @@ -163,8 +180,12 @@ class PushFrameValuesToTaskEvent(BaseEvent): ) def process_task_parent_change( - self, session, object_types_by_name, task_parent_changes, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + task_parent_changes, + interest_entity_types, + interest_attributes ): """Push custom attribute values if task parent has changed. @@ -176,6 +197,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): real hierarchical value and non hierarchical custom attribute value should be set to hierarchical value. """ + # Store task ids which were created or moved under parent with entity # type defined in settings (interest_entity_types). 
task_ids = set() @@ -380,33 +402,49 @@ class PushFrameValuesToTaskEvent(BaseEvent): uncommited_changes = False for idx, item in enumerate(changes): new_value = item["new_value"] + old_value = item["old_value"] attr_id = item["attr_id"] entity_id = item["entity_id"] attr_key = item["attr_key"] - entity_key = collections.OrderedDict() - entity_key["configuration_id"] = attr_id - entity_key["entity_id"] = entity_id + entity_key = collections.OrderedDict(( + ("configuration_id", attr_id), + ("entity_id", entity_id) + )) self._cached_changes.append({ "attr_key": attr_key, "entity_id": entity_id, "value": new_value, "time": datetime.datetime.now() }) + old_value_is_set = ( + old_value is not ftrack_api.symbol.NOT_SET + and old_value is not None + ) if new_value is None: + if not old_value_is_set: + continue op = ftrack_api.operation.DeleteEntityOperation( "CustomAttributeValue", entity_key ) - else: + + elif old_value_is_set: op = ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + "CustomAttributeValue", entity_key, "value", - ftrack_api.symbol.NOT_SET, + old_value, new_value ) + else: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": new_value} + ) + session.recorded_operations.push(op) self.log.info(( "Changing Custom Attribute \"{}\" to value" @@ -432,9 +470,14 @@ class PushFrameValuesToTaskEvent(BaseEvent): self.log.warning("Changing of values failed.", exc_info=True) def process_attribute_changes( - self, session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ): # Prepare task object id task_object_id = object_types_by_name["task"]["id"] @@ -522,15 +565,26 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id_by_task_id[task_id] = task_entity["parent_id"] self.finalize_attribute_changes( - session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ) def finalize_attribute_changes( - self, session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + self, + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ): attr_id_to_key = {} for attr_confs in attrs_by_obj_id.values(): @@ -550,7 +604,11 @@ class PushFrameValuesToTaskEvent(BaseEvent): attr_ids = set(attr_id_to_key.keys()) current_values_by_id = self.get_current_values( - session, attr_ids, entity_ids, task_entity_ids, hier_attrs + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ) changes = [] @@ -560,14 +618,25 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id = entity_id values = interesting_data[parent_id] + added_entity = entity_id in added_entity_ids for attr_id, old_value in current_values.items(): + if added_entity and attr_id in hier_attrs: + continue + attr_key = attr_id_to_key.get(attr_id) if not attr_key: continue # Convert new value from string new_value = values.get(attr_key) - if new_value is not None and old_value is not None: + new_value_is_valid = ( + old_value is not ftrack_api.symbol.NOT_SET + and new_value is not None + ) + if added_entity and not 
new_value_is_valid: + continue + + if new_value is not None and new_value_is_valid: try: new_value = type(old_value)(new_value) except Exception: @@ -581,6 +650,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): changes.append({ "new_value": new_value, "attr_id": attr_id, + "old_value": old_value, "entity_id": entity_id, "attr_key": attr_key }) @@ -599,6 +669,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data = {} changed_keys_by_object_id = {} + for entity_info in entities_info: # Care only about changes if specific keys entity_changes = {} @@ -644,16 +715,123 @@ class PushFrameValuesToTaskEvent(BaseEvent): return interesting_data, changed_keys_by_object_id + def interesting_data_for_added( + self, + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ): + if not added_entities or not interest_attributes: + return + + object_type_ids = set() + entity_ids = set() + all_entity_ids = set() + object_id_by_entity_id = {} + project_id = None + entity_ids_by_parent_id = collections.defaultdict(set) + for entity_info in added_entities: + object_id = entity_info["objectTypeId"] + entity_id = entity_info["entityId"] + object_type_ids.add(object_id) + entity_ids.add(entity_id) + object_id_by_entity_id[entity_id] = object_id + + for item in entity_info["parents"]: + entity_id = item["entityId"] + all_entity_ids.add(entity_id) + parent_id = item["parentId"] + if not parent_id: + project_id = entity_id + else: + entity_ids_by_parent_id[parent_id].add(entity_id) + + hier_attrs = self.get_hierarchical_configurations( + session, interest_attributes + ) + if not hier_attrs: + return + + hier_attrs_key_by_id = { + attr_conf["id"]: attr_conf["key"] + for attr_conf in hier_attrs + } + default_values_by_key = { + attr_conf["key"]: attr_conf["default"] + for attr_conf in hier_attrs + } + + values = query_custom_attributes( + session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + ) + values_per_entity_id = {} + for entity_id in all_entity_ids: + values_per_entity_id[entity_id] = {} + for attr_name in interest_attributes: + values_per_entity_id[entity_id][attr_name] = None + + for item in values: + entity_id = item["entity_id"] + key = hier_attrs_key_by_id[item["configuration_id"]] + values_per_entity_id[entity_id][key] = item["value"] + + fill_queue = collections.deque() + fill_queue.append((project_id, default_values_by_key)) + while fill_queue: + item = fill_queue.popleft() + entity_id, values_by_key = item + entity_values = values_per_entity_id[entity_id] + new_values_by_key = copy.deepcopy(values_by_key) + for key, value in values_by_key.items(): + current_value = entity_values[key] + if current_value is None: + entity_values[key] = value + else: + new_values_by_key[key] = current_value + + for child_id in entity_ids_by_parent_id[entity_id]: + fill_queue.append((child_id, new_values_by_key)) + + for entity_id in entity_ids: + entity_changes = {} + for key, value in values_per_entity_id[entity_id].items(): + if value is not None: + entity_changes[key] = value + + if not entity_changes: + continue + + interesting_data[entity_id] = entity_changes + object_id = object_id_by_entity_id[entity_id] + if object_id not in changed_keys_by_object_id: + changed_keys_by_object_id[object_id] = set() + changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) + def get_current_values( - self, session, attr_ids, entity_ids, task_entity_ids, hier_attrs + self, + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ): 
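+        # Note: entries are prefilled with ftrack_api.symbol.NOT_SET below so
+        # that "attribute has no stored value in ftrack" can be told apart
+        # from an attribute whose stored value is an explicit None.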
current_values_by_id = {} if not attr_ids or not entity_ids: return current_values_by_id + for entity_id in entity_ids: + current_values_by_id[entity_id] = {} + for attr_id in attr_ids: + current_values_by_id[entity_id][attr_id] = ( + ftrack_api.symbol.NOT_SET + ) + values = query_custom_attributes( session, attr_ids, entity_ids, True ) + for item in values: entity_id = item["entity_id"] attr_id = item["configuration_id"] @@ -699,6 +877,18 @@ class PushFrameValuesToTaskEvent(BaseEvent): output[obj_id][attr["key"]] = attr["id"] return output, hiearchical + def get_hierarchical_configurations(self, session, interest_attributes): + hier_attr_query = ( + "select id, key, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key in ({}) and is_hierarchical is true" + ) + if not interest_attributes: + return [] + return list(session.query(hier_attr_query.format( + self.join_query_keys(interest_attributes), + )).all()) + def register(session): PushFrameValuesToTaskEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index ad82af39a3..eec245070c 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -16,7 +16,7 @@ from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype_modules.ftrack.lib.custom_attributes import ( query_custom_attributes ) -from openpype.lib import config +from openpype.lib.dateutils import get_datetime_data from openpype.lib.delivery import ( path_from_representation, get_format_dict, @@ -555,7 +555,7 @@ class Delivery(BaseAction): format_dict = get_format_dict(anatomy, location_path) - datetime_data = config.get_datetime_data() + datetime_data = get_datetime_data() for repre in repres_to_deliver: source_path = repre.get("data", {}).get("path") debug_msg = "Processing representation {}".format(repre["_id"]) diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index d91649d7ba..fb1cdf340e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -11,13 +11,11 @@ from openpype.client import ( get_project, get_assets, ) -from openpype.settings import get_project_settings -from openpype.lib import ( - get_workfile_template_key, - get_workdir_data, - StringTemplate, -) +from openpype.settings import get_project_settings, get_system_settings +from openpype.lib import StringTemplate from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data +from openpype.pipeline.workfile import get_workfile_template_key from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks @@ -279,14 +277,19 @@ class FillWorkfileAttributeAction(BaseAction): extension = "{ext}" project_doc = get_project(project_name) project_settings = get_project_settings(project_name) + system_settings = get_system_settings() anatomy = Anatomy(project_name) templates_by_key = {} operations = [] for asset_doc, task_entities in asset_docs_with_task_entities: for task_entity in task_entities: - workfile_data = get_workdir_data( - project_doc, asset_doc, task_entity["name"], host_name + workfile_data = get_template_data( + project_doc, + 
asset_doc, + task_entity["name"], + host_name, + system_settings ) # Use version 1 for each workfile workfile_data["version"] = 1 @@ -294,7 +297,10 @@ class FillWorkfileAttributeAction(BaseAction): task_type = workfile_data["task"]["type"] template_key = get_workfile_template_key( - task_type, host_name, project_settings=project_settings + task_type, + host_name, + project_name, + project_settings=project_settings ) if template_key in templates_by_key: template = templates_by_key[template_key] diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py index 90ce757242..3ef7c8270a 100644 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/openpype/modules/ftrack/ftrack_server/event_server_cli.py @@ -1,11 +1,9 @@ import os -import sys import signal import datetime import subprocess import socket import json -import platform import getpass import atexit import time @@ -13,12 +11,14 @@ import uuid import ftrack_api import pymongo +from openpype.client.mongo import ( + OpenPypeMongoConnection, + validate_mongo_connection, +) from openpype.lib import ( get_openpype_execute_args, - OpenPypeMongoConnection, get_openpype_version, get_build_version, - validate_mongo_connection ) from openpype_modules.ftrack import FTRACK_MODULE_DIR from openpype_modules.ftrack.lib import credentials diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index 5c6d6352d2..947dacf917 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -7,6 +7,7 @@ import threading import datetime import time import queue +import collections import appdirs import pymongo @@ -24,7 +25,7 @@ except ImportError: from ftrack_api._weakref import WeakMethod from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info -from openpype.lib import OpenPypeMongoConnection +from openpype.client import OpenPypeMongoConnection from openpype.api import Logger TOPIC_STATUS_SERVER = "openpype.event.server.status" @@ -309,7 +310,20 @@ class CustomEventHubSession(ftrack_api.session.Session): # Currently pending operations. self.recorded_operations = ftrack_api.operation.Operations() - self.record_operations = True + + # OpenPype change - In new API are operations properties + new_api = hasattr(self.__class__, "record_operations") + + if new_api: + self._record_operations = collections.defaultdict( + lambda: True + ) + self._auto_populate = collections.defaultdict( + lambda: auto_populate + ) + else: + self.record_operations = True + self.auto_populate = auto_populate self.cache_key_maker = cache_key_maker if self.cache_key_maker is None: @@ -328,6 +342,9 @@ class CustomEventHubSession(ftrack_api.session.Session): if cache is not None: self.cache.caches.append(cache) + if new_api: + self.merge_lock = threading.RLock() + self._managed_request = None self._request = requests.Session() self._request.auth = ftrack_api.session.SessionAuthentication( @@ -335,8 +352,6 @@ class CustomEventHubSession(ftrack_api.session.Session): ) self.request_timeout = timeout - self.auto_populate = auto_populate - # Fetch server information and in doing so also check credentials. 
self._server_information = self._fetch_server_information() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index c4f7b1f05d..20a69e060c 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -26,8 +26,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): families = ["ftrack"] def process(self, instance): - session = instance.context.data["ftrackSession"] - context = instance.context component_list = instance.data.get("ftrackComponentsList") if not component_list: self.log.info( @@ -36,8 +34,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): ) return - session = instance.context.data["ftrackSession"] context = instance.context + session = context.data["ftrackSession"] parent_entity = None default_asset_name = None @@ -89,6 +87,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_versions_data_by_id = {} used_asset_versions = [] + # Iterate over components and publish for data in component_list: self.log.debug("data: {}".format(data)) @@ -118,9 +117,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_version_status_ids_by_name ) - # Component - self.create_component(session, asset_version_entity, data) - # Store asset version and components items that were version_id = asset_version_entity["id"] if version_id not in asset_versions_data_by_id: @@ -137,6 +133,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): if asset_version_entity not in used_asset_versions: used_asset_versions.append(asset_version_entity) + self._create_components(session, asset_versions_data_by_id) + instance.data["ftrackIntegratedAssetVersionsData"] = ( asset_versions_data_by_id ) @@ -625,3 +623,40 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session.rollback() session._configure_locations() six.reraise(tp, value, tb) + + def _create_components(self, session, asset_versions_data_by_id): + for item in asset_versions_data_by_id.values(): + asset_version_entity = item["asset_version"] + component_items = item["component_items"] + + component_entities = session.query( + ( + "select id, name from Component where version_id is \"{}\"" + ).format(asset_version_entity["id"]) + ).all() + + existing_component_names = { + component["name"] + for component in component_entities + } + + contain_review = "ftrackreview-mp4" in existing_component_names + thumbnail_component_item = None + for component_item in component_items: + component_data = component_item.get("component_data") or {} + component_name = component_data.get("name") + if component_name == "ftrackreview-mp4": + contain_review = True + elif component_name == "ftrackreview-image": + thumbnail_component_item = component_item + + if contain_review and thumbnail_component_item: + thumbnail_component_item["component_data"]["name"] = ( + "thumbnail" + ) + + # Component + for component_item in component_items: + self.create_component( + session, asset_version_entity, component_item + ) diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py index 047fd8462c..8cb2336391 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py @@ -13,7 +13,10 @@ class 
IntegrateFtrackComponentOverwrite(pyblish.api.InstancePlugin):
     active = False
 
     def process(self, instance):
-        component_list = instance.data['ftrackComponentsList']
+        component_list = instance.data.get('ftrackComponentsList')
+        if not component_list:
+            self.log.info("No component to overwrite...")
+            return
 
         for cl in component_list:
             cl['component_overwrite'] = True
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py
index c6a3d47f66..e7c265988e 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py
@@ -6,9 +6,11 @@ Requires:
 """
 
 import sys
+import json
 
 import six
 import pyblish.api
+from openpype.lib import StringTemplate
 
 
 class IntegrateFtrackDescription(pyblish.api.InstancePlugin):
@@ -25,6 +27,10 @@
     description_template = "{comment}"
 
     def process(self, instance):
+        if not self.description_template:
+            self.log.info("Skipping. Description template is not set.")
+            return
+
         # Check if there are any integrated AssetVersion entities
         asset_versions_key = "ftrackIntegratedAssetVersionsData"
         asset_versions_data_by_id = instance.data.get(asset_versions_key)
@@ -38,39 +44,62 @@
         else:
             self.log.debug("Comment is set to `{}`".format(comment))
 
-        session = instance.context.data["ftrackSession"]
-
         intent = instance.context.data.get("intent")
-        intent_label = None
-        if intent and isinstance(intent, dict):
-            intent_val = intent.get("value")
-            intent_label = intent.get("label")
-        else:
-            intent_val = intent
+        if intent and "{intent}" in self.description_template:
+            value = intent.get("value")
+            if value:
+                intent = intent.get("label") or value
 
-        if not intent_label:
-            intent_label = intent_val or ""
+        if not intent and not comment:
+            self.log.info("Skipping. Intent and comment are empty.")
+            return
 
         # if intent label is set then format comment
         # - it is possible that intent_label is equal to "" (empty string)
-        if intent_label:
-            self.log.debug(
-                "Intent label is set to `{}`.".format(intent_label)
-            )
-
+        if intent:
+            self.log.debug("Intent is set to `{}`.".format(intent))
         else:
             self.log.debug("Intent is not set.")
 
+        # To support more "optional" keys we would need expressions in
+        # templates, or specific templates for each combination of comment
+        # and intent being set or not (when neither is set, the description
+        # does not make sense).
+        fill_data = {}
+        if comment:
+            fill_data["comment"] = comment
+        if intent:
+            fill_data["intent"] = intent
+
+        description = StringTemplate.format_template(
+            self.description_template, fill_data
+        )
+        if not description.solved:
+            self.log.warning((
+                "Couldn't solve template \"{}\" with data {}"
+            ).format(
+                self.description_template, json.dumps(fill_data, indent=4)
+            ))
+            return
+
+        if not description:
+            self.log.debug((
+                "Skipping. Result of template is an empty string."
+ " Template \"{}\" Fill data: {}" + ).format( + self.description_template, json.dumps(fill_data, indent=4) + )) + return + + session = instance.context.data["ftrackSession"] for asset_version_data in asset_versions_data_by_id.values(): asset_version = asset_version_data["asset_version"] # Backwards compatibility for older settings using # attribute 'note_with_intent_template' - comment = self.description_template.format(**{ - "intent": intent_label, - "comment": comment - }) - asset_version["comment"] = comment + + asset_version["comment"] = description try: session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index c8d9e4117d..a1e5922730 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -3,7 +3,10 @@ import json import copy import pyblish.api -from openpype.lib import get_ffprobe_streams +from openpype.lib.transcoding import ( + get_ffprobe_streams, + convert_ffprobe_fps_to_float, +) from openpype.lib.profiles_filtering import filter_profiles @@ -58,7 +61,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): version_number = int(instance_version) family = instance.data["family"] - family_low = instance.data["family"].lower() + family_low = family.lower() asset_type = instance.data.get("ftrackFamily") if not asset_type and family_low in self.family_mapping: @@ -79,11 +82,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ).format(family)) return - # Prepare FPS - instance_fps = instance.data.get("fps") - if instance_fps is None: - instance_fps = instance.context.data["fps"] - status_name = self._get_asset_version_status_name(instance) # Base of component item data @@ -140,24 +138,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component = None first_thumbnail_component_repre = None for repre in thumbnail_representations: - published_path = repre.get("published_path") - if not published_path: - comp_files = repre["files"] - if isinstance(comp_files, (tuple, list, set)): - filename = comp_files[0] - else: - filename = comp_files - - published_path = os.path.join( - repre["stagingDir"], filename + repre_path = self._get_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published path is not set and source was removed." 
) - if not os.path.exists(published_path): - continue - repre["published_path"] = published_path + continue # Create copy of base comp item and append it thumbnail_item = copy.deepcopy(base_component_item) - thumbnail_item["component_path"] = repre["published_path"] + thumbnail_item["component_path"] = repre_path thumbnail_item["component_data"] = { "name": "thumbnail" } @@ -176,10 +166,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add item to component list component_list.append(thumbnail_item) - if ( - not review_representations - and first_thumbnail_component is not None - ): + if first_thumbnail_component is not None: width = first_thumbnail_component_repre.get("width") height = first_thumbnail_component_repre.get("height") if not width or not height: @@ -216,6 +203,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): extended_asset_name = "" multiple_reviewable = len(review_representations) > 1 for repre in review_representations: + repre_path = self._get_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published path is not set and source was removed." + ) + continue + # Create copy of base comp item and append it review_item = copy.deepcopy(base_component_item) @@ -254,33 +248,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component[ "asset_data"]["name"] = extended_asset_name - frame_start = repre.get("frameStartFtrack") - frame_end = repre.get("frameEndFtrack") - if frame_start is None or frame_end is None: - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - - # Frame end of uploaded video file should be duration in frames - # - frame start is always 0 - # - frame end is duration in frames - duration = frame_end - frame_start + 1 - - fps = repre.get("fps") - if fps is None: - fps = instance_fps + component_meta = self._prepare_component_metadata( + instance, repre, repre_path, True + ) # Change location - review_item["component_path"] = repre["published_path"] + review_item["component_path"] = repre_path # Change component data review_item["component_data"] = { # Default component name is "main". 
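+                    # Hedged illustration (assumed numbers): for a 100-frame
+                    # review at 24 fps, component_meta serializes to
+                    # '{"frameIn": 0, "frameOut": 100.0, "frameRate": 24.0}'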
"name": "ftrackreview-mp4", "metadata": { - "ftr_meta": json.dumps({ - "frameIn": 0, - "frameOut": int(duration), - "frameRate": float(fps) - }) + "ftr_meta": json.dumps(component_meta) } } @@ -323,11 +302,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): component_data = copy_src_item["component_data"] component_name = component_data["name"] component_data["name"] = component_name + "_src" + component_meta = self._prepare_component_metadata( + instance, repre, copy_src_item["component_path"], False + ) + if component_meta: + component_data["metadata"] = { + "ftr_meta": json.dumps(component_meta) + } component_list.append(copy_src_item) # Add others representations as component for repre in other_representations: - published_path = repre.get("published_path") + published_path = self._get_repre_path(instance, repre, True) if not published_path: continue # Create copy of base comp item and append it @@ -340,9 +326,17 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ): other_item["asset_data"]["name"] = extended_asset_name - other_item["component_data"] = { + component_meta = self._prepare_component_metadata( + instance, repre, published_path, False + ) + component_data = { "name": repre["name"] } + if component_meta: + component_data["metadata"] = { + "ftr_meta": json.dumps(component_meta) + } + other_item["component_data"] = component_data other_item["component_location_name"] = unmanaged_location_name other_item["component_path"] = published_path component_list.append(other_item) @@ -360,6 +354,51 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): )) instance.data["ftrackComponentsList"] = component_list + def _get_repre_path(self, instance, repre, only_published): + """Get representation path that can be used for integration. + + When 'only_published' is set to true the validation of path is not + relevant. In that case we just need what is set in 'published_path' + as "reference". The reference is not used to get or upload the file but + for reference where the file was published. + + Args: + instance (pyblish.Instance): Processed instance object. Used + for source of staging dir if representation does not have + filled it. + repre (dict): Representation on instance which could be and + could not be integrated with main integrator. + only_published (bool): Care only about published paths and + ignore if filepath is not existing anymore. + + Returns: + str: Path to representation file. + None: Path is not filled or does not exists. 
+ """ + + published_path = repre.get("published_path") + if published_path: + published_path = os.path.normpath(published_path) + if os.path.exists(published_path): + return published_path + + if only_published: + return published_path + + comp_files = repre["files"] + if isinstance(comp_files, (tuple, list, set)): + filename = comp_files[0] + else: + filename = comp_files + + staging_dir = repre.get("stagingDir") + if not staging_dir: + staging_dir = instance.data["stagingDir"] + src_path = os.path.normpath(os.path.join(staging_dir, filename)) + if os.path.exists(src_path): + return src_path + return None + def _get_asset_version_status_name(self, instance): if not self.asset_versions_status_profiles: return None @@ -380,3 +419,107 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): return None return matching_profile["status"] or None + + def _prepare_component_metadata( + self, instance, repre, component_path, is_review + ): + extension = os.path.splitext(component_path)[-1] + streams = [] + try: + streams = get_ffprobe_streams(component_path) + except Exception: + self.log.debug(( + "Failed to retrieve information about intput {}" + ).format(component_path)) + + # Find video streams + video_streams = [ + stream + for stream in streams + if stream["codec_type"] == "video" + ] + # Skip if there are not video streams + # - exr is special case which can have issues with reading through + # ffmpegh but we want to set fps for it + if not video_streams and extension not in [".exr"]: + return {} + + stream_width = None + stream_height = None + stream_fps = None + frame_out = None + for video_stream in video_streams: + tmp_width = video_stream.get("width") + tmp_height = video_stream.get("height") + if tmp_width and tmp_height: + stream_width = tmp_width + stream_height = tmp_height + + input_framerate = video_stream.get("r_frame_rate") + duration = video_stream.get("duration") + if input_framerate is None or duration is None: + continue + try: + stream_fps = convert_ffprobe_fps_to_float( + input_framerate + ) + except ValueError: + self.log.warning(( + "Could not convert ffprobe fps to float \"{}\"" + ).format(input_framerate)) + continue + + stream_width = tmp_width + stream_height = tmp_height + + self.log.debug("FPS from stream is {} and duration is {}".format( + input_framerate, duration + )) + frame_out = float(duration) * stream_fps + break + + # Prepare FPS + instance_fps = instance.data.get("fps") + if instance_fps is None: + instance_fps = instance.context.data["fps"] + + if not is_review: + output = {} + fps = stream_fps or instance_fps + if fps: + output["frameRate"] = fps + + if stream_width and stream_height: + output["width"] = int(stream_width) + output["height"] = int(stream_height) + return output + + frame_start = repre.get("frameStartFtrack") + frame_end = repre.get("frameEndFtrack") + if frame_start is None or frame_end is None: + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + + fps = None + repre_fps = repre.get("fps") + if repre_fps is not None: + repre_fps = float(repre_fps) + + fps = stream_fps or repre_fps or instance_fps + + # Frame end of uploaded video file should be duration in frames + # - frame start is always 0 + # - frame end is duration in frames + if not frame_out: + frame_out = frame_end - frame_start + 1 + + # Ftrack documentation says that it is required to have + # 'width' and 'height' in review component. But with those values + # review video does not play. 
+        component_meta = {
+            "frameIn": 0,
+            "frameOut": frame_out,
+            "frameRate": float(fps)
+        }
+
+        return component_meta
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
index 77a7ebdfcf..ac3fa874e0 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
@@ -9,9 +9,11 @@ Requires:
 """
 
 import sys
+import copy
 
 import six
 import pyblish.api
+from openpype.lib import StringTemplate
 
 
 class IntegrateFtrackNote(pyblish.api.InstancePlugin):
@@ -53,14 +55,10 @@
         intent = instance.context.data.get("intent")
         intent_label = None
-        if intent and isinstance(intent, dict):
-            intent_val = intent.get("value")
-            intent_label = intent.get("label")
-        else:
-            intent_val = intent
-
-        if not intent_label:
-            intent_label = intent_val or ""
+        if intent:
+            value = intent["value"]
+            if value:
+                intent_label = intent["label"] or value
 
         # if intent label is set then format comment
         # - it is possible that intent_label is equal to "" (empty string)
@@ -96,6 +94,14 @@
 
             labels.append(label)
 
+        base_format_data = {
+            "host_name": host_name,
+            "app_name": app_name,
+            "app_label": app_label,
+            "source": instance.data.get("source", '')
+        }
+        if comment:
+            base_format_data["comment"] = comment
         for asset_version_data in asset_versions_data_by_id.values():
             asset_version = asset_version_data["asset_version"]
             component_items = asset_version_data["component_items"]
@@ -109,23 +115,31 @@
             template = self.note_template
             if template is None:
                 template = self.note_with_intent_template
-            format_data = {
-                "intent": intent_label,
-                "comment": comment,
-                "host_name": host_name,
-                "app_name": app_name,
-                "app_label": app_label,
-                "published_paths": "\n".join(sorted(published_paths)),
-                "source": instance.data.get("source", '')
-            }
-            comment = template.format(**format_data)
-            if not comment:
+            format_data = copy.deepcopy(base_format_data)
+            format_data["published_paths"] = "\n".join(
+                sorted(published_paths)
+            )
+            if intent:
+                if "{intent}" in template:
+                    format_data["intent"] = intent_label
+                else:
+                    format_data["intent"] = intent
+
+            note_text = StringTemplate.format_template(template, format_data)
+            if not note_text.solved:
+                self.log.warning((
+                    "Note template requires more keys than can be provided."
+                    "\nTemplate: {}\nData: {}"
+                ).format(template, format_data))
+                continue
+
+            if not note_text:
                 self.log.info((
                     "Note for AssetVersion {} would be empty. Skipping."
                     "\nTemplate: {}\nData: {}"
                 ).format(asset_version["id"], template, format_data))
                 continue
-            asset_version.create_note(comment, author=user, labels=labels)
+            asset_version.create_note(note_text, author=user, labels=labels)
 
             try:
                 session.commit()
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
index 1a5d74bf26..b8855ee2bd 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
@@ -65,7 +65,13 @@
     order = pyblish.api.IntegratorOrder - 0.04
     label = 'Integrate Hierarchy To Ftrack'
     families = ["shot"]
-    hosts = ["hiero", "resolve", "standalonepublisher", "flame"]
+    hosts = [
+        "hiero",
+        "resolve",
+        "standalonepublisher",
+        "flame",
+        "traypublisher"
+    ]
     optional = False
 
     def process(self, context):
diff --git a/openpype/modules/ftrack/scripts/sub_event_storer.py b/openpype/modules/ftrack/scripts/sub_event_storer.py
index 946ecbff79..204cce89e8 100644
--- a/openpype/modules/ftrack/scripts/sub_event_storer.py
+++ b/openpype/modules/ftrack/scripts/sub_event_storer.py
@@ -6,6 +6,8 @@ import socket
 
 import pymongo
 import ftrack_api
+
+from openpype.client import OpenPypeMongoConnection
 from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer
 from openpype_modules.ftrack.ftrack_server.lib import (
     SocketSession,
@@ -15,7 +17,6 @@
 )
 from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info
 from openpype.lib import (
-    OpenPypeMongoConnection,
     get_openpype_version,
     get_build_version
 )
diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py
index d28ded06c7..c9e78b59eb 100644
--- a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py
+++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py
@@ -39,10 +39,12 @@
             kitsu_entity = gazu.asset.get_asset(zou_asset_data["id"])
 
         if not kitsu_entity:
-            raise AssertionError(f"{entity_type} not found in kitsu!")
+            raise AssertionError("{} not found in kitsu!".format(entity_type))
         context.data["kitsu_entity"] = kitsu_entity
-        self.log.debug(f"Collect kitsu {entity_type}: {kitsu_entity}")
+        self.log.debug(
+            "Collect kitsu {}: {}".format(entity_type, kitsu_entity)
+        )
 
         if zou_task_data:
             kitsu_task = gazu.task.get_task(zou_task_data["id"])
diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py
index 02c27382eb..e03cf2b30e 100644
--- a/openpype/modules/kitsu/utils/update_op_with_zou.py
+++ b/openpype/modules/kitsu/utils/update_op_with_zou.py
@@ -219,20 +219,25 @@ def update_op_assets(
 
         # Add parents for hierarchy
         item_data["parents"] = []
-        while parent_zou_id is not None:
-            parent_doc = 
asset_doc_ids[parent_zou_id] + ancestor_id = parent_zou_id + while ancestor_id is not None: + parent_doc = asset_doc_ids[ancestor_id] item_data["parents"].insert(0, parent_doc["name"]) # Get parent entity parent_entity = parent_doc["data"]["zou"] - parent_zou_id = parent_entity.get("parent_id") + ancestor_id = parent_entity.get("parent_id") - if item_type in ["Shot", "Sequence"]: + # Build OpenPype compatible name + if item_type in ["Shot", "Sequence"] and parent_zou_id is not None: # Name with parents hierarchy "({episode}_){sequence}_{shot}" # to avoid duplicate name issue - item_name = "_".join(item_data["parents"] + [item_doc["name"]]) + item_name = f"{item_data['parents'][-1]}_{item['name']}" + + # Update doc name + asset_doc_ids[item["id"]]["name"] = item_name else: - item_name = item_doc["name"] + item_name = item["name"] # Set root folders parents item_data["parents"] = entity_parent_folders + item_data["parents"] @@ -276,7 +281,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne: project_doc = create_project(project_name, project_name, dbcon=dbcon) # Project data and tasks - project_data = project["data"] or {} + project_data = project_doc["data"] or {} # Build project code and update Kitsu project_code = project.get("code") @@ -305,6 +310,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne: "config.tasks": { t["name"]: {"short_name": t.get("short_name", t["name"])} for t in gazu.task.all_task_types_for_project(project) + or gazu.task.all_task_types() }, "data": project_data, } diff --git a/openpype/modules/shotgrid/README.md b/openpype/modules/shotgrid/README.md new file mode 100644 index 0000000000..cbee0e9bf4 --- /dev/null +++ b/openpype/modules/shotgrid/README.md @@ -0,0 +1,19 @@ +## Shotgrid Module + +### Pre-requisites + +Install and launch a [shotgrid leecher](https://github.com/Ellipsanime/shotgrid-leecher) server + +### Quickstart + +The goal of this tutorial is to synchronize an already existing shotgrid project with OpenPype. 
+
+- Activate the shotgrid module in the **system settings** and enter the shotgrid leecher server API URL
+
+- Create a new OpenPype project with the **project manager**
+
+- Enter the shotgrid authentication info (URL, script name, API key) and the shotgrid project ID related to this OpenPype project in the **project settings**
+
+- Use the batch interface (Tray > shotgrid > Launch batch), select your project and click "batch"
+
+- You can now access your shotgrid entities within the **avalon launcher** and publish information to shotgrid with **pyblish**
diff --git a/openpype/modules/shotgrid/__init__.py b/openpype/modules/shotgrid/__init__.py
new file mode 100644
index 0000000000..f1337a9492
--- /dev/null
+++ b/openpype/modules/shotgrid/__init__.py
@@ -0,0 +1,5 @@
+from .shotgrid_module import (
+    ShotgridModule,
+)
+
+__all__ = ("ShotgridModule",)
diff --git a/openpype/modules/shotgrid/lib/__init__.py b/openpype/modules/shotgrid/lib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/modules/shotgrid/lib/const.py b/openpype/modules/shotgrid/lib/const.py
new file mode 100644
index 0000000000..2a34800fac
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/const.py
@@ -0,0 +1 @@
+MODULE_NAME = "shotgrid"
diff --git a/openpype/modules/shotgrid/lib/credentials.py b/openpype/modules/shotgrid/lib/credentials.py
new file mode 100644
index 0000000000..337c4f6ecb
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/credentials.py
@@ -0,0 +1,125 @@
+
+from urllib.parse import urlparse
+
+import shotgun_api3
+from shotgun_api3.shotgun import AuthenticationFault
+
+from openpype.lib import OpenPypeSecureRegistry, OpenPypeSettingsRegistry
+from openpype.modules.shotgrid.lib.record import Credentials
+
+
+def _get_shotgrid_secure_key(hostname, key):
+    """Secure item key for entered hostname."""
+    return f"shotgrid/{hostname}/{key}"
+
+
+def _get_secure_value_and_registry(
+    hostname,
+    name,
+):
+    key = _get_shotgrid_secure_key(hostname, name)
+    registry = OpenPypeSecureRegistry(key)
+    return registry.get_item(name, None), registry
+
+
+def get_shotgrid_hostname(shotgrid_url):
+
+    if not shotgrid_url:
+        raise Exception("Shotgrid url cannot be a null")
+
+    valid_shotgrid_url = (
+        f"//{shotgrid_url}" if "//" not in shotgrid_url else shotgrid_url
+    )
+    return urlparse(valid_shotgrid_url).hostname
+
+
+# Credentials storing function (using keyring)
+
+
+def get_credentials(shotgrid_url):
+    hostname = get_shotgrid_hostname(shotgrid_url)
+    if not hostname:
+        return None
+    login_value, _ = _get_secure_value_and_registry(
+        hostname,
+        Credentials.login_key_prefix(),
+    )
+    password_value, _ = _get_secure_value_and_registry(
+        hostname,
+        Credentials.password_key_prefix(),
+    )
+    return Credentials(login_value, password_value)
+
+
+def save_credentials(login, password, shotgrid_url):
+    hostname = get_shotgrid_hostname(shotgrid_url)
+    _, login_registry = _get_secure_value_and_registry(
+        hostname,
+        Credentials.login_key_prefix(),
+    )
+    _, password_registry = _get_secure_value_and_registry(
+        hostname,
+        Credentials.password_key_prefix(),
+    )
+    clear_credentials(shotgrid_url)
+    login_registry.set_item(Credentials.login_key_prefix(), login)
+    password_registry.set_item(Credentials.password_key_prefix(), password)
+
+
+def clear_credentials(shotgrid_url):
+    hostname = get_shotgrid_hostname(shotgrid_url)
+    login_value, login_registry = _get_secure_value_and_registry(
+        hostname,
+        Credentials.login_key_prefix(),
+    )
+    password_value, password_registry = 
_get_secure_value_and_registry(
+        hostname,
+        Credentials.password_key_prefix(),
+    )
+
+    if login_value is not None:
+        login_registry.delete_item(Credentials.login_key_prefix())
+
+    if password_value is not None:
+        password_registry.delete_item(Credentials.password_key_prefix())
+
+
+# Login storing function (using json)
+
+
+def get_local_login():
+    reg = OpenPypeSettingsRegistry()
+    try:
+        return str(reg.get_item("shotgrid_login"))
+    except Exception:
+        return None
+
+
+def save_local_login(login):
+    reg = OpenPypeSettingsRegistry()
+    reg.set_item("shotgrid_login", login)
+
+
+def clear_local_login():
+    reg = OpenPypeSettingsRegistry()
+    reg.delete_item("shotgrid_login")
+
+
+def check_credentials(
+    login,
+    password,
+    shotgrid_url,
+):
+
+    if not shotgrid_url or not login or not password:
+        return False
+    try:
+        session = shotgun_api3.Shotgun(
+            shotgrid_url,
+            login=login,
+            password=password,
+        )
+        session.preferences_read()
+        session.close()
+    except AuthenticationFault:
+        return False
+    return True
diff --git a/openpype/modules/shotgrid/lib/record.py b/openpype/modules/shotgrid/lib/record.py
new file mode 100644
index 0000000000..f62f4855d5
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/record.py
@@ -0,0 +1,20 @@
+
+class Credentials:
+    login = None
+    password = None
+
+    def __init__(self, login, password) -> None:
+        super().__init__()
+        self.login = login
+        self.password = password
+
+    def is_empty(self):
+        return not (self.login and self.password)
+
+    @staticmethod
+    def login_key_prefix():
+        return "login"
+
+    @staticmethod
+    def password_key_prefix():
+        return "password"
diff --git a/openpype/modules/shotgrid/lib/settings.py b/openpype/modules/shotgrid/lib/settings.py
new file mode 100644
index 0000000000..924099f04b
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/settings.py
@@ -0,0 +1,18 @@
+from openpype.api import get_system_settings, get_project_settings
+from openpype.modules.shotgrid.lib.const import MODULE_NAME
+
+
+def get_shotgrid_project_settings(project):
+    return get_project_settings(project).get(MODULE_NAME, {})
+
+
+def get_shotgrid_settings():
+    return get_system_settings().get("modules", {}).get(MODULE_NAME, {})
+
+
+def get_shotgrid_servers():
+    return get_shotgrid_settings().get("shotgrid_settings", {})
+
+
+def get_leecher_backend_url():
+    return get_shotgrid_settings().get("leecher_backend_url")
diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py
new file mode 100644
index 0000000000..0b03ac2e5d
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py
@@ -0,0 +1,100 @@
+import os
+
+import pyblish.api
+from openpype.lib.mongo import OpenPypeMongoConnection
+
+
+class CollectShotgridEntities(pyblish.api.ContextPlugin):
+    """Collect shotgrid entities according to the current context"""
+
+    order = pyblish.api.CollectorOrder + 0.499
+    label = "Shotgrid entities"
+
+    def process(self, context):
+
+        avalon_project = context.data.get("projectEntity")
+        avalon_asset = context.data.get("assetEntity")
+        avalon_task_name = os.getenv("AVALON_TASK")
+
+        self.log.info(avalon_project)
+        self.log.info(avalon_asset)
+
+        sg_project = _get_shotgrid_project(context)
+        sg_task = _get_shotgrid_task(
+            avalon_project,
+            avalon_asset,
+            avalon_task_name
+        )
+        sg_entity = _get_shotgrid_entity(avalon_project, avalon_asset)
+
+        if sg_project:
+            context.data["shotgridProject"] = sg_project
+            self.log.info(
+                "Collected corresponding shotgrid project : {}".format(
+                    sg_project
+                )
+            )
+
+        if sg_task:
+            context.data["shotgridTask"] = sg_task
+            self.log.info(
+                "Collected corresponding shotgrid task : {}".format(sg_task)
+            )
+
+        if sg_entity:
+            context.data["shotgridEntity"] = sg_entity
+            self.log.info(
+                "Collected corresponding shotgrid entity : {}".format(sg_entity)
+            )
+
+    def _find_existing_version(self, code, context):
+
+        filters = [
+            ["project", "is", context.data.get("shotgridProject")],
+            ["sg_task", "is", context.data.get("shotgridTask")],
+            ["entity", "is", context.data.get("shotgridEntity")],
+            ["code", "is", code],
+        ]
+
+        sg = context.data.get("shotgridSession")
+        return sg.find_one("Version", filters, [])
+
+
+def _get_shotgrid_collection(project):
+    client = OpenPypeMongoConnection.get_mongo_client()
+    return client.get_database("shotgrid_openpype").get_collection(project)
+
+
+def _get_shotgrid_project(context):
+    shotgrid_project_id = context.data["project_settings"].get(
+        "shotgrid_project_id")
+    if shotgrid_project_id:
+        return {"type": "Project", "id": shotgrid_project_id}
+    return {}
+
+
+def _get_shotgrid_task(avalon_project, avalon_asset, avalon_task):
+    sg_col = _get_shotgrid_collection(avalon_project["name"])
+    shotgrid_task_hierarchy_row = sg_col.find_one(
+        {
+            "type": "Task",
+            "_id": {"$regex": "^" + avalon_task + "_[0-9]*"},
+            "parent": {"$regex": ".*," + avalon_asset["name"] + ","},
+        }
+    )
+    if shotgrid_task_hierarchy_row:
+        return {"type": "Task", "id": shotgrid_task_hierarchy_row["src_id"]}
+    return {}
+
+
+def _get_shotgrid_entity(avalon_project, avalon_asset):
+    sg_col = _get_shotgrid_collection(avalon_project["name"])
+    shotgrid_entity_hierarchy_row = sg_col.find_one(
+        {"_id": avalon_asset["name"]}
+    )
+    if shotgrid_entity_hierarchy_row:
+        return {
+            "type": shotgrid_entity_hierarchy_row["type"],
+            "id": shotgrid_entity_hierarchy_row["src_id"],
+        }
+    return {}
diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py
new file mode 100644
index 0000000000..9d5d2271bf
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py
@@ -0,0 +1,123 @@
+import os
+
+import pyblish.api
+import shotgun_api3
+from shotgun_api3.shotgun import AuthenticationFault
+
+from openpype.lib import OpenPypeSettingsRegistry
+from openpype.modules.shotgrid.lib.settings import (
+    get_shotgrid_servers,
+    get_shotgrid_project_settings,
+)
+
+
+class CollectShotgridSession(pyblish.api.ContextPlugin):
+    """Collect shotgrid session using user credentials"""
+
+    order = pyblish.api.CollectorOrder
+    label = "Shotgrid user session"
+
+    def process(self, context):
+
+        certificate_path = os.getenv("SHOTGUN_API_CACERTS")
+        if certificate_path is None or not os.path.exists(certificate_path):
+            self.log.info(
+                "SHOTGUN_API_CACERTS does not contain a valid \
+                path: {}".format(
+                    certificate_path
+                )
+            )
+            certificate_path = get_shotgrid_certificate()
+            self.log.info("Get Certificate from shotgrid_api")
+
+        if not os.path.exists(certificate_path):
+            self.log.error(
+                "Could not find certificate in shotgun_api3: \
+                {}".format(
+                    certificate_path
+                )
+            )
+            return
+
+        set_shotgrid_certificate(certificate_path)
+        self.log.info("Set Certificate: {}".format(certificate_path))
+
+        avalon_project = os.getenv("AVALON_PROJECT")
+
+        shotgrid_settings = get_shotgrid_project_settings(avalon_project)
+        self.log.info("shotgrid settings: {}".format(shotgrid_settings))
+        shotgrid_servers_settings = 
get_shotgrid_servers()
+        self.log.info(
+            "shotgrid_servers_settings: {}".format(shotgrid_servers_settings)
+        )
+
+        shotgrid_server = shotgrid_settings.get("shotgrid_server", "")
+        if not shotgrid_server:
+            self.log.error(
+                "No Shotgrid server found, please choose a credential "
+                "in script name and script key in OpenPype settings"
+            )
+
+        shotgrid_server_setting = shotgrid_servers_settings.get(
+            shotgrid_server, {}
+        )
+        shotgrid_url = shotgrid_server_setting.get("shotgrid_url", "")
+
+        shotgrid_script_name = shotgrid_server_setting.get(
+            "shotgrid_script_name", ""
+        )
+        shotgrid_script_key = shotgrid_server_setting.get(
+            "shotgrid_script_key", ""
+        )
+        if not shotgrid_script_name and not shotgrid_script_key:
+            self.log.error(
+                "No Shotgrid api credential found, please enter "
+                "script name and script key in OpenPype settings"
+            )
+
+        login = get_login() or os.getenv("OPENPYPE_SG_USER")
+
+        if not login:
+            self.log.error(
+                "No Shotgrid login found, please "
+                "log in to shotgrid within the OpenPype Tray"
+            )
+
+        session = shotgun_api3.Shotgun(
+            base_url=shotgrid_url,
+            script_name=shotgrid_script_name,
+            api_key=shotgrid_script_key,
+            sudo_as_login=login,
+        )
+
+        try:
+            session.preferences_read()
+        except AuthenticationFault:
+            raise ValueError(
+                "Could not connect to shotgrid {} with user {}".format(
+                    shotgrid_url, login
+                )
+            )
+
+        self.log.info(
+            "Logged in to shotgrid {} with user {}".format(shotgrid_url, login)
+        )
+        context.data["shotgridSession"] = session
+        context.data["shotgridUser"] = login
+
+
+def get_shotgrid_certificate():
+    shotgun_api_path = os.path.dirname(shotgun_api3.__file__)
+    return os.path.join(shotgun_api_path, "lib", "certifi", "cacert.pem")
+
+
+def set_shotgrid_certificate(certificate):
+    os.environ["SHOTGUN_API_CACERTS"] = certificate
+
+
+def get_login():
+    reg = OpenPypeSettingsRegistry()
+    try:
+        return str(reg.get_item("shotgrid_login"))
+    except Exception:
+        return None
diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py
new file mode 100644
index 0000000000..cfd2d10fd9
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py
@@ -0,0 +1,77 @@
+import os
+import pyblish.api
+
+
+class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
+    """
+    Create PublishedFiles from representations and add them to the version.
+    If a representation is tagged for shotgrid review, it is also set as
+    the version's path to movie (for a movie file) or path to frames (for
+    an image sequence).
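+
+    Illustrative (assumed) payload created for each published file:
+        {"project": {...}, "code": "sh010_comp_v001.mov", "entity": {...},
+         "task": {...}, "version": {...},
+         "path": {"local_path": "/path/to/sh010_comp_v001.mov"}}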
+ """ + + order = pyblish.api.IntegratorOrder + 0.499 + label = "Shotgrid Published Files" + + def process(self, instance): + + context = instance.context + + self.sg = context.data.get("shotgridSession") + + shotgrid_version = instance.data.get("shotgridVersion") + + for representation in instance.data.get("representations", []): + + local_path = representation.get("published_path") + code = os.path.basename(local_path) + + if representation.get("tags", []): + continue + + published_file = self._find_existing_publish( + code, context, shotgrid_version + ) + + published_file_data = { + "project": context.data.get("shotgridProject"), + "code": code, + "entity": context.data.get("shotgridEntity"), + "task": context.data.get("shotgridTask"), + "version": shotgrid_version, + "path": {"local_path": local_path}, + } + if not published_file: + published_file = self._create_published(published_file_data) + self.log.info( + "Create Shotgrid PublishedFile: {}".format(published_file) + ) + else: + self.sg.update( + published_file["type"], + published_file["id"], + published_file_data, + ) + self.log.info( + "Update Shotgrid PublishedFile: {}".format(published_file) + ) + + if instance.data["family"] == "image": + self.sg.upload_thumbnail( + published_file["type"], published_file["id"], local_path + ) + instance.data["shotgridPublishedFile"] = published_file + + def _find_existing_publish(self, code, context, shotgrid_version): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["version", "is", shotgrid_version], + ["code", "is", code], + ] + return self.sg.find_one("PublishedFile", filters, []) + + def _create_published(self, published_file_data): + + return self.sg.create("PublishedFile", published_file_data) diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py new file mode 100644 index 0000000000..a1b7140e22 --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py @@ -0,0 +1,92 @@ +import os +import pyblish.api + + +class IntegrateShotgridVersion(pyblish.api.InstancePlugin): + """Integrate Shotgrid Version""" + + order = pyblish.api.IntegratorOrder + 0.497 + label = "Shotgrid Version" + + sg = None + + def process(self, instance): + + context = instance.context + self.sg = context.data.get("shotgridSession") + + # TODO: Use path template solver to build version code from settings + anatomy = instance.data.get("anatomyData", {}) + code = "_".join( + [ + anatomy["project"]["code"], + anatomy["parent"], + anatomy["asset"], + anatomy["task"]["name"], + "v{:03}".format(int(anatomy["version"])), + ] + ) + + version = self._find_existing_version(code, context) + + if not version: + version = self._create_version(code, context) + self.log.info("Create Shotgrid version: {}".format(version)) + else: + self.log.info("Use existing Shotgrid version: {}".format(version)) + + data_to_update = {} + status = context.data.get("intent", {}).get("value") + if status: + data_to_update["sg_status_list"] = status + + for representation in instance.data.get("representations", []): + local_path = representation.get("published_path") + code = os.path.basename(local_path) + + if "shotgridreview" in representation.get("tags", []): + + if representation["ext"] in ["mov", "avi"]: + self.log.info( + "Upload review: {} for version shotgrid {}".format( + 
local_path, version.get("id")
+                        )
+                    )
+                    self.sg.upload(
+                        "Version",
+                        version.get("id"),
+                        local_path,
+                        field_name="sg_uploaded_movie",
+                    )
+
+                    data_to_update["sg_path_to_movie"] = local_path
+
+                elif representation["ext"] in ["jpg", "png", "exr", "tga"]:
+                    path_to_frame = local_path.replace("0000", "#")
+                    data_to_update["sg_path_to_frames"] = path_to_frame
+
+        self.log.info("Update Shotgrid version with {}".format(data_to_update))
+        self.sg.update("Version", version["id"], data_to_update)
+
+        instance.data["shotgridVersion"] = version
+
+    def _find_existing_version(self, code, context):
+
+        filters = [
+            ["project", "is", context.data.get("shotgridProject")],
+            ["sg_task", "is", context.data.get("shotgridTask")],
+            ["entity", "is", context.data.get("shotgridEntity")],
+            ["code", "is", code],
+        ]
+        return self.sg.find_one("Version", filters, [])
+
+    def _create_version(self, code, context):
+
+        version_data = {
+            "project": context.data.get("shotgridProject"),
+            "sg_task": context.data.get("shotgridTask"),
+            "entity": context.data.get("shotgridEntity"),
+            "code": code,
+        }
+
+        return self.sg.create("Version", version_data)
diff --git a/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py
new file mode 100644
index 0000000000..c14c980e2a
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py
@@ -0,0 +1,38 @@
+import pyblish.api
+import openpype.api
+
+
+class ValidateShotgridUser(pyblish.api.ContextPlugin):
+    """
+    Check that the user is valid and has access to the project.
+    """
+
+    label = "Validate Shotgrid User"
+    order = openpype.api.ValidateContentsOrder
+
+    def process(self, context):
+        sg = context.data.get("shotgridSession")
+
+        login = context.data.get("shotgridUser")
+        self.log.info("Login shotgrid set in OpenPype is {}".format(login))
+        project = context.data.get("shotgridProject")
+        self.log.info("Current shotgun project is {}".format(project))
+
+        if not (login and sg and project):
+            raise KeyError()
+
+        user = sg.find_one("HumanUser", [["login", "is", login]], ["projects"])
+
+        self.log.info(user)
+        self.log.info(login)
+        user_projects_id = [p["id"] for p in user.get("projects", [])]
+        if not project.get("id") in user_projects_id:
+            raise PermissionError(
+                "Login {} does not have access to the project {}".format(
+                    login, project
+                )
+            )
+
+        self.log.info(
+            "Login {} has access to the project {}".format(login, project)
+        )
diff --git a/openpype/modules/shotgrid/server/README.md b/openpype/modules/shotgrid/server/README.md
new file mode 100644
index 0000000000..15e056ff3e
--- /dev/null
+++ b/openpype/modules/shotgrid/server/README.md
@@ -0,0 +1,5 @@
+
+### Shotgrid server
+
+Please refer to the external project that covers Openpype/Shotgrid communication:
+ - https://github.com/Ellipsanime/shotgrid-leecher
diff --git a/openpype/modules/shotgrid/shotgrid_module.py b/openpype/modules/shotgrid/shotgrid_module.py
new file mode 100644
index 0000000000..5644f0c35f
--- /dev/null
+++ b/openpype/modules/shotgrid/shotgrid_module.py
@@ -0,0 +1,58 @@
+import os
+
+from openpype_interfaces import (
+    ITrayModule,
+    IPluginPaths,
+    ILaunchHookPaths,
+)
+
+from openpype.modules import OpenPypeModule
+
+SHOTGRID_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class ShotgridModule(
+    OpenPypeModule, ITrayModule, IPluginPaths, ILaunchHookPaths
+):
+    leecher_manager_url = None
+    name = "shotgrid"
+    enabled = False
+    project_id = None
+    tray_wrapper = None
+
+    def 
initialize(self, modules_settings): + shotgrid_settings = modules_settings.get(self.name, dict()) + self.enabled = shotgrid_settings.get("enabled", False) + self.leecher_manager_url = shotgrid_settings.get( + "leecher_manager_url", "" + ) + + def connect_with_modules(self, enabled_modules): + pass + + def get_global_environments(self): + return {"PROJECT_ID": self.project_id} + + def get_plugin_paths(self): + return { + "publish": [ + os.path.join(SHOTGRID_MODULE_DIR, "plugins", "publish") + ] + } + + def get_launch_hook_paths(self): + return os.path.join(SHOTGRID_MODULE_DIR, "hooks") + + def tray_init(self): + from .tray.shotgrid_tray import ShotgridTrayWrapper + + self.tray_wrapper = ShotgridTrayWrapper(self) + + def tray_start(self): + return self.tray_wrapper.validate() + + def tray_exit(self, *args, **kwargs): + return self.tray_wrapper + + def tray_menu(self, tray_menu): + return self.tray_wrapper.tray_menu(tray_menu) diff --git a/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py new file mode 100644 index 0000000000..1f78cf77c9 --- /dev/null +++ b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py @@ -0,0 +1,34 @@ +import pytest +from assertpy import assert_that + +import openpype.modules.shotgrid.lib.credentials as sut + + +def test_missing_shotgrid_url(): + with pytest.raises(Exception) as ex: + # arrange + url = "" + # act + sut.get_shotgrid_hostname(url) + # assert + assert_that(ex).is_equal_to("Shotgrid url cannot be a null") + + +def test_full_shotgrid_url(): + # arrange + url = "https://shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert + assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") + + +def test_incomplete_shotgrid_url(): + # arrange + url = "shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert + assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") diff --git a/openpype/modules/shotgrid/tray/credential_dialog.py b/openpype/modules/shotgrid/tray/credential_dialog.py new file mode 100644 index 0000000000..9d841d98be --- /dev/null +++ b/openpype/modules/shotgrid/tray/credential_dialog.py @@ -0,0 +1,201 @@ +import os +from Qt import QtCore, QtWidgets, QtGui + +from openpype import style +from openpype import resources +from openpype.modules.shotgrid.lib import settings, credentials + + +class CredentialsDialog(QtWidgets.QDialog): + SIZE_W = 450 + SIZE_H = 200 + + _module = None + _is_logged = False + url_label = None + login_label = None + password_label = None + url_input = None + login_input = None + password_input = None + input_layout = None + login_button = None + buttons_layout = None + main_widget = None + + login_changed = QtCore.Signal() + + def __init__(self, module, parent=None): + super(CredentialsDialog, self).__init__(parent) + + self._module = module + self._is_logged = False + + self.setWindowTitle("OpenPype - Shotgrid Login") + + icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowMinimizeButtonHint + ) + self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) + self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100)) + self.setStyleSheet(style.load_stylesheet()) + + self.ui_init() + + def ui_init(self): + self.url_label = QtWidgets.QLabel("Shotgrid server:") + self.login_label = 
QtWidgets.QLabel("Login:") + self.password_label = QtWidgets.QLabel("Password:") + + self.url_input = QtWidgets.QComboBox() + # self.url_input.setReadOnly(True) + + self.login_input = QtWidgets.QLineEdit() + self.login_input.setPlaceholderText("login") + + self.password_input = QtWidgets.QLineEdit() + self.password_input.setPlaceholderText("password") + self.password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + self.error_label = QtWidgets.QLabel("") + self.error_label.setStyleSheet("color: red;") + self.error_label.setWordWrap(True) + self.error_label.hide() + + self.input_layout = QtWidgets.QFormLayout() + self.input_layout.setContentsMargins(10, 15, 10, 5) + + self.input_layout.addRow(self.url_label, self.url_input) + self.input_layout.addRow(self.login_label, self.login_input) + self.input_layout.addRow(self.password_label, self.password_input) + self.input_layout.addRow(self.error_label) + + self.login_button = QtWidgets.QPushButton("Login") + self.login_button.setToolTip("Log in shotgrid instance") + self.login_button.clicked.connect(self._on_shotgrid_login_clicked) + + self.logout_button = QtWidgets.QPushButton("Logout") + self.logout_button.setToolTip("Log out shotgrid instance") + self.logout_button.clicked.connect(self._on_shotgrid_logout_clicked) + + self.buttons_layout = QtWidgets.QHBoxLayout() + self.buttons_layout.addWidget(self.logout_button) + self.buttons_layout.addWidget(self.login_button) + + self.main_widget = QtWidgets.QVBoxLayout(self) + self.main_widget.addLayout(self.input_layout) + self.main_widget.addLayout(self.buttons_layout) + self.setLayout(self.main_widget) + + def show(self, *args, **kwargs): + super(CredentialsDialog, self).show(*args, **kwargs) + self._fill_shotgrid_url() + self._fill_shotgrid_login() + + def _fill_shotgrid_url(self): + servers = settings.get_shotgrid_servers() + + if servers: + for _, v in servers.items(): + self.url_input.addItem("{}".format(v.get('shotgrid_url'))) + self._valid_input(self.url_input) + self.login_button.show() + self.logout_button.show() + enabled = True + else: + self.set_error("Ask your admin to add shotgrid server in settings") + self._invalid_input(self.url_input) + self.login_button.hide() + self.logout_button.hide() + enabled = False + + self.login_input.setEnabled(enabled) + self.password_input.setEnabled(enabled) + + def _fill_shotgrid_login(self): + login = credentials.get_local_login() + + if login: + self.login_input.setText(login) + + def _clear_shotgrid_login(self): + self.login_input.setText("") + self.password_input.setText("") + + def _on_shotgrid_login_clicked(self): + login = self.login_input.text().strip() + password = self.password_input.text().strip() + missing = [] + + if login == "": + missing.append("login") + self._invalid_input(self.login_input) + + if password == "": + missing.append("password") + self._invalid_input(self.password_input) + + url = self.url_input.currentText() + if url == "": + missing.append("url") + self._invalid_input(self.url_input) + + if len(missing) > 0: + self.set_error("You didn't enter {}".format(" and ".join(missing))) + return + + # if credentials.check_credentials( + # login=login, + # password=password, + # shotgrid_url=url, + # ): + credentials.save_local_login( + login=login + ) + os.environ['OPENPYPE_SG_USER'] = login + self._on_login() + + self.set_error("CANT LOGIN") + + def _on_shotgrid_logout_clicked(self): + credentials.clear_local_login() + del os.environ['OPENPYPE_SG_USER'] + self._clear_shotgrid_login() + self._on_logout() + + def set_error(self, 
msg): + self.error_label.setText(msg) + self.error_label.show() + + def _on_login(self): + self._is_logged = True + self.login_changed.emit() + self._close_widget() + + def _on_logout(self): + self._is_logged = False + self.login_changed.emit() + + def _close_widget(self): + self.hide() + + def _valid_input(self, input_widget): + input_widget.setStyleSheet("") + + def _invalid_input(self, input_widget): + input_widget.setStyleSheet("border: 1px solid red;") + + def login_with_credentials( + self, url, login, password + ): + verification = credentials.check_credentials(url, login, password) + if verification: + credentials.save_credentials(login, password, False) + self._module.set_credentials_to_env(login, password) + self.set_credentials(login, password) + self.login_changed.emit() + return verification diff --git a/openpype/modules/shotgrid/tray/shotgrid_tray.py b/openpype/modules/shotgrid/tray/shotgrid_tray.py new file mode 100644 index 0000000000..4038d77b03 --- /dev/null +++ b/openpype/modules/shotgrid/tray/shotgrid_tray.py @@ -0,0 +1,75 @@ +import os +import webbrowser + +from Qt import QtWidgets + +from openpype.modules.shotgrid.lib import credentials +from openpype.modules.shotgrid.tray.credential_dialog import ( + CredentialsDialog, +) + + +class ShotgridTrayWrapper: + module = None + credentials_dialog = None + logged_user_label = None + + def __init__(self, module): + self.module = module + self.credentials_dialog = CredentialsDialog(module) + self.credentials_dialog.login_changed.connect(self.set_login_label) + self.logged_user_label = QtWidgets.QAction("") + self.logged_user_label.setDisabled(True) + self.set_login_label() + + def show_batch_dialog(self): + if self.module.leecher_manager_url: + webbrowser.open(self.module.leecher_manager_url) + + def show_connect_dialog(self): + self.show_credential_dialog() + + def show_credential_dialog(self): + self.credentials_dialog.show() + self.credentials_dialog.activateWindow() + self.credentials_dialog.raise_() + + def set_login_label(self): + login = credentials.get_local_login() + if login: + self.logged_user_label.setText("{}".format(login)) + else: + self.logged_user_label.setText( + "No User logged in {0}".format(login) + ) + + def tray_menu(self, tray_menu): + # Add login to user menu + menu = QtWidgets.QMenu("Shotgrid", tray_menu) + show_connect_action = QtWidgets.QAction("Connect to Shotgrid", menu) + show_connect_action.triggered.connect(self.show_connect_dialog) + menu.addAction(self.logged_user_label) + menu.addSeparator() + menu.addAction(show_connect_action) + tray_menu.addMenu(menu) + + # Add manager to Admin menu + for m in tray_menu.findChildren(QtWidgets.QMenu): + if m.title() == "Admin": + shotgrid_manager_action = QtWidgets.QAction( + "Shotgrid manager", menu + ) + shotgrid_manager_action.triggered.connect( + self.show_batch_dialog + ) + m.addAction(shotgrid_manager_action) + + def validate(self): + login = credentials.get_local_login() + + if not login: + self.show_credential_dialog() + else: + os.environ["OPENPYPE_SG_USER"] = login + + return True diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py index 10bde7d4c0..c3b288f0cd 100644 --- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py +++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py @@ -4,8 +4,8 @@ import pyblish.api import copy from datetime import datetime +from openpype.client import OpenPypeMongoConnection from openpype.lib.plugin_tools 
import prepare_template_data -from openpype.lib import OpenPypeMongoConnection class IntegrateSlackAPI(pyblish.api.InstancePlugin): diff --git a/openpype/modules/timers_manager/plugins/publish/start_timer.py b/openpype/modules/timers_manager/plugins/publish/start_timer.py new file mode 100644 index 0000000000..6408327ca1 --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/start_timer.py @@ -0,0 +1,39 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + +import pyblish.api + +from openpype.pipeline import legacy_io + + +class StartTimer(pyblish.api.ContextPlugin): + label = "Start Timer" + order = pyblish.api.IntegratorOrder + 1 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + project_name = legacy_io.active_project() + asset_name = legacy_io.Session.get("AVALON_ASSET") + task_name = legacy_io.Session.get("AVALON_TASK") + if not project_name or not asset_name or not task_name: + self.log.info(( + "Current context does not contain all" + " required information to start a timer." + )) + return + timers_manager.start_timer_with_webserver( + project_name, asset_name, task_name, self.log + ) diff --git a/openpype/modules/timers_manager/plugins/publish/stop_timer.py b/openpype/modules/timers_manager/plugins/publish/stop_timer.py new file mode 100644 index 0000000000..a8674ff2ca --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/stop_timer.py @@ -0,0 +1,27 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + + +import pyblish.api + + +class StopTimer(pyblish.api.ContextPlugin): + label = "Stop Timer" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + timers_manager.stop_timer_with_webserver(self.log) diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 3453e4bc4c..93332ace4f 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -6,12 +6,15 @@ from openpype.client import get_asset_by_name from openpype.modules import OpenPypeModule from openpype_interfaces import ( ITrayService, - ILaunchHookPaths + ILaunchHookPaths, + IPluginPaths ) from openpype.lib.events import register_event_callback from .exceptions import InvalidContextError +TIMER_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + class ExampleTimersManagerConnector: """Timers manager can handle timers of multiple modules/addons. 
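The new StartTimer/StopTimer publish plugins above do not talk to timer backends directly; they go through `TimersManager.start_timer_with_webserver` / `stop_timer_with_webserver`, which POST to the tray webserver. A minimal sketch of those REST calls (the stop endpoint path appears in this diff; the start endpoint path and payload keys are assumptions mirroring the start helper's signature):

```python
import os

import requests

# The tray webserver exposes its base URL through this environment variable.
webserver_url = os.environ["OPENPYPE_WEBSERVER_URL"]

# Assumed start endpoint; payload keys mirror the arguments of
# 'start_timer_with_webserver(project_name, asset_name, task_name, logger)'.
requests.post(
    "{}/timers_manager/start_timer".format(webserver_url),
    json={
        "project_name": "my_project",
        "asset_name": "sh010",
        "task_name": "animation",
    },
)

# Stop endpoint taken from 'stop_timer_with_webserver' in the hunk below;
# it takes no payload.
requests.post("{}/timers_manager/stop_timer".format(webserver_url))
```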
@@ -33,6 +36,7 @@ class ExampleTimersManagerConnector: } ``` """ + # Not needed at all def __init__(self, module): # Store timer manager module to be able call it's methods when needed @@ -72,7 +76,12 @@ class ExampleTimersManagerConnector: self._timers_manager_module.timer_stopped(self._module.id) -class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): +class TimersManager( + OpenPypeModule, + ITrayService, + ILaunchHookPaths, + IPluginPaths +): """ Handles about Timers. Should be able to start/stop all timers at once. @@ -177,11 +186,19 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): def get_launch_hook_paths(self): """Implementation of `ILaunchHookPaths`.""" + return os.path.join( - os.path.dirname(os.path.abspath(__file__)), + TIMER_MODULE_DIR, "launch_hooks" ) + def get_plugin_paths(self): + """Implementation of `IPluginPaths`.""" + + return { + "publish": [os.path.join(TIMER_MODULE_DIR, "plugins", "publish")] + } + @staticmethod def get_timer_data_for_context( project_name, asset_name, task_name, logger=None @@ -388,6 +405,7 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): logger (logging.Logger): Logger object. Using 'print' if not passed. """ + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: msg = "Couldn't find webserver url" @@ -415,6 +433,36 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): return requests.post(rest_api_url, json=data) + @staticmethod + def stop_timer_with_webserver(logger=None): + """Prepared method for calling stop timers on REST api. + + Args: + logger (logging.Logger): Logger used for logging messages. + """ + + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + if not webserver_url: + msg = "Couldn't find webserver url" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) + try: + import requests + except Exception: + msg = "Couldn't start timer ('requests' is not available)" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + return requests.post(rest_api_url) + def on_host_install(self, host, host_name, project_name): self.log.debug("Installing task changed callback") register_event_callback("taskChanged", self._on_host_task_change) diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index 80ad939ccd..5f763cd249 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -14,11 +14,18 @@ from openpype.client import ( get_project, get_asset_by_id, get_asset_by_name, + version_is_latest, ) from openpype.modules import load_modules, ModulesManager from openpype.settings import get_project_settings -from openpype.lib import filter_pyblish_plugins + +from .publish.lib import filter_pyblish_plugins from .anatomy import Anatomy +from .template_data import get_template_data_with_names +from .workfile import ( + get_workfile_template_key, + get_custom_workfile_template_by_string_context, +) from . import ( legacy_io, register_loader_plugin_path, @@ -334,3 +341,107 @@ def get_current_project_asset(asset_name=None, asset_id=None, fields=None): if not asset_name: return None return get_asset_by_name(project_name, asset_name, fields=fields) + + +def is_representation_from_latest(representation): + """Return whether the representation is from latest version + + Args: + representation (dict): The representation document from the database. 
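+
+    A minimal usage sketch ('repre_doc' stands for a hypothetical
+    representation document queried from the database):
+
+        >>> is_representation_from_latest(repre_doc)
+        True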
+ + Returns: + bool: Whether the representation is of latest version. + """ + + project_name = legacy_io.active_project() + return version_is_latest(project_name, representation["parent"]) + + +def get_template_data_from_session(session=None, system_settings=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + system_settings (Union[Dict[str, Any], Any]): Prepared system settings. + Optional are auto received if not passed. + + Returns: + Dict[str, Any]: All available data from session. + """ + + if session is None: + session = legacy_io.Session + + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] + + return get_template_data_with_names( + project_name, asset_name, task_name, host_name, system_settings + ) + + +def get_workdir_from_session(session=None, template_key=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + template_key (str): Prepared template key from which workdir is + calculated. + + Returns: + str: Workdir path. + """ + + if session is None: + session = legacy_io.Session + project_name = session["AVALON_PROJECT"] + host_name = session["AVALON_APP"] + anatomy = Anatomy(project_name) + template_data = get_template_data_from_session(session) + anatomy_filled = anatomy.format(template_data) + + if not template_key: + task_type = template_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, + host_name, + project_name=project_name + ) + path = anatomy_filled[template_key]["folder"] + if path: + path = os.path.normpath(path) + return path + + +def get_custom_workfile_template_from_session( + session=None, project_settings=None +): + """Filter and fill workfile template profiles by current context. + + Current context is defined by `legacy_io.Session`. That's why this + function should be used only inside host where context is set and stable. + + Args: + session (Union[None, Dict[str, str]]): Session from which are taken + data. + project_settings(Dict[str, Any]): Template profiles from settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.) 
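+
+    A minimal usage sketch (assumes it runs inside a host with an active
+    session; the result may be None when no profile matches):
+
+        >>> template_path = get_custom_workfile_template_from_session()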
+ """ + + if session is None: + session = legacy_io.Session + + return get_custom_workfile_template_by_string_context( + session["AVALON_PROJECT"], + session["AVALON_ASSET"], + session["AVALON_TASK"], + session["AVALON_APP"], + project_settings=project_settings + ) diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 1beeb4267b..bd196ccfd1 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -7,6 +7,7 @@ from .creator_plugins import ( BaseCreator, Creator, AutoCreator, + HiddenCreator, discover_creator_plugins, discover_legacy_creator_plugins, @@ -35,6 +36,7 @@ __all__ = ( "BaseCreator", "Creator", "AutoCreator", + "HiddenCreator", "discover_creator_plugins", "discover_legacy_creator_plugins", diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 9b55c3b21e..eaaed39357 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -6,6 +6,7 @@ import inspect from uuid import uuid4 from contextlib import contextmanager +from openpype.client import get_assets from openpype.host import INewPublisher from openpype.pipeline import legacy_io from openpype.pipeline.mongodb import ( @@ -1082,15 +1083,10 @@ class CreateContext: for asset_name in task_names_by_asset_name.keys() if asset_name is not None ] - asset_docs = list(self.dbcon.find( - { - "type": "asset", - "name": {"$in": asset_names} - }, - { - "name": True, - "data.tasks": True - } + asset_docs = list(get_assets( + self.project_name, + asset_names=asset_names, + fields=["name", "data.tasks"] )) task_names_by_asset_name = {} diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 52c76db5ef..9a5d559774 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -1,3 +1,4 @@ +import os import copy from abc import ( @@ -7,10 +8,8 @@ from abc import ( ) import six -from openpype.lib import ( - get_subset_name_with_asset_doc, - set_plugin_attributes_from_settings, -) +from openpype.settings import get_system_settings, get_project_settings +from openpype.lib import get_subset_name_with_asset_doc from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -416,6 +415,12 @@ class Creator(BaseCreator): return self.pre_create_attr_defs +class HiddenCreator(BaseCreator): + @abstractmethod + def create(self, instance_data, source_data): + pass + + class AutoCreator(BaseCreator): """Creator which is automatically triggered without user interaction. 
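The discovery change in the hunk below follows the same pattern as the loader discovery later in this diff: `apply_settings(project_settings, system_settings)` is called on every discovered plugin class and failures are logged instead of aborting discovery. A sketch of the settings shape such a plugin consumes (host, plugin and attribute names are illustrative, not part of this PR):

```python
# Illustrative override consumed by 'LegacyCreator.apply_settings'.
# The host name comes from AVALON_APP, the plugin type is "create" and
# the plugin class name selects the entry.
project_settings = {
    "maya": {
        "create": {
            "CreateRender": {
                "enabled": True,
                "defaults": ["Main"],
            }
        }
    }
}

# During discovery each key/value pair becomes a class attribute, so
# 'CreateRender.defaults' ends up as ["Main"], while '"enabled": False'
# would set 'CreateRender.active = False' instead.
```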
@@ -432,8 +437,24 @@ def discover_creator_plugins(): def discover_legacy_creator_plugins(): + from openpype.lib import Logger + + log = Logger.get_logger("CreatorDiscover") + plugins = discover(LegacyCreator) - set_plugin_attributes_from_settings(plugins, LegacyCreator) + project_name = os.environ.get("AVALON_PROJECT") + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to loader {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index 46e0e3d663..2764b3cb95 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -5,6 +5,7 @@ Renamed classes and functions - 'create' -> 'legacy_create' """ +import os import logging import collections @@ -37,6 +38,48 @@ class LegacyCreator(object): self.data.update(data or {}) + @classmethod + def apply_settings(cls, project_settings, system_settings): + """Apply OpenPype settings to a plugin class.""" + + host_name = os.environ.get("AVALON_APP") + plugin_type = "create" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + def process(self): pass diff --git a/openpype/pipeline/load/__init__.py b/openpype/pipeline/load/__init__.py index 6e7612d4c1..e46d9f152b 100644 --- a/openpype/pipeline/load/__init__.py +++ b/openpype/pipeline/load/__init__.py @@ -24,6 +24,10 @@ from .utils import ( loaders_from_repre_context, loaders_from_representation, + + any_outdated_containers, + get_outdated_containers, + filter_containers, ) from .plugins import ( @@ -66,6 +70,10 @@ __all__ = ( "loaders_from_repre_context", "loaders_from_representation", + "any_outdated_containers", + "get_outdated_containers", + "filter_containers", + # plugins.py "LoaderPlugin", "SubsetLoaderPlugin", diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index a30a2188a4..8cba8d8217 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -1,6 +1,8 @@ +import os import logging -from openpype.lib import set_plugin_attributes_from_settings +from openpype.settings import get_system_settings, get_project_settings +from openpype.pipeline import legacy_io from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -37,6 +39,46 @@ class LoaderPlugin(list): def __init__(self, context): self.fname = self.filepath_from_context(context) + @classmethod + def 
apply_settings(cls, project_settings, system_settings): + host_name = os.environ.get("AVALON_APP") + plugin_type = "load" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + @classmethod def get_representations(cls): return cls.representations @@ -110,9 +152,25 @@ class SubsetLoaderPlugin(LoaderPlugin): pass -def discover_loader_plugins(): +def discover_loader_plugins(project_name=None): + from openpype.lib import Logger + + log = Logger.get_logger("LoaderDiscover") plugins = discover(LoaderPlugin) - set_plugin_attributes_from_settings(plugins, LoaderPlugin) + if not project_name: + project_name = legacy_io.active_project() + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to loader {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 2c213aff6f..fe5102353d 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -4,8 +4,10 @@ import copy import getpass import logging import inspect +import collections import numbers +from openpype.host import ILoadHost from openpype.client import ( get_project, get_assets, @@ -15,6 +17,7 @@ from openpype.client import ( get_last_version_by_subset_id, get_hero_version_by_subset_id, get_version_by_name, + get_last_versions, get_representations, get_representation_by_id, get_representation_by_name, @@ -28,6 +31,11 @@ from openpype.pipeline import ( log = logging.getLogger(__name__) +ContainersFilterResult = collections.namedtuple( + "ContainersFilterResult", + ["latest", "outdated", "not_foud", "invalid"] +) + class HeroVersionType(object): def __init__(self, version): @@ -685,3 +693,164 @@ def loaders_from_representation(loaders, representation): context = get_representation_context(representation) return loaders_from_repre_context(loaders, context) + + +def any_outdated_containers(host=None, project_name=None): + """Check if there are any outdated containers in scene.""" + + if get_outdated_containers(host, project_name): + return True + return False + + +def get_outdated_containers(host=None, project_name=None): + """Collect outdated containers from host scene. + + Currently registered host and project in global session are used if + arguments are not passed. + + Args: + host (ModuleType): Host implementation with 'ls' function available. + project_name (str): Name of project in which context we are. 
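+
+    A minimal usage sketch (with no arguments it relies on the currently
+    registered host and the active project):
+
+        >>> outdated = get_outdated_containers()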
+    """
+
+    if host is None:
+        from openpype.pipeline import registered_host
+        host = registered_host()
+
+    if project_name is None:
+        project_name = legacy_io.active_project()
+
+    if isinstance(host, ILoadHost):
+        containers = host.get_containers()
+    else:
+        containers = host.ls()
+    return filter_containers(containers, project_name).outdated
+
+
+def filter_containers(containers, project_name):
+    """Filter containers and split them into 4 categories.
+
+    Categories are 'latest', 'outdated', 'invalid' and 'not_found'.
+    The 'latest' containers are from the last version, 'outdated' are not,
+    'invalid' are containers with invalid content and 'not_found' have
+    some entity missing in the database.
+
+    Args:
+        containers (Iterable[dict]): List of containers referenced in scene.
+        project_name (str): Name of project in which context should look for
+            versions.
+
+    Returns:
+        ContainersFilterResult: Named tuple with 'latest', 'outdated',
+            'invalid' and 'not_found' containers.
+    """
+
+    # Make sure containers is a list that won't change
+    containers = list(containers)
+
+    outdated_containers = []
+    uptodate_containers = []
+    not_found_containers = []
+    invalid_containers = []
+    output = ContainersFilterResult(
+        uptodate_containers,
+        outdated_containers,
+        not_found_containers,
+        invalid_containers
+    )
+    # Query representation docs to get their version ids
+    repre_ids = {
+        container["representation"]
+        for container in containers
+        if container["representation"]
+    }
+    if not repre_ids:
+        if containers:
+            invalid_containers.extend(containers)
+        return output
+
+    repre_docs = get_representations(
+        project_name,
+        representation_ids=repre_ids,
+        fields=["_id", "parent"]
+    )
+    # Store representations by stringified representation id
+    repre_docs_by_str_id = {}
+    repre_docs_by_version_id = collections.defaultdict(list)
+    for repre_doc in repre_docs:
+        repre_id = str(repre_doc["_id"])
+        version_id = repre_doc["parent"]
+        repre_docs_by_str_id[repre_id] = repre_doc
+        repre_docs_by_version_id[version_id].append(repre_doc)
+
+    # Query version docs to get their subset ids
+    # - also query hero versions to be able to identify if a representation
+    #   belongs to an existing version
+    version_docs = get_versions(
+        project_name,
+        version_ids=repre_docs_by_version_id.keys(),
+        hero=True,
+        fields=["_id", "parent", "type"]
+    )
+    verisons_by_id = {}
+    versions_by_subset_id = collections.defaultdict(list)
+    hero_version_ids = set()
+    for version_doc in version_docs:
+        version_id = version_doc["_id"]
+        # Store versions by their ids
+        verisons_by_id[version_id] = version_doc
+        # There's no need to query subsets for hero versions
+        # - they are considered as latest
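+        # NOTE: hero version containers therefore end up in the 'latest'
+        #   group in the categorization loop below, never in 'outdated'.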
+ if version_doc["type"] == "hero_version": + hero_version_ids.add(version_id) + continue + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + last_versions = get_last_versions( + project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id"] + ) + # Figure out which versions are outdated + outdated_version_ids = set() + for subset_id, last_version_doc in last_versions.items(): + for version_doc in versions_by_subset_id[subset_id]: + version_id = version_doc["_id"] + if version_id != last_version_doc["_id"]: + outdated_version_ids.add(version_id) + + # Based on all collected data figure out which containers are outdated + # - log out if there are missing representation or version documents + for container in containers: + container_name = container["objectName"] + repre_id = container["representation"] + if not repre_id: + invalid_containers.append(container) + continue + + repre_doc = repre_docs_by_str_id.get(repre_id) + if not repre_doc: + log.debug(( + "Container '{}' has an invalid representation." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + continue + + version_id = repre_doc["parent"] + if version_id in outdated_version_ids: + outdated_containers.append(container) + + elif version_id not in verisons_by_id: + log.debug(( + "Representation on container '{}' has an invalid version." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + + else: + uptodate_containers.append(container) + + return output diff --git a/openpype/pipeline/mongodb.py b/openpype/pipeline/mongodb.py index dab5bb9e13..be2b67a5e7 100644 --- a/openpype/pipeline/mongodb.py +++ b/openpype/pipeline/mongodb.py @@ -5,6 +5,8 @@ import logging import pymongo from uuid import uuid4 +from openpype.client import OpenPypeMongoConnection + from . import schema @@ -156,8 +158,6 @@ class AvalonMongoDB: @property def mongo_client(self): - from openpype.lib import OpenPypeMongoConnection - return OpenPypeMongoConnection.get_mongo_client() @property diff --git a/openpype/pipeline/publish/abstract_collect_render.py b/openpype/pipeline/publish/abstract_collect_render.py index 2e537227c3..ccb2415346 100644 --- a/openpype/pipeline/publish/abstract_collect_render.py +++ b/openpype/pipeline/publish/abstract_collect_render.py @@ -63,6 +63,8 @@ class RenderInstance(object): family = attr.ib(default="renderlayer") families = attr.ib(default=["renderlayer"]) # list of families + # True if should be rendered on farm, eg not integrate + farm = attr.ib(default=False) # format settings multipartExr = attr.ib(default=False) # flag for multipart exrs diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index 739b2c8806..d5494cd8a4 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -6,6 +6,10 @@ import xml.etree.ElementTree import six import pyblish.plugin +import pyblish.api + +from openpype.lib import Logger +from openpype.settings import get_project_settings, get_system_settings class DiscoverResult: @@ -180,3 +184,92 @@ def publish_plugins_discover(paths=None): result.plugins = plugins return result + + +def filter_pyblish_plugins(plugins): + """Pyblish plugin filter which applies OpenPype settings. + + Apply OpenPype settings on discovered plugins. On plugin with implemented + class method 'def apply_settings(cls, project_settings, system_settings)' + is called the method. 
Default behavior looks for plugin name and current
+    host name to find settings overrides.
+
+    Args:
+        plugins (List[pyblish.plugin.Plugin]): Discovered plugins to which
+            settings are applied.
+    """
+
+    log = Logger.get_logger("filter_pyblish_plugins")
+
+    # TODO: Don't use host from 'pyblish.api' but from defined host by us.
+    #   - kept because on farm the used host is probably 'shell', which
+    #       probably affects how settings are applied there
+    host = pyblish.api.current_host()
+    project_name = os.environ.get("AVALON_PROJECT")
+
+    project_settings = get_project_settings(project_name)
+    system_settings = get_system_settings()
+
+    # iterate over plugins
+    for plugin in plugins[:]:
+        if hasattr(plugin, "apply_settings"):
+            try:
+                # Use classmethod 'apply_settings'
+                # - can be used to target settings from custom settings place
+                # - skip default behavior when successful
+                plugin.apply_settings(project_settings, system_settings)
+                continue
+
+            except Exception:
+                log.warning(
+                    (
+                        "Failed to apply settings on plugin {}"
+                    ).format(plugin.__name__),
+                    exc_info=True
+                )
+
+        try:
+            config_data = (
+                project_settings
+                [host]
+                ["publish"]
+                [plugin.__name__]
+            )
+        except KeyError:
+            # host determined from path
+            file = os.path.normpath(inspect.getsourcefile(plugin))
+            file = os.path.normpath(file)
+
+            split_path = file.split(os.path.sep)
+            if len(split_path) < 4:
+                log.warning(
+                    'plugin path too short to extract host {}'.format(file)
+                )
+                continue
+
+            host_from_file = split_path[-4]
+            plugin_kind = split_path[-2]
+
+            # TODO: change after all plugins are moved one level up
+            if host_from_file == "openpype":
+                host_from_file = "global"
+
+            try:
+                config_data = (
+                    project_settings
+                    [host_from_file]
+                    [plugin_kind]
+                    [plugin.__name__]
+                )
+            except KeyError:
+                continue
+
+        for option, value in config_data.items():
+            if option == "enabled" and value is False:
+                log.info('removing plugin {}'.format(plugin.__name__))
+                plugins.remove(plugin)
+            else:
+                log.info('setting {}:{} on plugin {}'.format(
+                    option, value, plugin.__name__))
+
+                setattr(plugin, option, value)
diff --git a/openpype/pipeline/template_data.py b/openpype/pipeline/template_data.py
new file mode 100644
index 0000000000..824a25127c
--- /dev/null
+++ b/openpype/pipeline/template_data.py
@@ -0,0 +1,228 @@
+from openpype.client import get_project, get_asset_by_name
+from openpype.settings import get_system_settings
+from openpype.lib.local_settings import get_openpype_username
+
+
+def get_general_template_data(system_settings=None):
+    """General template data based on system settings or machine.
+
+    Output contains formatting keys:
+    - 'studio[name]' - Studio name filled from system settings
+    - 'studio[code]' - Studio code filled from system settings
+    - 'user' - User's name using 'get_openpype_username'
+
+    Args:
+        system_settings (Dict[str, Any]): System settings.
+    """
+
+    if not system_settings:
+        system_settings = get_system_settings()
+    studio_name = system_settings["general"]["studio_name"]
+    studio_code = system_settings["general"]["studio_code"]
+    return {
+        "studio": {
+            "name": studio_name,
+            "code": studio_code
+        },
+        "user": get_openpype_username()
+    }
+
+
+def get_project_template_data(project_doc):
+    """Extract data from project document that are used in templates.
+
+    Project document must have 'name' and (at this moment) optional
+    key 'data.code'.
+
+    Output contains formatting keys:
+    - 'project[name]' - Project name
+    - 'project[code]' - Project code
+
+    Args:
+        project_doc (Dict[str, Any]): Queried project document.
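+
+    Example (hypothetical project document):
+
+        >>> get_project_template_data(
+        ...     {"name": "MyProject", "data": {"code": "mp"}}
+        ... )
+        {'project': {'name': 'MyProject', 'code': 'mp'}}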
+ + Returns: + Dict[str, Dict[str, str]]: Template data based on project document. + """ + + project_code = project_doc.get("data", {}).get("code") + return { + "project": { + "name": project_doc["name"], + "code": project_code + } + } + + +def get_asset_template_data(asset_doc, project_name): + """Extract data from asset document that are used in templates. + + Output dictionary contains keys: + - 'asset' - asset name + - 'hierarchy' - parent asset names joined with '/' + - 'parent' - direct parent name, project name used if is under project + + Required document fields: + Asset: 'name', 'data.parents' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + project_name (str): Is used for 'parent' key if asset doc does not have + any. + + Returns: + Dict[str, str]: Data that are based on asset document and can be used + in templates. + """ + + asset_parents = asset_doc["data"]["parents"] + hierarchy = "/".join(asset_parents) + if asset_parents: + parent_name = asset_parents[-1] + else: + parent_name = project_name + + return { + "asset": asset_doc["name"], + "hierarchy": hierarchy, + "parent": parent_name + } + + +def get_task_type(asset_doc, task_name): + """Get task type based on asset document and task name. + + Required document fields: + Asset: 'data.tasks' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + task_name (str): Task name which is under asset. + + Returns: + str: Task type name. + None: Task was not found on asset document. + """ + + asset_tasks_info = asset_doc["data"]["tasks"] + return asset_tasks_info.get(task_name, {}).get("type") + + +def get_task_template_data(project_doc, asset_doc, task_name): + """"Extract task specific data from project and asset documents. + + Required document fields: + Project: 'config.tasks' + Asset: 'data.tasks'. + + Args: + project_doc (Dict[str, Any]): Queried project document. + asset_doc (Dict[str, Any]): Queried asset document. + tas_name (str): Name of task for which data should be returned. + + Returns: + Dict[str, Dict[str, str]]: Template data + """ + + project_task_types = project_doc["config"]["tasks"] + task_type = get_task_type(asset_doc, task_name) + task_code = project_task_types.get(task_type, {}).get("short_name") + + return { + "task": { + "name": task_name, + "type": task_type, + "short": task_code, + } + } + + +def get_template_data( + project_doc, + asset_doc=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered documents and info. + + This function does not "auto fill" any values except system settings and + it's on purpose. + + Universal function to receive template data from passed arguments. Only + required argument is project document all other arguments are optional + and their values won't be added to template data if are not passed. + + Required document fields: + Project: 'name', 'data.code', 'config.tasks' + Asset: 'name', 'data.parents', 'data.tasks' + + Args: + project_doc (Dict[str, Any]): Mongo document of project from MongoDB. + asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]): Used to fill '{app}' key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed (may be slower). + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. 
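+
+    A hedged sketch of the merged output keys (assumes the passed documents
+    contain the required fields listed above):
+
+        >>> data = get_template_data(project_doc, asset_doc, "modeling", "maya")
+        >>> sorted(data)
+        ['app', 'asset', 'hierarchy', 'parent', 'project', 'studio', 'task', 'user']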
+ """ + + template_data = get_general_template_data(system_settings) + template_data.update(get_project_template_data(project_doc)) + if asset_doc: + template_data.update(get_asset_template_data( + asset_doc, project_doc["name"] + )) + if task_name: + template_data.update(get_task_template_data( + project_doc, asset_doc, task_name + )) + + if host_name: + template_data["app"] = host_name + + return template_data + + +def get_template_data_with_names( + project_name, + asset_name=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered entity names and info. + + Copy of 'get_template_data' but based on entity names instead of documents. + Only difference is that documents are queried. + + Args: + project_name (str): Project name for which template data are + calculated. + asset_name (Union[str, None]): Asset name for which template data are + calculated. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]):Used to fill '{app}' key. + because workdir template may contain `{app}` key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed. + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. + """ + + project_doc = get_project( + project_name, fields=["name", "data.code", "config.tasks"] + ) + asset_doc = None + if asset_name: + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["name", "data.parents", "data.tasks"] + ) + return get_template_data( + project_doc, asset_doc, task_name, host_name, system_settings + ) diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index ec97b36954..eb383b16d9 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,6 +2,7 @@ import os import copy import logging +from openpype.client import get_project from . 
import legacy_io from .plugin_discover import ( discover, @@ -85,13 +86,8 @@ class TemplateResolver(ThumbnailResolver): self.log.debug("Thumbnail entity does not have set template") return - project = self.dbcon.find_one( - {"type": "project"}, - { - "name": True, - "data.code": True - } - ) + project_name = self.dbcon.active_project() + project = get_project(project_name, fields=["name", "data.code"]) template_data = copy.deepcopy( thumbnail_entity["data"].get("template_data") or {} diff --git a/openpype/pipeline/workfile/__init__.py b/openpype/pipeline/workfile/__init__.py new file mode 100644 index 0000000000..0aad29b6f9 --- /dev/null +++ b/openpype/pipeline/workfile/__init__.py @@ -0,0 +1,30 @@ +from .path_resolving import ( + get_workfile_template_key_from_context, + get_workfile_template_key, + get_workdir_with_workdir_data, + get_workdir, + + get_last_workfile_with_version, + get_last_workfile, + + get_custom_workfile_template, + get_custom_workfile_template_by_string_context, +) + +from .build_workfile import BuildWorkfile + + +__all__ = ( + "get_workfile_template_key_from_context", + "get_workfile_template_key", + "get_workdir_with_workdir_data", + "get_workdir", + + "get_last_workfile_with_version", + "get_last_workfile", + + "get_custom_workfile_template", + "get_custom_workfile_template_by_string_context", + + "BuildWorkfile", +) diff --git a/openpype/pipeline/workfile/build_workfile.py b/openpype/pipeline/workfile/build_workfile.py new file mode 100644 index 0000000000..bb6fcb4189 --- /dev/null +++ b/openpype/pipeline/workfile/build_workfile.py @@ -0,0 +1,693 @@ +import os +import re +import collections +import json + +from openpype.client import ( + get_asset_by_name, + get_subsets, + get_last_versions, + get_representations, +) +from openpype.settings import get_project_settings +from openpype.lib import ( + get_linked_assets, + filter_profiles, + Logger, +) +from openpype.pipeline import legacy_io +from openpype.pipeline.load import ( + discover_loader_plugins, + IncompatibleLoaderError, + load_container, +) + + +class BuildWorkfile: + """Wrapper for build workfile process. + + Load representations for current context by build presets. Build presets + are host related, since each host has it's loaders. + """ + + _log = None + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @staticmethod + def map_subsets_by_family(subsets): + subsets_by_family = collections.defaultdict(list) + for subset in subsets: + family = subset["data"].get("family") + if not family: + families = subset["data"].get("families") + if not families: + continue + family = families[0] + + subsets_by_family[family].append(subset) + return subsets_by_family + + def process(self): + """Main method of this wrapper. + + Building of workfile is triggered and is possible to implement + post processing of loaded containers if necessary. + + Returns: + List[Dict[str, Any]]: Loaded containers during build. + """ + + return self.build_workfile() + + def build_workfile(self): + """Prepares and load containers into workfile. + + Loads latest versions of current and linked assets to workfile by logic + stored in Workfile profiles from presets. Profiles are set by host, + filtered by current task name and used by families. + + Each family can specify representation names and loaders for + representations and first available and successful loaded + representation is returned as container. 
+
+        At the end you'll get a list of loaded containers per asset.
+
+        loaded_containers [{
+            "asset_entity": <AssetEntity1>,
+            "containers": [<Container1>, <Container2>, ...]
+        }, {
+            "asset_entity": <AssetEntity2>,
+            "containers": [<Container3>, ...]
+        }, {
+            ...
+        }]
+
+        Returns:
+            List[Dict[str, Any]]: Loaded containers during build.
+        """
+
+        loaded_containers = []
+
+        # Get current asset name and entity
+        project_name = legacy_io.active_project()
+        current_asset_name = legacy_io.Session["AVALON_ASSET"]
+        current_asset_entity = get_asset_by_name(
+            project_name, current_asset_name
+        )
+        # Skip if asset was not found
+        if not current_asset_entity:
+            print("Asset entity with name `{}` was not found".format(
+                current_asset_name
+            ))
+            return loaded_containers
+
+        # Prepare available loaders
+        loaders_by_name = {}
+        for loader in discover_loader_plugins():
+            loader_name = loader.__name__
+            if loader_name in loaders_by_name:
+                raise KeyError(
+                    "Duplicated loader name {0}!".format(loader_name)
+                )
+            loaders_by_name[loader_name] = loader
+
+        # Skip if there are no loaders
+        if not loaders_by_name:
+            self.log.warning("There are no registered loaders.")
+            return loaded_containers
+
+        # Get current task name
+        current_task_name = legacy_io.Session["AVALON_TASK"]
+
+        # Load workfile presets for task
+        self.build_presets = self.get_build_presets(
+            current_task_name, current_asset_entity
+        )
+
+        # Skip if there are no presets for task
+        if not self.build_presets:
+            self.log.warning(
+                "Current task `{}` does not have any loading preset.".format(
+                    current_task_name
+                )
+            )
+            return loaded_containers
+
+        # Get presets for loading current asset
+        current_context_profiles = self.build_presets.get("current_context")
+        # Get presets for loading linked assets
+        link_context_profiles = self.build_presets.get("linked_assets")
+        # Skip if both are missing
+        if not current_context_profiles and not link_context_profiles:
+            self.log.warning(
+                "Current task `{}` has empty loading preset.".format(
+                    current_task_name
+                )
+            )
+            return loaded_containers
+
+        elif not current_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any loading"
+                " preset for its context."
+            ).format(current_task_name))
+
+        elif not link_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any"
+                " loading preset for its linked assets."
+            ).format(current_task_name))
+
+        # Prepare assets to process by workfile presets
+        assets = []
+        current_asset_id = None
+        if current_context_profiles:
+            # Add current asset entity if preset has current context set
+            assets.append(current_asset_entity)
+            current_asset_id = current_asset_entity["_id"]
+
+        if link_context_profiles:
+            # Find and append linked assets if preset has set linked mapping
+            link_assets = get_linked_assets(current_asset_entity)
+            if link_assets:
+                assets.extend(link_assets)
+
+        # Skip if there are no assets. This can happen if only linked mapping
+        # is set and there are no links for this asset.
+        if not assets:
+            self.log.warning(
+                "Asset does not have linked assets. Nothing to process."
+ ) + return loaded_containers + + # Prepare entities from database for assets + prepared_entities = self._collect_last_version_repres(assets) + + # Load containers by prepared entities and presets + # - Current asset containers + if current_asset_id and current_asset_id in prepared_entities: + current_context_data = prepared_entities.pop(current_asset_id) + loaded_data = self.load_containers_by_asset_data( + current_context_data, current_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # - Linked assets container + for linked_asset_data in prepared_entities.values(): + loaded_data = self.load_containers_by_asset_data( + linked_asset_data, link_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # Return list of loaded containers + return loaded_containers + + def get_build_presets(self, task_name, asset_doc): + """ Returns presets to build workfile for task name. + + Presets are loaded for current project set in + io.Session["AVALON_PROJECT"], filtered by registered host + and entered task name. + + Args: + task_name (str): Task name used for filtering build presets. + + Returns: + Dict[str, Any]: preset per entered task name + """ + + host_name = os.environ["AVALON_APP"] + project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + host_settings = project_settings.get(host_name) or {} + # Get presets for host + wb_settings = host_settings.get("workfile_builder") + if not wb_settings: + # backward compatibility + wb_settings = host_settings.get("workfile_build") or {} + + builder_profiles = wb_settings.get("profiles") + if not builder_profiles: + return None + + task_type = ( + asset_doc + .get("data", {}) + .get("tasks", {}) + .get(task_name, {}) + .get("type") + ) + filter_data = { + "task_types": task_type, + "tasks": task_name + } + return filter_profiles(builder_profiles, filter_data) + + def _filter_build_profiles(self, build_profiles, loaders_by_name): + """ Filter build profiles by loaders and prepare process data. + + Valid profile must have "loaders", "families" and "repre_names" keys + with valid values. + - "loaders" expects list of strings representing possible loaders. + - "families" expects list of strings for filtering + by main subset family. + - "repre_names" expects list of strings for filtering by + representation name. + + Lowered "families" and "repre_names" are prepared for each profile with + all required keys. + + Args: + build_profiles (Dict[str, Any]): Profiles for building workfile. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + List[Dict[str, Any]]: Filtered and prepared profiles. 
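+
+        Example of a valid profile (values are illustrative):
+
+            {
+                "loaders": ["ReferenceLoader"],
+                "families": ["model", "rig"],
+                "repre_names": ["ma", "abc"]
+            }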
+ """ + + valid_profiles = [] + for profile in build_profiles: + # Check loaders + profile_loaders = profile.get("loaders") + if not profile_loaders: + self.log.warning(( + "Build profile has missing loaders configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check if any loader is available + loaders_match = False + for loader_name in profile_loaders: + if loader_name in loaders_by_name: + loaders_match = True + break + + if not loaders_match: + self.log.warning(( + "All loaders from Build profile are not available: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check families + profile_families = profile.get("families") + if not profile_families: + self.log.warning(( + "Build profile is missing families configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check representation names + profile_repre_names = profile.get("repre_names") + if not profile_repre_names: + self.log.warning(( + "Build profile is missing" + " representation names filtering: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Prepare lowered families and representation names + profile["families_lowered"] = [ + fam.lower() for fam in profile_families + ] + profile["repre_names_lowered"] = [ + name.lower() for name in profile_repre_names + ] + + valid_profiles.append(profile) + + return valid_profiles + + def _prepare_profile_for_subsets(self, subsets, profiles): + """Select profile for each subset by it's data. + + Profiles are filtered for each subset individually. + Profile is filtered by subset's family, optionally by name regex and + representation names set in profile. + It is possible to not find matching profile for subset, in that case + subset is skipped and it is possible that none of subsets have + matching profile. + + Args: + subsets (List[Dict[str, Any]]): Subset documents. + profiles (List[Dict[str, Any]]): Build profiles. + + Returns: + Dict[str, Any]: Profile by subset's id. + """ + + # Prepare subsets + subsets_by_family = self.map_subsets_by_family(subsets) + + profiles_per_subset_id = {} + for family, subsets in subsets_by_family.items(): + family_low = family.lower() + for profile in profiles: + # Skip profile if does not contain family + if family_low not in profile["families_lowered"]: + continue + + # Precompile name filters as regexes + profile_regexes = profile.get("subset_name_filters") + if profile_regexes: + _profile_regexes = [] + for regex in profile_regexes: + _profile_regexes.append(re.compile(regex)) + profile_regexes = _profile_regexes + + # TODO prepare regex compilation + for subset in subsets: + # Verify regex filtering (optional) + if profile_regexes: + valid = False + for pattern in profile_regexes: + if re.match(pattern, subset["name"]): + valid = True + break + + if not valid: + continue + + profiles_per_subset_id[subset["_id"]] = profile + + # break profiles loop on finding the first matching profile + break + return profiles_per_subset_id + + def load_containers_by_asset_data( + self, asset_entity_data, build_profiles, loaders_by_name + ): + """Load containers for entered asset entity by Build profiles. + + Args: + asset_entity_data (Dict[str, Any]): Prepared data with subsets, + last versions and representations for specific asset. + build_profiles (Dict[str, Any]): Build profiles. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + Dict[str, Any]: Output contains asset document + and loaded containers. 
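+
+        Example of the returned structure (entities are placeholders):
+
+            {
+                "asset_entity": <AssetEntity>,
+                "containers": [<Container1>, <Container2>, ...]
+            }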
+        """
+
+        # Make sure none of the inputs is empty
+        if not asset_entity_data or not build_profiles or not loaders_by_name:
+            return
+
+        asset_entity = asset_entity_data["asset_entity"]
+
+        valid_profiles = self._filter_build_profiles(
+            build_profiles, loaders_by_name
+        )
+        if not valid_profiles:
+            self.log.warning(
+                "There are no valid Workfile profiles. Skipping process."
+            )
+            return
+
+        self.log.debug("Valid Workfile profiles: {}".format(valid_profiles))
+
+        subsets_by_id = {}
+        version_by_subset_id = {}
+        repres_by_version_id = {}
+        for subset_id, in_data in asset_entity_data["subsets"].items():
+            subset_entity = in_data["subset_entity"]
+            subsets_by_id[subset_entity["_id"]] = subset_entity
+
+            version_data = in_data["version"]
+            version_entity = version_data["version_entity"]
+            version_by_subset_id[subset_id] = version_entity
+            repres_by_version_id[version_entity["_id"]] = (
+                version_data["repres"]
+            )
+
+        if not subsets_by_id:
+            self.log.warning("There are no subsets for asset {0}".format(
+                asset_entity["name"]
+            ))
+            return
+
+        profiles_per_subset_id = self._prepare_profile_for_subsets(
+            subsets_by_id.values(), valid_profiles
+        )
+        if not profiles_per_subset_id:
+            self.log.warning("There are no valid subsets.")
+            return
+
+        valid_repres_by_subset_id = collections.defaultdict(list)
+        for subset_id, profile in profiles_per_subset_id.items():
+            profile_repre_names = profile["repre_names_lowered"]
+
+            version_entity = version_by_subset_id[subset_id]
+            version_id = version_entity["_id"]
+            repres = repres_by_version_id[version_id]
+            for repre in repres:
+                repre_name_low = repre["name"].lower()
+                if repre_name_low in profile_repre_names:
+                    valid_repres_by_subset_id[subset_id].append(repre)
+
+        # DEBUG message
+        msg = "Valid representations for Asset: `{}`".format(
+            asset_entity["name"]
+        )
+        for subset_id, repres in valid_repres_by_subset_id.items():
+            subset = subsets_by_id[subset_id]
+            msg += "\n# Subset Name/ID: `{}`/{}".format(
+                subset["name"], subset_id
+            )
+            for repre in repres:
+                msg += "\n## Repre name: `{}`".format(repre["name"])
+
+        self.log.debug(msg)
+
+        containers = self._load_containers(
+            valid_repres_by_subset_id, subsets_by_id,
+            profiles_per_subset_id, loaders_by_name
+        )
+
+        return {
+            "asset_entity": asset_entity,
+            "containers": containers
+        }
+
+    def _load_containers(
+        self, repres_by_subset_id, subsets_by_id,
+        profiles_per_subset_id, loaders_by_name
+    ):
+        """The real load of the collected data happens here.
+
+        One representation is loaded per subset, and loading is tried in a
+        specific order: representation names are tried in the order defined
+        in the configuration, and for a representation matching a name each
+        loader is tried until one of them succeeds. If none of the loaders
+        succeeds, the next representation name is tried. The loop for a
+        subset ends when a representation is loaded or all matching
+        representations were already tried.
+
+        Args:
+            repres_by_subset_id (Dict[str, Dict[str, Any]]): Available
+                representations mapped by their parent (subset) id.
+            subsets_by_id (Dict[str, Dict[str, Any]]): Subset documents
+                mapped by their id.
+            profiles_per_subset_id (Dict[str, Dict[str, Any]]): Build profiles
+                mapped by subset id.
+            loaders_by_name (Dict[str, LoaderPlugin]): Available loaders
+                per name.
+
+        Returns:
+            List[Dict[str, Any]]: Objects of loaded containers.
+        """
+
+        loaded_containers = []
+
+        # Get subset id order from build presets.
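+        # Note: the order is driven by the order of families in the build
+        #   presets, e.g. so that rigs may be loaded before animation caches
+        #   (illustrative example only - the actual order comes from the
+        #   project settings).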
+        build_presets = self.build_presets.get("current_context", [])
+        build_presets += self.build_presets.get("linked_assets", [])
+        subset_ids_ordered = []
+        for preset in build_presets:
+            for preset_family in preset["families"]:
+                for subset_id, subset in subsets_by_id.items():
+                    if preset_family not in subset["data"].get(
+                        "families", []
+                    ):
+                        continue
+
+                    subset_ids_ordered.append(subset_id)
+
+        # Order representations from subsets.
+        self.log.debug("repres_by_subset_id: {}".format(repres_by_subset_id))
+        representations_ordered = []
+        representations = []
+        for ordered_id in subset_ids_ordered:
+            for subset_id, repres in repres_by_subset_id.items():
+                if repres in representations:
+                    continue
+
+                if ordered_id == subset_id:
+                    representations_ordered.append((subset_id, repres))
+                    representations.append(repres)
+
+        self.log.debug("representations: {}".format(representations))
+
+        # Load ordered representations.
+        for subset_id, repres in representations_ordered:
+            subset_name = subsets_by_id[subset_id]["name"]
+
+            profile = profiles_per_subset_id[subset_id]
+            loaders_last_idx = len(profile["loaders"]) - 1
+            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
+
+            repre_by_low_name = {
+                repre["name"].lower(): repre for repre in repres
+            }
+
+            is_loaded = False
+            for repre_name_idx, profile_repre_name in enumerate(
+                profile["repre_names_lowered"]
+            ):
+                # Break iteration if representation was already loaded
+                if is_loaded:
+                    break
+
+                repre = repre_by_low_name.get(profile_repre_name)
+                if not repre:
+                    continue
+
+                for loader_idx, loader_name in enumerate(profile["loaders"]):
+                    if is_loaded:
+                        break
+
+                    loader = loaders_by_name.get(loader_name)
+                    if not loader:
+                        continue
+                    try:
+                        container = load_container(
+                            loader,
+                            repre["_id"],
+                            name=subset_name
+                        )
+                        loaded_containers.append(container)
+                        is_loaded = True
+
+                    except Exception as exc:
+                        if isinstance(exc, IncompatibleLoaderError):
+                            self.log.info((
+                                "Loader `{}` is not compatible with"
+                                " representation `{}`"
+                            ).format(loader_name, repre["name"]))
+
+                        else:
+                            self.log.error(
+                                "Unexpected error happened during loading",
+                                exc_info=True
+                            )
+
+                        msg = "Loading failed."
+                        if loader_idx < loaders_last_idx:
+                            msg += " Trying next loader."
+                        elif repre_name_idx < repre_names_last_idx:
+                            msg += " Trying next representation."
+                        else:
+                            msg += (
+                                " Loading of subset `{}` was not successful."
+                            ).format(subset_name)
+                        self.log.info(msg)
+
+        return loaded_containers
+
+    def _collect_last_version_repres(self, asset_docs):
+        """Collect subsets, versions and representations for asset_entities.
+
+        Args:
+            asset_docs (List[Dict[str, Any]]): Asset entities for which
+                we want to find the data.
+
+        Returns:
+            Dict[str, Any]: Collected entities.
+
+        Example output:
+        ```
+        {
+            {Asset ID}: {
+                "asset_entity": {asset document},
+                "subsets": {
+                    {Subset ID}: {
+                        "subset_entity": {subset document},
+                        "version": {
+                            "version_entity": {version document},
+                            "repres": [
+                                {representation document}, ...
+                            ]
+                        }
+                    },
+                    ...
+ } + output[asset_id]["subsets"][subset_id]["version"]["repres"] + ``` + """ + + output = {} + if not asset_docs: + return output + + asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs} + + project_name = legacy_io.active_project() + subsets = list(get_subsets( + project_name, asset_ids=asset_docs_by_ids.keys() + )) + subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} + + last_version_by_subset_id = get_last_versions( + project_name, subset_entity_by_ids.keys() + ) + last_version_docs_by_id = { + version["_id"]: version + for version in last_version_by_subset_id.values() + } + repre_docs = get_representations( + project_name, version_ids=last_version_docs_by_id.keys() + ) + + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = last_version_docs_by_id[version_id] + + subset_id = version_doc["parent"] + subset_doc = subset_entity_by_ids[subset_id] + + asset_id = subset_doc["parent"] + asset_doc = asset_docs_by_ids[asset_id] + + if asset_id not in output: + output[asset_id] = { + "asset_entity": asset_doc, + "subsets": {} + } + + if subset_id not in output[asset_id]["subsets"]: + output[asset_id]["subsets"][subset_id] = { + "subset_entity": subset_doc, + "version": { + "version_entity": version_doc, + "repres": [] + } + } + + output[asset_id]["subsets"][subset_id]["version"]["repres"].append( + repre_doc + ) + + return output diff --git a/openpype/pipeline/workfile/path_resolving.py b/openpype/pipeline/workfile/path_resolving.py new file mode 100644 index 0000000000..ed1d1d793e --- /dev/null +++ b/openpype/pipeline/workfile/path_resolving.py @@ -0,0 +1,464 @@ +import os +import re +import copy +import platform + +from openpype.client import get_project, get_asset_by_name +from openpype.settings import get_project_settings +from openpype.lib import ( + filter_profiles, + Logger, + StringTemplate, +) +from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data + + +def get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings=None +): + """Helper function to get template key for workfile template. + + Do the same as `get_workfile_template_key` but returns value for "session + context". + + Args: + asset_name(str): Name of asset document. + task_name(str): Task name for which is template key retrieved. + Must be available on asset document under `data.tasks`. + host_name(str): Name of host implementation for which is workfile + used. + project_name(str): Project name where asset and task is. + project_settings(Dict[str, Any]): Project settings for passed + 'project_name'. Not required at all but makes function faster. + """ + + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.tasks"] + ) + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + task_info = asset_tasks.get(task_name) or {} + task_type = task_info.get("type") + + return get_workfile_template_key( + task_type, host_name, project_name, project_settings + ) + + +def get_workfile_template_key( + task_type, host_name, project_name, project_settings=None +): + """Workfile template key which should be used to get workfile template. + + Function is using profiles from project settings to return right template + for passet task type and host name. + + Args: + task_type(str): Name of task type. + host_name(str): Name of host implementation (e.g. "maya", "nuke", ...) + project_name(str): Name of project in which context should look for + settings. 
+        project_settings(Dict[str, Any]): Prepared project settings for
+            the project name. Optional to make processing faster.
+
+    Returns:
+        str: Workfile template key.
+    """
+
+    default = "work"
+    if not task_type or not host_name:
+        return default
+
+    if not project_settings:
+        project_settings = get_project_settings(project_name)
+
+    try:
+        profiles = (
+            project_settings
+            ["global"]
+            ["tools"]
+            ["Workfiles"]
+            ["workfile_template_profiles"]
+        )
+    except Exception:
+        profiles = []
+
+    if not profiles:
+        return default
+
+    profile_filter = {
+        "task_types": task_type,
+        "hosts": host_name
+    }
+    profile = filter_profiles(profiles, profile_filter)
+    if profile:
+        return profile["workfile_template"] or default
+    return default
+
+
+def get_workdir_with_workdir_data(
+    workdir_data,
+    project_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill the workdir path from entered data and the project's anatomy.
+
+    It is possible to pass only the project's name instead of the project's
+    anatomy, but it is preferred to pass the anatomy if it is available, as
+    initialization of a new Anatomy object may be time consuming.
+
+    Args:
+        workdir_data (Dict[str, Any]): Data to fill workdir template.
+        project_name (str): Project's name.
+        anatomy (Anatomy): Anatomy object for specific project. Faster
+            processing if it is passed.
+        template_key (str): Key of work templates in anatomy templates. If not
+            passed `get_workfile_template_key_from_context` is used to get it.
+        project_settings(Dict[str, Any]): Prepared project settings for
+            the project name. Optional to make processing faster. It is used
+            only if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
+    """
+
+    if not anatomy:
+        anatomy = Anatomy(project_name)
+
+    if not template_key:
+        template_key = get_workfile_template_key(
+            workdir_data["task"]["type"],
+            workdir_data["app"],
+            workdir_data["project"]["name"],
+            project_settings
+        )
+
+    anatomy_filled = anatomy.format(workdir_data)
+    # Output is TemplateResult object which contains useful data
+    output = anatomy_filled[template_key]["folder"]
+    if output:
+        return output.normalized()
+    return output
+
+
+def get_workdir(
+    project_doc,
+    asset_doc,
+    task_name,
+    host_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill the workdir path from entered data and the project's anatomy.
+
+    Args:
+        project_doc (Dict[str, Any]): Mongo document of project from MongoDB.
+        asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB.
+        task_name (str): Task name for which workdir data are prepared.
+        host_name (str): Host name used in the workdir. This is required
+            because the workdir template may contain the `{app}` key. In
+            `Session` it is stored under the `AVALON_APP` key.
+        anatomy (Anatomy): Optional argument. Otherwise the Anatomy object is
+            created using the project name from `project_doc`. It is preferred
+            to pass this argument as initialization of a new Anatomy object
+            may be time consuming.
+        template_key (str): Key of work templates in anatomy templates.
+            Default value is defined in `get_workdir_with_workdir_data`.
+        project_settings(Dict[str, Any]): Prepared project settings for
+            the project name. Optional to make processing faster. It is used
+            only if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
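+
+    Example:
+        An illustrative call; the documents are assumed to be queried
+        beforehand and the task/host names are hypothetical::
+
+            workdir = get_workdir(project_doc, asset_doc, "modeling", "maya")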
+ """ + + if not anatomy: + anatomy = Anatomy(project_doc["name"]) + + workdir_data = get_template_data( + project_doc, asset_doc, task_name, host_name + ) + # Output is TemplateResult object which contain useful data + return get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + template_key, + project_settings + ) + + +def get_last_workfile_with_version( + workdir, file_template, fill_data, extensions +): + """Return last workfile version. + + Usign workfile template and it's filling data find most possible last + version of workfile which was created for the context. + + Functionality is fully based on knowing which keys are optional or what + values are expected as value. + + The last modified file is used if more files can be considered as + last workfile. + + Args: + workdir (str): Path to dir where workfiles are stored. + file_template (str): Template of file name. + fill_data (Dict[str, Any]): Data for filling template. + extensions (Iterable[str]): All allowed file extensions of workfile. + + Returns: + Tuple[Union[str, None], Union[int, None]]: Last workfile with version + if there is any workfile otherwise None for both. + """ + + if not os.path.exists(workdir): + return None, None + + dotted_extensions = set() + for ext in extensions: + if not ext.startswith("."): + ext = ".{}".format(ext) + dotted_extensions.add(ext) + + # Fast match on extension + filenames = [ + filename + for filename in os.listdir(workdir) + if os.path.splitext(filename)[-1] in dotted_extensions + ] + + # Build template without optionals, version to digits only regex + # and comment to any definable value. + # Escape extensions dot for regex + regex_exts = [ + "\\" + ext + for ext in dotted_extensions + ] + ext_expression = "(?:" + "|".join(regex_exts) + ")" + + # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end + file_template = re.sub(r"\.?{ext}", ext_expression, file_template) + # Replace optional keys with optional content regex + file_template = re.sub(r"<.*?>", r".*?", file_template) + # Replace `{version}` with group regex + file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) + file_template = re.sub(r"{comment.*?}", r".+?", file_template) + file_template = StringTemplate.format_strict_template( + file_template, fill_data + ) + + # Match with ignore case on Windows due to the Windows + # OS not being case-sensitive. This avoids later running + # into the error that the file did exist if it existed + # with a different upper/lower-case. 
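+    # For example (illustrative template and fill data only), a template
+    #   "{asset}_v{version:0>3}<_{comment}>.{ext}" with extension ".ma"
+    #   and fill data {"asset": "Hero"} ends up as a regex like
+    #   "Hero_v([0-9]+).*?(?:\.ma)"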
+    kwargs = {}
+    if platform.system().lower() == "windows":
+        kwargs["flags"] = re.IGNORECASE
+
+    # Get the highest version among existing matching files
+    version = None
+    output_filenames = []
+    for filename in sorted(filenames):
+        match = re.match(file_template, filename, **kwargs)
+        if not match:
+            continue
+
+        file_version = int(match.group(1))
+        if version is None or file_version > version:
+            output_filenames[:] = []
+            version = file_version
+
+        if file_version == version:
+            output_filenames.append(filename)
+
+    output_filename = None
+    if output_filenames:
+        if len(output_filenames) == 1:
+            output_filename = output_filenames[0]
+        else:
+            last_time = None
+            for _output_filename in output_filenames:
+                full_path = os.path.join(workdir, _output_filename)
+                mod_time = os.path.getmtime(full_path)
+                if last_time is None or last_time < mod_time:
+                    output_filename = _output_filename
+                    last_time = mod_time
+
+    return output_filename, version
+
+
+def get_last_workfile(
+    workdir, file_template, fill_data, extensions, full_path=False
+):
+    """Return last workfile filename.
+
+    Returns a file with version 1 if there is no workfile yet.
+
+    Args:
+        workdir(str): Path to dir where workfiles are stored.
+        file_template(str): Template of file name.
+        fill_data(Dict[str, Any]): Data for filling template.
+        extensions(Iterable[str]): All allowed file extensions of workfile.
+        full_path(bool): Full path to file is returned if set to True.
+
+    Returns:
+        str: Last or first workfile as filename or full path to filename.
+    """
+
+    filename, version = get_last_workfile_with_version(
+        workdir, file_template, fill_data, extensions
+    )
+    if filename is None:
+        data = copy.deepcopy(fill_data)
+        data["version"] = 1
+        data.pop("comment", None)
+        if not data.get("ext"):
+            data["ext"] = extensions[0]
+        data["ext"] = data["ext"].replace('.', '')
+        filename = StringTemplate.format_strict_template(file_template, data)
+
+    if full_path:
+        return os.path.normpath(os.path.join(workdir, filename))
+
+    return filename
+
+
+def get_custom_workfile_template(
+    project_doc,
+    asset_doc,
+    task_name,
+    host_name,
+    anatomy=None,
+    project_settings=None
+):
+    """Filter and fill workfile template profiles by the passed context.
+
+    A custom workfile template can be used as the first version of workfiles.
+    The template is a file on disk which is set in settings. The expected
+    settings structure to have this feature enabled is:
+    project settings
+    |- <host name>
+      |- workfile_builder
+        |- create_first_version   - a bool which must be set to 'True'
+        |- custom_templates       - profiles based on task name/type which
+                                    point to a file which is copied as
+                                    the first workfile
+
+    It is expected that the passed arguments are already queried documents of
+    the project and the asset that are parents of the processed task name.
+
+    Args:
+        project_doc (Dict[str, Any]): Project document from MongoDB.
+        asset_doc (Dict[str, Any]): Asset document from MongoDB.
+        task_name (str): Name of task for which templates are filtered.
+        host_name (str): Name of host.
+        anatomy (Anatomy): Optionally passed anatomy object for the project.
+        project_settings(Dict[str, Any]): Preloaded project settings.
+
+    Returns:
+        Union[str, None]: Path to the template, or None if no profile matches
+            the current context. Existence of the formatted path is not
+            validated.
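+
+    Example:
+        An illustrative profile from settings; the path values are
+        hypothetical::
+
+            {
+                "task_types": ["Compositing"],
+                "path": {
+                    "windows": "P:/templates/comp_template.nk",
+                    "darwin": "",
+                    "linux": ""
+                }
+            }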
+    """
+
+    log = Logger.get_logger("CustomWorkfileResolve")
+
+    project_name = project_doc["name"]
+    if project_settings is None:
+        project_settings = get_project_settings(project_name)
+
+    host_settings = project_settings.get(host_name)
+    if not host_settings:
+        log.info("Host \"{}\" doesn't have settings".format(host_name))
+        return None
+
+    workfile_builder_settings = host_settings.get("workfile_builder")
+    if not workfile_builder_settings:
+        log.info((
+            "Seems like an old version of settings is used."
+            " Can't access custom templates in host \"{}\"."
+        ).format(host_name))
+        return
+
+    if not workfile_builder_settings["create_first_version"]:
+        log.info((
+            "Project \"{}\" has disabled creation of first workfile for"
+            " host \"{}\""
+        ).format(project_name, host_name))
+        return
+
+    # Backwards compatibility
+    template_profiles = workfile_builder_settings.get("custom_templates")
+    if not template_profiles:
+        log.info(
+            "Custom templates are not filled. Skipping template copy."
+        )
+        return
+
+    if anatomy is None:
+        anatomy = Anatomy(project_name)
+
+    # get project, asset, task anatomy context data
+    anatomy_context_data = get_template_data(
+        project_doc, asset_doc, task_name, host_name
+    )
+    # add root dict
+    anatomy_context_data["root"] = anatomy.roots
+
+    # get task type for the task in context
+    current_task_type = anatomy_context_data["task"]["type"]
+
+    # get path from matching profile
+    matching_item = filter_profiles(
+        template_profiles,
+        {"task_types": current_task_type}
+    )
+    # when a path is available try to format it in case
+    # there are some anatomy template strings
+    if matching_item:
+        template = matching_item["path"][platform.system().lower()]
+        return StringTemplate.format_strict_template(
+            template, anatomy_context_data
+        ).normalized()
+
+    return None
+
+
+def get_custom_workfile_template_by_string_context(
+    project_name,
+    asset_name,
+    task_name,
+    host_name,
+    anatomy=None,
+    project_settings=None
+):
+    """Filter and fill workfile template profiles by the passed context.
+
+    The passed context consists of string representations of project, asset
+    and task names. The function will query the project and asset documents
+    to be able to use `get_custom_workfile_template` for the rest of the
+    logic.
+
+    Args:
+        project_name(str): Project name.
+        asset_name(str): Asset name.
+        task_name(str): Task name.
+        host_name (str): Name of host.
+        anatomy(Anatomy): Optionally prepared anatomy object for the passed
+            project.
+        project_settings(Dict[str, Any]): Preloaded project settings.
+
+    Returns:
+        Union[str, None]: Path to the template, or None if no profile matches
+            the current context. (Existence of the formatted path is not
+            validated.)
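+
+    Example:
+        An illustrative call; the context names are hypothetical::
+
+            path = get_custom_workfile_template_by_string_context(
+                "MyProject", "sh010", "compositing", "nuke"
+            )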
+ """ + + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) + + return get_custom_workfile_template( + project_doc, asset_doc, task_name, host_name, anatomy, project_settings + ) diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 7585ea4c59..f6e1d4f06b 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -4,10 +4,10 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui from openpype.client import get_representations -from openpype.lib import config from openpype.pipeline import load, Anatomy from openpype import resources, style +from openpype.lib.dateutils import get_datetime_data from openpype.lib.delivery import ( sizeof_fmt, path_from_representation, @@ -160,7 +160,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): selected_repres = self._get_selected_repres() - datetime_data = config.get_datetime_data() + datetime_data = get_datetime_data() template_name = self.dropdown.currentText() format_dict = get_format_dict(self.anatomy, self.root_line_edit.text()) for repre in self._representations: diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index 0794adfb67..8433816908 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -15,10 +15,8 @@ Provides: import json import pyblish.api -from openpype.lib import ( - get_system_general_anatomy_data -) from openpype.pipeline import legacy_io +from openpype.pipeline.template_data import get_template_data class CollectAnatomyContextData(pyblish.api.ContextPlugin): @@ -33,11 +31,15 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): "asset": "AssetName", "hierarchy": "path/to/asset", "task": "Working", + "user": "MeDespicable", + # Duplicated entry "username": "MeDespicable", + # Current host name + "app": "maya" + *** OPTIONAL *** - "app": "maya" # Current application base name - + mutliple keys from `datetimeData` # see it's collector + + mutliple keys from `datetimeData` (See it's collector) } """ @@ -45,52 +47,26 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): label = "Collect Anatomy Context Data" def process(self, context): + host_name = context.data["hostName"] + system_settings = context.data["system_settings"] project_entity = context.data["projectEntity"] - context_data = { - "project": { - "name": project_entity["name"], - "code": project_entity["data"].get("code") - }, - "username": context.data["user"], - "app": context.data["hostName"] - } - - context.data["anatomyData"] = context_data - - # add system general settings anatomy data - system_general_data = get_system_general_anatomy_data() - context_data.update(system_general_data) - - datetime_data = context.data.get("datetimeData") or {} - context_data.update(datetime_data) - asset_entity = context.data.get("assetEntity") + task_name = None if asset_entity: task_name = legacy_io.Session["AVALON_TASK"] - asset_tasks = asset_entity["data"]["tasks"] - task_type = asset_tasks.get(task_name, {}).get("type") + anatomy_data = get_template_data( + project_entity, asset_entity, task_name, host_name, system_settings + ) + anatomy_data.update(context.data.get("datetimeData") or {}) - project_task_types = project_entity["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") + username = context.data["user"] + anatomy_data["user"] = 
username
+        # Backwards compatibility for 'username' key
+        anatomy_data["username"] = username

-        asset_parents = asset_entity["data"]["parents"]
-        hierarchy = "/".join(asset_parents)
-
-        parent_name = project_entity["name"]
-        if asset_parents:
-            parent_name = asset_parents[-1]
-
-        context_data.update({
-            "asset": asset_entity["name"],
-            "parent": parent_name,
-            "hierarchy": hierarchy,
-            "task": {
-                "name": task_name,
-                "type": task_type,
-                "short": task_code,
-            }
-        })
+        # Store
+        context.data["anatomyData"] = anatomy_data

         self.log.info("Global anatomy Data collected")
-        self.log.debug(json.dumps(context_data, indent=4))
+        self.log.debug(json.dumps(anatomy_data, indent=4))
diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/openpype/plugins/publish/collect_anatomy_object.py
index b1415098b6..725cae2b14 100644
--- a/openpype/plugins/publish/collect_anatomy_object.py
+++ b/openpype/plugins/publish/collect_anatomy_object.py
@@ -1,29 +1,32 @@
 """Collect Anatomy object.

 Requires:
-    os.environ -> AVALON_PROJECT
+    context -> projectName

 Provides:
     context -> anatomy (openpype.pipeline.anatomy.Anatomy)
 """
-import os
+
 import pyblish.api

-from openpype.pipeline import Anatomy
+from openpype.pipeline import Anatomy, KnownPublishError


 class CollectAnatomyObject(pyblish.api.ContextPlugin):
-    """Collect Anatomy object into Context"""
+    """Collect Anatomy object into Context.
+
+    Order offset could be changed to '-0.45'.
+    """

     order = pyblish.api.CollectorOrder - 0.4
     label = "Collect Anatomy Object"

     def process(self, context):
-        project_name = os.environ.get("AVALON_PROJECT")
+        project_name = context.data.get("projectName")
         if project_name is None:
-            raise AssertionError(
-                "Environment `AVALON_PROJECT` is not set."
+            raise KnownPublishError((
+                "Project name is not set in 'projectName'. "
                 "Could not initialize project's Anatomy."
-            )
+            ))

         context.data["anatomy"] = Anatomy(project_name)
diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_avalon_entities.py
index 6cd0d136e8..3b05b6ae98 100644
--- a/openpype/plugins/publish/collect_avalon_entities.py
+++ b/openpype/plugins/publish/collect_avalon_entities.py
@@ -1,35 +1,38 @@
 """Collect Anatomy and global anatomy data.

 Requires:
-    session -> AVALON_PROJECT, AVALON_ASSET
+    session -> AVALON_ASSET
+    context -> projectName

 Provides:
-    context -> projectEntity - project entity from database
-    context -> assetEntity - asset entity from database
+    context -> projectEntity - Project document from database.
+    context -> assetEntity - Asset document from database only if 'asset' is
+        set in context.
 """

 import pyblish.api

 from openpype.client import get_project, get_asset_by_name
-from openpype.pipeline import legacy_io
+from openpype.pipeline import legacy_io, KnownPublishError


 class CollectAvalonEntities(pyblish.api.ContextPlugin):
-    """Collect Anatomy into Context"""
+    """Collect Anatomy into Context."""

     order = pyblish.api.CollectorOrder - 0.1
     label = "Collect Avalon Entities"

     def process(self, context):
         legacy_io.install()
-        project_name = legacy_io.Session["AVALON_PROJECT"]
+        project_name = context.data["projectName"]
         asset_name = legacy_io.Session["AVALON_ASSET"]
         task_name = legacy_io.Session["AVALON_TASK"]

         project_entity = get_project(project_name)
-        assert project_entity, (
-            "Project '{0}' was not found."
-        ).format(project_name)
+        if not project_entity:
+            raise KnownPublishError(
+                "Project '{0}' was not found.".format(project_name)
+            )
         self.log.debug("Collected Project \"{}\"".format(project_entity))

         context.data["projectEntity"] = project_entity
diff --git a/openpype/plugins/publish/collect_current_context.py b/openpype/plugins/publish/collect_current_context.py
new file mode 100644
index 0000000000..7e42700d7d
--- /dev/null
+++ b/openpype/plugins/publish/collect_current_context.py
@@ -0,0 +1,47 @@
+"""
+Provides:
+    context -> projectName (str)
+    context -> asset (str)
+    context -> task (str)
+"""
+
+import pyblish.api
+from openpype.pipeline import legacy_io
+
+
+class CollectCurrentContext(pyblish.api.ContextPlugin):
+    """Collect project context into publish context data.
+
+    Plugin does not override any value that is already set.
+    """
+
+    order = pyblish.api.CollectorOrder - 0.5
+    label = "Collect Current context"
+
+    def process(self, context):
+        # Make sure 'legacy_io' is installed
+        legacy_io.install()
+
+        # Check if values are already set
+        project_name = context.data.get("projectName")
+        asset_name = context.data.get("asset")
+        task_name = context.data.get("task")
+        if not project_name:
+            project_name = legacy_io.current_project()
+            context.data["projectName"] = project_name
+
+        if not asset_name:
+            asset_name = legacy_io.Session.get("AVALON_ASSET")
+            context.data["asset"] = asset_name
+
+        if not task_name:
+            task_name = legacy_io.Session.get("AVALON_TASK")
+            context.data["task"] = task_name
+
+        # QUESTION should we be explicit with keys? (the same on instances)
+        # - 'asset' -> 'assetName'
+        # - 'task' -> 'taskName'
+
+        self.log.info((
+            "Collected project context\nProject: {}\nAsset: {}\nTask: {}"
+        ).format(project_name, asset_name, task_name))
diff --git a/openpype/plugins/publish/collect_datetime_data.py b/openpype/plugins/publish/collect_datetime_data.py
index 1675ae1a98..b3178ca3d2 100644
--- a/openpype/plugins/publish/collect_datetime_data.py
+++ b/openpype/plugins/publish/collect_datetime_data.py
@@ -5,14 +5,14 @@ Provides:
 """
 import pyblish.api

-from openpype.api import config
+from openpype.lib.dateutils import get_datetime_data


 class CollectDateTimeData(pyblish.api.ContextPlugin):
-    order = pyblish.api.CollectorOrder
+    order = pyblish.api.CollectorOrder - 0.5
     label = "Collect DateTime data"

     def process(self, context):
         key = "datetimeData"
         if key not in context.data:
-            context.data[key] = config.get_datetime_data()
+            context.data[key] = get_datetime_data()
diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py
index d2be633cbe..9236c698ed 100644
--- a/openpype/plugins/publish/collect_from_create_context.py
+++ b/openpype/plugins/publish/collect_from_create_context.py
@@ -19,6 +19,9 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
         if not create_context:
             return

+        project_name = create_context.project_name
+        if project_name:
+            context.data["projectName"] = project_name
         for created_instance in create_context.instances:
             instance_data = created_instance.data_to_store()
             if instance_data["active"]:
@@ -44,7 +47,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
                 "subset": subset,
                 "asset": in_data["asset"],
                 "task": in_data["task"],
-                "label": subset,
+                "label": in_data.get("label") or subset,
                 "name": subset,
                 "family": in_data["family"],
                 "families": instance_families,
diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py
index
91d5162d62..687397be8a 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -1,7 +1,5 @@ import pyblish.api -from openpype.pipeline import legacy_io - class CollectHierarchy(pyblish.api.ContextPlugin): """Collecting hierarchy from `parents`. @@ -20,7 +18,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): def process(self, context): temp_context = {} - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = context.data["projectName"] final_context = {} final_context[project_name] = {} final_context[project_name]['entity_type'] = 'Project' diff --git a/openpype/plugins/publish/collect_machine_name.py b/openpype/plugins/publish/collect_machine_name.py index 72ef68f8ed..8c25966031 100644 --- a/openpype/plugins/publish/collect_machine_name.py +++ b/openpype/plugins/publish/collect_machine_name.py @@ -11,7 +11,7 @@ import pyblish.api class CollectMachineName(pyblish.api.ContextPlugin): label = "Local Machine Name" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 hosts = ["*"] def process(self, context): diff --git a/openpype/plugins/publish/collect_modules.py b/openpype/plugins/publish/collect_modules.py index 2f6cb1ef0e..d76096bcd9 100644 --- a/openpype/plugins/publish/collect_modules.py +++ b/openpype/plugins/publish/collect_modules.py @@ -7,7 +7,7 @@ import pyblish.api class CollectModules(pyblish.api.ContextPlugin): """Collect OpenPype modules.""" - order = pyblish.api.CollectorOrder - 0.45 + order = pyblish.api.CollectorOrder - 0.5 label = "OpenPype Modules" def process(self, context): diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index c86e777850..40e89e29bc 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -23,7 +23,7 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin): label = "Collect OTIO Frame Ranges" order = pyblish.api.CollectorOrder - 0.08 families = ["shot", "clip"] - hosts = ["resolve", "hiero", "flame"] + hosts = ["resolve", "hiero", "flame", "traypublisher"] def process(self, instance): # get basic variables diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index fc6a9b50f2..9c19f8a78e 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -116,8 +116,10 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): # check in two way if it is sequence if hasattr(otio.schema, "ImageSequenceReference"): # for OpenTimelineIO 0.13 and newer - if isinstance(media_ref, - otio.schema.ImageSequenceReference): + if isinstance( + media_ref, + otio.schema.ImageSequenceReference + ): is_sequence = True else: # for OpenTimelineIO 0.12 and older @@ -139,11 +141,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): padding=media_ref.frame_zero_padding ) collection.indexes.update( - [i for i in range(a_frame_start_h, (a_frame_end_h + 1))]) + list(range(a_frame_start_h, (a_frame_end_h + 1))) + ) - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) else: # in case it is file sequence but not new OTIO schema # `ImageSequenceReference` @@ -152,9 +152,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): path, trimmed_media_range_h, metadata) self.staging_dir, collection = 
collection_data - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) + self.log.debug(collection) + repre = self._create_representation( + frame_start, frame_end, collection=collection) else: _trim = False dirname, filename = os.path.split(media_ref.target_url) @@ -198,7 +198,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): if kwargs.get("collection"): collection = kwargs.get("collection") - files = [f for f in collection] + files = list(collection) ext = collection.format("{tail}") representation_data.update({ "name": ext[1:], @@ -220,7 +220,5 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): }) if kwargs.get("trim") is True: - representation_data.update({ - "tags": ["trim"] - }) + representation_data["tags"] = ["trim"] return representation_data diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py index 670e57ed10..8f8d0a5eeb 100644 --- a/openpype/plugins/publish/collect_rendered_files.py +++ b/openpype/plugins/publish/collect_rendered_files.py @@ -1,7 +1,7 @@ """Loads publishing context from json and continues in publish process. Requires: - anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11) + anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.4) Provides: context, instances -> All data from previous publishing process. @@ -12,7 +12,7 @@ import json import pyblish.api -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, KnownPublishError class CollectRenderedFiles(pyblish.api.ContextPlugin): @@ -20,7 +20,12 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): This collector will try to find json files in provided `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + Note: + We should split this collector and move the part which handle reading + of file and it's context from session data before collect anatomy + and instance creation dependent on anatomy can be done here. """ + order = pyblish.api.CollectorOrder - 0.2 # Keep "filesequence" for backwards compatibility of older jobs targets = ["filesequence", "farm"] @@ -118,23 +123,20 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): def process(self, context): self._context = context - assert os.environ.get("OPENPYPE_PUBLISH_DATA"), ( - "Missing `OPENPYPE_PUBLISH_DATA`") + if not os.environ.get("OPENPYPE_PUBLISH_DATA"): + raise KnownPublishError("Missing `OPENPYPE_PUBLISH_DATA`") + + # QUESTION + # Do we support (or want support) multiple files in the variable? + # - what if they have different context? paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep) - project_name = os.environ.get("AVALON_PROJECT") - if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` was not found." - "Could not set project `root` which may cause issues." 
- ) - - # TODO root filling should happen after collect Anatomy + # Using already collected Anatomy + anatomy = context.data["anatomy"] self.log.info("Getting root setting for project \"{}\"".format( - project_name + anatomy.project_name )) - anatomy = context.data["anatomy"] self.log.info("anatomy: {}".format(anatomy.roots)) try: session_is_set = False diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index 8bdf70b529..00f65b8b67 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -13,8 +13,6 @@ import copy import pyblish.api -from openpype.pipeline import legacy_io - class CollectResourcesPath(pyblish.api.InstancePlugin): """Generate directory path where the files and resources will be stored""" @@ -58,7 +56,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): "effect", "staticMesh", "skeletalMesh" - ] def process(self, instance): @@ -86,11 +83,10 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." - ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled["publish"]["path"] # Directory diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index 8d447ba595..6b4e5f48c5 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -30,9 +30,15 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) self.project = None - self.import_to_avalon(project_name, hierarchy_context) + self.import_to_avalon(context, project_name, hierarchy_context) - def import_to_avalon(self, project_name, input_data, parent=None): + def import_to_avalon( + self, + context, + project_name, + input_data, + parent=None, + ): for name in input_data: self.log.info("input_data[name]: {}".format(input_data[name])) entity_data = input_data[name] @@ -127,12 +133,19 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if unarchive_entity is None: # Create entity if doesn"t exist entity = self.create_avalon_asset( - project_name, name, data + name, data ) else: # Unarchive if entity was archived entity = self.unarchive_entity(unarchive_entity, data) + # make sure all relative instances have correct avalon data + self._set_avalon_data_to_relative_instances( + context, + project_name, + entity + ) + if update_data: # Update entity data with input data legacy_io.update_many( @@ -142,7 +155,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if "childs" in entity_data: self.import_to_avalon( - project_name, entity_data["childs"], entity + context, project_name, entity_data["childs"], entity ) def unarchive_entity(self, entity, data): @@ -159,20 +172,52 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): {"_id": entity["_id"]}, new_entity ) + return new_entity - def create_avalon_asset(self, project_name, name, data): - item = { + def create_avalon_asset(self, name, data): + asset_doc = { "schema": "openpype:asset-3.0", "name": name, "parent": self.project["_id"], "type": "asset", "data": data } - self.log.debug("Creating asset: 
{}".format(item))
-        entity_id = legacy_io.insert_one(item).inserted_id
+        self.log.debug("Creating asset: {}".format(asset_doc))
+        asset_doc["_id"] = legacy_io.insert_one(asset_doc).inserted_id

-        return get_asset_by_id(project_name, entity_id)
+        return asset_doc
+
+    def _set_avalon_data_to_relative_instances(
+        self,
+        context,
+        project_name,
+        asset_doc
+    ):
+        for instance in context:
+            # Skip instance if it already has a filled asset entity
+            if instance.data.get("assetEntity"):
+                continue
+            asset_name = asset_doc["name"]
+            inst_asset_name = instance.data["asset"]
+
+            if asset_name == inst_asset_name:
+                instance.data["assetEntity"] = asset_doc
+
+                # get parenting data
+                parents = asset_doc["data"].get("parents") or list()
+
+                # acquire only the closest (relative) parent
+                parent_name = project_name
+                if parents:
+                    parent_name = parents[-1]
+
+                # update avalon data on instance
+                instance.data["anatomyData"].update({
+                    "hierarchy": "/".join(parents),
+                    "task": {},
+                    "parent": parent_name
+                })

     def _get_active_assets(self, context):
         """ Returns only asset dictionary.
diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py
index 00c1748cdc..ed30a2f0f5 100644
--- a/openpype/plugins/publish/extract_otio_audio_tracks.py
+++ b/openpype/plugins/publish/extract_otio_audio_tracks.py
@@ -57,15 +57,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             audio_inputs.insert(0, empty)

         # create cmd
-        cmd = path_to_subprocess_arg(self.ffmpeg_path) + " "
-        cmd += self.create_cmd(audio_inputs)
-        cmd += path_to_subprocess_arg(audio_temp_fpath)
-
-        # run subprocess
-        self.log.debug("Executing: {}".format(cmd))
-        openpype.api.run_subprocess(
-            cmd, shell=True, logger=self.log
-        )
+        self.mix_audio(audio_inputs, audio_temp_fpath)

         # remove empty
         os.remove(empty["mediaPath"])
@@ -245,46 +237,80 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             "durationSec": max_duration_sec
         }

-    def create_cmd(self, inputs):
-        """Creating multiple input cmd string
+    def mix_audio(self, audio_inputs, audio_temp_fpath):
+        """Mix audio inputs into a single audio file.

         Args:
-            inputs (list): list of input dicts. Order mater.
+            audio_inputs (list): list of input dicts. Order matters.
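+            audio_temp_fpath (str): Path to the output mixed audio file.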
-        Returns:
-            str: the command body
-
         """
+
+        longest_input = 0
+        for audio_input in audio_inputs:
+            audio_len = audio_input["durationSec"]
+            if audio_len > longest_input:
+                longest_input = audio_len
+
         # create cmd segments
-        _inputs = ""
-        _filters = "-filter_complex \""
-        _channels = ""
-        for index, input in enumerate(inputs):
-            input_format = input.copy()
-            input_format.update({"i": index})
-            input_format["mediaPath"] = path_to_subprocess_arg(
-                input_format["mediaPath"]
+        input_args = []
+        filters = []
+        tag_names = []
+        for index, audio_input in enumerate(audio_inputs):
+            input_args.extend([
+                "-ss", str(audio_input["startSec"]),
+                "-t", str(audio_input["durationSec"]),
+                "-i", audio_input["mediaPath"]
+            ])
+
+            # Output tag of a filtered audio input
+            tag_name = "[r{}]".format(index)
+            tag_names.append(tag_name)
+            # Delay the audio by the item's delay
+            filters.append("[{}]adelay={}:all=1{}".format(
+                index, audio_input["delayMilSec"], tag_name
+            ))
+
+        # Mixing filter
+        # - dropout transition (when audio will get louder) is set to be
+        #   higher than any input audio item
+        # - volume is set to the number of inputs - each mix adds 1/n volume
+        #   where n is the input index (to get more info read the ffmpeg docs
+        #   and send a gift card to a contributor)
+        filters.append(
+            (
+                "{}amix=inputs={}:duration=first:"
+                "dropout_transition={},volume={}[a]"
+            ).format(
+                "".join(tag_names),
+                len(audio_inputs),
+                (longest_input * 1000) + 1000,
+                len(audio_inputs),
             )
+        )

-            _inputs += (
-                "-ss {startSec} "
-                "-t {durationSec} "
-                "-i {mediaPath} "
-            ).format(**input_format)
+        # Store filters to a file (separated by ',')
+        # - this is to avoid the "too long" command issue in ffmpeg
+        with tempfile.NamedTemporaryFile(
+            delete=False, mode="w", suffix=".txt"
+        ) as tmp_file:
+            filters_tmp_filepath = tmp_file.name
+            tmp_file.write(",".join(filters))

-            _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format(
-                **input_format)
-            _channels += "[r{}]".format(index)
+        args = [self.ffmpeg_path]
+        args.extend(input_args)
+        args.extend([
+            "-filter_complex_script", filters_tmp_filepath,
+            "-map", "[a]"
+        ])
+        args.append(audio_temp_fpath)

-        # merge all cmd segments together
-        cmd = _inputs + _filters + _channels
-        cmd += str(
-            "amix=inputs={inputs}:duration=first:"
-            "dropout_transition=1000,volume={inputs}[a]\" "
-        ).format(inputs=len(inputs))
-        cmd += "-map \"[a]\" "
+        # run subprocess
+        self.log.debug("Executing: {}".format(args))
+        openpype.api.run_subprocess(args, logger=self.log)

-        return cmd
+        os.remove(filters_tmp_filepath)

     def create_temp_file(self, name):
         """Create temp wav file
diff --git a/openpype/plugins/publish/extract_otio_file.py b/openpype/plugins/publish/extract_otio_file.py
index 3bd217d5d4..4d310ce109 100644
--- a/openpype/plugins/publish/extract_otio_file.py
+++ b/openpype/plugins/publish/extract_otio_file.py
@@ -12,7 +12,7 @@ class ExtractOTIOFile(openpype.api.Extractor):
     label = "Extract OTIO file"
     order = pyblish.api.ExtractorOrder - 0.45
     families = ["workfile"]
-    hosts = ["resolve", "hiero"]
+    hosts = ["resolve", "hiero", "traypublisher"]

     def process(self, instance):
         # create representation data
diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py
index 1b6e2a1d61..7442d3aacb 100644
--- a/openpype/plugins/publish/extract_review.py
+++ b/openpype/plugins/publish/extract_review.py
@@ -360,6 +360,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 os.unlink(f)

             new_repre.update({
+                "fps": temp_data["fps"],
                 "name": "{}_{}".format(output_name, output_ext),
"outputName": output_name, "outputDef": output_def, @@ -1368,33 +1369,14 @@ class ExtractReview(pyblish.api.InstancePlugin): or input_width != output_width or pixel_aspect != 1 ): - if input_res_ratio < output_res_ratio: - self.log.debug( - "Input's resolution ratio is lower then output's" - ) - width_scale = int(input_width * scale_factor_by_height) - width_half_pad = int((output_width - width_scale) / 2) - height_scale = output_height - height_half_pad = 0 - else: - self.log.debug("Input is heigher then output") - width_scale = output_width - width_half_pad = 0 - height_scale = int(input_height * scale_factor_by_width) - height_half_pad = int((output_height - height_scale) / 2) - - self.log.debug("width_scale: `{}`".format(width_scale)) - self.log.debug("width_half_pad: `{}`".format(width_half_pad)) - self.log.debug("height_scale: `{}`".format(height_scale)) - self.log.debug("height_half_pad: `{}`".format(height_half_pad)) - filters.extend([ - "scale={}x{}:flags=lanczos".format( - width_scale, height_scale - ), - "pad={}:{}:{}:{}:{}".format( + ( + "scale={}x{}" + ":flags=lanczos" + ":force_original_aspect_ratio=decrease" + ).format(output_width, output_height), + "pad={}:{}:(ow-iw)/2:(oh-ih)/2:{}".format( output_width, output_height, - width_half_pad, height_half_pad, overscan_color_value ), "setsar=1" diff --git a/openpype/plugins/publish/extract_thumbnail.py b/openpype/plugins/publish/extract_thumbnail.py index 7933595b89..14b43beae8 100644 --- a/openpype/plugins/publish/extract_thumbnail.py +++ b/openpype/plugins/publish/extract_thumbnail.py @@ -1,4 +1,5 @@ import os +import tempfile import pyblish.api from openpype.lib import ( @@ -8,8 +9,6 @@ from openpype.lib import ( run_subprocess, path_to_subprocess_arg, - - execute, ) @@ -20,7 +19,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): order = pyblish.api.ExtractorOrder families = [ "imagesequence", "render", "render2d", "prerender", - "source", "plate", "take" + "source", "clip", "take" ] hosts = ["shell", "fusion", "resolve", "traypublisher"] enabled = False @@ -29,7 +28,27 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ffmpeg_args = None def process(self, instance): - self.log.info("subset {}".format(instance.data['subset'])) + subset_name = instance.data["subset"] + instance_repres = instance.data.get("representations") + if not instance_repres: + self.log.debug(( + "Instance {} does not have representations. Skipping" + ).format(subset_name)) + return + + self.log.info( + "Processing instance with subset name {}".format(subset_name) + ) + + # Skip if instance have 'review' key in data set to 'False' + if not self._is_review_instance(instance): + self.log.info("Skipping - no review set on instance.") + return + + # Check if already has thumbnail created + if self._already_has_thumbnail(instance_repres): + self.log.info("Thumbnail representation already present.") + return # skip crypto passes. # TODO: This is just a quick fix and has its own side-effects - it is @@ -37,20 +56,29 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): # This must be solved properly, maybe using tags on # representation that can be determined much earlier and # with better precision. - if 'crypto' in instance.data['subset'].lower(): + if "crypto" in subset_name.lower(): self.log.info("Skipping crypto passes.") return - # Skip if review not set. 
-        if not instance.data.get("review", True):
-            self.log.info("Skipping - no review set on instance.")
-            return
-
-        if self._already_has_thumbnail(instance):
-            self.log.info("Thumbnail representation already present.")
-            return
-
         filtered_repres = self._get_filtered_repres(instance)
+        if not filtered_repres:
+            self.log.info((
+                "Instance doesn't have representations"
+                " that can be used as source for thumbnail. Skipping"
+            ))
+            return
+
+        # Create temp directory for thumbnail
+        # - this is to avoid "overwrite" of the source file
+        dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
+        self.log.debug(
+            "Create temp directory {} for thumbnail".format(dst_staging)
+        )
+        # Store new staging to cleanup paths
+        instance.context.data["cleanupFullPaths"].append(dst_staging)
+
+        thumbnail_created = False
+        oiio_supported = is_oiio_supported()
         for repre in filtered_repres:
             repre_files = repre["files"]
             if not isinstance(repre_files, (list, tuple)):
@@ -59,41 +87,43 @@
             file_index = int(float(len(repre_files)) * 0.5)
             input_file = repre_files[file_index]

-            stagingdir = os.path.normpath(repre["stagingDir"])
-
-            full_input_path = os.path.join(stagingdir, input_file)
+            src_staging = os.path.normpath(repre["stagingDir"])
+            full_input_path = os.path.join(src_staging, input_file)
             self.log.info("input {}".format(full_input_path))
             filename = os.path.splitext(input_file)[0]
-            if not filename.endswith('.'):
-                filename += "."
-            jpeg_file = filename + "jpg"
-            full_output_path = os.path.join(stagingdir, jpeg_file)
+            jpeg_file = filename + ".jpg"
+            full_output_path = os.path.join(dst_staging, jpeg_file)

-            thumbnail_created = False
-            # Try to use FFMPEG if OIIO is not supported (for cases when
-            # oiiotool isn't available)
-            if not is_oiio_supported():
-                thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path)  # noqa
-            else:
+            if oiio_supported:
+                self.log.info("Trying to convert with OIIO")
                 # If the input can read by OIIO then use OIIO method for
                 # conversion otherwise use ffmpeg
-                self.log.info("Trying to convert with OIIO")  # noqa
-                thumbnail_created = self.create_thumbnail_oiio(full_input_path, full_output_path)  # noqa
+                thumbnail_created = self.create_thumbnail_oiio(
+                    full_input_path, full_output_path
+                )

-            if not thumbnail_created:
-                self.log.info("Converting with FFMPEG because input can't be read by OIIO.")  # noqa
-                thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path)  # noqa
-
-            # Skip the rest of the process if the thumbnail wasn't created
+            # Try to use FFMPEG if OIIO is not supported or for cases when
+            # oiiotool isn't available
             if not thumbnail_created:
-                self.log.warning("Thumbanil has not been created.")
-                return
+                if oiio_supported:
+                    self.log.info((
+                        "Converting with FFMPEG because input"
+                        " can't be read by OIIO."
+                    ))
+
+                thumbnail_created = self.create_thumbnail_ffmpeg(
+                    full_input_path, full_output_path
+                )
+
+            # Skip representation and try next one if wasn't created
+            if not thumbnail_created:
+                continue

             new_repre = {
                 "name": "thumbnail",
                 "ext": "jpg",
                 "files": jpeg_file,
-                "stagingDir": stagingdir,
+                "stagingDir": dst_staging,
                 "thumbnail": True,
                 "tags": ["thumbnail"]
             }
@@ -106,12 +136,21 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             # There is no need to create more then one thumbnail
             break

-    def _already_has_thumbnail(self, instance):
-        for repre in instance.data.get("representations", []):
+        if not thumbnail_created:
+            self.log.warning("Thumbnail has not been created.")
+
+    def _is_review_instance(self, instance):
+        # TODO: We should probably handle "not creating" of thumbnail
+        #   another way than checking for the "review" key on instance data?
+        if instance.data.get("review", True):
+            return True
+        return False
+
+    def _already_has_thumbnail(self, repres):
+        for repre in repres:
             self.log.info("repre {}".format(repre))
             if repre["name"] == "thumbnail":
                 return True
-
         return False

     def _get_filtered_repres(self, instance):
@@ -136,12 +175,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
     def create_thumbnail_oiio(self, src_path, dst_path):
         self.log.info("outputting {}".format(dst_path))
         oiio_tool_path = get_oiio_tools_path()
-        oiio_cmd = [oiio_tool_path, "-a",
-                    src_path, "-o",
-                    dst_path
-                    ]
-        subprocess_exr = " ".join(oiio_cmd)
-        self.log.info(f"running: {subprocess_exr}")
+        oiio_cmd = [
+            oiio_tool_path,
+            "-a", src_path,
+            "-o", dst_path
+        ]
+        self.log.info("running: {}".format(" ".join(oiio_cmd)))
         try:
             run_subprocess(oiio_cmd, logger=self.log)
             return True
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/plugins/publish/extract_trim_video_audio.py
similarity index 74%
rename from openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py
rename to openpype/plugins/publish/extract_trim_video_audio.py
index 51dc84e9a2..06817c4b5a 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py
+++ b/openpype/plugins/publish/extract_trim_video_audio.py
@@ -14,7 +14,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
     # must be before `ExtractThumbnailSP`
     order = pyblish.api.ExtractorOrder - 0.01
     label = "Extract Trim Video/Audio"
-    hosts = ["standalonepublisher"]
+    hosts = ["standalonepublisher", "traypublisher"]
     families = ["clip", "trimming"]

     # make sure it is enabled only if at least both families are available
@@ -40,6 +40,21 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
         fps = instance.data["fps"]
         video_file_path = instance.data["editorialSourcePath"]
         extensions = instance.data.get("extensions", ["mov"])
+        output_file_type = instance.data.get("outputFileType")
+        reviewable = "review" in instance.data["families"]
+
+        frame_start = int(instance.data["frameStart"])
+        frame_end = int(instance.data["frameEnd"])
+        handle_start = instance.data["handleStart"]
+        handle_end = instance.data["handleEnd"]
+
+        clip_start_h = float(instance.data["clipInH"])
+        _dur = instance.data["clipDuration"]
+        handle_dur = (handle_start + handle_end)
+        clip_dur_h = float(_dur + handle_dur)
+
+        if output_file_type:
+            extensions = [output_file_type]

         for ext in extensions:
             self.log.info("Processing ext: `{}`".format(ext))
@@ -49,16 +64,10 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
             clip_trimed_path = os.path.join(
                 staging_dir, instance.data["name"] + ext)

-            # # check video file metadata
metadata - # input_data = plib.get_ffprobe_streams(video_file_path)[0] - # self.log.debug(f"__ input_data: `{input_data}`") - - start = float(instance.data["clipInH"]) - dur = float(instance.data["clipDurationH"]) if ext == ".wav": # offset time as ffmpeg is having bug - start += 0.5 + clip_start_h += 0.5 # remove "review" from families instance.data["families"] = [ fml for fml in instance.data["families"] @@ -67,9 +76,9 @@ ffmpeg_args = [ ffmpeg_path, - "-ss", str(start / fps), + "-ss", str(clip_start_h / fps), "-i", video_file_path, - "-t", str(dur / fps) + "-t", str(clip_dur_h / fps) ] if ext in [".mov", ".mp4"]: ffmpeg_args.extend([ @@ -98,14 +107,15 @@ "ext": ext[1:], "files": os.path.basename(clip_trimed_path), "stagingDir": staging_dir, - "frameStart": int(instance.data["frameStart"]), - "frameEnd": int(instance.data["frameEnd"]), - "frameStartFtrack": int(instance.data["frameStartH"]), - "frameEndFtrack": int(instance.data["frameEndH"]), + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartFtrack": frame_start - handle_start, + "frameEndFtrack": frame_end + handle_end, "fps": fps, + "tags": [] } - if ext in [".mov", ".mp4"]: + if ext in [".mov", ".mp4"] and reviewable: repre.update({ "thumbnail": True, "tags": ["review", "ftrackreview", "delete"]}) diff --git a/openpype/plugins/publish/help/validate_containers.xml b/openpype/plugins/publish/help/validate_containers.xml new file mode 100644 index 0000000000..5d18bb4c19 --- /dev/null +++ b/openpype/plugins/publish/help/validate_containers.xml @@ -0,0 +1,23 @@ + + + +Not up-to-date assets + +## Outdated containers found + +Scene contains one or more outdated loaded containers, e.g. versions loaded into the scene by the Loader are not the latest. + +### How to repair? + +Use 'Scene Inventory' and update all highlighted old containers to the latest version OR + refresh Publish and switch the 'Validate Containers' toggle on the 'Options' tab. + + WARNING: Skipping this validator will result in publishing (and probably rendering) old versions of loaded assets. + + +### __Detailed Info__ (optional) + +This validates whether you're working with the latest versions of published content loaded into your scene. This protects you from using outdated versions of an asset. + + + \ No newline at end of file diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py index 8532691e61..f99c718f8a 100644 --- a/openpype/plugins/publish/integrate.py +++ b/openpype/plugins/publish/integrate.py @@ -5,11 +5,24 @@ import copy import clique import six +from openpype.client.operations import ( + OperationsSession, + new_subset_document, + new_version_doc, + new_representation_doc, + prepare_subset_update_data, + prepare_version_update_data, + prepare_representation_update_data, +) from bson.objectid import ObjectId -from pymongo import DeleteMany, ReplaceOne, InsertOne, UpdateOne import pyblish.api -import openpype.api +from openpype.client import ( + get_representations, + get_subset_by_name, + get_version_by_name, +) +from openpype.lib import source_hash from openpype.lib.profiles_filtering import filter_profiles from openpype.lib.file_transaction import FileTransaction from openpype.pipeline import legacy_io @@ -18,41 +31,6 @@ from openpype.pipeline.publish import KnownPublishError log = logging.getLogger(__name__) -def assemble(files): - """Convenience `clique.assemble` wrapper for files of a single collection.
- - Unlike `clique.assemble` this wrapper does not allow more than a single - Collection nor any remainder files. Errors will be raised when not only - a single collection is assembled. - - Returns: - clique.Collection: A single sequence Collection - - Raises: - ValueError: Error is raised when files do not result in a single - collected Collection. - - """ - # todo: move this to lib? - # Get the sequence as a collection. The files must be of a single - # sequence and have no remainder outside of the collections. - patterns = [clique.PATTERNS["frames"]] - collections, remainder = clique.assemble(files, - minimum_items=1, - patterns=patterns) - if not collections: - raise ValueError("No collections found in files: " - "{}".format(files)) - if remainder: - raise ValueError("Files found not detected as part" - " of a sequence: {}".format(remainder)) - if len(collections) > 1: - raise ValueError("Files in sequence are not part of a" - " single sequence collection: " - "{}".format(collections)) - return collections[0] - - def get_instance_families(instance): """Get all families of the instance""" # todo: move this to lib? @@ -73,12 +51,6 @@ def get_frame_padded(frame, padding): return "{frame:0{padding}d}".format(padding=padding, frame=frame) -def get_first_frame_padded(collection): - """Return first frame as padded number from `clique.Collection`""" - start_frame = next(iter(collection.indexes)) - return get_frame_padded(start_frame, padding=collection.padding) - - class IntegrateAsset(pyblish.api.InstancePlugin): """Register publish in the database and transfer files to destinations. @@ -156,14 +128,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin): "mvUsdOverride", "simpleUnrealTexture" ] - exclude_families = ["clip", "render.farm"] + default_template_name = "publish" # Representation context keys that should always be written to # the database even if not used by the destination template db_representation_context_keys = [ "project", "asset", "task", "subset", "version", "representation", - "family", "hierarchy", "username" + "family", "hierarchy", "username", "output" ] skip_host_families = [] @@ -190,14 +162,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin): ).format(instance.data["family"])) return - # Exclude instances that also contain families from exclude families - families = set(get_instance_families(instance)) - exclude = families & set(self.exclude_families) - if exclude: - self.log.debug("Instance not integrated due to exclude " - "families found: {}".format(", ".join(exclude))) - return - file_transactions = FileTransaction(log=self.log) try: self.register(instance, file_transactions, filtered_repres) @@ -274,6 +238,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): return filtered_repres def register(self, instance, file_transactions, filtered_repres): + project_name = legacy_io.active_project() + instance_stagingdir = instance.data.get("stagingDir") if not instance_stagingdir: self.log.info(( @@ -289,19 +255,22 @@ class IntegrateAsset(pyblish.api.InstancePlugin): template_name = self.get_template_name(instance) - subset, subset_writes = self.prepare_subset(instance) - version, version_writes = self.prepare_version(instance, subset) + op_session = OperationsSession() + subset = self.prepare_subset( + instance, op_session, project_name + ) + version = self.prepare_version( + instance, op_session, subset, project_name + ) instance.data["versionEntity"] = version # Get existing representations (if any) existing_repres_by_name = { - repres["name"].lower(): repres for 
repres in legacy_io.find( - { - "parent": version["_id"], - "type": "representation" - }, - # Only care about id and name of existing representations - projection={"_id": True, "name": True} + repre_doc["name"].lower(): repre_doc + for repre_doc in get_representations( + project_name, + version_ids=[version["_id"]], + fields=["_id", "name"] ) } @@ -341,7 +310,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Transaction to reduce the chances of another publish trying to # publish to the same version number since that chance can greatly # increase if the file transaction takes a long time. - legacy_io.bulk_write(subset_writes + version_writes) + op_session.commit() + self.log.info("Subset {subset[name]} and Version {version[name]} " "written to database..".format(subset=subset, version=version)) @@ -373,49 +343,49 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Finalize the representations now the published files are integrated # Get 'files' info for representations and its attached resources - representation_writes = [] new_repre_names_low = set() for prepared in prepared_representations: - representation = prepared["representation"] + repre_doc = prepared["representation"] + repre_update_data = prepared["repre_doc_update_data"] transfers = prepared["transfers"] destinations = [dst for src, dst in transfers] - representation["files"] = self.get_files_info( + repre_doc["files"] = self.get_files_info( destinations, sites=sites, anatomy=anatomy ) # Add the version resource file infos to each representation - representation["files"] += resource_file_infos + repre_doc["files"] += resource_file_infos # Set up representation for writing to the database. Since # we *might* be overwriting an existing entry if the version # already existed we'll use ReplaceOnce with `upsert=True` - representation_writes.append(ReplaceOne( - filter={"_id": representation["_id"]}, - replacement=representation, - upsert=True - )) + if repre_update_data is None: + op_session.create_entity( + project_name, repre_doc["type"], repre_doc + ) + else: + op_session.update_entity( + project_name, + repre_doc["type"], + repre_doc["_id"], + repre_update_data + ) - new_repre_names_low.add(representation["name"].lower()) + new_repre_names_low.add(repre_doc["name"].lower()) # Delete any existing representations that didn't get any new data # if the instance is not set to append mode if not instance.data.get("append", False): - delete_names = set() for name, existing_repres in existing_repres_by_name.items(): if name not in new_repre_names_low: # We add the exact representation name because `name` is # lowercase for name matching only and not in the database - delete_names.add(existing_repres["name"]) - if delete_names: - representation_writes.append(DeleteMany( - filter={ - "parent": version["_id"], - "name": {"$in": list(delete_names)} - } - )) + op_session.delete_entity( + project_name, "representation", existing_repres["_id"] + ) - # Write representations to the database - legacy_io.bulk_write(representation_writes) + self.log.debug("{}".format(op_session.to_data())) + op_session.commit() # Backwards compatibility # todo: can we avoid the need to store this? 
@@ -426,17 +396,16 @@ class IntegrateAsset(pyblish.api.InstancePlugin): self.log.info("Registered {} representations" "".format(len(prepared_representations))) - def prepare_subset(self, instance): - asset = instance.data.get("assetEntity") + def prepare_subset(self, instance, op_session, project_name): + asset_doc = instance.data["assetEntity"] subset_name = instance.data["subset"] + family = instance.data["family"] self.log.debug("Subset: {}".format(subset_name)) # Get existing subset if it exists - subset = legacy_io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": subset_name - }) + existing_subset_doc = get_subset_by_name( + project_name, subset_name, asset_doc["_id"] + ) # Define subset data data = { @@ -447,69 +416,79 @@ class IntegrateAsset(pyblish.api.InstancePlugin): if subset_group: data["subsetGroup"] = subset_group - bulk_writes = [] - if subset is None: + subset_id = None + if existing_subset_doc: + subset_id = existing_subset_doc["_id"] + subset_doc = new_subset_document( + subset_name, family, asset_doc["_id"], data, subset_id + ) + + if existing_subset_doc is None: # Create a new subset self.log.info("Subset '%s' not found, creating ..." % subset_name) - subset = { - "_id": ObjectId(), - "schema": "openpype:subset-3.0", - "type": "subset", - "name": subset_name, - "data": data, - "parent": asset["_id"] - } - bulk_writes.append(InsertOne(subset)) + op_session.create_entity( + project_name, subset_doc["type"], subset_doc + ) else: # Update existing subset data with new data and set in database. # We also change the found subset in-place so we don't need to # re-query the subset afterwards - subset["data"].update(data) - bulk_writes.append(UpdateOne( - {"type": "subset", "_id": subset["_id"]}, - {"$set": { - "data": subset["data"] - }} - )) + subset_doc["data"].update(data) + update_data = prepare_subset_update_data( + existing_subset_doc, subset_doc + ) + op_session.update_entity( + project_name, + subset_doc["type"], + subset_doc["_id"], + update_data + ) self.log.info("Prepared subset: {}".format(subset_name)) - return subset, bulk_writes - - def prepare_version(self, instance, subset): + return subset_doc + def prepare_version(self, instance, op_session, subset_doc, project_name): version_number = instance.data["version"] - version = { - "schema": "openpype:version-3.0", - "type": "version", - "parent": subset["_id"], - "name": version_number, - "data": self.create_version_data(instance) - } + existing_version = get_version_by_name( + project_name, + version_number, + subset_doc["_id"], + fields=["_id"] + ) + version_id = None + if existing_version: + version_id = existing_version["_id"] - existing_version = legacy_io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': version_number - }, projection={"_id": True}) + version_data = self.create_version_data(instance) + version_doc = new_version_doc( + version_number, + subset_doc["_id"], + version_data, + version_id + ) if existing_version: self.log.debug("Updating existing version ...") - version["_id"] = existing_version["_id"] + update_data = prepare_version_update_data( + existing_version, version_doc + ) + op_session.update_entity( + project_name, + version_doc["type"], + version_doc["_id"], + update_data + ) else: self.log.debug("Creating new version ...") - version["_id"] = ObjectId() + op_session.create_entity( + project_name, version_doc["type"], version_doc + ) - bulk_writes = [ReplaceOne( - filter={"_id": version["_id"]}, - replacement=version, - upsert=True - )] + 
self.log.info("Prepared version: v{0:03d}".format(version_doc["name"])) - self.log.info("Prepared version: v{0:03d}".format(version["name"])) - - return version, bulk_writes + return version_doc def prepare_representation(self, repre, template_name, @@ -520,20 +499,22 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # pre-flight validations if repre["ext"].startswith("."): - raise ValueError("Extension must not start with a dot '.': " - "{}".format(repre["ext"])) + raise KnownPublishError(( + "Extension must not start with a dot '.': {}" + ).format(repre["ext"])) if repre.get("transfers"): - raise ValueError("Representation is not allowed to have transfers" - "data before integration. They are computed in " - "the integrator" - "Got: {}".format(repre["transfers"])) + raise KnownPublishError(( + "Representation is not allowed to have transfers" + "data before integration. They are computed in " + "the integrator. Got: {}" + ).format(repre["transfers"])) # create template data for Anatomy template_data = copy.deepcopy(instance.data["anatomyData"]) # required representation keys - files = repre['files'] + files = repre["files"] template_data["representation"] = repre["name"] template_data["ext"] = repre["ext"] @@ -549,95 +530,119 @@ class IntegrateAsset(pyblish.api.InstancePlugin): }.items(): # Allow to take value from representation # if not found also consider instance.data - if key in repre: - value = repre[key] - elif key in instance.data: - value = instance.data[key] - else: - continue - template_data[anatomy_key] = value + value = repre.get(key) + if value is None: + value = instance.data.get(key) - if repre.get('stagingDir'): - stagingdir = repre['stagingDir'] - else: + if value is not None: + template_data[anatomy_key] = value + + stagingdir = repre.get("stagingDir") + if not stagingdir: # Fall back to instance staging dir if not explicitly # set for representation in the instance - self.log.debug("Representation uses instance staging dir: " - "{}".format(instance_stagingdir)) + self.log.debug(( + "Representation uses instance staging dir: {}" + ).format(instance_stagingdir)) stagingdir = instance_stagingdir + if not stagingdir: - raise ValueError("No staging directory set for representation: " - "{}".format(repre)) + raise KnownPublishError( + "No staging directory set for representation: {}".format(repre) + ) self.log.debug("Anatomy template name: {}".format(template_name)) - anatomy = instance.context.data['anatomy'] - template = os.path.normpath(anatomy.templates[template_name]["path"]) + anatomy = instance.context.data["anatomy"] + publish_template_category = anatomy.templates[template_name] + template = os.path.normpath(publish_template_category["path"]) is_udim = bool(repre.get("udim")) + is_sequence_representation = isinstance(files, (list, tuple)) if is_sequence_representation: # Collection of files (sequence) - assert not any(os.path.isabs(fname) for fname in files), ( - "Given file names contain full paths" - ) + if any(os.path.isabs(fname) for fname in files): + raise KnownPublishError("Given file names contain full paths") - src_collection = assemble(files) + src_collections, remainders = clique.assemble(files) + if len(files) < 2 or len(src_collections) != 1 or remainders: + raise KnownPublishError(( + "Files of representation does not contain proper" + " sequence files.\nCollected collections: {}" + "\nCollected remainders: {}" + ).format( + ", ".join([str(col) for col in src_collections]), + ", ".join([str(rem) for rem in remainders]) + )) - # If the representation 
has `frameStart` set it renumbers the - # frame indices of the published collection. It will start from - # that `frameStart` index instead. Thus if that frame start - # differs from the collection we want to shift the destination - # frame indices from the source collection. + src_collection = src_collections[0] destination_indexes = list(src_collection.indexes) - destination_padding = len(get_first_frame_padded(src_collection)) - if repre.get("frameStart") is not None and not is_udim: - index_frame_start = int(repre.get("frameStart")) - - render_template = anatomy.templates[template_name] - # todo: should we ALWAYS manage the frame padding even when not - # having `frameStart` set? - frame_start_padding = int( - render_template.get( - "frame_padding", - render_template.get("padding") - ) + # Use last frame for minimum padding + # - that should cover both 'udim' and 'frame' minimum padding + destination_padding = len(str(destination_indexes[-1])) + if not is_udim: + # Change padding for frames if template has defined higher + # padding. + template_padding = int( + publish_template_category["frame_padding"] ) + if template_padding > destination_padding: + destination_padding = template_padding - # Shift destination sequence to the start frame - src_start_frame = next(iter(src_collection.indexes)) - shift = index_frame_start - src_start_frame - if shift: + # If the representation has `frameStart` set it renumbers the + # frame indices of the published collection. It will start from + # that `frameStart` index instead. Thus if that frame start + # differs from the collection we want to shift the destination + # frame indices from the source collection. + repre_frame_start = repre.get("frameStart") + if repre_frame_start is not None: + index_frame_start = int(repre["frameStart"]) + # Shift destination sequence to the start frame destination_indexes = [ - frame + shift for frame in destination_indexes + index_frame_start + idx + for idx in range(len(destination_indexes)) ] - destination_padding = frame_start_padding # To construct the destination template with anatomy we require # a Frame or UDIM tile set for the template data. We use the first # index of the destination for that because that could've shifted # from the source indexes, etc. 
- first_index_padded = get_frame_padded(frame=destination_indexes[0], - padding=destination_padding) - if is_udim: - # UDIM representations handle ranges in a different manner - template_data["udim"] = first_index_padded - else: - template_data["frame"] = first_index_padded + first_index_padded = get_frame_padded( + frame=destination_indexes[0], + padding=destination_padding + ) # Construct destination collection from template - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled[template_name]["path"] - repre_context = template_filled.used_values - self.log.debug("Template filled: {}".format(str(template_filled))) - dst_collection = assemble([os.path.normpath(template_filled)]) + repre_context = None + dst_filepaths = [] + for index in destination_indexes: + if is_udim: + template_data["udim"] = index + else: + template_data["frame"] = index + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + dst_filepaths.append(template_filled) + if repre_context is None: + self.log.debug( + "Template filled: {}".format(str(template_filled)) + ) + repre_context = template_filled.used_values + + # Make sure context contains frame + # NOTE: Frame would not be available only if template does not + # contain '{frame}' in template -> Do we want to support it? + if not is_udim: + repre_context["frame"] = first_index_padded # Update the destination indexes and padding - dst_collection.indexes.clear() - dst_collection.indexes.update(set(destination_indexes)) + dst_collection = clique.assemble(dst_filepaths)[0][0] dst_collection.padding = destination_padding - assert ( - len(src_collection.indexes) == len(dst_collection.indexes) - ), "This is a bug" + if len(src_collection.indexes) != len(dst_collection.indexes): + raise KnownPublishError(( + "This is a bug. Source sequence frames length" + " does not match integration frames length" + )) # Multiple file transfers transfers = [] @@ -648,9 +653,13 @@ class IntegrateAsset(pyblish.api.InstancePlugin): else: # Single file fname = files - assert not os.path.isabs(fname), ( - "Given file name is a full path" - ) + if os.path.isabs(fname): + self.log.error( + "Filename in representation is a filepath: {}".format(fname) + ) + raise KnownPublishError( + "This is a bug. Representation file name is a full path" + ) # Manage anatomy template data template_data.pop("frame", None) @@ -680,9 +689,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Also add these values to the context even if not used by the # destination template value = template_data.get(key) - if not value: - continue - repre_context[key] = template_data[key] + if value is not None: + repre_context[key] = value # Explicitly store the full list even though template data might # have a different value because it uses just a single udim tile @@ -691,47 +699,34 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Use previous representation's id if there is a name match existing = existing_repres_by_name.get(repre["name"].lower()) + repre_id = None if existing: repre_id = existing["_id"] - else: - repre_id = ObjectId() - # Backwards compatibility: # Store first transferred destination as published path data - # todo: can we remove this? - # todo: We shouldn't change data that makes its way back into - # instance.data[] until we know the publish actually succeeded - # otherwise `published_path` might not actually be valid?
+ # - used primarily for reviews that are integrated to custom modules + # TODO we should probably store all integrated files + # related to the representation? published_path = transfers[0][1] - repre["published_path"] = published_path # Backwards compatibility + repre["published_path"] = published_path # todo: `repre` is not the actual `representation` entity # we should simplify/clarify difference between data above # and the actual representation entity for the database data = repre.get("data", {}) - data.update({'path': published_path, 'template': template}) - representation = { - "_id": repre_id, - "schema": "openpype:representation-2.0", - "type": "representation", - "parent": version["_id"], - "name": repre['name'], - "data": data, - - # Imprint shortcut to context for performance reasons. - "context": repre_context - } - - # todo: simplify/streamline which additional data makes its way into - # the representation context - if repre.get("outputName"): - representation["context"]["output"] = repre['outputName'] - - if is_sequence_representation and repre.get("frameStart") is not None: - representation['context']['frame'] = template_data["frame"] + data.update({"path": published_path, "template": template}) + repre_doc = new_representation_doc( + repre["name"], version["_id"], repre_context, data, repre_id + ) + update_data = None + if repre_id is not None: + update_data = prepare_representation_update_data( + existing, repre_doc + ) return { - "representation": representation, + "representation": repre_doc, + "repre_doc_update_data": update_data, "anatomy_data": template_data, "transfers": transfers, # todo: avoid the need for 'published_files' used by Integrate Hero @@ -789,7 +784,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): version_data[key] = instance.data[key] # Include instance.data[versionData] directly - version_data_instance = instance.data.get('versionData') + version_data_instance = instance.data.get("versionData") if version_data_instance: version_data.update(version_data_instance) @@ -829,6 +824,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): def get_profile_filter_criteria(self, instance): """Return filter criteria for `filter_profiles`""" + # Anatomy data is pre-filled by Collectors anatomy_data = instance.data["anatomyData"] @@ -859,6 +855,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): path: modified path if possible, or unmodified path + warning logged """ + success, rootless_path = anatomy.find_root_template_from_path(path) if success: path = rootless_path @@ -880,6 +877,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): output_resources: array of dictionaries to be added to 'files' key in representation """ + file_infos = [] for file_path in destinations: file_info = self.prepare_file_info(file_path, anatomy, sites=sites) @@ -899,10 +897,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin): Returns: dict: file info dictionary """ + return { "_id": ObjectId(), "path": self.get_rootless_path(anatomy, path), "size": os.path.getsize(path), - "hash": openpype.api.source_hash(path), + "hash": source_hash(path), "sites": sites } diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 5f97a9bd41..7d698ff98d 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -71,7 +71,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): template_key = self._get_template_key(instance) anatomy = 
instance.context.data["anatomy"] - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = anatomy.project_name if template_key not in anatomy.templates: self.log.warning(( "!!! Anatomy of project \"{}\" does not have set" @@ -313,13 +313,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): } repre_context = template_filled.used_values for key in self.db_representation_context_keys: - if ( - key in repre_context or - key not in anatomy_data - ): - continue - - repre_context[key] = anatomy_data[key] + value = anatomy_data.get(key) + if value is not None: + repre_context[key] = value # Prepare new repre repre = copy.deepcopy(repre_info["representation"]) @@ -454,7 +450,6 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ) if bulk_writes: - project_name = legacy_io.Session["AVALON_PROJECT"] legacy_io.database[project_name].bulk_write( bulk_writes ) @@ -517,11 +512,10 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(template_data) # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." - ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled[template_key]["path"] # Directory diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index fd50858a91..8ae0dd2d60 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -39,9 +39,8 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - project_name = legacy_io.Session["AVALON_PROJECT"] - anatomy = instance.context.data["anatomy"] + project_name = anatomy.project_name if "publish" not in anatomy.templates: self.log.warning("Anatomy is missing the \"publish\" key!") return diff --git a/openpype/plugins/publish/start_timer.py b/openpype/plugins/publish/start_timer.py deleted file mode 100644 index 112d92bef0..0000000000 --- a/openpype/plugins/publish/start_timer.py +++ /dev/null @@ -1,14 +0,0 @@ -import pyblish.api - -from openpype.lib import change_timer_to_current_context - - -class StartTimer(pyblish.api.ContextPlugin): - label = "Start Timer" - order = pyblish.api.IntegratorOrder + 1 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - change_timer_to_current_context() diff --git a/openpype/plugins/publish/stop_timer.py b/openpype/plugins/publish/stop_timer.py deleted file mode 100644 index 414e43a3c4..0000000000 --- a/openpype/plugins/publish/stop_timer.py +++ /dev/null @@ -1,17 +0,0 @@ -import os -import requests - -import pyblish.api - - -class StopTimer(pyblish.api.ContextPlugin): - label = "Stop Timer" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) diff --git a/openpype/plugins/publish/validate_asset_docs.py b/openpype/plugins/publish/validate_asset_docs.py index bc1f9b9e6c..9a1ca5b8de 100644 --- a/openpype/plugins/publish/validate_asset_docs.py +++ 
b/openpype/plugins/publish/validate_asset_docs.py @@ -24,6 +24,10 @@ class ValidateAssetDocs(pyblish.api.InstancePlugin): if instance.data.get("assetEntity"): self.log.info("Instance has set asset document in its data.") + elif instance.data.get("newAssetPublishing"): + # skip if it is editorial + self.log.info("Editorial instance doesn't need to be checked...") + else: raise PublishValidationError(( "Instance \"{}\" doesn't have asset document " diff --git a/openpype/plugins/publish/validate_containers.py b/openpype/plugins/publish/validate_containers.py index ce91bd3396..79759450e1 100644 --- a/openpype/plugins/publish/validate_containers.py +++ b/openpype/plugins/publish/validate_containers.py @@ -1,5 +1,9 @@ import pyblish.api -import openpype.lib +from openpype.pipeline.load import any_outdated_containers +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) class ShowInventory(pyblish.api.Action): @@ -14,15 +18,21 @@ class ShowInventory(pyblish.api.Action): host_tools.show_scene_inventory() -class ValidateContainers(pyblish.api.ContextPlugin): +class ValidateContainers(OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin): + """Containers must be updated to the latest version on publish.""" label = "Validate Containers" order = pyblish.api.ValidatorOrder - hosts = ["maya", "houdini", "nuke", "harmony", "photoshop"] + hosts = ["maya", "houdini", "nuke", "harmony", "photoshop", "aftereffects"] optional = True actions = [ShowInventory] def process(self, context): - if openpype.lib.any_outdated(): - raise ValueError("There are outdated containers in the scene.") + if not self.is_active(context.data): + return + + if any_outdated_containers(): + msg = "There are outdated containers in the scene." + raise PublishXmlValidationError(self, msg) diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index 702e87b58d..694788c414 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -19,7 +19,8 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): "hiero", "standalonepublisher", "resolve", - "flame" + "flame", + "traypublisher" ] def process(self, context): diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index 124eacbe39..a447aa916b 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -15,6 +15,7 @@ from openpype.lib.remote_publish import ( fail_batch, find_variant_key, get_task_data, + get_timeout, IN_PROGRESS_STATUS ) @@ -170,7 +171,7 @@ class PypeCommands: log.info("Publish finished.") @staticmethod - def remotepublishfromapp(project, batch_path, host_name, + def remotepublishfromapp(project_name, batch_path, host_name, user_email, targets=None): """Opens installed variant of 'host' and run remote publish there. @@ -189,8 +190,8 @@ class PypeCommands: Runs publish process as user would, in automatic fashion. Args: - project (str): project to publish (only single context is expected - per call of remotepublish + project_name (str): project to publish (only single context is + expected per call of remotepublish) batch_path (str): Path batch folder. Contains subfolders with resources (workfile, another subfolder 'renders' etc.)
host_name (str): 'photoshop' @@ -222,10 +223,17 @@ batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS})) if len(batches_in_progress) > 1: - fail_batch(_id, batches_in_progress, dbcon) + running_batches = [str(batch["_id"]) + for batch in batches_in_progress + if batch["_id"] != _id] + msg = "There are still running batches: {}\n". \ format("\n".join(running_batches)) + msg += "Ask an admin to check them and reprocess the current batch" + fail_batch(_id, dbcon, msg) print("Another batch running, probably stuck, ask admin for help") - asset, task_name, _ = get_batch_asset_task_info(task_data["context"]) + asset_name, task_name, task_type = get_batch_asset_task_info( + task_data["context"]) application_manager = ApplicationManager() found_variant_key = find_variant_key(application_manager, host_name) @@ -233,8 +241,8 @@ # must have for proper launch of app env = get_app_environments_for_context( - project, - asset, + project_name, + asset_name, task_name, app_name ) @@ -262,15 +270,22 @@ data = { "last_workfile_path": workfile_path, "start_last_workfile": True, - "project_name": project, - "asset_name": asset, + "project_name": project_name, + "asset_name": asset_name, "task_name": task_name } launched_app = application_manager.launch(app_name, **data) + timeout = get_timeout(project_name, host_name, task_type) + + time_start = time.time() while launched_app.poll() is None: time.sleep(0.5) + if time.time() - time_start > timeout: + launched_app.terminate() + msg = "Timeout reached" + fail_batch(_id, dbcon, msg) @staticmethod def remotepublish(project, batch_path, user_email, targets=None): diff --git a/openpype/resources/app_icons/shotgrid.png b/openpype/resources/app_icons/shotgrid.png new file mode 100644 index 0000000000..6d0cc047f9 Binary files /dev/null and b/openpype/resources/app_icons/shotgrid.png differ diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 245fc665f0..fc22f060a2 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -3,6 +3,8 @@ import re import sys import logging +from openpype.client import get_asset_by_name, get_versions + # Pipeline imports from openpype.hosts.fusion import api import openpype.hosts.fusion.api.lib as fusion_lib @@ -15,13 +17,10 @@ from openpype.pipeline import ( legacy_io, ) -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Update Slap Comp") -self = sys.modules[__name__] -self._project = None - def _format_version_folder(folder): """Format a version folder based on the filepath @@ -131,8 +130,8 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) - versions = list(versions) + project_name = legacy_io.active_project() + versions = list(get_versions(project_name, version_ids=version_ids)) start = min(v["data"]["frameStart"] for v in versions) end = max(v["data"]["frameEnd"] for v in versions) @@ -162,15 +161,10 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = legacy_io.find_one({"type": "asset", "name": asset_name}) + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find
'%s' in the database" % asset_name - # Get current project - self._project = legacy_io.find_one({ - "type": "project", - "name": legacy_io.Session["AVALON_PROJECT"] - }) - # Go to comp if not filepath: current_comp = api.get_current_comp() diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 70cda68cb4..3e86581a03 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -301,7 +301,9 @@ "traypublisher" ], "families": [ - "plate" + "plate", + "review", + "audio" ], "task_types": [], "tasks": [], @@ -447,6 +449,9 @@ "enabled": false, "ftrack_custom_attributes": {} }, + "IntegrateFtrackComponentOverwrite": { + "enabled": true + }, "IntegrateFtrackInstance": { "family_mapping": { "camera": "cam", diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index c96acbff6d..ac0f161cf2 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -31,6 +31,37 @@ } ] }, + "RenderSettings": { + "apply_render_settings": true, + "default_render_image_folder": "", + "aov_separator": "underscore", + "reset_current_frame": false, + "arnold_renderer": { + "image_prefix": "maya///_", + "image_format": "exr", + "multilayer_exr": true, + "tiled": true, + "aov_list": [], + "additional_options": [] + }, + "vray_renderer": { + "image_prefix": "maya///", + "engine": "1", + "image_format": "png", + "aov_list": [], + "additional_options": [] + }, + "redshift_renderer": { + "image_prefix": "maya///", + "primary_gi_engine": "0", + "secondary_gi_engine": "0", + "image_format": "iff", + "multilayer_exr": true, + "force_combine": true, + "aov_list": [], + "additional_options": [] + } + }, "create": { "CreateLook": { "enabled": true, @@ -43,9 +74,7 @@ "enabled": true, "defaults": [ "Main" - ], - "aov_separator": "underscore", - "default_render_image_folder": "renders" + ] }, "CreateUnrealStaticMesh": { "enabled": true, @@ -70,6 +99,20 @@ "enabled": true, "publish_mip_map": true }, + "CreateAnimation": { + "enabled": true, + "write_color_sets": false, + "defaults": [ + "Main" + ] + }, + "CreatePointCache": { + "enabled": true, + "write_color_sets": false, + "defaults": [ + "Main" + ] + }, "CreateMultiverseUsd": { "enabled": true, "defaults": [ @@ -88,12 +131,6 @@ "Main" ] }, - "CreateAnimation": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateAss": { "enabled": true, "defaults": [ @@ -132,12 +169,6 @@ "Sculpt" ] }, - "CreatePointCache": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateRenderSetup": { "enabled": true, "defaults": [ diff --git a/openpype/settings/defaults/project_settings/shotgrid.json b/openpype/settings/defaults/project_settings/shotgrid.json new file mode 100644 index 0000000000..83b6f69074 --- /dev/null +++ b/openpype/settings/defaults/project_settings/shotgrid.json @@ -0,0 +1,22 @@ +{ + "shotgrid_project_id": 0, + "shotgrid_server": "", + "event": { + "enabled": false + }, + "fields": { + "asset": { + "type": "sg_asset_type" + }, + "sequence": { + "episode_link": "episode" + }, + "shot": { + "episode_link": "sg_episode", + "sequence_link": "sg_sequence" + }, + "task": { + "step": "step" + } + } +} diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json index 8bf3e3b306..5db2a79772 100644 --- 
a/openpype/settings/defaults/project_settings/traypublisher.json +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -236,9 +236,70 @@ "extensions": [] } ], + "editorial_creators": { + "editorial_simple": { + "default_variants": [ + "Main" + ], + "clip_name_tokenizer": { + "_sequence_": "(sc\\d{3})", + "_shot_": "(sh\\d{3})" + }, + "shot_rename": { + "enabled": true, + "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}" + }, + "shot_hierarchy": { + "enabled": true, + "parents_path": "{project}/{folder}/{sequence}", + "parents": [ + { + "type": "Project", + "name": "project", + "value": "{project[name]}" + }, + { + "type": "Folder", + "name": "folder", + "value": "shots" + }, + { + "type": "Sequence", + "name": "sequence", + "value": "{_sequence_}" + } + ] + }, + "shot_add_tasks": {}, + "family_presets": [ + { + "family": "review", + "variant": "Reference", + "review": true, + "output_file_type": ".mp4" + }, + { + "family": "plate", + "variant": "", + "review": false, + "output_file_type": ".mov" + }, + { + "family": "audio", + "variant": "", + "review": false, + "output_file_type": ".wav" + } + ] + } + }, "BatchMovieCreator": { - "default_variants": ["Main"], - "default_tasks": ["Compositing"], + "default_variants": [ + "Main" + ], + "default_tasks": [ + "Compositing" + ], "extensions": [ ".mov" ] diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/openpype/settings/defaults/project_settings/webpublisher.json index 77168c25e6..cba472514e 100644 --- a/openpype/settings/defaults/project_settings/webpublisher.json +++ b/openpype/settings/defaults/project_settings/webpublisher.json @@ -1,4 +1,13 @@ { + "timeout_profiles": [ + { + "hosts": [ + "photoshop" + ], + "task_types": [], + "timeout": 600 + } + ], "publish": { "CollectPublishedFiles": { "task_type_to_family": { diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index 8cd4114cb0..c84d23d3fc 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -26,13 +26,14 @@ "linux": [] }, "intent": { + "allow_empty_intent": true, + "empty_intent_label": "", "items": { - "-": "-", "wip": "WIP", "final": "Final", "test": "Test" }, - "default": "-" + "default": "" }, "custom_attributes": { "show": { @@ -135,6 +136,13 @@ "enabled": false, "server": "" }, + "shotgrid": { + "enabled": false, + "leecher_manager_url": "http://127.0.0.1:3000", + "leecher_backend_url": "http://127.0.0.1:8090", + "filter_projects_by_login": true, + "shotgrid_settings": {} + }, "timers_manager": { "enabled": true, "auto_stop": true, diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py index a173e2454f..b2cb2204f4 100644 --- a/openpype/settings/entities/__init__.py +++ b/openpype/settings/entities/__init__.py @@ -107,6 +107,7 @@ from .enum_entity import ( TaskTypeEnumEntity, DeadlineUrlEnumEntity, AnatomyTemplatesEnumEntity, + ShotgridUrlEnumEntity ) from .list_entity import ListEntity @@ -171,6 +172,7 @@ __all__ = ( "ToolsEnumEntity", "TaskTypeEnumEntity", "DeadlineUrlEnumEntity", + "ShotgridUrlEnumEntity", "AnatomyTemplatesEnumEntity", "ListEntity", diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py index 03998677ce..defe4aa1f0 100644 --- a/openpype/settings/entities/enum_entity.py +++ b/openpype/settings/entities/enum_entity.py @@ -1,10 +1,7 @@ import copy from .input_entities 
import InputEntity from .exceptions import EntitySchemaError -from .lib import ( - NOT_SET, - STRING_TYPE -) +from .lib import NOT_SET, STRING_TYPE class BaseEnumEntity(InputEntity): @@ -26,7 +23,7 @@ class BaseEnumEntity(InputEntity): for item in self.enum_items: key = tuple(item.keys())[0] if key in enum_keys: - reason = "Key \"{}\" is more than once in enum items.".format( + reason = 'Key "{}" is more than once in enum items.'.format( key ) raise EntitySchemaError(self, reason) @@ -34,7 +31,7 @@ class BaseEnumEntity(InputEntity): enum_keys.add(key) if not isinstance(key, STRING_TYPE): - reason = "Key \"{}\" has invalid type {}, expected {}.".format( + reason = 'Key "{}" has invalid type {}, expected {}.'.format( key, type(key), STRING_TYPE ) raise EntitySchemaError(self, reason) @@ -59,7 +56,7 @@ class BaseEnumEntity(InputEntity): for item in check_values: if item not in self.valid_keys: raise ValueError( - "{} Invalid value \"{}\". Expected one of: {}".format( + '{} Invalid value "{}". Expected one of: {}'.format( self.path, item, self.valid_keys ) ) @@ -84,7 +81,7 @@ class EnumEntity(BaseEnumEntity): self.valid_keys = set(all_keys) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) value_on_not_set = [] if enum_default: if not isinstance(enum_default, list): @@ -109,7 +106,7 @@ class EnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -152,6 +149,7 @@ class HostsEnumEntity(BaseEnumEntity): Host name is not the same as application name. Host name defines implementation instead of application name. """ + schema_types = ["hosts-enum"] all_host_names = [ "aftereffects", @@ -211,7 +209,7 @@ class HostsEnumEntity(BaseEnumEntity): self.valid_keys = valid_keys if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: for key in valid_keys: @@ -219,7 +217,7 @@ class HostsEnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -227,14 +225,10 @@ class HostsEnumEntity(BaseEnumEntity): def schema_validations(self): if self.hosts_filter: enum_len = len(self.enum_items) - if ( - enum_len == 0 - or (enum_len == 1 and self.use_empty_value) - ): - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) + if enum_len == 0 or (enum_len == 1 and self.use_empty_value): + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) reason = ( "All host names were removed after applying" " host filters. 
{}" @@ -247,24 +241,25 @@ class HostsEnumEntity(BaseEnumEntity): invalid_filters.add(item) if invalid_filters: - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) - expected_hosts = ", ".join([ - '"{}"'.format(item) - for item in self.all_host_names - ]) - self.log.warning(( - "Host filters containt invalid host names:" - " \"{}\" Expected values are {}" - ).format(joined_filters, expected_hosts)) + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) + expected_hosts = ", ".join( + ['"{}"'.format(item) for item in self.all_host_names] + ) + self.log.warning( + ( + "Host filters containt invalid host names:" + ' "{}" Expected values are {}' + ).format(joined_filters, expected_hosts) + ) super(HostsEnumEntity, self).schema_validations() class AppsEnumEntity(BaseEnumEntity): """Enum of applications for project anatomy attributes.""" + schema_types = ["apps-enum"] def _item_initialization(self): @@ -272,7 +267,7 @@ class AppsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -353,7 +348,7 @@ class ToolsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -410,10 +405,10 @@ class TaskTypeEnumEntity(BaseEnumEntity): def _item_initialization(self): self.multiselection = self.schema_data.get("multiselection", True) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) self.value_on_not_set = "" self.enum_items = [] @@ -508,7 +503,8 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): enum_items_list = [] for server_name, url_entity in deadline_urls_entity.items(): enum_items_list.append( - {server_name: "{}: {}".format(server_name, url_entity.value)}) + {server_name: "{}: {}".format(server_name, url_entity.value)} + ) valid_keys.add(server_name) return enum_items_list, valid_keys @@ -531,6 +527,50 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): self._current_value = tuple(self.valid_keys)[0] +class ShotgridUrlEnumEntity(BaseEnumEntity): + schema_types = ["shotgrid_url-enum"] + + def _item_initialization(self): + self.multiselection = False + + self.enum_items = [] + self.valid_keys = set() + + self.valid_value_types = (STRING_TYPE,) + self.value_on_not_set = "" + + # GUI attribute + self.placeholder = self.schema_data.get("placeholder") + + def _get_enum_values(self): + shotgrid_settings = self.get_entity_from_path( + "system_settings/modules/shotgrid/shotgrid_settings" + ) + + valid_keys = set() + enum_items_list = [] + for server_name, settings in shotgrid_settings.items(): + enum_items_list.append( + { + server_name: "{}: {}".format( + server_name, settings["shotgrid_url"].value + ) + } + ) + valid_keys.add(server_name) + return enum_items_list, valid_keys + + def set_override_state(self, *args, **kwargs): + super(ShotgridUrlEnumEntity, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + if not self.valid_keys: + self._current_value = "" + + elif self._current_value not in self.valid_keys: + self._current_value = tuple(self.valid_keys)[0] + + class 
AnatomyTemplatesEnumEntity(BaseEnumEntity): schema_types = ["anatomy-templates-enum"] diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index 6c07209de3..80b1baad1b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -62,6 +62,10 @@ "type": "schema", "name": "schema_project_ftrack" }, + { + "type": "schema", + "name": "schema_project_shotgrid" + }, { "type": "schema", "name": "schema_project_kitsu" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index e008fd85ee..c06bec0f58 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -930,6 +930,21 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackComponentOverwrite", + "label": "IntegrateFtrackComponentOverwrite", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "key": "IntegrateFtrackInstance", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json index 40e98b0333..cb380194a7 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json @@ -57,6 +57,10 @@ "type": "schema", "name": "schema_scriptsmenu" }, + { + "type": "schema", + "name": "schema_maya_render_settings" + }, { "type": "schema", "name": "schema_maya_create" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json new file mode 100644 index 0000000000..4faeca89f3 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json @@ -0,0 +1,98 @@ +{ + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "is_file": true, + "children": [ + { + "type": "number", + "key": "shotgrid_project_id", + "label": "Shotgrid project id" + }, + { + "type": "shotgrid_url-enum", + "key": "shotgrid_server", + "label": "Shotgrid Server" + }, + { + "type": "dict", + "key": "event", + "label": "Event Handler", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, + { + "type": "dict", + "key": "fields", + "label": "Fields Template", + "collapsible": true, + "children": [ + { + "type": "dict", + "key": "asset", + "label": "Asset", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "type", + "label": "Asset Type" + } + ] + }, + { + "type": "dict", + "key": "sequence", + "label": "Sequence", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + } + ] + }, + { + "type": "dict", + "key": "shot", + "label": "Shot", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + }, + { + "type": "text", + "key": "sequence_link", + "label": "Sequence link" + } + ] + }, + { + "type": "dict", + "key": "task", + 
"label": "Task", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "step", + "label": "Step link" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json index 8f0f864dc2..7c61aeed50 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -84,7 +84,197 @@ ] } }, - { + { + "type": "dict", + "collapsible": true, + "key": "editorial_creators", + "label": "Editorial creator plugins", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "editorial_simple", + "label": "Editorial simple creator", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "splitter" + }, + { + "type": "collapsible-wrap", + "label": "Shot metadata creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "clip_name_tokenizer", + "label": "Clip name tokenizer", + "type": "dict-modifiable", + "highlight_content": true, + "tooltip": "Using Regex expression to create tokens. \nThose can be used later in \"Shot rename\" creator \nor \"Shot hierarchy\". \n\nTokens should be decorated with \"_\" on each side", + "object_type": { + "type": "text" + } + }, + { + "type": "dict", + "key": "shot_rename", + "label": "Shot rename", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "shot_rename_template", + "label": "Shot rename template", + "tooltip":"Template only supports Anatomy keys and Tokens \nfrom \"Clip name tokenizer\"" + } + ] + }, + { + "type": "dict", + "key": "shot_hierarchy", + "label": "Shot hierarchy", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "parents_path", + "label": "Parents path template", + "tooltip": "Using keys from \"Token to parent convertor\" or tokens directly" + }, + { + "key": "parents", + "label": "Token to parent convertor", + "type": "list", + "highlight_content": true, + "tooltip": "The left side is key to be used in template. 
\nThe right is a value built from Tokens coming from \n\"Clip name tokenizer\"", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "type", + "label": "Parent type", + "enum_items": [ + {"Project": "Project"}, + {"Folder": "Folder"}, + {"Episode": "Episode"}, + {"Sequence": "Sequence"} + ] + }, + { + "type": "text", + "key": "name", + "label": "Parent token name", + "tooltip": "Unique name used in \"Parent path template\"" + }, + { + "type": "text", + "key": "value", + "label": "Parent name value", + "tooltip": "Template where any text, Anatomy keys and Tokens could be used" + } + ] + } + } + ] + }, + { + "key": "shot_add_tasks", + "label": "Add tasks to shot", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "task-types-enum", + "key": "type", + "label": "Task type", + "multiselection": false + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Shot's subset creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "family_presets", + "label": "Family presets", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "family", + "label": "Family", + "enum_items": [ + {"review": "review"}, + {"plate": "plate"}, + {"audio": "audio"} + ] + }, + { + "type": "text", + "key": "variant", + "label": "Variant", + "placeholder": "< Inherited >" + }, + { + "type": "boolean", + "key": "review", + "label": "Review", + "default": true + }, + { + "type": "enum", + "key": "output_file_type", + "label": "Integrating file type", + "enum_items": [ + {".mp4": "MP4"}, + {".mov": "MOV"}, + {".wav": "WAV"} + ] + } + ] + } + } + ] + } + ] + } + ] + }, + { "type": "dict", "collapsible": true, "key": "BatchMovieCreator", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json index b76a0fa844..2ef7a05b21 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json @@ -5,6 +5,38 @@ "label": "Web Publisher", "is_file": true, "children": [ + { + "type": "list", + "collapsible": true, + "use_label_wrap": true, + "key": "timeout_profiles", + "label": "Timeout profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum", + "multiselection": true + }, + { + "type": "separator" + }, + { + "type": "number", + "key": "timeout", + "label": "Timeout (sec)" + } + ] + } + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 09287a8b50..431add28df 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -29,42 +29,9 @@ } ] }, - { - "type": "dict", - "collapsible": true, - "key": "CreateRender", - "label": "Create Render", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "list", - "key": "defaults", - "label": "Default Subsets",
- "object_type": "text" - }, - { - "key": "aov_separator", - "label": "AOV Separator character", - "type": "enum", - "multiselection": false, - "default": "underscore", - "enum_items": [ - {"dash": "- (dash)"}, - {"underscore": "_ (underscore)"}, - {"dot": ". (dot)"} - ] - }, - { - "type": "text", - "key": "default_render_image_folder", - "label": "Default render image folder" - } - ] + { + "type": "schema", + "name": "schema_maya_create_render" }, { "type": "dict", @@ -143,6 +110,57 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CreateAnimation", + "label": "Create Animation", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreatePointCache", + "label": "Create Point Cache", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { "type": "schema_template", "name": "template_create_plugin", @@ -159,10 +177,6 @@ "key": "CreateMultiverseUsdOver", "label": "Create Multiverse USD Override" }, - { - "key": "CreateAnimation", - "label": "Create Animation" - }, { "key": "CreateAss", "label": "Create Ass" @@ -187,10 +201,6 @@ "key": "CreateModel", "label": "Create Model" }, - { - "key": "CreatePointCache", - "label": "Create Cache" - }, { "key": "CreateRenderSetup", "label": "Create Render Setup" diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json new file mode 100644 index 0000000000..68ad7ad63d --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json @@ -0,0 +1,20 @@ +{ + "type": "dict", + "collapsible": true, + "key": "CreateRender", + "label": "Create Render", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json new file mode 100644 index 0000000000..af197604f8 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json @@ -0,0 +1,418 @@ +{ + "type": "dict", + "collapsible": true, + "key": "RenderSettings", + "label": "Render Settings", + "children": [ + { + "type": "boolean", + "key": "apply_render_settings", + "label": "Apply Render Settings on creation" + }, + { + "type": "text", + "key": "default_render_image_folder", + "label": "Default render image folder" + }, + { + "key": "aov_separator", + "label": "AOV Separator character", + "type": "enum", + "multiselection": false, + "default": "underscore", + "enum_items": [ + {"dash": "- (dash)"}, + {"underscore": "_ (underscore)"}, + {"dot": ". 
(dot)"} + ] + }, + { + "key": "reset_current_frame", + "label": "Reset Current Frame", + "type": "boolean" + }, + { + "type": "dict", + "collapsible": true, + "key": "arnold_renderer", + "label": "Arnold Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"jpeg": "jpeg"}, + {"png": "png"}, + {"deepexr": "deep exr"}, + {"tif": "tif"}, + {"exr": "exr"}, + {"maya": "maya"}, + {"mtoa_shaders": "mtoa_shaders"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "tiled", + "label": "Tiled (tif, exr)", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"ID": "ID"}, + {"N": "N"}, + {"P": "P"}, + {"Pref": "Pref"}, + {"RGBA": "RGBA"}, + {"Z": "Z"}, + {"albedo": "albedo"}, + {"background": "background"}, + {"coat": "coat"}, + {"coat_albedo": "coat_albedo"}, + {"coat_direct": "coat_direct"}, + {"coat_indirect": "coat_indirect"}, + {"cputime": "cputime"}, + {"crypto_asset": "crypto_asset"}, + {"crypto_material": "cypto_material"}, + {"crypto_object": "crypto_object"}, + {"diffuse": "diffuse"}, + {"diffuse_albedo": "diffuse_albedo"}, + {"diffuse_direct": "diffuse_direct"}, + {"diffuse_indirect": "diffuse_indirect"}, + {"direct": "direct"}, + {"emission": "emission"}, + {"highlight": "highlight"}, + {"indirect": "indirect"}, + {"motionvector": "motionvector"}, + {"opacity": "opacity"}, + {"raycount": "raycount"}, + {"rim_light": "rim_light"}, + {"shadow": "shadow"}, + {"shadow_diff": "shadow_diff"}, + {"shadow_mask": "shadow_mask"}, + {"shadow_matte": "shadow_matte"}, + {"sheen": "sheen"}, + {"sheen_albedo": "sheen_albedo"}, + {"sheen_direct": "sheen_direct"}, + {"sheen_indirect": "sheen_indirect"}, + {"specular": "specular"}, + {"specular_albedo": "specular_albedo"}, + {"specular_direct": "specular_direct"}, + {"specular_indirect": "specular_indirect"}, + {"sss": "sss"}, + {"sss_albedo": "sss_albedo"}, + {"sss_direct": "sss_direct"}, + {"sss_indirect": "sss_indirect"}, + {"transmission": "transmission"}, + {"transmission_albedo": "transmission_albedo"}, + {"transmission_direct": "transmission_direct"}, + {"transmission_indirect": "transmission_indirect"}, + {"volume": "volume"}, + {"volume_Z": "volume_Z"}, + {"volume_albedo": "volume_albedo"}, + {"volume_direct": "volume_direct"}, + {"volume_indirect": "volume_indirect"}, + {"volume_opacity": "volume_opacity"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like AASamples" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "vray_renderer", + "label": "V-Ray Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "engine", + "label": "Production Engine", + "type": "enum", + "multiselection": false, + "defaults": "1", + "enum_items": [ + {"1": "V-Ray"}, + {"2": "V-Ray GPU"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": 
false, + "defaults": "exr", + "enum_items": [ + {"png": "png"}, + {"jpg": "jpg"}, + {"vrimg": "vrimg"}, + {"hdr": "hdr"}, + {"exr": "exr"}, + {"exr (multichannel)": "exr (multichannel)"}, + {"exr (deep)": "exr (deep)"}, + {"tga": "tga"}, + {"bmp": "bmp"}, + {"sgi": "sgi"} + ] + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"atmosphereChannel": "atmosphere"}, + {"backgroundChannel": "background"}, + {"bumpNormalsChannel": "bumpnormals"}, + {"causticsChannel": "caustics"}, + {"coatFilterChannel": "coat_filter"}, + {"coatGlossinessChannel": "coatGloss"}, + {"coatReflectionChannel": "coat_reflection"}, + {"vrayCoatChannel": "coat_specular"}, + {"CoverageChannel": "coverage"}, + {"cryptomatteChannel": "cryptomatte"}, + {"customColor": "custom_color"}, + {"drBucketChannel": "DR"}, + {"denoiserChannel": "denoiser"}, + {"diffuseChannel": "diffuse"}, + {"ExtraTexElement": "extraTex"}, + {"giChannel": "GI"}, + {"LightMixElement": "None"}, + {"lightingChannel": "lighting"}, + {"LightingAnalysisChannel": "LightingAnalysis"}, + {"materialIDChannel": "materialID"}, + {"MaterialSelectElement": "materialSelect"}, + {"matteShadowChannel": "matteShadow"}, + {"MultiMatteElement": "multimatte"}, + {"multimatteIDChannel": "multimatteID"}, + {"normalsChannel": "normals"}, + {"nodeIDChannel": "objectId"}, + {"objectSelectChannel": "objectSelect"}, + {"rawCoatFilterChannel": "raw_coat_filter"}, + {"rawCoatReflectionChannel": "raw_coat_reflection"}, + {"rawDiffuseFilterChannel": "rawDiffuseFilter"}, + {"rawGiChannel": "rawGI"}, + {"rawLightChannel": "rawLight"}, + {"rawReflectionChannel": "rawReflection"}, + {"rawReflectionFilterChannel": "rawReflectionFilter"}, + {"rawRefractionChannel": "rawRefraction"}, + {"rawRefractionFilterChannel": "rawRefractionFilter"}, + {"rawShadowChannel": "rawShadow"}, + {"rawSheenFilterChannel": "raw_sheen_filter"}, + {"rawSheenReflectionChannel": "raw_sheen_reflection"}, + {"rawTotalLightChannel": "rawTotalLight"}, + {"reflectIORChannel": "reflIOR"}, + {"reflectChannel": "reflect"}, + {"reflectionFilterChannel": "reflectionFilter"}, + {"reflectGlossinessChannel": "reflGloss"}, + {"refractChannel": "refract"}, + {"refractionFilterChannel": "refractionFilter"}, + {"refractGlossinessChannel": "refrGloss"}, + {"renderIDChannel": "renderId"}, + {"FastSSS2Channel": "SSS"}, + {"sampleRateChannel": "sampleRate"}, + {"samplerInfo": "samplerInfo"}, + {"selfIllumChannel": "selfIllum"}, + {"shadowChannel": "shadow"}, + {"sheenFilterChannel": "sheen_filter"}, + {"sheenGlossinessChannel": "sheenGloss"}, + {"sheenReflectionChannel": "sheen_reflection"}, + {"vraySheenChannel": "sheen_specular"}, + {"specularChannel": "specular"}, + {"Toon": "Toon"}, + {"toonLightingChannel": "toonLighting"}, + {"toonSpecularChannel": "toonSpecular"}, + {"totalLightChannel": "totalLight"}, + {"unclampedColorChannel": "unclampedColor"}, + {"VRScansPaintMaskChannel": "VRScansPaintMask"}, + {"VRScansZoneMaskChannel": "VRScansZoneMask"}, + {"velocityChannel": "velocity"}, + {"zdepthChannel": "zDepth"}, + {"LightSelectElement": "lightselect"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like aaFilterSize" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + 
"collapsible": true, + "key": "redshift_renderer", + "label": "Redshift Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "primary_gi_engine", + "label": "Primary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "secondary_gi_engine", + "label": "Secondary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"iff": "Maya IFF"}, + {"exr": "OpenEXR"}, + {"tif": "TIFF"}, + {"png": "PNG"}, + {"tga": "Targa"}, + {"jpg": "JPEG"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "force_combine", + "label": "Force combine beauty and AOVs", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< none >"}, + {"AO": "Ambient Occlusion"}, + {"Background": "Background"}, + {"Beauty": "Beauty"}, + {"BumpNormals": "Bump Normals"}, + {"Caustics": "Caustics"}, + {"CausticsRaw": "Caustics Raw"}, + {"Cryptomatte": "Cryptomatte"}, + {"Custom": "Custom"}, + {"Z": "Depth"}, + {"DiffuseFilter": "Diffuse Filter"}, + {"DiffuseLighting": "Diffuse Lighting"}, + {"DiffuseLightingRaw": "Diffuse Lighting Raw"}, + {"Emission": "Emission"}, + {"GI": "Global Illumination"}, + {"GIRaw": "Global Illumination Raw"}, + {"Matte": "Matte"}, + {"MotionVectors": "Ambient Occlusion"}, + {"N": "Normals"}, + {"ID": "ObjectID"}, + {"ObjectBumpNormal": "Object-Space Bump Normals"}, + {"ObjectPosition": "Object-Space Positions"}, + {"PuzzleMatte": "Puzzle Matte"}, + {"Reflections": "Reflections"}, + {"ReflectionsFilter": "Reflections Filter"}, + {"ReflectionsRaw": "Reflections Raw"}, + {"Refractions": "Refractions"}, + {"RefractionsFilter": "Refractions Filter"}, + {"RefractionsRaw": "Refractions Filter"}, + {"Shadows": "Shadows"}, + {"SpecularLighting": "Specular Lighting"}, + {"SSS": "Sub Surface Scatter"}, + {"SSSRaw": "Sub Surface Scatter Raw"}, + {"TotalDiffuseLightingRaw": "Total Diffuse Lighting Raw"}, + {"TotalTransLightingRaw": "Total Translucency Filter"}, + {"TransTint": "Translucency Filter"}, + {"TransGIRaw": "Translucency Lighting Raw"}, + {"VolumeFogEmission": "Volume Fog Emission"}, + {"VolumeFogTint": "Volume Fog Tint"}, + {"VolumeLighting": "Volume Lighting"}, + {"P": "World Position"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like reflectionMaxTraceDepth" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json index 484fbf9d07..a4b28f47bc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json +++ 
b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json @@ -13,6 +13,9 @@ { "ftrackreview": "Add review to Ftrack" }, + { + "shotgridreview": "Add review to Shotgrid" + }, { "delete": "Delete output" }, diff --git a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json index 654ddf2938..7c5774415c 100644 --- a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json +++ b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json @@ -50,8 +50,15 @@ "is_group": true, "children": [ { - "type": "label", - "label": "Intent" + "type": "boolean", + "key": "allow_empty_intent", + "label": "Allow empty intent" + }, + { + "type": "text", + "key": "empty_intent_label", + "label": "Empty item label", + "placeholder": "< Not set >" }, { "type": "dict-modifiable", @@ -64,7 +71,8 @@ { "key": "default", "type": "text", - "label": "Default Intent" + "label": "Default Intent", + "placeholder": "< First available >" }, { "type": "separator" diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json index d22b9016a7..952b38040c 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_modules.json +++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json @@ -48,6 +48,60 @@ "type": "schema", "name": "schema_kitsu" }, + { + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "leecher_manager_url", + "label": "Shotgrid Leecher Manager URL" + }, + { + "type": "text", + "key": "leecher_backend_url", + "label": "Shotgrid Leecher Backend URL" + }, + { + "type": "boolean", + "key": "filter_projects_by_login", + "label": "Filter projects by SG login" + }, + { + "type": "dict-modifiable", + "key": "shotgrid_settings", + "label": "Shotgrid Servers", + "object_type": { + "type": "dict", + "children": [ + { + "key": "shotgrid_url", + "label": "Server URL", + "type": "text" + }, + { + "key": "shotgrid_script_name", + "label": "Script Name", + "type": "text" + }, + { + "key": "shotgrid_script_key", + "label": "Script api key", + "type": "text" + } + ] + } + } + ] + }, { "type": "dict", "key": "timers_manager", diff --git a/openpype/settings/handlers.py b/openpype/settings/handlers.py index c99fc6080b..15ae2351fd 100644 --- a/openpype/settings/handlers.py +++ b/openpype/settings/handlers.py @@ -7,6 +7,8 @@ from abc import ABCMeta, abstractmethod import six import openpype.version +from openpype.client.mongo import OpenPypeMongoConnection +from openpype.client.entities import get_project_connection, get_project from .constants import ( GLOBAL_SETTINGS_KEY, @@ -337,9 +339,6 @@ class MongoSettingsHandler(SettingsHandler): def __init__(self): # Get mongo connection - from openpype.lib import OpenPypeMongoConnection - from openpype.pipeline import AvalonMongoDB - settings_collection = OpenPypeMongoConnection.get_mongo_client() self._anatomy_keys = None @@ -362,7 +361,6 @@ class MongoSettingsHandler(SettingsHandler): self.collection_name = collection_name self.collection = settings_collection[database_name][collection_name] - self.avalon_db = AvalonMongoDB() self.system_settings_cache = CacheValues() self.project_settings_cache = 
collections.defaultdict(CacheValues) @@ -607,16 +605,14 @@ class MongoSettingsHandler(SettingsHandler): new_data = data_cache.data_copy() # Prepare avalon project document - collection = self.avalon_db.database[project_name] - project_doc = collection.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) if not project_doc: raise ValueError(( "Project document of project \"{}\" does not exists." " Create project first." ).format(project_name)) + collection = get_project_connection(project_name) # Project's data update_dict_data = {} project_doc_data = project_doc.get("data") or {} @@ -1145,8 +1141,7 @@ class MongoSettingsHandler(SettingsHandler): document, version ) else: - collection = self.avalon_db.database[project_name] - project_doc = collection.find_one({"type": "project"}) + project_doc = get_project(project_name) self.project_anatomy_cache[project_name].update_data( self.project_doc_to_anatomy_data(project_doc), self._current_version diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index ccccc76a08..c8952e5a1c 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -22,7 +22,6 @@ def test_backward_compatibility(printer): from openpype.lib import any_outdated from openpype.lib import get_asset from openpype.lib import get_linked_assets - from openpype.lib import get_latest_version from openpype.lib import get_ffprobe_streams from openpype.hosts.fusion.lib import switch_item diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index 13e18b3757..48c038418a 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -434,7 +434,8 @@ class SubsetWidget(QtWidgets.QWidget): # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. - available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) if self.tool_name: available_loaders = lib.remove_tool_name_from_loaders( available_loaders, self.tool_name @@ -1330,7 +1331,8 @@ class RepresentationWidget(QtWidgets.QWidget): selected_side = self._get_selected_side(point_index, rows) # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. 
- available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) filtered_loaders = [] for loader in available_loaders: diff --git a/openpype/tools/publisher/publish_report_viewer/model.py b/openpype/tools/publisher/publish_report_viewer/model.py index bd03376c55..704feeb4bd 100644 --- a/openpype/tools/publisher/publish_report_viewer/model.py +++ b/openpype/tools/publisher/publish_report_viewer/model.py @@ -1,9 +1,9 @@ import uuid -import html from Qt import QtCore, QtGui import pyblish.api +from openpype.tools.utils.lib import html_escape from .constants import ( ITEM_ID_ROLE, ITEM_IS_GROUP_ROLE, @@ -46,7 +46,7 @@ class InstancesModel(QtGui.QStandardItemModel): all_removed = True for instance_item in instance_items: item = QtGui.QStandardItem(instance_item.label) - instance_label = html.escape(instance_item.label) + instance_label = html_escape(instance_item.label) item.setData(instance_label, ITEM_LABEL_ROLE) item.setData(instance_item.errored, ITEM_ERRORED_ROLE) item.setData(instance_item.id, ITEM_ID_ROLE) diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py index bd591138f4..fa391f4ba0 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/openpype/tools/publisher/widgets/card_view_widgets.py @@ -22,13 +22,13 @@ Only one item can be selected at a time. import re import collections -import html from Qt import QtWidgets, QtCore from openpype.widgets.nice_checkbox import NiceCheckbox from openpype.tools.utils import BaseClickableFrame +from openpype.tools.utils.lib import html_escape from .widgets import ( AbstractInstanceView, ContextWarningLabel, @@ -308,7 +308,7 @@ class InstanceCardWidget(CardWidget): self._last_variant = variant self._last_subset_name = subset_name # Make `variant` bold - label = html.escape(self.instance.label) + label = html_escape(self.instance.label) found_parts = set(re.findall(variant, label, re.IGNORECASE)) if found_parts: for part in found_parts: diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index 3e4fd5b72d..6e31ba635b 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -23,12 +23,12 @@ selection can be enabled disabled using checkbox or keyboard key presses: ``` """ import collections -import html from Qt import QtWidgets, QtCore, QtGui from openpype.style import get_objected_colors from openpype.widgets.nice_checkbox import NiceCheckbox +from openpype.tools.utils.lib import html_escape from .widgets import AbstractInstanceView from ..constants import ( INSTANCE_ID_ROLE, @@ -114,7 +114,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): self.instance = instance - instance_label = html.escape(instance.label) + instance_label = html_escape(instance.label) subset_name_label = QtWidgets.QLabel(instance_label, self) subset_name_label.setObjectName("ListViewSubsetName") @@ -181,7 +181,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): # Check subset name label = self.instance.label if label != self._instance_label_widget.text(): - self._instance_label_widget.setText(html.escape(label)) + self._instance_label_widget.setText(html_escape(label)) # Check active state self.set_active(self.instance["active"]) # Check valid states diff --git a/openpype/tools/pyblish_pype/model.py b/openpype/tools/pyblish_pype/model.py index 
2931a379b3..31aa63677e 100644 --- a/openpype/tools/pyblish_pype/model.py +++ b/openpype/tools/pyblish_pype/model.py @@ -86,7 +86,7 @@ class IntentModel(QtGui.QStandardItemModel): First and default value is {"< Not Set >": None} """ - default_item = {"< Not Set >": None} + default_empty_label = "< Not set >" def __init__(self, parent=None): super(IntentModel, self).__init__(parent) @@ -102,27 +102,39 @@ self._item_count = 0 self.default_index = 0 - intents_preset = ( + intent_settings = ( get_system_settings() .get("modules", {}) .get("ftrack", {}) .get("intent", {}) ) - default = intents_preset.get("default") - items = intents_preset.get("items", {}) + items = intent_settings.get("items", {}) if not items: return - for idx, item_value in enumerate(items.keys()): + allow_empty_intent = intent_settings.get("allow_empty_intent", True) + empty_intent_label = ( + intent_settings.get("empty_intent_label") + or self.default_empty_label + ) + listed_items = list(items.items()) + if allow_empty_intent: + listed_items.insert(0, ("", empty_intent_label)) + + default = intent_settings.get("default") + + for idx, item in enumerate(listed_items): + item_value = item[0] if item_value == default: self.default_index = idx break - self.add_items(items) + self._add_items(listed_items) - def add_items(self, items): - for value, label in items.items(): + def _add_items(self, items): + for item in items: + value, label = item new_item = QtGui.QStandardItem() new_item.setData(label, QtCore.Qt.DisplayRole) new_item.setData(value, Roles.IntentItemValue) diff --git a/openpype/tools/pyblish_pype/window.py b/openpype/tools/pyblish_pype/window.py index 78590259bc..e167405325 100644 --- a/openpype/tools/pyblish_pype/window.py +++ b/openpype/tools/pyblish_pype/window.py @@ -523,6 +523,7 @@ class Window(QtWidgets.QDialog): instance_item.setData(enable_value, Roles.IsEnabledRole) def _add_intent_to_context(self): + context_value = None if ( self.intent_model.has_items and "intent" not in self.controller.context.data ): idx = self.intent_model.index(self.intent_box.currentIndex(), 0) intent_value = self.intent_model.data(idx, Roles.IntentItemValue) intent_label = self.intent_model.data(idx, QtCore.Qt.DisplayRole) + if intent_value: + context_value = { + "value": intent_value, + "label": intent_label + } - self.controller.context.data["intent"] = { - "value": intent_value, - "label": intent_label - } + # Unset intent if it is set to an empty value + if context_value is None: + self.controller.context.data.pop("intent", None) + else: + self.controller.context.data["intent"] = context_value def on_instance_toggle(self, index, state=None): """An item is requesting to be toggled""" diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py index 63d181b2d6..e0e43aaba7 100644 --- a/openpype/tools/sceneinventory/view.py +++ b/openpype/tools/sceneinventory/view.py @@ -551,16 +551,16 @@ class SceneInventoryView(QtWidgets.QTreeView): "toggle": selection_model.Toggle, }[options.get("mode", "select")] - for item in iter_model_rows(model, 0): - item = item.data(InventoryModel.ItemRole) + for index in iter_model_rows(model, 0): + item = index.data(InventoryModel.ItemRole) if item.get("isGroupNode"): continue name = item.get("objectName") if name in object_names: - self.scrollTo(item) # Ensure item is visible + self.scrollTo(index) # Ensure item is visible flags = select_mode | selection_model.Rows - 
selection_model.select(item, flags) + selection_model.select(index, flags) object_names.remove(name) diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py index ea1362945f..2169cf8ef1 100644 --- a/openpype/tools/utils/lib.py +++ b/openpype/tools/utils/lib.py @@ -37,6 +37,19 @@ def center_window(window): window.move(geo.topLeft()) +def html_escape(text): + """Basic escape of html syntax symbols in text.""" + + return ( + text + .replace("&", "&amp;") + .replace("<", "&lt;") + .replace(">", "&gt;") + .replace('"', "&quot;") + .replace("'", "&#x27;") + ) + + def set_style_property(widget, property_name, property_value): """Set widget's property that may affect style. diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py index 34692b7102..a4109c511e 100644 --- a/openpype/tools/workfiles/files_widget.py +++ b/openpype/tools/workfiles/files_widget.py @@ -12,7 +12,6 @@ from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils.delegates import PrettyTimeDelegate from openpype.lib import ( emit_event, - get_workfile_template_key, create_workdir_extra_folders, ) from openpype.lib.avalon_context import ( @@ -24,6 +23,8 @@ from openpype.pipeline import ( legacy_io, Anatomy, ) +from openpype.pipeline.workfile import get_workfile_template_key + from .model import ( WorkAreaFilesModel, PublishFilesModel, diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py index b62fd2c889..cded4eb1a5 100644 --- a/openpype/tools/workfiles/save_as_dialog.py +++ b/openpype/tools/workfiles/save_as_dialog.py @@ -5,18 +5,12 @@ import logging from Qt import QtWidgets, QtCore -from openpype.client import ( - get_project, - get_asset_by_name, -) -from openpype.lib import ( - get_last_workfile_with_version, - get_workdir_data, -) from openpype.pipeline import ( registered_host, legacy_io, ) +from openpype.pipeline.workfile import get_last_workfile_with_version +from openpype.pipeline.template_data import get_template_data_with_names from openpype.tools.utils import PlaceholderLineEdit log = logging.getLogger(__name__) @@ -30,16 +24,10 @@ def build_workfile_data(session): asset_name = session["AVALON_ASSET"] task_name = session["AVALON_TASK"] host_name = session["AVALON_APP"] - project_doc = get_project( - project_name, fields=["name", "data.code", "config.tasks"] - ) - asset_doc = get_asset_by_name( - project_name, - asset_name, - fields=["name", "data.tasks", "data.parents"] - ) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data = get_template_data_with_names( + project_name, asset_name, task_name, host_name + ) data.update({ "version": 1, "comment": "", diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py index 0b0d67e589..de42b80d64 100644 --- a/openpype/tools/workfiles/window.py +++ b/openpype/tools/workfiles/window.py @@ -1,18 +1,20 @@ import os import datetime +import copy from Qt import QtCore, QtWidgets, QtGui from openpype.client import ( - get_asset_by_id, get_asset_by_name, get_workfile_info, ) +from openpype.client.operations import ( + OperationsSession, + new_workfile_info_doc, + prepare_workfile_info_update_data, +) from openpype import style from openpype import resources -from openpype.lib import ( - create_workfile_doc, - save_workfile_data_to_doc, -) +from openpype.pipeline import Anatomy from openpype.pipeline import legacy_io from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.tasks_widget import 
TasksWidget @@ -324,10 +326,23 @@ class Window(QtWidgets.QWidget): workfile_doc, data = self.side_panel.get_workfile_data() if not workfile_doc: filepath = self.files_widget._get_selected_filepath() - self._create_workfile_doc(filepath, force=True) - workfile_doc = self._get_current_workfile_doc() + workfile_doc = self._create_workfile_doc(filepath) - save_workfile_data_to_doc(workfile_doc, data, legacy_io) + new_workfile_doc = copy.deepcopy(workfile_doc) + new_workfile_doc["data"] = data + update_data = prepare_workfile_info_update_data( + workfile_doc, new_workfile_doc + ) + if not update_data: + return + + project_name = legacy_io.active_project() + + session = OperationsSession() + session.update_entity( + project_name, "workfile", workfile_doc["_id"], update_data + ) + session.commit() def _get_current_workfile_doc(self, filepath=None): if filepath is None: @@ -343,20 +358,32 @@ project_name, asset_id, task_name, filename ) - def _create_workfile_doc(self, filepath, force=False): - workfile_doc = None - if not force: - workfile_doc = self._get_current_workfile_doc(filepath) + def _create_workfile_doc(self, filepath): + workfile_doc = self._get_current_workfile_doc(filepath) + if workfile_doc: + return workfile_doc - if not workfile_doc: - workdir, filename = os.path.split(filepath) - asset_id = self.assets_widget.get_selected_asset_id() - project_name = legacy_io.active_project() - asset_doc = get_asset_by_id(project_name, asset_id) - task_name = self.tasks_widget.get_selected_task_name() - create_workfile_doc( - asset_doc, task_name, filename, workdir, legacy_io - ) + workdir, filename = os.path.split(filepath) + + project_name = legacy_io.active_project() + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + + anatomy = Anatomy(project_name) + success, rootless_dir = anatomy.find_root_template_from_path(workdir) + filepath = "/".join([ + os.path.normpath(rootless_dir).replace("\\", "/"), + filename + ]) + + workfile_doc = new_workfile_info_doc( + filename, asset_id, task_name, [filepath] + ) + + session = OperationsSession() + session.create_entity(project_name, "workfile", workfile_doc) + session.commit() + return workfile_doc def refresh(self): # Refresh asset widget diff --git a/openpype/vendor/python/common/capture.py b/openpype/vendor/python/common/capture.py index 71b86a5f1a..86c1c60e56 100644 --- a/openpype/vendor/python/common/capture.py +++ b/openpype/vendor/python/common/capture.py @@ -665,7 +665,10 @@ def _applied_camera_options(options, panel): _iteritems = getattr(options, "iteritems", options.items) for opt, value in _iteritems(): - _safe_setAttr(camera + "." + opt, value) + if cmds.getAttr(camera + "." + opt, lock=True): + continue + else: + _safe_setAttr(camera + "." + opt, value) try: yield finally: if old_options: _iteritems = getattr(old_options, "iteritems", old_options.items) for opt, value in _iteritems(): - _safe_setAttr(camera + "." + opt, value) + # Skip locked attributes when restoring original options + if cmds.getAttr(camera + "." + opt, lock=True): + continue + else: + _safe_setAttr(camera + "." 
+ opt, value) @contextlib.contextmanager diff --git a/openpype/version.py b/openpype/version.py index 9dda1eacce..6ff5dfb7b5 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.12.2-nightly.3" +__version__ = "3.13.1-nightly.2" diff --git a/poetry.lock b/poetry.lock index 7221e191ff..919a352505 100644 --- a/poetry.lock +++ b/poetry.lock @@ -92,7 +92,14 @@ version = "1.4.4" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "main" optional = false -python-versions = "*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +develop = false + +[package.source] +type = "git" +url = "https://github.com/ActiveState/appdirs.git" +reference = "master" +resolved_reference = "193a2cbba58cce2542882fcedd0e49f6763672ed" [[package]] name = "arrow" @@ -221,7 +228,7 @@ python-versions = "~=3.7" [[package]] name = "certifi" -version = "2022.5.18.1" +version = "2022.6.15" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false @@ -456,19 +463,20 @@ python-versions = ">=3.7" [[package]] name = "ftrack-python-api" -version = "2.0.0" +version = "2.3.3" description = "Python API for ftrack." category = "main" optional = false -python-versions = ">=2.7.9, <4.0" +python-versions = ">=2.7.9, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, < 3.10" [package.dependencies] +appdirs = ">=1,<2" arrow = ">=0.4.4,<1" -clique = ">=1.2.0,<2" +clique = "1.6.1" future = ">=0.16.0,<1" pyparsing = ">=2.0,<3" requests = ">=2,<3" -six = ">=1,<2" +six = ">=1.13.0,<2" termcolor = ">=1.1.0,<2" websocket-client = ">=0.40.0,<1" @@ -1375,6 +1383,21 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +[[package]] +name = "shotgun-api3" +version = "3.3.3" +description = "Shotgun Python API" +category = "main" +optional = false +python-versions = "*" +develop = false + +[package.source] +type = "git" +url = "https://github.com/shotgunsoftware/python-api.git" +reference = "v3.3.3" +resolved_reference = "b9f066c0edbea6e0733242e18f32f75489064840" + [[package]] name = "six" version = "1.16.0" @@ -1812,10 +1835,7 @@ ansicon = [ {file = "ansicon-1.89.0-py2.py3-none-any.whl", hash = "sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec"}, {file = "ansicon-1.89.0.tar.gz", hash = "sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1"}, ] -appdirs = [ - {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, - {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, -] +appdirs = [] arrow = [ {file = "arrow-0.17.0-py2.py3-none-any.whl", hash = "sha256:e098abbd9af3665aea81bdd6c869e93af4feb078e98468dd351c383af187aac5"}, {file = "arrow-0.17.0.tar.gz", hash = "sha256:ff08d10cda1d36c68657d6ad20d74fbea493d980f8b2d45344e00d6ed2bf6ed4"}, @@ -1870,8 +1890,8 @@ cachetools = [ {file = "cachetools-5.2.0.tar.gz", hash = "sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757"}, ] certifi = [ - {file = "certifi-2022.5.18.1-py3-none-any.whl", hash = "sha256:f1d53542ee8cbedbe2118b5686372fb33c297fcd6379b050cca0ef13a597382a"}, - {file = "certifi-2022.5.18.1.tar.gz", hash = "sha256:9c5705e395cd70084351dd8ad5c41e65655e08ce46f2ec9cf6c2c08390f71eb7"}, + {file = 
"certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, + {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"}, ] cffi = [ {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, @@ -2137,10 +2157,7 @@ frozenlist = [ {file = "frozenlist-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:772965f773757a6026dea111a15e6e2678fbd6216180f82a48a40b27de1ee2ab"}, {file = "frozenlist-1.3.0.tar.gz", hash = "sha256:ce6f2ba0edb7b0c1d8976565298ad2deba6f8064d2bebb6ffce2ca896eb35b0b"}, ] -ftrack-python-api = [ - {file = "ftrack-python-api-2.0.0.tar.gz", hash = "sha256:dd6f02c31daf5a10078196dc9eac4671e4297c762fbbf4df98de668ac12281d9"}, - {file = "ftrack_python_api-2.0.0-py2.py3-none-any.whl", hash = "sha256:d0df0f2df4b53947272f95e179ec98b477ee425bf4217b37bb59030ad989771e"}, -] +ftrack-python-api = [] future = [ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, ] @@ -2820,6 +2837,7 @@ semver = [ {file = "semver-2.13.0-py2.py3-none-any.whl", hash = "sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4"}, {file = "semver-2.13.0.tar.gz", hash = "sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f"}, ] +shotgun-api3 = [] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, diff --git a/pyproject.toml b/pyproject.toml index eebc8a5600..9cbdc295ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.12.2-nightly.3" # OpenPype +version = "3.13.1-nightly.2" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" @@ -33,13 +33,14 @@ aiohttp = "^3.7" aiohttp_json_rpc = "*" # TVPaint server acre = { git = "https://github.com/pypeclub/acre.git" } opentimelineio = { version = "0.14.0.dev1", source = "openpype" } -appdirs = "^1.4.3" +appdirs = { git = "https://github.com/ActiveState/appdirs.git", branch = "master" } blessed = "^1.17" # openpype terminal formatting coolname = "*" clique = "1.6.*" Click = "^7" dnspython = "^2.1.0" -ftrack-python-api = "2.0.*" +ftrack-python-api = "^2.3.3" +shotgun_api3 = {git = "https://github.com/shotgunsoftware/python-api.git", rev = "v3.3.3"} gazu = "^0.8.28" google-api-python-client = "^1.12.8" # sync server google support (should be separate?) 
jsonschema = "^2.6.0" diff --git a/setup.py b/setup.py index 8b5a545c16..eab0187983 100644 --- a/setup.py +++ b/setup.py @@ -152,7 +152,7 @@ build_exe_options = dict( ) bdist_mac_options = dict( - bundle_name="OpenPype", + bundle_name=f"OpenPype {__version__}", iconfile=mac_icon_path ) diff --git a/start.py b/start.py index ace33ab92a..5cdffafb6e 100644 --- a/start.py +++ b/start.py @@ -103,6 +103,9 @@ import site import distutils.spawn from pathlib import Path + +silent_mode = False + # OPENPYPE_ROOT is variable pointing to build (or code) directory # WARNING `OPENPYPE_ROOT` must be defined before igniter import # - igniter changes cwd which cause that filepath of this script won't lead @@ -138,40 +141,44 @@ if sys.__stdout__: term = blessed.Terminal() def _print(message: str): + if silent_mode: + return if message.startswith("!!! "): - print("{}{}".format(term.orangered2("!!! "), message[4:])) + print(f'{term.orangered2("!!! ")}{message[4:]}') return if message.startswith(">>> "): - print("{}{}".format(term.aquamarine3(">>> "), message[4:])) + print(f'{term.aquamarine3(">>> ")}{message[4:]}') return if message.startswith("--- "): - print("{}{}".format(term.darkolivegreen3("--- "), message[4:])) + print(f'{term.darkolivegreen3("--- ")}{message[4:]}') return if message.startswith("*** "): - print("{}{}".format(term.gold("*** "), message[4:])) + print(f'{term.gold("*** ")}{message[4:]}') return if message.startswith(" - "): - print("{}{}".format(term.wheat(" - "), message[4:])) + print(f'{term.wheat(" - ")}{message[4:]}') return if message.startswith(" . "): - print("{}{}".format(term.tan(" . "), message[4:])) + print(f'{term.tan(" . ")}{message[4:]}') return if message.startswith(" - "): - print("{}{}".format(term.seagreen3(" - "), message[7:])) + print(f'{term.seagreen3(" - ")}{message[7:]}') return if message.startswith(" ! "): - print("{}{}".format(term.goldenrod(" ! "), message[7:])) + print(f'{term.goldenrod(" ! ")}{message[7:]}') return if message.startswith(" * "): - print("{}{}".format(term.aquamarine1(" * "), message[7:])) + print(f'{term.aquamarine1(" * ")}{message[7:]}') return if message.startswith(" "): - print("{}{}".format(term.darkseagreen3(" "), message[4:])) + print(f'{term.darkseagreen3(" ")}{message[4:]}') return print(message) else: def _print(message: str): + if silent_mode: + return print(message) @@ -187,9 +194,8 @@ else: if "--headless" in sys.argv: os.environ["OPENPYPE_HEADLESS_MODE"] = "1" sys.argv.remove("--headless") -else: - if os.getenv("OPENPYPE_HEADLESS_MODE") != "1": - os.environ.pop("OPENPYPE_HEADLESS_MODE", None) +elif os.getenv("OPENPYPE_HEADLESS_MODE") != "1": + os.environ.pop("OPENPYPE_HEADLESS_MODE", None) # Enabled logging debug mode when "--debug" is passed if "--verbose" in sys.argv: @@ -203,8 +209,8 @@ if "--verbose" in sys.argv: value = sys.argv.pop(idx) else: raise RuntimeError(( - "Expect value after \"--verbose\" argument. {}" - ).format(expected_values)) + f"Expect value after \"--verbose\" argument. {expected_values}" + )) log_level = None low_value = value.lower() @@ -225,8 +231,9 @@ if "--verbose" in sys.argv: if log_level is None: raise RuntimeError(( - "Unexpected value after \"--verbose\" argument \"{}\". {}" - ).format(value, expected_values)) + "Unexpected value after \"--verbose\" " + f"argument \"{value}\". 
{expected_values}" + )) os.environ["OPENPYPE_LOG_LEVEL"] = str(log_level) @@ -242,13 +249,14 @@ from igniter.tools import ( get_openpype_global_settings, get_openpype_path_from_settings, validate_mongo_connection, - OpenPypeVersionNotFound + OpenPypeVersionNotFound, + OpenPypeVersionIncompatible ) # noqa from igniter.bootstrap_repos import OpenPypeVersion # noqa: E402 bootstrap = BootstrapRepos() silent_commands = {"run", "igniter", "standalonepublisher", - "extractenvironments"} + "extractenvironments", "version"} def list_versions(openpype_versions: list, local_version=None) -> None: @@ -270,8 +278,11 @@ def set_openpype_global_environments() -> None: general_env = get_general_environments() + # first resolve general environment because merge doesn't expect + # values to be list. + # TODO: switch to OpenPype environment functions merged_env = acre.merge( - acre.parse(general_env), + acre.compute(acre.parse(general_env), cleanup=False), dict(os.environ) ) env = acre.compute( @@ -333,34 +344,33 @@ def run_disk_mapping_commands(settings): destination = destination.rstrip('/') source = source.rstrip('/') - if low_platform == "windows": - args = ["subst", destination, source] - elif low_platform == "darwin": - scr = "do shell script \"ln -s {} {}\" with administrator privileges".format(source, destination) # noqa: E501 + if low_platform == "darwin": + scr = f'do shell script "ln -s {source} {destination}" with administrator privileges' # noqa + args = ["osascript", "-e", scr] + elif low_platform == "windows": + args = ["subst", destination, source] else: args = ["sudo", "ln", "-s", source, destination] - _print("disk mapping args:: {}".format(args)) + _print(f"*** disk mapping arguments: {args}") try: if not os.path.exists(destination): output = subprocess.Popen(args) if output.returncode and output.returncode != 0: - exc_msg = "Executing was not successful: \"{}\"".format( - args) + exc_msg = f'Executing was not successful: "{args}"' raise RuntimeError(exc_msg) except TypeError as exc: - _print("Error {} in mapping drive {}, {}".format(str(exc), - source, - destination)) + _print( + f"Error {str(exc)} in mapping drive {source}, {destination}") raise def set_avalon_environments(): """Set avalon specific environments. - These are non modifiable environments for avalon workflow that must be set + These are non-modifiable environments for avalon workflow that must be set before avalon module is imported because avalon works with globals set with environment variables. 
""" @@ -505,7 +515,7 @@ def _process_arguments() -> tuple: ) if m and m.group('version'): use_version = m.group('version') - _print(">>> Requested version [ {} ]".format(use_version)) + _print(f">>> Requested version [ {use_version} ]") if "+staging" in use_version: use_staging = True break @@ -611,8 +621,8 @@ def _determine_mongodb() -> str: try: openpype_mongo = bootstrap.secure_registry.get_item( "openPypeMongo") - except ValueError: - raise RuntimeError("Missing MongoDB url") + except ValueError as e: + raise RuntimeError("Missing MongoDB url") from e return openpype_mongo @@ -684,40 +694,47 @@ def _find_frozen_openpype(use_version: str = None, # Specific version is defined if use_version.lower() == "latest": # Version says to use latest version - _print("Finding latest version defined by use version") + _print(">>> Finding latest version defined by use version") openpype_version = bootstrap.find_latest_openpype_version( - use_staging + use_staging, compatible_with=installed_version ) else: - _print("Finding specified version \"{}\"".format(use_version)) + _print(f">>> Finding specified version \"{use_version}\"") openpype_version = bootstrap.find_openpype_version( use_version, use_staging ) if openpype_version is None: raise OpenPypeVersionNotFound( - "Requested version \"{}\" was not found.".format( - use_version - ) + f"Requested version \"{use_version}\" was not found." ) + if not openpype_version.is_compatible(installed_version): + raise OpenPypeVersionIncompatible(( + f"Requested version \"{use_version}\" is not compatible " + f"with installed version \"{installed_version}\"" + )) + elif studio_version is not None: # Studio has defined a version to use - _print("Finding studio version \"{}\"".format(studio_version)) + _print(f">>> Finding studio version \"{studio_version}\"") openpype_version = bootstrap.find_openpype_version( - studio_version, use_staging + studio_version, use_staging, compatible_with=installed_version ) if openpype_version is None: raise OpenPypeVersionNotFound(( - "Requested OpenPype version \"{}\" defined by settings" + "Requested OpenPype version " + f"\"{studio_version}\" defined by settings" " was not found." 
- ).format(studio_version)) else: # Default behavior to use latest version - _print("Finding latest version") + _print(( + ">>> Finding latest version compatible " + f"with [ {installed_version} ]")) openpype_version = bootstrap.find_latest_openpype_version( - use_staging + use_staging, compatible_with=installed_version ) if openpype_version is None: if use_staging: @@ -798,7 +815,7 @@ if getattr(sys, 'frozen', False): local_version = bootstrap.get_version(Path(_openpype_root)) - switch_str = f" - will switch to {use_version}" if use_version else "" + switch_str = f" - will switch to {use_version}" if use_version and use_version != local_version else "" # noqa _print(f" - booting version: {local_version}{switch_str}") assert local_version else: @@ -813,11 +830,8 @@ use_version, use_staging ) if version_to_use is None: - raise OpenPypeVersionNotFound( - "Requested version \"{}\" was not found.".format( - use_version - ) - ) + raise OpenPypeVersionIncompatible( + f"Requested version \"{use_version}\" was not found.") else: # Staging version should be used version_to_use = bootstrap.find_latest_openpype_version( @@ -903,7 +917,7 @@ use_version, openpype_versions ) valid, message = bootstrap.validate_openpype_version(version_path) - _print("{}{}".format(">>> " if valid else "!!! ", message)) + _print(f'{">>> " if valid else "!!! "}{message}') def _boot_print_versions(use_staging, local_version, openpype_root): @@ -914,13 +928,24 @@ _print("--- This will list only staging versions detected.") _print(" To see other version, omit --use-staging argument.") - openpype_versions = bootstrap.find_openpype(include_zips=True, - staging=use_staging) if getattr(sys, 'frozen', False): local_version = bootstrap.get_version(Path(openpype_root)) else: local_version = OpenPypeVersion.get_installed_version_str() + compatible_with = OpenPypeVersion(version=local_version) + if "--all" in sys.argv: + compatible_with = None + _print("--- Showing all versions (even those not compatible).") + else: + _print(("--- Showing only compatible versions " + f"with [ {compatible_with.major}.{compatible_with.minor} ]")) + + openpype_versions = bootstrap.find_openpype( + include_zips=True, + staging=use_staging, + compatible_with=compatible_with) + list_versions(openpype_versions, local_version) @@ -937,6 +962,9 @@ def boot(): """Bootstrap OpenPype.""" + global silent_mode + if any(arg in silent_commands for arg in sys.argv): + silent_mode = True # ------------------------------------------------------------------------ # Set environment to OpenPype root path @@ -1040,7 +1068,7 @@ if not result[0]: _print(f"!!! 
Invalid version: {result[1]}") sys.exit(1) - _print(f"--- version is valid") + _print("--- version is valid") else: try: version_path = _bootstrap_from_code(use_version, use_staging) @@ -1113,8 +1141,12 @@ def boot(): def get_info(use_staging=None) -> list: """Print additional information to console.""" - from openpype.lib.mongo import get_default_components - from openpype.lib.log import PypeLogger + from openpype.client.mongo import get_default_components + try: + from openpype.lib.log import Logger + except ImportError: + # Backwards compatibility for 'PypeLogger' + from openpype.lib.log import PypeLogger as Logger components = get_default_components() @@ -1141,14 +1173,14 @@ def get_info(use_staging=None) -> list: os.environ.get("MUSTER_REST_URL"))) # Reinitialize - PypeLogger.initialize() + Logger.initialize() mongo_components = get_default_components() if mongo_components["host"]: inf.append(("Logging to MongoDB", mongo_components["host"])) inf.append((" - port", mongo_components["port"] or "")) - inf.append((" - database", PypeLogger.log_database_name)) - inf.append((" - collection", PypeLogger.log_collection_name)) + inf.append((" - database", Logger.log_database_name)) + inf.append((" - collection", Logger.log_collection_name)) inf.append((" - user", mongo_components["username"] or "")) if mongo_components["auth_db"]: inf.append((" - auth source", mongo_components["auth_db"])) @@ -1157,8 +1189,7 @@ def get_info(use_staging=None) -> list: formatted = [] for info in inf: padding = (maximum - len(info[0])) + 1 - formatted.append( - "... {}:{}[ {} ]".format(info[0], " " * padding, info[1])) + formatted.append(f'... {info[0]}:{" " * padding}[ {info[1]} ]') return formatted diff --git a/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py b/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py new file mode 100644 index 0000000000..c882e0f9b2 --- /dev/null +++ b/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py @@ -0,0 +1,64 @@ +import logging + +from tests.lib.assert_classes import DBAssert +from tests.integration.hosts.aftereffects.lib import AfterEffectsTestClass + +log = logging.getLogger("test_publish_in_aftereffects") + + +class TestPublishInAfterEffects(AfterEffectsTestClass): + """Basic test case for publishing in AfterEffects + + Should publish 5 frames + """ + PERSIST = True + + TEST_FILES = [ + ("12aSDRjthn4X3yw83gz_0FZJcRRiVDEYT", + "test_aftereffects_publish_multiframe.zip", + "") + ] + + APP = "aftereffects" + APP_VARIANT = "" + + APP_NAME = "{}/{}".format(APP, APP_VARIANT) + + TIMEOUT = 120 # publish timeout + + def test_db_asserts(self, dbcon, publish_finished): + """Host and input data dependent expected results in DB.""" + print("test_db_asserts") + failures = [] + + failures.append(DBAssert.count_of_types(dbcon, "version", 2)) + + failures.append( + DBAssert.count_of_types(dbcon, "version", 0, name={"$ne": 1})) + + failures.append( + DBAssert.count_of_types(dbcon, "subset", 1, + name="imageMainBackgroundcopy")) + + failures.append( + DBAssert.count_of_types(dbcon, "subset", 1, + name="workfileTest_task")) + + failures.append( + DBAssert.count_of_types(dbcon, "subset", 1, + name="reviewTesttask")) + + failures.append( + DBAssert.count_of_types(dbcon, "representation", 4)) + + additional_args = {"context.subset": "renderTestTaskDefault", + "context.ext": "png"} + failures.append( + DBAssert.count_of_types(dbcon, "representation", 1, + additional_args=additional_args)) + + assert not 
any(failures) + + +if __name__ == "__main__": + test_case = TestPublishInAfterEffects() diff --git a/tests/lib/testing_classes.py b/tests/lib/testing_classes.py index f991f02227..2b4d7deb48 100644 --- a/tests/lib/testing_classes.py +++ b/tests/lib/testing_classes.py @@ -314,30 +314,22 @@ class PublishTest(ModuleUnitTest): Compares only presence, not size nor content! """ - published_dir_base = download_test_data - published_dir = os.path.join(output_folder_url, - self.PROJECT, - self.ASSET, - self.TASK, - "**") - expected_dir_base = os.path.join(published_dir_base, + published_dir_base = output_folder_url + expected_dir_base = os.path.join(download_test_data, "expected") - expected_dir = os.path.join(expected_dir_base, - self.PROJECT, - self.ASSET, - self.TASK, - "**") - print("Comparing published:'{}' : expected:'{}'".format(published_dir, - expected_dir)) - published = set(f.replace(published_dir_base, '') for f in - glob.glob(published_dir, recursive=True) if - f != published_dir_base and os.path.exists(f)) - expected = set(f.replace(expected_dir_base, '') for f in - glob.glob(expected_dir, recursive=True) if - f != expected_dir_base and os.path.exists(f)) - not_matched = expected.difference(published) - assert not not_matched, "Missing {} files".format(not_matched) + print("Comparing published:'{}' : expected:'{}'".format( + published_dir_base, expected_dir_base)) + published = set(f.replace(published_dir_base, '') for f in + glob.glob(published_dir_base + "\\**", recursive=True) + if f != published_dir_base and os.path.exists(f)) + expected = set(f.replace(expected_dir_base, '') for f in + glob.glob(expected_dir_base + "\\**", recursive=True) + if f != expected_dir_base and os.path.exists(f)) + + not_matched = expected.symmetric_difference(published) + assert not not_matched, "Missing {} files".format( + "\n".join(sorted(not_matched))) class HostFixtures(PublishTest): diff --git a/tools/build.sh b/tools/build.sh index 79fb748cd5..fa2c580648 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -193,15 +193,15 @@ if [ "$disable_submodule_update" == 1 ]; then if [[ "$OSTYPE" == "darwin"* ]]; then # fix code signing issue - codesign --remove-signature "$openpype_root/build/OpenPype.app/Contents/MacOS/lib/Python" + codesign --remove-signature "$openpype_root/build/OpenPype $openpype_version.app/Contents/MacOS/lib/Python" if command -v create-dmg > /dev/null 2>&1; then create-dmg \ - --volname "OpenPype Installer" \ + --volname "OpenPype $openpype_version Installer" \ --window-pos 200 120 \ --window-size 600 300 \ --app-drop-link 100 50 \ - "$openpype_root/build/OpenPype-Installer.dmg" \ - "$openpype_root/build/OpenPype.app" + "$openpype_root/build/OpenPype-Installer-$openpype_version.dmg" \ + "$openpype_root/build/OpenPype $openpype_version.app" else echo -e "${BIYellow}!!!${RST} ${BIWhite}create-dmg${RST} command is not available." fi diff --git a/tools/build_dependencies.py b/tools/build_dependencies.py index d3566dd289..d186ead881 100644 --- a/tools/build_dependencies.py +++ b/tools/build_dependencies.py @@ -29,6 +29,7 @@ import shutil import blessed import enlighten import time +import re term = blessed.Terminal() @@ -52,7 +53,7 @@ def _print(msg: str, type: int = 0) -> None: else: header = term.darkolivegreen3("--- ") - print("{}{}".format(header, msg)) + print(f"{header}{msg}") def count_folders(path: Path) -> int: @@ -95,16 +96,22 @@ assert site_pkg, "No venv site-packages are found." 
_print(f"Working with: {site_pkg}", 2) openpype_root = Path(os.path.dirname(__file__)).parent +version = {} +with open(openpype_root / "openpype" / "version.py") as fp: + exec(fp.read(), version) + +version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) +openpype_version = version_match[1] # create full path if platform.system().lower() == "darwin": build_dir = openpype_root.joinpath( "build", - "OpenPype.app", + f"OpenPype {openpype_version}.app", "Contents", "MacOS") else: - build_subdir = "exe.{}-{}".format(get_platform(), sys.version[0:3]) + build_subdir = f"exe.{get_platform()}-{sys.version[:3]}" build_dir = openpype_root / "build" / build_subdir _print(f"Using build at {build_dir}", 2) diff --git a/tools/create_zip.py b/tools/create_zip.py index 2fc351469a..6392428f58 100644 --- a/tools/create_zip.py +++ b/tools/create_zip.py @@ -61,7 +61,7 @@ def _print(msg: str, message_type: int = 0) -> None: else: header = term.darkolivegreen3("--- ") - print("{}{}".format(header, msg)) + print(f"{header}{msg}") if __name__ == "__main__": diff --git a/vendor/configs/OpenColorIO-Configs b/vendor/configs/OpenColorIO-Configs new file mode 160000 index 0000000000..0bb079c08b --- /dev/null +++ b/vendor/configs/OpenColorIO-Configs @@ -0,0 +1 @@ +Subproject commit 0bb079c08be410030669cbf5f19ff869b88af953