diff --git a/.gitignore b/.gitignore index 7eaef69873..ea5b20eb69 100644 --- a/.gitignore +++ b/.gitignore @@ -102,5 +102,8 @@ website/.docusaurus .poetry/ .python-version +.editorconfig +.pre-commit-config.yaml +mypy.ini tools/run_eventserver.* diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..bac3132b77 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,10 @@ +[submodule "tools/modules/powershell/BurntToast"] + path = tools/modules/powershell/BurntToast + url = https://github.com/Windos/BurntToast.git + +[submodule "tools/modules/powershell/PSWriteColor"] + path = tools/modules/powershell/PSWriteColor + url = https://github.com/EvotecIT/PSWriteColor.git +[submodule "vendor/configs/OpenColorIO-Configs"] + path = vendor/configs/OpenColorIO-Configs + url = https://github.com/imageworks/OpenColorIO-Configs diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a0c058f73..2adb4ac154 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,143 +1,139 @@ # Changelog -## [3.12.1-nightly.4](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.13.1-nightly.2](https://github.com/pypeclub/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.0...HEAD) - -### 📖 Documentation - -- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) - -**🆕 New features** - -- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433) - -**🚀 Enhancements** - -- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) -- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) -- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) -- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445) -- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426) -- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425) -- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400) -- Maya: Ability to set resolution for playblasts from asset, and override through review instance. 
[\#3360](https://github.com/pypeclub/OpenPype/pull/3360)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...HEAD)
 
 **🐛 Bug fixes**
 
-- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474)
-- Kitsu: bugfix with sync-service ans publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473)
-- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470)
-- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468)
-- Resolve: removed few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464)
-- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462)
-- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455)
-- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450)
-- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447)
-- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443)
-- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427)
-- Maya: Camera extra data - additional fix for \#3304 [\#3386](https://github.com/pypeclub/OpenPype/pull/3386)
-- Maya: Handle excluding `model` family from frame range validator. [\#3370](https://github.com/pypeclub/OpenPype/pull/3370)
+- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638)
+- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631)
 
 **🔀 Refactored code**
 
-- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461)
-- Maya: Re-use `maintained\_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460)
-- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459)
-- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458)
-- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457)
-- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446)
-- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436)
-- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435)
-- Fusion: Use client query functions [\#3380](https://github.com/pypeclub/OpenPype/pull/3380)
-- Resolve: Use client query functions [\#3379](https://github.com/pypeclub/OpenPype/pull/3379)
+- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639)
+- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637)
+
+**Merged pull requests:**
+
+- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636)
+
+## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.13.0-nightly.1...3.13.0)
+
+**🆕 New features**
+
+- Support for multiple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605)
+
+**🚀 Enhancements**
+
+- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630)
+- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615)
+- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612)
+- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594)
+- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593)
+- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591)
+- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589)
+- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588)
+- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583)
+
+**🐛 Bug fixes**
+
+- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625)
+- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622)
+- Ftrack: Sync hierarchical attributes can handle newly created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621)
+- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620)
+- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617)
+- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611)
+- AfterEffects: refactored integrate doesn't work for multi frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610)
+- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607)
+- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600)
+- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion.
[\#3590](https://github.com/pypeclub/OpenPype/pull/3590) +- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) +- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) +- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) +- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) +- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) +- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) +- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) + +**🔀 Refactored code** + +- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) +- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) +- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) +- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) +- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) + +**Merged pull requests:** + +- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) +- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) +- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) + +## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.2-nightly.4...3.12.2) + +### 📖 Documentation + +- Update website with more studios [\#3554](https://github.com/pypeclub/OpenPype/pull/3554) +- Documentation: Update publishing dev docs [\#3549](https://github.com/pypeclub/OpenPype/pull/3549) + +**🚀 Enhancements** + +- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) +- Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) +- General: Interactive console in cli [\#3526](https://github.com/pypeclub/OpenPype/pull/3526) +- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) + +**🐛 Bug fixes** + +- Maya: Fix animated attributes \(ie. overscan\) on loaded cameras breaking review publishing. 
[\#3562](https://github.com/pypeclub/OpenPype/pull/3562) +- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) +- Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) +- General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) +- Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) +- Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) +- Workfiles tool: Show of tool and it's flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) +- General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) +- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) +- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) +- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) +- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) +- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) +- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) + +**🔀 Refactored code** + +- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) +- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) +- Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) +- General: Client docstrings cleanup [\#3529](https://github.com/pypeclub/OpenPype/pull/3529) +- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) +- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) + +**Merged pull requests:** + +- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566) + +## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.1-nightly.6...3.12.1) ## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.0-nightly.3...3.12.0) -### 📖 Documentation - -- Fix typo in documentation: pyenv on mac [\#3417](https://github.com/pypeclub/OpenPype/pull/3417) -- Linux: update OIIO package [\#3401](https://github.com/pypeclub/OpenPype/pull/3401) - -**🚀 Enhancements** - -- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422) -- Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411) -- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366) -- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357) -- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350) - -**🐛 Bug fixes** - -- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420) -- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418) -- Nuke: Fix keyword argument in query function 
[\#3414](https://github.com/pypeclub/OpenPype/pull/3414) -- Houdini: fix loading and updating vbd/bgeo sequences [\#3408](https://github.com/pypeclub/OpenPype/pull/3408) -- Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407) -- General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398) -- Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392) -- TVPaint: Make sure exit code is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382) -- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381) -- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377) -- Harmony: added unc path to zifile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372) -- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355) - -**🔀 Refactored code** - -- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421) -- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419) -- Kitsu: renaming to plural func sync\_all\_projects [\#3397](https://github.com/pypeclub/OpenPype/pull/3397) -- Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395) -- Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393) -- Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391) -- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385) -- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378) -- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376) -- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375) -- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374) - -**Merged pull requests:** - -- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371) -- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369) - ## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.11.1-nightly.1...3.11.1) -**🆕 New features** - -- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346) -- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344) - -**🚀 Enhancements** - -- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367) -- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354) -- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352) - -**🐛 Bug fixes** - -- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368) -- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364) -- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363) -- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361) -- AE- fix validate\_scene\_settings and 
renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358) -- deadline: fixing misidentification of revieables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356) -- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351) -- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347) -- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345) - ## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.11.0-nightly.4...3.11.0) -**🐛 Bug fixes** - -- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342) - ## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.10.0-nightly.6...3.10.0) diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index 08333885c0..750b2f1bf7 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -122,7 +122,7 @@ class OpenPypeVersion(semver.VersionInfo): if self.staging: if kwargs.get("build"): if "staging" not in kwargs.get("build"): - kwargs["build"] = "{}-staging".format(kwargs.get("build")) + kwargs["build"] = f"{kwargs.get('build')}-staging" else: kwargs["build"] = "staging" @@ -136,8 +136,7 @@ class OpenPypeVersion(semver.VersionInfo): return bool(result and self.staging == other.staging) def __repr__(self): - return "<{}: {} - path={}>".format( - self.__class__.__name__, str(self), self.path) + return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>" def __lt__(self, other: OpenPypeVersion): result = super().__lt__(other) @@ -232,10 +231,7 @@ class OpenPypeVersion(semver.VersionInfo): return openpype_version def __hash__(self): - if self.path: - return hash(self.path) - else: - return hash(str(self)) + return hash(self.path) if self.path else hash(str(self)) @staticmethod def is_version_in_dir( @@ -384,7 +380,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_local_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None, compatible_with: OpenPypeVersion = None ) -> List: """Get all versions available on this machine. @@ -394,6 +391,8 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. + compatible_with (OpenPypeVersion): Return only those compatible + with specified version. 
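+
+            Example:
+                >>> # Illustrative call only; the version string is made up
+                >>> # to show the new "compatible_with" filter.
+                >>> OpenPypeVersion.get_local_versions(
+                ...     production=True,
+                ...     compatible_with=OpenPypeVersion(version="3.13.0"))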
""" # Return all local versions if arguments are set to None if production is None and staging is None: @@ -410,10 +409,19 @@ class OpenPypeVersion(semver.VersionInfo): if not production and not staging: return [] + # DEPRECATED: backwards compatible way to look for versions in root dir_to_search = Path(user_data_dir("openpype", "pypeclub")) versions = OpenPypeVersion.get_versions_from_directory( - dir_to_search + dir_to_search, compatible_with=compatible_with ) + if compatible_with: + dir_to_search = Path( + user_data_dir("openpype", "pypeclub")) / f"{compatible_with.major}.{compatible_with.minor}" # noqa + versions += OpenPypeVersion.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with + ) + + filtered_versions = [] for version in versions: if version.is_staging(): @@ -425,7 +433,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_remote_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None, compatible_with: OpenPypeVersion = None ) -> List: """Get all versions available in OpenPype Path. @@ -435,6 +444,8 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. + compatible_with (OpenPypeVersion): Return only those compatible + with specified version. """ # Return all local versions if arguments are set to None if production is None and staging is None: @@ -468,7 +479,14 @@ class OpenPypeVersion(semver.VersionInfo): if not dir_to_search: return [] - versions = cls.get_versions_from_directory(dir_to_search) + # DEPRECATED: look for version in root directory + versions = cls.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with) + if compatible_with: + dir_to_search = dir_to_search / f"{compatible_with.major}.{compatible_with.minor}" # noqa + versions += cls.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with) + filtered_versions = [] for version in versions: if version.is_staging(): @@ -479,11 +497,15 @@ class OpenPypeVersion(semver.VersionInfo): return list(sorted(set(filtered_versions))) @staticmethod - def get_versions_from_directory(openpype_dir: Path) -> List: + def get_versions_from_directory( + openpype_dir: Path, + compatible_with: OpenPypeVersion = None) -> List: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. + compatible_with (OpenPypeVersion): Return only versions compatible + with build version specified as OpenPypeVersion. Returns: list of OpenPypeVersion @@ -492,10 +514,10 @@ class OpenPypeVersion(semver.VersionInfo): ValueError: if invalid path is specified. """ - if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") - _openpype_versions = [] + if not openpype_dir.exists() and not openpype_dir.is_dir(): + return _openpype_versions + # iterate over directory in first level and find all that might # contain OpenPype. 
for item in openpype_dir.iterdir(): @@ -518,6 +540,10 @@ class OpenPypeVersion(semver.VersionInfo): )[0]: continue + if compatible_with and not detected_version.is_compatible( + compatible_with): + continue + detected_version.path = item _openpype_versions.append(detected_version) @@ -549,8 +575,9 @@ class OpenPypeVersion(semver.VersionInfo): def get_latest_version( staging: bool = False, local: bool = None, - remote: bool = None - ) -> OpenPypeVersion: + remote: bool = None, + compatible_with: OpenPypeVersion = None + ) -> Union[OpenPypeVersion, None]: """Get latest available version. The version does not contain information about path and source. @@ -568,6 +595,9 @@ class OpenPypeVersion(semver.VersionInfo): staging (bool, optional): List staging versions if True. local (bool, optional): List local versions if True. remote (bool, optional): List remote versions if True. + compatible_with (OpenPypeVersion, optional) Return only version + compatible with compatible_with. + """ if local is None and remote is None: local = True @@ -598,7 +628,12 @@ class OpenPypeVersion(semver.VersionInfo): return None all_versions.sort() - return all_versions[-1] + latest_version: OpenPypeVersion + latest_version = all_versions[-1] + if compatible_with and not latest_version.is_compatible( + compatible_with): + return None + return latest_version @classmethod def get_expected_studio_version(cls, staging=False, global_settings=None): @@ -621,6 +656,21 @@ class OpenPypeVersion(semver.VersionInfo): return None return OpenPypeVersion(version=result) + def is_compatible(self, version: OpenPypeVersion): + """Test build compatibility. + + This will simply compare major and minor versions (ignoring patch + and the rest). + + Args: + version (OpenPypeVersion): Version to check compatibility with. + + Returns: + bool: if the version is compatible + + """ + return self.major == version.major and self.minor == version.minor + class BootstrapRepos: """Class for bootstrapping local OpenPype installation. @@ -741,8 +791,9 @@ class BootstrapRepos: return # create destination directory - if not self.data_dir.exists(): - self.data_dir.mkdir(parents=True) + destination = self.data_dir / f"{installed_version.major}.{installed_version.minor}" # noqa + if not destination.exists(): + destination.mkdir(parents=True) # create zip inside temporary directory. with tempfile.TemporaryDirectory() as temp_dir: @@ -770,7 +821,9 @@ class BootstrapRepos: Path to moved zip on success. """ - destination = self.data_dir / zip_file.name + version = OpenPypeVersion.version_in_str(zip_file.name) + destination_dir = self.data_dir / f"{version.major}.{version.minor}" + destination = destination_dir / zip_file.name if destination.exists(): self._print( @@ -782,7 +835,7 @@ class BootstrapRepos: self._print(str(e), LOG_ERROR, exc_info=True) return None try: - shutil.move(zip_file.as_posix(), self.data_dir.as_posix()) + shutil.move(zip_file.as_posix(), destination_dir.as_posix()) except shutil.Error as e: self._print(str(e), LOG_ERROR, exc_info=True) return None @@ -995,6 +1048,16 @@ class BootstrapRepos: @staticmethod def _validate_dir(path: Path) -> tuple: + """Validate checksums in a given path. + + Args: + path (Path): path to folder to validate. + + Returns: + tuple(bool, str): returns status and reason as a bool + and str in a tuple. 
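+
+        Note:
+            A missing "checksums" file currently still validates as OK,
+            see the FIXME below.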
+
+        """
        checksums_file = Path(path / "checksums")
 
        if not checksums_file.exists():
            # FIXME: This should be set to False sometimes in the future
@@ -1076,7 +1139,20 @@ class BootstrapRepos:
            sys.path.insert(0, directory.as_posix())
 
     @staticmethod
-    def find_openpype_version(version, staging):
+    def find_openpype_version(
+            version: Union[str, OpenPypeVersion],
+            staging: bool,
+            compatible_with: OpenPypeVersion = None
+    ) -> Union[OpenPypeVersion, None]:
+        """Find location of specified OpenPype version.
+
+        Args:
+            version (Union[str, OpenPypeVersion]): Version to find.
+            staging (bool): Filter staging versions.
+            compatible_with (OpenPypeVersion, optional): Find only
+                versions compatible with specified one.
+
+        """
         if isinstance(version, str):
             version = OpenPypeVersion(version=version)
 
@@ -1085,7 +1161,8 @@
             return installed_version
 
         local_versions = OpenPypeVersion.get_local_versions(
-            staging=staging, production=not staging
+            staging=staging, production=not staging,
+            compatible_with=compatible_with
         )
         zip_version = None
         for local_version in local_versions:
@@ -1099,7 +1176,8 @@
             return zip_version
 
         remote_versions = OpenPypeVersion.get_remote_versions(
-            staging=staging, production=not staging
+            staging=staging, production=not staging,
+            compatible_with=compatible_with
         )
         for remote_version in remote_versions:
             if remote_version == version:
                 return remote_version
         return None
 
     @staticmethod
-    def find_latest_openpype_version(staging):
+    def find_latest_openpype_version(
+            staging, compatible_with: OpenPypeVersion = None):
         installed_version = OpenPypeVersion.get_installed_version()
         local_versions = OpenPypeVersion.get_local_versions(
-            staging=staging
+            staging=staging, compatible_with=compatible_with
         )
         remote_versions = OpenPypeVersion.get_remote_versions(
-            staging=staging
+            staging=staging, compatible_with=compatible_with
         )
         all_versions = local_versions + remote_versions
         if not staging:
@@ -1138,7 +1217,9 @@
         self,
         openpype_path: Union[Path, str] = None,
         staging: bool = False,
-        include_zips: bool = False) -> Union[List[OpenPypeVersion], None]:
+        include_zips: bool = False,
+        compatible_with: OpenPypeVersion = None
+    ) -> Union[List[OpenPypeVersion], None]:
         """Get ordered dict of detected OpenPype version.
 
         Resolution order for OpenPype is following:
@@ -1154,6 +1235,8 @@
                otherwise.
             include_zips (bool, optional): If set True it will try to find
                 OpenPype in zip files in given directory.
+            compatible_with (OpenPypeVersion, optional): Find only those
+                versions compatible with the one specified.
 
         Returns:
             dict of Path: Dictionary of detected OpenPype version.
@@ -1172,30 +1255,56 @@
                 ("Finding OpenPype in non-filesystem locations is"
                  " not implemented yet."))
 
-        dir_to_search = self.data_dir
-        user_versions = self.get_openpype_versions(self.data_dir, staging)
-        # if we have openpype_path specified, search only there.
+        version_dir = ""
+        if compatible_with:
+            version_dir = f"{compatible_with.major}.{compatible_with.minor}"
+
+        # if checks below for OPENPYPE_PATH and registry fail, use data_dir
+        # DEPRECATED: lookup in root of this folder is deprecated in favour
+        # of major.minor sub-folders.
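+        # e.g. a 3.13.x build is expected to keep its versions in
+        # "<data_dir>/3.13/" instead of directly in the data dir root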
+ dirs_to_search = [ + self.data_dir + ] + if compatible_with: + dirs_to_search.append(self.data_dir / version_dir) + if openpype_path: - dir_to_search = openpype_path + dirs_to_search = [openpype_path] + + if compatible_with: + dirs_to_search.append(openpype_path / version_dir) else: - if os.getenv("OPENPYPE_PATH"): - if Path(os.getenv("OPENPYPE_PATH")).exists(): - dir_to_search = Path(os.getenv("OPENPYPE_PATH")) + # first try OPENPYPE_PATH and if that is not available, + # try registry. + if os.getenv("OPENPYPE_PATH") \ + and Path(os.getenv("OPENPYPE_PATH")).exists(): + dirs_to_search = [Path(os.getenv("OPENPYPE_PATH"))] + + if compatible_with: + dirs_to_search.append( + Path(os.getenv("OPENPYPE_PATH")) / version_dir) else: try: registry_dir = Path( str(self.registry.get_item("openPypePath"))) if registry_dir.exists(): - dir_to_search = registry_dir + dirs_to_search = [registry_dir] + if compatible_with: + dirs_to_search.append(registry_dir / version_dir) except ValueError: # nothing found in registry, we'll use data dir pass - openpype_versions = self.get_openpype_versions(dir_to_search, staging) - openpype_versions += user_versions + openpype_versions = [] + for dir_to_search in dirs_to_search: + try: + openpype_versions += self.get_openpype_versions( + dir_to_search, staging, compatible_with=compatible_with) + except ValueError: + # location is invalid, skip it + pass - # remove zip file version if needed. if not include_zips: openpype_versions = [ v for v in openpype_versions if v.path.suffix != ".zip" @@ -1308,9 +1417,8 @@ class BootstrapRepos: raise ValueError( f"version {version} is not associated with any file") - destination = self.data_dir / version.path.stem - if destination.exists(): - assert destination.is_dir() + destination = self.data_dir / f"{version.major}.{version.minor}" / version.path.stem # noqa + if destination.exists() and destination.is_dir(): try: shutil.rmtree(destination) except OSError as e: @@ -1379,7 +1487,7 @@ class BootstrapRepos: else: dir_name = openpype_version.path.stem - destination = self.data_dir / dir_name + destination = self.data_dir / f"{openpype_version.major}.{openpype_version.minor}" / dir_name # noqa # test if destination directory already exist, if so lets delete it. if destination.exists() and force: @@ -1557,14 +1665,18 @@ class BootstrapRepos: return False return True - def get_openpype_versions(self, - openpype_dir: Path, - staging: bool = False) -> list: + def get_openpype_versions( + self, + openpype_dir: Path, + staging: bool = False, + compatible_with: OpenPypeVersion = None) -> list: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. staging (bool, optional): Find staging versions if True. + compatible_with (OpenPypeVersion, optional): Get only versions + compatible with the one specified. 
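+
+        Note:
+            "Compatible" means matching major.minor of the specified
+            version (see OpenPypeVersion.is_compatible).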
 
         Returns:
             list of OpenPypeVersion
 
@@ -1574,7 +1686,7 @@
 
         """
         if not openpype_dir.exists() and not openpype_dir.is_dir():
-            raise ValueError("specified directory is invalid")
+            raise ValueError(f"specified directory {openpype_dir} is invalid")
 
         _openpype_versions = []
         # iterate over directory in first level and find all that might
@@ -1599,6 +1711,10 @@
             ):
                 continue
 
+            if compatible_with and \
+                    not detected_version.is_compatible(compatible_with):
+                continue
+
             detected_version.path = item
             if staging and detected_version.is_staging():
                 _openpype_versions.append(detected_version)
diff --git a/igniter/tools.py b/igniter/tools.py
index 57159b5e52..a9d592acf0 100644
--- a/igniter/tools.py
+++ b/igniter/tools.py
@@ -21,6 +21,11 @@ class OpenPypeVersionNotFound(Exception):
     pass
 
 
+class OpenPypeVersionIncompatible(Exception):
+    """OpenPype version is not compatible with the installed one (build)."""
+    pass
+
+
 def should_add_certificate_path_to_mongo_url(mongo_url):
     """Check if should add ca certificate to mongo url.
 
diff --git a/openpype/api.py b/openpype/api.py
index fac2ae572b..c2227c1a52 100644
--- a/openpype/api.py
+++ b/openpype/api.py
@@ -9,6 +9,7 @@ from .settings import (
 )
 from .lib import (
     PypeLogger,
+    Logger,
     Anatomy,
     config,
     execute,
@@ -58,8 +59,6 @@ from .action import (
     RepairContextAction
 )
 
-# for backward compatibility with Pype 2
-Logger = PypeLogger
 
 __all__ = [
     "get_system_settings",
diff --git a/openpype/cli.py b/openpype/cli.py
index 2aa4a46929..ffe288040e 100644
--- a/openpype/cli.py
+++ b/openpype/cli.py
@@ -2,7 +2,7 @@
 """Package for handling pype command line arguments."""
 import os
 import sys
-
+import code
 import click
 
 # import sys
@@ -424,3 +424,45 @@ def pack_project(project, dirpath):
 def unpack_project(zipfile, root):
     """Create a package of project with all files and database dump."""
     PypeCommands().unpack_project(zipfile, root)
+
+
+@main.command()
+def interactive():
+    """Interactive (Python-like) console.
+
+    Helpful command, not only for development, to work directly with the
+    python interpreter.
+
+    Warning:
+        Executable 'openpype_gui' on Windows won't work.
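+
+    Example:
+        Run from a terminal (the "openpype_console" executable name is
+        assumed here)::
+
+            openpype_console interactive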
+ """ + + from openpype.version import __version__ + + banner = "OpenPype {}\nPython {} on {}".format( + __version__, sys.version, sys.platform + ) + code.interact(banner) + + +@main.command() +@click.option("--build", help="Print only build version", + is_flag=True, default=False) +def version(build): + """Print OpenPype version.""" + + from openpype.version import __version__ + from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion + from pathlib import Path + import os + + if getattr(sys, 'frozen', False): + local_version = BootstrapRepos.get_version( + Path(os.getenv("OPENPYPE_ROOT"))) + else: + local_version = OpenPypeVersion.get_installed_version_str() + + if build: + print(local_version) + return + print(f"{__version__} (booted: {local_version})") diff --git a/openpype/client/__init__.py b/openpype/client/__init__.py index 97e6755d09..64a82334d9 100644 --- a/openpype/client/__init__.py +++ b/openpype/client/__init__.py @@ -1,3 +1,7 @@ +from .mongo import ( + OpenPypeMongoConnection, +) + from .entities import ( get_projects, get_project, @@ -25,6 +29,8 @@ from .entities import ( get_last_version_by_subset_name, get_output_link_versions, + version_is_latest, + get_representation_by_id, get_representation_by_name, get_representations, @@ -40,6 +46,8 @@ from .entities import ( ) __all__ = ( + "OpenPypeMongoConnection", + "get_projects", "get_project", "get_whole_project", @@ -66,6 +74,8 @@ __all__ = ( "get_last_version_by_subset_name", "get_output_link_versions", + "version_is_latest", + "get_representation_by_id", "get_representation_by_name", "get_representations", diff --git a/openpype/client/entities.py b/openpype/client/entities.py index 9d65355d1b..7362f57d7f 100644 --- a/openpype/client/entities.py +++ b/openpype/client/entities.py @@ -6,24 +6,12 @@ that has project name as a context (e.g. on 'ProjectEntity'?). + We will need more specific functions doing wery specific queires really fast. """ -import os import collections import six from bson.objectid import ObjectId -from openpype.lib.mongo import OpenPypeMongoConnection - - -def _get_project_database(): - db_name = os.environ.get("AVALON_DB") or "avalon" - return OpenPypeMongoConnection.get_mongo_client()[db_name] - - -def _get_project_connection(project_name): - if not project_name: - raise ValueError("Invalid project name {}".format(str(project_name))) - return _get_project_database()[project_name] +from .mongo import get_project_database, get_project_connection def _prepare_fields(fields, required_fields=None): @@ -58,7 +46,7 @@ def _convert_ids(in_ids): def get_projects(active=True, inactive=False, fields=None): - mongodb = _get_project_database() + mongodb = get_project_database() for project_name in mongodb.collection_names(): if project_name in ("system.indexes",): continue @@ -93,7 +81,7 @@ def get_project(project_name, active=True, inactive=False, fields=None): {"data.active": False}, ] - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -108,7 +96,7 @@ def get_whole_project(project_name): project collection. """ - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find({}) @@ -117,8 +105,8 @@ def get_asset_by_id(project_name, asset_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - asset_id (str|ObjectId): Asset's id. - fields (list[str]): Fields that should be returned. 
All fields are + asset_id (Union[str, ObjectId]): Asset's id. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -131,7 +119,7 @@ def get_asset_by_id(project_name, asset_id, fields=None): return None query_filter = {"type": "asset", "_id": asset_id} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -141,7 +129,7 @@ def get_asset_by_name(project_name, asset_name, fields=None): Args: project_name (str): Name of project where to look for queried entities. asset_name (str): Asset's name. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -153,7 +141,7 @@ def get_asset_by_name(project_name, asset_name, fields=None): return None query_filter = {"type": "asset", "name": asset_name} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -178,12 +166,13 @@ def _get_assets( Args: project_name (str): Name of project where to look for queried entities. - asset_ids (list[str|ObjectId]): Asset ids that should be found. - asset_names (list[str]): Name assets that should be found. - parent_ids (list[str|ObjectId]): Parent asset ids. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Name assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. standard (bool): Query standart assets (type 'asset'). archived (bool): Query archived assets (type 'archived_asset'). - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -222,7 +211,7 @@ def _get_assets( return [] query_filter["data.visualParent"] = {"$in": parent_ids} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -244,11 +233,12 @@ def get_assets( Args: project_name (str): Name of project where to look for queried entities. - asset_ids (list[str|ObjectId]): Asset ids that should be found. - asset_names (list[str]): Name assets that should be found. - parent_ids (list[str|ObjectId]): Parent asset ids. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Name assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. archived (bool): Add also archived assets. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -283,10 +273,11 @@ def get_archived_assets( Args: project_name (str): Name of project where to look for queried entities. - asset_ids (list[str|ObjectId]): Asset ids that should be found. - asset_names (list[str]): Name assets that should be found. - parent_ids (list[str|ObjectId]): Parent asset ids. - fields (list[str]): Fields that should be returned. All fields are + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Name assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. 
+ fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -304,10 +295,11 @@ def get_asset_ids_with_subsets(project_name, asset_ids=None): Args: project_name (str): Name of project where to look for queried entities. - asset_ids (list[str|ObjectId]): Look only for entered asset ids. + asset_ids (Iterable[Union[str, ObjectId]]): Look only for entered + asset ids. Returns: - List[ObjectId]: Asset ids that have existing subsets. + Iterable[ObjectId]: Asset ids that have existing subsets. """ subset_query = { @@ -319,7 +311,7 @@ def get_asset_ids_with_subsets(project_name, asset_ids=None): return [] subset_query["parent"] = {"$in": asset_ids} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) result = conn.aggregate([ { "$match": subset_query @@ -345,8 +337,8 @@ def get_subset_by_id(project_name, subset_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - subset_id (str|ObjectId): Id of subset which should be found. - fields (list[str]): Fields that should be returned. All fields are + subset_id (Union[str, ObjectId]): Id of subset which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -359,7 +351,7 @@ def get_subset_by_id(project_name, subset_id, fields=None): return None query_filters = {"type": "subset", "_id": subset_id} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filters, _prepare_fields(fields)) @@ -369,8 +361,8 @@ def get_subset_by_name(project_name, subset_name, asset_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. subset_name (str): Name of subset. - asset_id (str|ObjectId): Id of parent asset. - fields (list[str]): Fields that should be returned. All fields are + asset_id (Union[str, ObjectId]): Id of parent asset. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -390,7 +382,7 @@ def get_subset_by_name(project_name, subset_name, asset_id, fields=None): "name": subset_name, "parent": asset_id } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filters, _prepare_fields(fields)) @@ -409,16 +401,16 @@ def get_subsets( Args: project_name (str): Name of project where to look for queried entities. - subset_ids (list[str|ObjectId]): Subset ids that should be queried. + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should be + queried. Filter ignored if 'None' is passed. + subset_names (Iterable[str]): Subset names that should be queried. Filter ignored if 'None' is passed. - subset_names (list[str]): Subset names that should be queried. - Filter ignored if 'None' is passed. - asset_ids (list[str|ObjectId]): Asset ids under which should look for - the subsets. Filter ignored if 'None' is passed. - names_by_asset_ids (dict[ObjectId, list[str]]): Complex filtering + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids under which + should look for the subsets. Filter ignored if 'None' is passed. + names_by_asset_ids (dict[ObjectId, List[str]]): Complex filtering using asset ids and list of subset names under the asset. archived (bool): Look for archived subsets too. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. 
All fields are returned if 'None' is passed. Returns: @@ -463,7 +455,7 @@ def get_subsets( return [] query_filter["$or"] = or_query - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -472,8 +464,8 @@ def get_subset_families(project_name, subset_ids=None): Args: project_name (str): Name of project where to look for queried entities. - subset_ids (list[str|ObjectId]): Subset ids that should be queried. - All subsets from project are used if 'None' is passed. + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should + be queried. All subsets from project are used if 'None' is passed. Returns: set[str]: Main families of matching subsets. @@ -487,7 +479,7 @@ def get_subset_families(project_name, subset_ids=None): return set() subset_filter["_id"] = {"$in": list(subset_ids)} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) result = list(conn.aggregate([ {"$match": subset_filter}, {"$project": { @@ -508,8 +500,8 @@ def get_version_by_id(project_name, version_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - version_id (str|ObjectId): Id of version which should be found. - fields (list[str]): Fields that should be returned. All fields are + version_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -525,7 +517,7 @@ def get_version_by_id(project_name, version_id, fields=None): "type": {"$in": ["version", "hero_version"]}, "_id": version_id } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -535,8 +527,8 @@ def get_version_by_name(project_name, version, subset_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. version (int): name of version entity (it's version). - subset_id (str|ObjectId): Id of version which should be found. - fields (list[str]): Fields that should be returned. All fields are + subset_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -548,7 +540,7 @@ def get_version_by_name(project_name, version, subset_id, fields=None): if not subset_id: return None - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) query_filter = { "type": "version", "parent": subset_id, @@ -557,6 +549,42 @@ def get_version_by_name(project_name, version, subset_id, fields=None): return conn.find_one(query_filter, _prepare_fields(fields)) +def version_is_latest(project_name, version_id): + """Is version the latest from it's subset. + + Note: + Hero versions are considered as latest. + + Todo: + Maybe raise exception when version was not found? + + Args: + project_name (str):Name of project where to look for queried entities. + version_id (Union[str, ObjectId]): Version id which is checked. + + Returns: + bool: True if is latest version from subset else False. + """ + + version_id = _convert_id(version_id) + if not version_id: + return False + version_doc = get_version_by_id( + project_name, version_id, fields=["_id", "type", "parent"] + ) + # What to do when version is not found? 
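+    # For now a missing version document is simply treated as "not latest".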
+ if not version_doc: + return False + + if version_doc["type"] == "hero_version": + return True + + last_version = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + return last_version["_id"] == version_id + + def _get_versions( project_name, subset_ids=None, @@ -602,7 +630,7 @@ def _get_versions( else: query_filter["name"] = {"$in": versions} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -621,14 +649,14 @@ def get_versions( Args: project_name (str): Name of project where to look for queried entities. - version_ids (list[str|ObjectId]): Version ids that will be queried. + version_ids (Iterable[Union[str, ObjectId]]): Version ids that will + be queried. Filter ignored if 'None' is passed. + subset_ids (Iterable[str]): Subset ids that will be queried. Filter ignored if 'None' is passed. - subset_ids (list[str]): Subset ids that will be queried. - Filter ignored if 'None' is passed. - versions (list[int]): Version names (as integers). + versions (Iterable[int]): Version names (as integers). Filter ignored if 'None' is passed. hero (bool): Look also for hero versions. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -651,8 +679,9 @@ def get_hero_version_by_subset_id(project_name, subset_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - subset_id (str|ObjectId): Subset id under which is hero version. - fields (list[str]): Fields that should be returned. All fields are + subset_id (Union[str, ObjectId]): Subset id under which + is hero version. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -681,8 +710,8 @@ def get_hero_version_by_id(project_name, version_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - version_id (str|ObjectId): Hero version id. - fields (list[str]): Fields that should be returned. All fields are + version_id (Union[str, ObjectId]): Hero version id. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -716,11 +745,11 @@ def get_hero_versions( Args: project_name (str): Name of project where to look for queried entities. - subset_ids (list[str|ObjectId]): Subset ids for which should look for - hero versions. Filter ignored if 'None' is passed. - version_ids (list[str|ObjectId]): Hero version ids. Filter ignored if - 'None' is passed. - fields (list[str]): Fields that should be returned. All fields are + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids for which + should look for hero versions. Filter ignored if 'None' is passed. + version_ids (Iterable[Union[str, ObjectId]]): Hero version ids. Filter + ignored if 'None' is passed. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -746,13 +775,13 @@ def get_output_link_versions(project_name, version_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - version_id (str|ObjectId): Version id which can be used as input link - for other versions. - fields (list[str]): Fields that should be returned. All fields are + version_id (Union[str, ObjectId]): Version id which can be used + as input link for other versions. 
+ fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: - Cursor|list: Iterable cursor yielding versions that are used as input + Iterable: Iterable cursor yielding versions that are used as input links for passed version. """ @@ -760,11 +789,11 @@ def get_output_link_versions(project_name, version_id, fields=None): if not version_id: return [] - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) # Does make sense to look for hero versions? query_filter = { "type": "version", - "data.inputLinks.input": version_id + "data.inputLinks.id": version_id } return conn.find(query_filter, _prepare_fields(fields)) @@ -774,8 +803,8 @@ def get_last_versions(project_name, subset_ids, fields=None): Args: project_name (str): Name of project where to look for queried entities. - subset_ids (list): List of subset ids. - fields (list[str]): Fields that should be returned. All fields are + subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -825,7 +854,7 @@ def get_last_versions(project_name, subset_ids, fields=None): {"$group": group_item} ] - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) aggregate_result = conn.aggregate(aggregation_pipeline) if limit_query: output = {} @@ -859,8 +888,8 @@ def get_last_version_by_subset_id(project_name, subset_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - subset_id (str|ObjectId): Id of version which should be found. - fields (list[str]): Fields that should be returned. All fields are + subset_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -889,10 +918,10 @@ def get_last_version_by_subset_name( Args: project_name (str): Name of project where to look for queried entities. subset_name (str): Name of subset. - asset_id (str|ObjectId): Asset id which is parent of passed + asset_id (Union[str, ObjectId]): Asset id which is parent of passed subset name. asset_name (str): Asset name which is parent of passed subset name. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -923,8 +952,8 @@ def get_representation_by_id(project_name, representation_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - representation_id (str|ObjectId): Representation id. - fields (list[str]): Fields that should be returned. All fields are + representation_id (Union[str, ObjectId]): Representation id. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -943,7 +972,7 @@ def get_representation_by_id(project_name, representation_id, fields=None): if representation_id is not None: query_filter["_id"] = _convert_id(representation_id) - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -956,8 +985,8 @@ def get_representation_by_name( Args: project_name (str): Name of project where to look for queried entities. representation_name (str): Representation name. - version_id (str|ObjectId): Id of parent version entity. 
- fields (list[str]): Fields that should be returned. All fields are + version_id (Union[str, ObjectId]): Id of parent version entity. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -976,7 +1005,7 @@ def get_representation_by_name( "parent": version_id } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -1039,7 +1068,7 @@ def _get_representations( return [] query_filter["$or"] = or_query - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -1061,18 +1090,18 @@ def get_representations( Args: project_name (str): Name of project where to look for queried entities. - representation_ids (list[str|ObjectId]): Representation ids used as - filter. Filter ignored if 'None' is passed. - representation_names (list[str]): Representations names used as filter. - Filter ignored if 'None' is passed. - version_ids (list[str]): Subset ids used as parent filter. Filter + representation_ids (Iterable[Union[str, ObjectId]]): Representation ids + used as filter. Filter ignored if 'None' is passed. + representation_names (Iterable[str]): Representations names used + as filter. Filter ignored if 'None' is passed. + version_ids (Iterable[str]): Subset ids used as parent filter. Filter ignored if 'None' is passed. - extensions (list[str]): Filter by extension of main representation + extensions (Iterable[str]): Filter by extension of main representation file (without dot). names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering using version ids and list of names under the version. archived (bool): Output will also contain archived representations. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -1107,17 +1136,17 @@ def get_archived_representations( Args: project_name (str): Name of project where to look for queried entities. - representation_ids (list[str|ObjectId]): Representation ids used as - filter. Filter ignored if 'None' is passed. - representation_names (list[str]): Representations names used as filter. - Filter ignored if 'None' is passed. - version_ids (list[str]): Subset ids used as parent filter. Filter + representation_ids (Iterable[Union[str, ObjectId]]): Representation ids + used as filter. Filter ignored if 'None' is passed. + representation_names (Iterable[str]): Representations names used + as filter. Filter ignored if 'None' is passed. + version_ids (Iterable[str]): Subset ids used as parent filter. Filter ignored if 'None' is passed. - extensions (list[str]): Filter by extension of main representation + extensions (Iterable[str]): Filter by extension of main representation file (without dot). - names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering + names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering using version ids and list of names under the version. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -1145,7 +1174,7 @@ def get_representations_parents(project_name, representations): Args: project_name (str): Name of project where to look for queried entities. 
- representations (list[dict]): Representation entities with at least + representations (List[dict]): Representation entities with at least '_id' and 'parent' keys. Returns: @@ -1238,7 +1267,7 @@ def get_thumbnail_id_from_source(project_name, src_type, src_id): Args: project_name (str): Name of project where to look for queried entities. src_type (str): Type of source entity ('asset', 'version'). - src_id (str|objectId): Id of source entity. + src_id (Union[str, ObjectId]): Id of source entity. Returns: ObjectId: Thumbnail id assigned to entity. @@ -1250,7 +1279,7 @@ def get_thumbnail_id_from_source(project_name, src_type, src_id): query_filter = {"_id": _convert_id(src_id)} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) src_doc = conn.find_one(query_filter, {"data.thumbnail_id"}) if src_doc: return src_doc.get("data", {}).get("thumbnail_id") @@ -1265,8 +1294,9 @@ def get_thumbnails(project_name, thumbnail_ids, fields=None): Args: project_name (str): Name of project where to look for queried entities. - thumbnail_ids (list[str|ObjectId]): Ids of thumbnail entities. - fields (list[str]): Fields that should be returned. All fields are + thumbnail_ids (Iterable[Union[str, ObjectId]]): Ids of thumbnail + entities. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -1282,7 +1312,7 @@ def get_thumbnails(project_name, thumbnail_ids, fields=None): "type": "thumbnail", "_id": {"$in": thumbnail_ids} } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find(query_filter, _prepare_fields(fields)) @@ -1291,8 +1321,8 @@ def get_thumbnail(project_name, thumbnail_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. - thumbnail_id (str|ObjectId): Id of thumbnail entity. - fields (list[str]): Fields that should be returned. All fields are + thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity. + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. Returns: @@ -1303,7 +1333,7 @@ def get_thumbnail(project_name, thumbnail_id, fields=None): if not thumbnail_id: return None query_filter = {"type": "thumbnail", "_id": _convert_id(thumbnail_id)} - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -1319,9 +1349,9 @@ def get_workfile_info( Args: project_name (str): Name of project where to look for queried entities. - asset_id (str|ObjectId): Id of asset entity. + asset_id (Union[str, ObjectId]): Id of asset entity. task_name (str): Task name on asset. - fields (list[str]): Fields that should be returned. All fields are + fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. 
""" @@ -1334,7 +1364,7 @@ def get_workfile_info( "task_name": task_name, "filename": filename } - conn = _get_project_connection(project_name) + conn = get_project_connection(project_name) return conn.find_one(query_filter, _prepare_fields(fields)) @@ -1348,622 +1378,18 @@ def get_workfile_info( - openpype/hosts/maya/api/shader_definition_editor.py - openpype/hosts/maya/plugins/publish/validate_model_name.py -## Global launch hooks -- openpype/hooks/pre_global_host_data.py - Query: - - project - - asset - -## Global load plugins -- openpype/plugins/load/delete_old_versions.py - Query: - - versions - - representations -- openpype/plugins/load/delivery.py - Query: - - representations - ## Global publish plugins -- openpype/plugins/publish/collect_avalon_entities.py - Query: - - asset - - project -- openpype/plugins/publish/collect_anatomy_instance_data.py - Query: - - assets - - subsets - - last version -- openpype/plugins/publish/collect_scene_loaded_versions.py - Query: - - representations - openpype/plugins/publish/extract_hierarchy_avalon.py - Query: - - asset - - assets - - project Create: - asset Update: - asset -- openpype/plugins/publish/integrate_hero_version.py - Query: - - version - - hero version - - representations -- openpype/plugins/publish/integrate_new.py - Query: - - asset - - subset - - version - - representations -- openpype/plugins/publish/integrate_thumbnail.py - Query: - - version -- openpype/plugins/publish/validate_editorial_asset_name.py - Query: - - assets ## Lib -- openpype/lib/applications.py - Query: - - project - - asset - openpype/lib/avalon_context.py - Query: - - project - - asset - - linked assets (new function get_linked_assets?) - - subset - - subsets - - version - - versions - - last version - - representations - - linked representations (new function get_linked_ids_for_representations) Update: - workfile data -- openpype/lib/plugin_tools.py - Query: - - asset - openpype/lib/project_backpack.py - Query: - - project - - everything from mongo Update: - project -- openpype/lib/usdlib.py - Query: - - project - - asset - -## Pipeline -- openpype/pipeline/load/utils.py - Query: - - project - - assets - - subsets - - version - - versions - - representation - - representations -- openpype/pipeline/mongodb.py - Query: - - project -- openpype/pipeline/thumbnail.py - Query: - - project - -## Hosts -### Aftereffects -- openpype/hosts/aftereffects/plugins/create/workfile_creator.py - Query: - - asset - -### Blender -- openpype/hosts/blender/api/pipeline.py - Query: - - asset -- openpype/hosts/blender/plugins/publish/extract_layout.py - Query: - - representation - -### Celaction -- openpype/hosts/celaction/plugins/publish/collect_audio.py - Query: - - subsets - - last versions - - representations - -### Fusion -- openpype/hosts/fusion/api/lib.py - Query: - - asset - - subset - - version - - representation -- openpype/hosts/fusion/plugins/load/load_sequence.py - Query: - - version -- openpype/hosts/fusion/scripts/fusion_switch_shot.py - Query: - - project - - asset - - versions -- openpype/hosts/fusion/utility_scripts/switch_ui.py - Query: - - assets - -### Harmony -- openpype/hosts/harmony/api/pipeline.py - Query: - - representation - -### Hiero -- openpype/hosts/hiero/api/lib.py - Query: - - project - - version - - versions - - representation -- openpype/hosts/hiero/api/tags.py - Query: - - task types - - assets -- openpype/hosts/hiero/plugins/load/load_clip.py - Query: - - version - - versions -- 
openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py - Query: - - assets - -### Houdini -- openpype/hosts/houdini/api/lib.py - Query: - - asset -- openpype/hosts/houdini/api/usd.py - Query: - - asset -- openpype/hosts/houdini/plugins/create/create_hda.py - Query: - - asset - - subsets -- openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py - Query: - - asset - - subset -- openpype/hosts/houdini/plugins/publish/extract_usd_layered.py - Query: - - asset - - subset - - version - - representation -- openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py - Query: - - asset - - subset -- openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py - Query: - - project - - asset - -### Maya -- openpype/hosts/maya/api/action.py - Query: - - asset -- openpype/hosts/maya/api/commands.py - Query: - - asset - - project -- openpype/hosts/maya/api/lib.py - Query: - - project - - asset - - subset - - subsets - - version - - representation -- openpype/hosts/maya/api/setdress.py - Query: - - version - - representation -- openpype/hosts/maya/plugins/inventory/import_modelrender.py - Query: - - representation -- openpype/hosts/maya/plugins/load/load_audio.py - Query: - - asset - - subset - - version -- openpype/hosts/maya/plugins/load/load_image_plane.py - Query: - - asset - - subset - - version -- openpype/hosts/maya/plugins/load/load_look.py - Query: - - representation -- openpype/hosts/maya/plugins/load/load_vrayproxy.py - Query: - - representation -- openpype/hosts/maya/plugins/load/load_yeti_cache.py - Query: - - representation -- openpype/hosts/maya/plugins/publish/collect_review.py - Query: - - subsets -- openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py - Query: - - assets -- openpype/hosts/maya/plugins/publish/validate_node_ids_related.py - Query: - - asset -- openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py - Query: - - asset - - subset - -### Nuke -- openpype/hosts/nuke/api/command.py - Query: - - project - - asset -- openpype/hosts/nuke/api/lib.py - Query: - - project - - asset - - version - - versions - - representation -- openpype/hosts/nuke/plugins/load/load_backdrop.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_camera_abc.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_clip.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_effects_ip.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_effects.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_gizmo_ip.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_gizmo.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_image.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_model.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_script_precomp.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/publish/collect_reads.py - Query: - - asset -- openpype/hosts/nuke/plugins/publish/precollect_instances.py - Query: - - asset -- openpype/hosts/nuke/plugins/publish/precollect_writes.py - Query: - - representation -- openpype/hosts/nuke/plugins/publish/validate_script.py - Query: - - asset - - project - -### Photoshop -- openpype/hosts/photoshop/plugins/create/workfile_creator.py - Query: - - asset - -### Resolve -- openpype/hosts/resolve/plugins/load/load_clip.py - Query: - - version - - versions - 
-### Standalone publisher -- openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py - Query: - - asset -- openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py - Query: - - assets -- openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py - Query: - - project - - asset -- openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py - Query: - - assets - -### TVPaint -- openpype/hosts/tvpaint/api/pipeline.py - Query: - - project - - asset -- openpype/hosts/tvpaint/plugins/load/load_workfile.py - Query: - - project - - asset -- openpype/hosts/tvpaint/plugins/publish/collect_instances.py - Query: - - asset -- openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py - Query: - - asset -- openpype/hosts/tvpaint/plugins/publish/collect_workfile.py - Query: - - asset - -### Unreal -- openpype/hosts/unreal/plugins/load/load_camera.py - Query: - - asset - - assets -- openpype/hosts/unreal/plugins/load/load_layout.py - Query: - - asset - - assets -- openpype/hosts/unreal/plugins/publish/extract_layout.py - Query: - - representation - -### Webpublisher -- openpype/hosts/webpublisher/webserver_service/webpublish_routes.py - Query: - - assets -- openpype/hosts/webpublisher/plugins/publish/collect_published_files.py - Query: - - last versions - -## Tools -openpype/tools/assetlinks/widgets.py -- SimpleLinkView - Query: - - get_versions - - get_subsets - - get_assets - - get_output_link_versions - -openpype/tools/creator/window.py -- CreatorWindow - Query: - - get_asset_by_name - - get_subsets - -openpype/tools/launcher/models.py -- LauncherModel - Query: - - get_project - - get_assets - -openpype/tools/libraryloader/app.py -- LibraryLoaderWindow - Query: - - get_project - -openpype/tools/loader/app.py -- LoaderWindow - Query: - - get_project -- show - Query: - - get_projects - -openpype/tools/loader/model.py -- SubsetsModel - Query: - - get_assets - - get_subsets - - get_last_versions - - get_versions - - get_hero_versions - - get_version_by_name -- RepresentationModel - Query: - - get_representations - - sync server specific queries (separated into multiple functions?) 
- - NOT REPLACED - -openpype/tools/loader/widgets.py -- FamilyModel - Query: - - get_subset_families -- VersionTextEdit - Query: - - get_subset_by_id - - get_version_by_id -- SubsetWidget - Query: - - get_subsets - - get_representations - Update: - - Subset groups (combination of asset id and subset names) -- RepresentationWidget - Query: - - get_subsets - - get_versions - - get_representations -- ThumbnailWidget - Query: - - get_thumbnail_id_from_source - - get_thumbnail - -openpype/tools/mayalookassigner/app.py -- MayaLookAssignerWindow - Query: - - get_last_version_by_subset_id - -openpype/tools/mayalookassigner/commands.py -- create_items_from_nodes - Query: - - get_asset_by_id - -openpype/tools/mayalookassigner/vray_proxies.py -- get_look_relationships - Query: - - get_representation_by_name -- load_look - Query: - - get_representation_by_name -- vrayproxy_assign_look - Query: - - get_last_version_by_subset_name - -openpype/tools/project_manager/project_manager/model.py -- HierarchyModel - Query: - - get_asset_ids_with_subsets - - get_project - - get_assets - -openpype/tools/project_manager/project_manager/view.py -- ProjectDocCache - Query: - - get_project - -openpype/tools/project_manager/project_manager/widgets.py -- CreateProjectDialog - Query: - - get_projects - -openpype/tools/publisher/widgets/create_dialog.py -- CreateDialog - Query: - - get_asset_by_name - - get_subsets - -openpype/tools/publisher/control.py -- AssetDocsCache - Query: - - get_assets - -openpype/tools/sceneinventory/model.py -- InventoryModel - Query: - - get_asset_by_id - - get_subset_by_id - - get_version_by_id - - get_last_version_by_subset_id - - get_representation - -openpype/tools/sceneinventory/switch_dialog.py -- SwitchAssetDialog - Query: - - get_asset_by_name - - get_assets - - get_subset_by_name - - get_subsets - - get_versions - - get_hero_versions - - get_last_versions - - get_representations - -openpype/tools/sceneinventory/view.py -- SceneInventoryView - Query: - - get_version_by_id - - get_versions - - get_hero_versions - - get_representation_by_id - - get_representations - -openpype/tools/standalonepublish/widgets/model_asset.py -- AssetModel - Query: - - get_assets - -openpype/tools/standalonepublish/widgets/widget_asset.py -- AssetWidget - Query: - - get_project - - get_asset_by_id - -openpype/tools/standalonepublish/widgets/widget_family.py -- FamilyWidget - Query: - - get_asset_by_name - - get_subset_by_name - - get_subsets - - get_last_version_by_subset_id - -openpype/tools/standalonepublish/app.py -- Window - Query: - - get_asset_by_id - -openpype/tools/texture_copy/app.py -- TextureCopy - Query: - - get_project - - get_asset_by_name - -openpype/tools/workfiles/files_widget.py -- FilesWidget - Query: - - get_asset_by_id - -openpype/tools/workfiles/model.py -- PublishFilesModel - Query: - - get_subsets - - get_versions - - get_representations - -openpype/tools/workfiles/save_as_dialog.py -- build_workfile_data - Query: - - get_project - - get_asset_by_name - -openpype/tools/workfiles/window.py -- Window - Query: - - get_asset_by_id - - get_asset_by_name - -openpype/tools/utils/assets_widget.py -- AssetModel - Query: - - get_project - - get_assets - -openpype/tools/utils/delegates.py -- VersionDelegate - Query: - - get_versions - - get_hero_versions - -openpype/tools/utils/lib.py -- GroupsConfig - Query: - - get_project -- FamilyConfigCache - Query: - - get_asset_by_name - -openpype/tools/utils/tasks_widget.py -- TasksModel - Query: - - get_project - - get_asset_by_id """ diff --git 
a/openpype/client/mongo.py b/openpype/client/mongo.py
new file mode 100644
index 0000000000..72acbc5476
--- /dev/null
+++ b/openpype/client/mongo.py
@@ -0,0 +1,235 @@
+import os
+import sys
+import time
+import logging
+import pymongo
+import certifi
+
+if sys.version_info[0] == 2:
+    from urlparse import urlparse, parse_qs
+else:
+    from urllib.parse import urlparse, parse_qs
+
+
+class MongoEnvNotSet(Exception):
+    pass
+
+
+def _decompose_url(url):
+    """Decompose mongo url to basic components.
+
+    Used for creation of MongoHandler which expects mongo url components as
+    separate kwargs. The components are not used in the end, as the connection
+    is set directly; they are just dummy components to pass MongoHandler
+    validation.
+    """
+
+    # Use first url from passed url
+    # - this is because it is possible to pass multiple urls for multiple
+    #   replica sets which would crash on urlparse otherwise
+    # - please don't use comma in username or password
+    url = url.split(",")[0]
+    components = {
+        "scheme": None,
+        "host": None,
+        "port": None,
+        "username": None,
+        "password": None,
+        "auth_db": None
+    }
+
+    result = urlparse(url)
+    if result.scheme is None:
+        _url = "mongodb://{}".format(url)
+        result = urlparse(_url)
+
+    components["scheme"] = result.scheme
+    components["host"] = result.hostname
+    try:
+        components["port"] = result.port
+    except ValueError:
+        raise RuntimeError("invalid port specified")
+    components["username"] = result.username
+    components["password"] = result.password
+
+    try:
+        components["auth_db"] = parse_qs(result.query)['authSource'][0]
+    except KeyError:
+        # no auth db provided, mongo will use the one we are connecting to
+        pass
+
+    return components
+
+
+def get_default_components():
+    mongo_url = os.environ.get("OPENPYPE_MONGO")
+    if mongo_url is None:
+        raise MongoEnvNotSet(
+            "URL for Mongo logging connection is not set."
+        )
+    return _decompose_url(mongo_url)
+
+
+def should_add_certificate_path_to_mongo_url(mongo_url):
+    """Check if should add ca certificate to mongo url.
+
+    Since 30.9.2021 cloud mongo requires newer certificates that are not
+    available on most workstations. This adds the path to a certifi
+    certificate which is valid for it. To add the certificate path the url
+    must have the scheme 'mongodb+srv' or have 'ssl=true' or 'tls=true' in
+    its query.
+    """
+
+    parsed = urlparse(mongo_url)
+    query = parse_qs(parsed.query)
+    lowered_query_keys = set(key.lower() for key in query.keys())
+    add_certificate = False
+    # Check if url 'ssl' or 'tls' are set to 'true'
+    for key in ("ssl", "tls"):
+        if key in query and "true" in query[key]:
+            add_certificate = True
+            break
+
+    # Check if url contains 'mongodb+srv'
+    if not add_certificate and parsed.scheme == "mongodb+srv":
+        add_certificate = True
+
+    # Check if url does already contain certificate path
+    if add_certificate and "tlscafile" in lowered_query_keys:
+        add_certificate = False
+
+    return add_certificate
+
+
+def validate_mongo_connection(mongo_uri):
+    """Check if provided mongodb URL is valid.
+
+    Args:
+        mongo_uri (str): URL to validate.
+
+    Raises:
+        ValueError: When port in mongo uri is not valid.
+        pymongo.errors.InvalidURI: If passed mongo url is invalid.
+        pymongo.errors.ServerSelectionTimeoutError: If the connection timed
+            out, so the mongo server probably could not be reached.

+    """
+
+    client = OpenPypeMongoConnection.create_connection(
+        mongo_uri, retry_attempts=1
+    )
+    client.close()
+
+
+class OpenPypeMongoConnection:
+    """Singleton MongoDB connection.
+
+    Keeps MongoDB connections by url.
+ """ + + mongo_clients = {} + log = logging.getLogger("OpenPypeMongoConnection") + + @staticmethod + def get_default_mongo_url(): + return os.environ["OPENPYPE_MONGO"] + + @classmethod + def get_mongo_client(cls, mongo_url=None): + if mongo_url is None: + mongo_url = cls.get_default_mongo_url() + + connection = cls.mongo_clients.get(mongo_url) + if connection: + # Naive validation of existing connection + try: + connection.server_info() + with connection.start_session(): + pass + except Exception: + connection = None + + if not connection: + cls.log.debug("Creating mongo connection to {}".format(mongo_url)) + connection = cls.create_connection(mongo_url) + cls.mongo_clients[mongo_url] = connection + + return connection + + @classmethod + def create_connection(cls, mongo_url, timeout=None, retry_attempts=None): + parsed = urlparse(mongo_url) + # Force validation of scheme + if parsed.scheme not in ["mongodb", "mongodb+srv"]: + raise pymongo.errors.InvalidURI(( + "Invalid URI scheme:" + " URI must begin with 'mongodb://' or 'mongodb+srv://'" + )) + + if timeout is None: + timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000) + + kwargs = { + "serverSelectionTimeoutMS": timeout + } + if should_add_certificate_path_to_mongo_url(mongo_url): + kwargs["ssl_ca_certs"] = certifi.where() + + mongo_client = pymongo.MongoClient(mongo_url, **kwargs) + + if retry_attempts is None: + retry_attempts = 3 + + elif not retry_attempts: + retry_attempts = 1 + + last_exc = None + valid = False + t1 = time.time() + for attempt in range(1, retry_attempts + 1): + try: + mongo_client.server_info() + with mongo_client.start_session(): + pass + valid = True + break + + except Exception as exc: + last_exc = exc + if attempt < retry_attempts: + cls.log.warning( + "Attempt {} failed. Retrying... ".format(attempt) + ) + time.sleep(1) + + if not valid: + raise last_exc + + cls.log.info("Connected to {}, delay {:.3f}s".format( + mongo_url, time.time() - t1 + )) + return mongo_client + + +def get_project_database(): + db_name = os.environ.get("AVALON_DB") or "avalon" + return OpenPypeMongoConnection.get_mongo_client()[db_name] + + +def get_project_connection(project_name): + """Direct access to mongo collection. + + We're trying to avoid using direct access to mongo. This should be used + only for Create, Update and Remove operations until there are implemented + api calls for that. + + Args: + project_name(str): Project name for which collection should be + returned. + + Returns: + pymongo.Collection: Collection realated to passed project. 
+ """ + + if not project_name: + raise ValueError("Invalid project name {}".format(str(project_name))) + return get_project_database()[project_name] diff --git a/openpype/client/operations.py b/openpype/client/operations.py new file mode 100644 index 0000000000..c4b95bf696 --- /dev/null +++ b/openpype/client/operations.py @@ -0,0 +1,634 @@ +import uuid +import copy +import collections +from abc import ABCMeta, abstractmethod, abstractproperty + +import six +from bson.objectid import ObjectId +from pymongo import DeleteOne, InsertOne, UpdateOne + +from .mongo import get_project_connection + +REMOVED_VALUE = object() + +CURRENT_PROJECT_SCHEMA = "openpype:project-3.0" +CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0" +CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0" +CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0" +CURRENT_VERSION_SCHEMA = "openpype:version-3.0" +CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0" +CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0" + + +def _create_or_convert_to_mongo_id(mongo_id): + if mongo_id is None: + return ObjectId() + return ObjectId(mongo_id) + + +def new_project_document( + project_name, project_code, config, data=None, entity_id=None +): + """Create skeleton data of project document. + + Args: + project_name (str): Name of project. Used as identifier of a project. + project_code (str): Shorter version of projet without spaces and + special characters (in most of cases). Should be also considered + as unique name across projects. + config (Dic[str, Any]): Project config consist of roots, templates, + applications and other project Anatomy related data. + data (Dict[str, Any]): Project data with information about it's + attributes (e.g. 'fps' etc.) or integration specific keys. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of project document. + """ + + if data is None: + data = {} + + data["code"] = project_code + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "name": project_name, + "type": CURRENT_PROJECT_SCHEMA, + "entity_data": data, + "config": config + } + + +def new_asset_document( + name, project_id, parent_id, parents, data=None, entity_id=None +): + """Create skeleton data of asset document. + + Args: + name (str): Is considered as unique identifier of asset in project. + project_id (Union[str, ObjectId]): Id of project doument. + parent_id (Union[str, ObjectId]): Id of parent asset. + parents (List[str]): List of parent assets names. + data (Dict[str, Any]): Asset document data. Empty dictionary is used + if not passed. Value of 'parent_id' is used to fill 'visualParent'. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of asset document. + """ + + if data is None: + data = {} + if parent_id is not None: + parent_id = ObjectId(parent_id) + data["visualParent"] = parent_id + data["parents"] = parents + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "asset", + "name": name, + "parent": ObjectId(project_id), + "data": data, + "schema": CURRENT_ASSET_DOC_SCHEMA + } + + +def new_subset_document(name, family, asset_id, data=None, entity_id=None): + """Create skeleton data of subset document. + + Args: + name (str): Is considered as unique identifier of subset under asset. + family (str): Subset's family. + asset_id (Union[str, ObjectId]): Id of parent asset. + data (Dict[str, Any]): Subset document data. 
Empty dictionary is used + if not passed. Value of 'family' is used to fill 'family'. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of subset document. + """ + + if data is None: + data = {} + data["family"] = family + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_SUBSET_SCHEMA, + "type": "subset", + "name": name, + "data": data, + "parent": asset_id + } + + +def new_version_doc(version, subset_id, data=None, entity_id=None): + """Create skeleton data of version document. + + Args: + version (int): Is considered as unique identifier of version + under subset. + subset_id (Union[str, ObjectId]): Id of parent subset. + data (Dict[str, Any]): Version document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of version document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_VERSION_SCHEMA, + "type": "version", + "name": int(version), + "parent": subset_id, + "data": data + } + + +def new_representation_doc( + name, version_id, context, data=None, entity_id=None +): + """Create skeleton data of asset document. + + Args: + version (int): Is considered as unique identifier of version + under subset. + version_id (Union[str, ObjectId]): Id of parent version. + context (Dict[str, Any]): Representation context used for fill template + of to query. + data (Dict[str, Any]): Representation document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of version document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_REPRESENTATION_SCHEMA, + "type": "representation", + "parent": version_id, + "name": name, + "data": data, + + # Imprint shortcut to context for performance reasons. + "context": context + } + + +def new_workfile_info_doc( + filename, asset_id, task_name, files, data=None, entity_id=None +): + """Create skeleton data of workfile info document. + + Workfile document is at this moment used primarily for artist notes. + + Args: + filename (str): Filename of workfile. + asset_id (Union[str, ObjectId]): Id of asset under which workfile live. + task_name (str): Task under which was workfile created. + files (List[str]): List of rootless filepaths related to workfile. + data (Dict[str, Any]): Additional metadata. + + Returns: + Dict[str, Any]: Skeleton of workfile info document. + """ + + if not data: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "workfile", + "parent": ObjectId(asset_id), + "task_name": task_name, + "filename": filename, + "data": data, + "files": files + } + + +def _prepare_update_data(old_doc, new_doc, replace): + changes = {} + for key, value in new_doc.items(): + if key not in old_doc or value != old_doc[key]: + changes[key] = value + + if replace: + for key in old_doc.keys(): + if key not in new_doc: + changes[key] = REMOVED_VALUE + return changes + + +def prepare_subset_update_data(old_doc, new_doc, replace=True): + """Compare two subset documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. 
+ """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_version_update_data(old_doc, new_doc, replace=True): + """Compare two version documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_representation_update_data(old_doc, new_doc, replace=True): + """Compare two representation documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): + """Compare two workfile info documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +@six.add_metaclass(ABCMeta) +class AbstractOperation(object): + """Base operation class. + + Opration represent a call into database. The call can create, change or + remove data. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + """ + + def __init__(self, project_name, entity_type): + self._project_name = project_name + self._entity_type = entity_type + self._id = str(uuid.uuid4()) + + @property + def project_name(self): + return self._project_name + + @property + def id(self): + """Identifier of operation.""" + + return self._id + + @property + def entity_type(self): + return self._entity_type + + @abstractproperty + def operation_name(self): + """Stringified type of operation.""" + + pass + + @abstractmethod + def to_mongo_operation(self): + """Convert operation to Mongo batch operation.""" + + pass + + def to_data(self): + """Convert opration to data that can be converted to json or others. + + Warning: + Current state returns ObjectId objects which cannot be parsed by + json. + + Returns: + Dict[str, Any]: Description of operation. + """ + + return { + "id": self._id, + "entity_type": self.entity_type, + "project_name": self.project_name, + "operation": self.operation_name + } + + +class CreateOperation(AbstractOperation): + """Opeartion to create an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + data (Dict[str, Any]): Data of entity that will be created. 
+ """ + + operation_name = "create" + + def __init__(self, project_name, entity_type, data): + super(CreateOperation, self).__init__(project_name, entity_type) + + if not data: + data = {} + else: + data = copy.deepcopy(dict(data)) + + if "_id" not in data: + data["_id"] = ObjectId() + else: + data["_id"] = ObjectId(data["_id"]) + + self._entity_id = data["_id"] + self._data = data + + def __setitem__(self, key, value): + self.set_value(key, value) + + def __getitem__(self, key): + return self.data[key] + + def set_value(self, key, value): + self.data[key] = value + + def get(self, key, *args, **kwargs): + return self.data.get(key, *args, **kwargs) + + @property + def entity_id(self): + return self._entity_id + + @property + def data(self): + return self._data + + def to_mongo_operation(self): + return InsertOne(copy.deepcopy(self._data)) + + def to_data(self): + output = super(CreateOperation, self).to_data() + output["data"] = copy.deepcopy(self.data) + return output + + +class UpdateOperation(AbstractOperation): + """Opeartion to update an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Identifier of an entity. + update_data (Dict[str, Any]): Key -> value changes that will be set in + database. If value is set to 'REMOVED_VALUE' the key will be + removed. Only first level of dictionary is checked (on purpose). + """ + + operation_name = "update" + + def __init__(self, project_name, entity_type, entity_id, update_data): + super(UpdateOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + self._update_data = update_data + + @property + def entity_id(self): + return self._entity_id + + @property + def update_data(self): + return self._update_data + + def to_mongo_operation(self): + unset_data = {} + set_data = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + unset_data[key] = value + else: + set_data[key] = value + + op_data = {} + if unset_data: + op_data["$unset"] = unset_data + if set_data: + op_data["$set"] = set_data + + if not op_data: + return None + + return UpdateOne( + {"_id": self.entity_id}, + op_data + ) + + def to_data(self): + changes = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + value = None + changes[key] = value + + output = super(UpdateOperation, self).to_data() + output.update({ + "entity_id": self.entity_id, + "changes": changes + }) + return output + + +class DeleteOperation(AbstractOperation): + """Opeartion to delete an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Entity id that will be removed. + """ + + operation_name = "delete" + + def __init__(self, project_name, entity_type, entity_id): + super(DeleteOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + + @property + def entity_id(self): + return self._entity_id + + def to_mongo_operation(self): + return DeleteOne({"_id": self.entity_id}) + + def to_data(self): + output = super(DeleteOperation, self).to_data() + output["entity_id"] = self.entity_id + return output + + +class OperationsSession(object): + """Session storing operations that should happen in an order. 
+ + At this moment does not handle anything special can be sonsidered as + stupid list of operations that will happen after each other. If creation + of same entity is there multiple times it's handled in any way and document + values are not validated. + + All operations must be related to single project. + + Args: + project_name (str): Project name to which are operations related. + """ + + def __init__(self): + self._operations = [] + + def add(self, operation): + """Add operation to be processed. + + Args: + operation (BaseOperation): Operation that should be processed. + """ + if not isinstance( + operation, + (CreateOperation, UpdateOperation, DeleteOperation) + ): + raise TypeError("Expected Operation object got {}".format( + str(type(operation)) + )) + + self._operations.append(operation) + + def append(self, operation): + """Add operation to be processed. + + Args: + operation (BaseOperation): Operation that should be processed. + """ + + self.add(operation) + + def extend(self, operations): + """Add operations to be processed. + + Args: + operations (List[BaseOperation]): Operations that should be + processed. + """ + + for operation in operations: + self.add(operation) + + def remove(self, operation): + """Remove operation.""" + + self._operations.remove(operation) + + def clear(self): + """Clear all registered operations.""" + + self._operations = [] + + def to_data(self): + return [ + operation.to_data() + for operation in self._operations + ] + + def commit(self): + """Commit session operations.""" + + operations, self._operations = self._operations, [] + if not operations: + return + + operations_by_project = collections.defaultdict(list) + for operation in operations: + operations_by_project[operation.project_name].append(operation) + + for project_name, operations in operations_by_project.items(): + bulk_writes = [] + for operation in operations: + mongo_op = operation.to_mongo_operation() + if mongo_op is not None: + bulk_writes.append(mongo_op) + + if bulk_writes: + collection = get_project_connection(project_name) + collection.bulk_write(bulk_writes) + + def create_entity(self, project_name, entity_type, data): + """Fast access to 'CreateOperation'. + + Returns: + CreateOperation: Object of update operation. + """ + + operation = CreateOperation(project_name, entity_type, data) + self.add(operation) + return operation + + def update_entity(self, project_name, entity_type, entity_id, update_data): + """Fast access to 'UpdateOperation'. + + Returns: + UpdateOperation: Object of update operation. + """ + + operation = UpdateOperation( + project_name, entity_type, entity_id, update_data + ) + self.add(operation) + return operation + + def delete_entity(self, project_name, entity_type, entity_id): + """Fast access to 'DeleteOperation'. + + Returns: + DeleteOperation: Object of delete operation. 
+ """ + + operation = DeleteOperation(project_name, entity_type, entity_id) + self.add(operation) + return operation diff --git a/openpype/hooks/pre_copy_template_workfile.py b/openpype/hooks/pre_copy_template_workfile.py index dffac22ee2..70c549919f 100644 --- a/openpype/hooks/pre_copy_template_workfile.py +++ b/openpype/hooks/pre_copy_template_workfile.py @@ -1,11 +1,11 @@ import os import shutil -from openpype.lib import ( - PreLaunchHook, - get_custom_workfile_template_by_context, +from openpype.lib import PreLaunchHook +from openpype.settings import get_project_settings +from openpype.pipeline.workfile import ( + get_custom_workfile_template, get_custom_workfile_template_by_string_context ) -from openpype.settings import get_project_settings class CopyTemplateWorkfile(PreLaunchHook): @@ -54,41 +54,22 @@ class CopyTemplateWorkfile(PreLaunchHook): project_name = self.data["project_name"] asset_name = self.data["asset_name"] task_name = self.data["task_name"] + host_name = self.application.host_name project_settings = get_project_settings(project_name) - host_settings = project_settings[self.application.host_name] - - workfile_builder_settings = host_settings.get("workfile_builder") - if not workfile_builder_settings: - # TODO remove warning when deprecated - self.log.warning(( - "Seems like old version of settings is used." - " Can't access custom templates in host \"{}\"." - ).format(self.application.full_label)) - return - - if not workfile_builder_settings["create_first_version"]: - self.log.info(( - "Project \"{}\" has turned off to create first workfile for" - " application \"{}\"" - ).format(project_name, self.application.full_label)) - return - - # Backwards compatibility - template_profiles = workfile_builder_settings.get("custom_templates") - if not template_profiles: - self.log.info( - "Custom templates are not filled. Skipping template copy." - ) - return project_doc = self.data.get("project_doc") asset_doc = self.data.get("asset_doc") anatomy = self.data.get("anatomy") if project_doc and asset_doc: self.log.debug("Started filtering of custom template paths.") - template_path = get_custom_workfile_template_by_context( - template_profiles, project_doc, asset_doc, task_name, anatomy + template_path = get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + project_settings ) else: @@ -96,10 +77,13 @@ class CopyTemplateWorkfile(PreLaunchHook): "Global data collection probably did not execute." " Using backup solution." 
)) - dbcon = self.data.get("dbcon") template_path = get_custom_workfile_template_by_string_context( - template_profiles, project_name, asset_name, task_name, - dbcon, anatomy + project_name, + asset_name, + task_name, + host_name, + anatomy, + project_settings ) if not template_path: diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py index 6577e37cbe..8a178915fb 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/openpype/hooks/pre_global_host_data.py @@ -1,3 +1,4 @@ +from openpype.client import get_project, get_asset_by_name from openpype.lib import ( PreLaunchHook, EnvironmentPrepData, @@ -69,7 +70,7 @@ class GlobalHostDataHook(PreLaunchHook): self.data["dbcon"] = dbcon # Project document - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) self.data["project_doc"] = project_doc asset_name = self.data.get("asset_name") @@ -79,8 +80,5 @@ class GlobalHostDataHook(PreLaunchHook): ) return - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) self.data["asset_doc"] = asset_doc diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index 0bc47665b0..c13c22ced5 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -1,5 +1,4 @@ import os -import sys from Qt import QtWidgets @@ -15,6 +14,7 @@ from openpype.pipeline import ( AVALON_CONTAINER_ID, legacy_io, ) +from openpype.pipeline.load import any_outdated_containers import openpype.hosts.aftereffects from openpype.lib import register_event_callback @@ -136,7 +136,7 @@ def ls(): def check_inventory(): """Checks loaded containers if they are of highest version""" - if not lib.any_outdated(): + if not any_outdated_containers(): return # Warn about outdated containers. 
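The hunks above show the PR's recurring migration pattern: ad-hoc `dbcon.find_one` Mongo queries are replaced with the typed helpers from `openpype.client`. A minimal sketch of that pattern, assuming only the helper signatures visible in this diff (`get_project(project_name)`, `get_asset_by_name(project_name, asset_name)`); the wrapper function itself is hypothetical, not part of the PR:

from openpype.client import get_project, get_asset_by_name


def collect_context_docs(data):
    # Hypothetical mirror of GlobalHostDataHook.execute above: resolve the
    # project and asset documents through openpype.client helpers instead
    # of querying the 'dbcon' connection directly.
    project_name = data["project_name"]

    # Before: project_doc = dbcon.find_one({"type": "project"})
    data["project_doc"] = get_project(project_name)

    asset_name = data.get("asset_name")
    if asset_name:
        # Before: dbcon.find_one({"type": "asset", "name": asset_name})
        data["asset_doc"] = get_asset_by_name(project_name, asset_name)
    return data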
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index bb199a61f7..d444ead6dc 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -102,7 +102,6 @@ class CollectAERender(publish.AbstractCollectRender): attachTo=False, setMembers='', publish=True, - renderer='aerender', name=subset_name, resolutionWidth=render_q.width, resolutionHeight=render_q.height, @@ -113,7 +112,6 @@ class CollectAERender(publish.AbstractCollectRender): frameStart=frame_start, frameEnd=frame_end, frameStep=1, - toBeRenderedOn='deadline', fps=fps, app_version=app_version, publish_attributes=inst.data.get("publish_attributes", {}), @@ -138,6 +136,9 @@ class CollectAERender(publish.AbstractCollectRender): fam = "render.farm" if fam not in instance.families: instance.families.append(fam) + instance.toBeRenderedOn = "deadline" + instance.renderer = "aerender" + instance.farm = True # to skip integrate instances.append(instance) instances_to_remove.append(inst) diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index c1b5add518..4f8410da74 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -220,12 +220,9 @@ class LaunchQtApp(bpy.types.Operator): self._app.store_window(self.bl_idname, window) self._window = window - if not isinstance( - self._window, - (QtWidgets.QMainWindow, QtWidgets.QDialog, ModuleType) - ): + if not isinstance(self._window, (QtWidgets.QWidget, ModuleType)): raise AttributeError( - "`window` should be a `QDialog or module`. Got: {}".format( + "`window` should be a `QWidget or module`. Got: {}".format( str(type(window)) ) ) @@ -249,9 +246,9 @@ class LaunchQtApp(bpy.types.Operator): self._window.setWindowFlags(on_top_flags) self._window.show() - if on_top_flags != origin_flags: - self._window.setWindowFlags(origin_flags) - self._window.show() + # if on_top_flags != origin_flags: + # self._window.setWindowFlags(origin_flags) + # self._window.show() return {'FINISHED'} diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 5db89a0ab9..992db62c75 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -136,7 +136,8 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): "tasks": { task["name"]: {"type": task["type"]} for task in self.add_tasks}, - "representations": [] + "representations": [], + "newAssetPublishing": True }) self.log.debug("__ inst_data: {}".format(pformat(inst_data))) diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py index da9553cc2a..4d45f67ded 100644 --- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -3,9 +3,9 @@ import copy from collections import OrderedDict from pprint import pformat import pyblish -from openpype.lib import get_workdir import openpype.hosts.flame.api as opfapi import openpype.pipeline as op_pipeline +from openpype.pipeline.workfile import get_workdir class IntegrateBatchGroup(pyblish.api.InstancePlugin): @@ -323,6 +323,14 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin): def _get_shot_task_dir_path(self, instance, task_data): project_doc = 
instance.data["projectEntity"] asset_entity = instance.data["assetEntity"] + anatomy = instance.context.data["anatomy"] + project_settings = instance.context.data["project_settings"] return get_workdir( - project_doc, asset_entity, task_data["name"], "flame") + project_doc, + asset_entity, + task_data["name"], + "flame", + anatomy, + project_settings=project_settings + ) diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index 52a157c56e..49ef340679 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -3,9 +3,7 @@ import re import sys import logging -# Pipeline imports from openpype.client import ( - get_project, get_asset_by_name, get_versions, ) @@ -17,13 +15,10 @@ from openpype.pipeline import ( from openpype.lib import version_up from openpype.hosts.fusion import api from openpype.hosts.fusion.api import lib -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Update Slap Comp") -self = sys.modules[__name__] -self._project = None - def _format_version_folder(folder): """Format a version folder based on the filepath @@ -212,9 +207,6 @@ def switch(asset_name, filepath=None, new=True): asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find '%s' in the database" % asset_name - # Get current project - self._project = get_project(project_name) - # Go to comp if not filepath: current_comp = api.get_current_comp() diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index 01d55db647..93f775b24b 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -14,7 +14,7 @@ from openpype.pipeline import ( legacy_io, ) from openpype.hosts.fusion import api -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Fusion Switch Shot") diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 86b5753f7e..4d71b9380d 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -4,17 +4,16 @@ import logging import pyblish.api -from openpype import lib -from openpype.client import get_representation_by_id from openpype.lib import register_event_callback from openpype.pipeline import ( - legacy_io, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.load import get_outdated_containers +from openpype.pipeline.context_tools import get_current_project_asset import openpype.hosts.harmony import openpype.hosts.harmony.api as harmony @@ -50,7 +49,9 @@ def get_asset_settings(): dict: Scene data. """ - asset_data = lib.get_asset()["data"] + + asset_doc = get_current_project_asset() + asset_data = asset_doc["data"] fps = asset_data.get("fps") frame_start = asset_data.get("frameStart") frame_end = asset_data.get("frameEnd") @@ -105,16 +106,7 @@ def check_inventory(): in Harmony. 
""" - project_name = legacy_io.active_project() - outdated_containers = [] - for container in ls(): - representation_id = container['representation'] - representation_doc = get_representation_by_id( - project_name, representation_id, fields=["parent"] - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) - + outdated_containers = get_outdated_containers() if not outdated_containers: return diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py index 9c01fe3cd8..c28a87791e 100644 --- a/openpype/hosts/harmony/plugins/load/load_background.py +++ b/openpype/hosts/harmony/plugins/load/load_background.py @@ -5,8 +5,8 @@ from openpype.pipeline import ( load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib copy_files = """function copyFile(srcFilename, dstFilename) @@ -280,9 +280,7 @@ class BackgroundLoader(load.LoaderPlugin): ) def update(self, container, representation): - path = get_representation_path(representation) - with open(path) as json_file: data = json.load(json_file) @@ -300,10 +298,9 @@ class BackgroundLoader(load.LoaderPlugin): bg_folder = os.path.dirname(path) - path = get_representation_path(representation) - print(container) + is_latest = is_representation_from_latest(representation) for layer in sorted(layers): file_to_import = [ os.path.join(bg_folder, layer).replace("\\", "/") @@ -347,7 +344,7 @@ class BackgroundLoader(load.LoaderPlugin): } %s """ % (sig, sig) - if openpype.lib.is_latest(representation): + if is_latest: harmony.send({"function": func, "args": [node, "green"]}) else: harmony.send({"function": func, "args": [node, "red"]}) diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/openpype/hosts/harmony/plugins/load/load_imagesequence.py index 18695438d5..1b64aff595 100644 --- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py +++ b/openpype/hosts/harmony/plugins/load/load_imagesequence.py @@ -10,8 +10,8 @@ from openpype.pipeline import ( load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib class ImageSequenceLoader(load.LoaderPlugin): @@ -109,7 +109,7 @@ class ImageSequenceLoader(load.LoaderPlugin): ) # Colour node. 
- if openpype.lib.is_latest(representation): + if is_representation_from_latest(representation): harmony.send( { "function": "PypeHarmony.setColor", diff --git a/openpype/hosts/harmony/plugins/load/load_template.py b/openpype/hosts/harmony/plugins/load/load_template.py index c6dc9d913b..f3c69a9104 100644 --- a/openpype/hosts/harmony/plugins/load/load_template.py +++ b/openpype/hosts/harmony/plugins/load/load_template.py @@ -10,8 +10,8 @@ from openpype.pipeline import ( load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib class TemplateLoader(load.LoaderPlugin): @@ -83,7 +83,7 @@ class TemplateLoader(load.LoaderPlugin): self_name = self.__class__.__name__ update_and_replace = False - if openpype.lib.is_latest(representation): + if is_representation_from_latest(representation): self._set_green(node) else: self._set_red(node) diff --git a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py index 4c3a6c4465..936533abd6 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py @@ -55,6 +55,10 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): def process(self, instance): """Plugin entry point.""" + + # TODO 'get_asset_settings' could expect asset document as argument + # which is available on 'context.data["assetEntity"]' + # - the same approach can be used in 'ValidateSceneSettingsRepair' expected_settings = harmony.get_asset_settings() self.log.info("scene settings from DB:".format(expected_settings)) diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index add416d04e..28a9dfb492 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -10,6 +10,7 @@ import qargparse import openpype.api as openpype from openpype.pipeline import LoaderPlugin, LegacyCreator +from openpype.pipeline.context_tools import get_current_project_asset from . 
import lib log = openpype.Logger().get_logger(__name__) @@ -484,7 +485,7 @@ class ClipLoader: """ asset_name = self.context["representation"]["context"]["asset"] - asset_doc = openpype.get_asset(asset_name) + asset_doc = get_current_project_asset(asset_name) log.debug("__ asset_doc: {}".format(pformat(asset_doc))) self.data["assetData"] = asset_doc["data"] diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index 2d0ec6fc99..0c7dbc1f22 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -109,7 +109,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin): "clipAnnotations": annotations, # add all additional tags - "tags": phiero.get_track_item_tags(track_item) + "tags": phiero.get_track_item_tags(track_item), + "newAssetPublishing": True }) # otio clip data diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index dd8a5ba473..c8a7f92bb9 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -5,8 +5,8 @@ from contextlib import contextmanager import six from openpype.client import get_asset_by_name -from openpype.api import get_asset from openpype.pipeline import legacy_io +from openpype.pipeline.context_tools import get_current_project_asset import hou @@ -16,7 +16,7 @@ log = logging.getLogger(__name__) def get_asset_fps(): """Return current asset fps.""" - return get_asset()["data"].get("fps") + return get_current_project_asset()["data"].get("fps") def set_id(node, unique_id, overwrite=False): diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index 7048accceb..b5f5459392 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -12,13 +12,13 @@ from openpype.pipeline import ( register_loader_plugin_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.load import any_outdated_containers import openpype.hosts.houdini from openpype.hosts.houdini.api import lib from openpype.lib import ( register_event_callback, emit_event, - any_outdated, ) from .lib import get_asset_fps @@ -245,7 +245,7 @@ def on_open(): # ensure it is using correct FPS for the asset lib.validate_fps() - if any_outdated(): + if any_outdated_containers(): from openpype.widgets import popup log.warning("Scene has outdated content.") diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 34340a13a5..58e160cb2f 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -23,7 +23,6 @@ from openpype.client import ( get_last_versions, get_representation_by_name ) -from openpype import lib from openpype.api import get_anatomy_settings from openpype.pipeline import ( legacy_io, @@ -33,6 +32,7 @@ from openpype.pipeline import ( load_container, registered_host, ) +from openpype.pipeline.context_tools import get_current_project_asset from .commands import reset_frame_range @@ -2174,7 +2174,7 @@ def reset_scene_resolution(): project_name = legacy_io.active_project() project_doc = get_project(project_name) project_data = project_doc["data"] - asset_data = lib.get_asset()["data"] + asset_data = get_current_project_asset()["data"] # Set project resolution width_key = "resolutionWidth" @@ -2208,7 +2208,8 @@ def set_context_settings(): project_name = legacy_io.active_project() project_doc = get_project(project_name) project_data = project_doc["data"] - asset_data = 
lib.get_asset()["data"] + asset_doc = get_current_project_asset(fields=["data.fps"]) + asset_data = asset_doc.get("data", {}) # Set project fps fps = asset_data.get("fps", project_data.get("fps", 25)) @@ -2233,7 +2234,7 @@ def validate_fps(): """ - fps = lib.get_asset()["data"]["fps"] + fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"] # TODO(antirotor): This is hack as for framerates having multiple # decimal places. FTrack is ceiling decimal values on # fps to two decimal places but Maya 2019+ is reporting those fps @@ -2522,12 +2523,30 @@ def load_capture_preset(data=None): temp_options2['multiSampleEnable'] = False temp_options2['multiSampleCount'] = preset[id][key] + if key == 'renderDepthOfField': + temp_options2['renderDepthOfField'] = preset[id][key] + if key == 'ssaoEnable': if preset[id][key] is True: temp_options2['ssaoEnable'] = True else: temp_options2['ssaoEnable'] = False + if key == 'ssaoSamples': + temp_options2['ssaoSamples'] = preset[id][key] + + if key == 'ssaoAmount': + temp_options2['ssaoAmount'] = preset[id][key] + + if key == 'ssaoRadius': + temp_options2['ssaoRadius'] = preset[id][key] + + if key == 'hwFogDensity': + temp_options2['hwFogDensity'] = preset[id][key] + + if key == 'ssaoFilterRadius': + temp_options2['ssaoFilterRadius'] = preset[id][key] + if key == 'alphaCut': temp_options2['transparencyAlgorithm'] = 5 temp_options2['transparencyQuality'] = 1 @@ -2535,6 +2554,48 @@ def load_capture_preset(data=None): if key == 'headsUpDisplay': temp_options['headsUpDisplay'] = True + if key == 'fogging': + temp_options['fogging'] = preset[id][key] or False + + if key == 'hwFogStart': + temp_options2['hwFogStart'] = preset[id][key] + + if key == 'hwFogEnd': + temp_options2['hwFogEnd'] = preset[id][key] + + if key == 'hwFogAlpha': + temp_options2['hwFogAlpha'] = preset[id][key] + + if key == 'hwFogFalloff': + temp_options2['hwFogFalloff'] = int(preset[id][key]) + + if key == 'hwFogColorR': + temp_options2['hwFogColorR'] = preset[id][key] + + if key == 'hwFogColorG': + temp_options2['hwFogColorG'] = preset[id][key] + + if key == 'hwFogColorB': + temp_options2['hwFogColorB'] = preset[id][key] + + if key == 'motionBlurEnable': + if preset[id][key] is True: + temp_options2['motionBlurEnable'] = True + else: + temp_options2['motionBlurEnable'] = False + + if key == 'motionBlurSampleCount': + temp_options2['motionBlurSampleCount'] = preset[id][key] + + if key == 'motionBlurShutterOpenFraction': + temp_options2['motionBlurShutterOpenFraction'] = preset[id][key] + + if key == 'lineAAEnable': + if preset[id][key] is True: + temp_options2['lineAAEnable'] = True + else: + temp_options2['lineAAEnable'] = False + else: temp_options[str(key)] = preset[id][key] @@ -2544,7 +2605,24 @@ def load_capture_preset(data=None): 'gpuCacheDisplayFilter', 'multiSample', 'ssaoEnable', - 'textureMaxResolution' + 'ssaoSamples', + 'ssaoAmount', + 'ssaoFilterRadius', + 'ssaoRadius', + 'hwFogStart', + 'hwFogEnd', + 'hwFogAlpha', + 'hwFogFalloff', + 'hwFogColorR', + 'hwFogColorG', + 'hwFogColorB', + 'hwFogDensity', + 'textureMaxResolution', + 'motionBlurEnable', + 'motionBlurSampleCount', + 'motionBlurShutterOpenFraction', + 'lineAAEnable', + 'renderDepthOfField' ]: temp_options.pop(key, None) @@ -2974,8 +3052,9 @@ def update_content_on_context_change(): This will update scene content to match new asset on context change """ scene_sets = cmds.listSets(allSets=True) - new_asset = legacy_io.Session["AVALON_ASSET"] - new_data = lib.get_asset()["data"] + asset_doc = 
get_current_project_asset()
+    new_asset = asset_doc["name"]
+    new_data = asset_doc["data"]
     for s in scene_sets:
         try:
             if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance":
diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py
index 2d3bda5245..1e883ea43f 100644
--- a/openpype/hosts/maya/api/lib_renderproducts.py
+++ b/openpype/hosts/maya/api/lib_renderproducts.py
@@ -82,6 +82,14 @@ IMAGE_PREFIXES = {
 RENDERMAN_IMAGE_DIR = "maya/<Scene>/<layer>"
 
+
+def has_tokens(string, tokens):
+    """Return whether any of tokens is in input string (case-insensitive)"""
+    pattern = "({})".format("|".join(re.escape(token) for token in tokens))
+    match = re.search(pattern, string, re.IGNORECASE)
+    return bool(match)
+
+
 @attr.s
 class LayerMetadata(object):
     """Data class for Render Layer metadata."""
@@ -99,6 +107,12 @@ class LayerMetadata(object):
     # Render Products
     products = attr.ib(init=False, default=attr.Factory(list))
 
+    # The AOV separator token. Note that not all renderers define an explicit
+    # render separator but allow to put the AOV/RenderPass token anywhere in
+    # the file path prefix. For those renderers we'll fall back to whatever
+    # is between the last occurrences of <RenderLayer> and <RenderPass>
+    # tokens.
+    aov_separator = attr.ib(default="_")
+
 
 @attr.s
 class RenderProduct(object):
@@ -183,7 +197,6 @@ class ARenderProducts:
         self.layer = layer
         self.render_instance = render_instance
         self.multipart = False
-        self.aov_separator = render_instance.data.get("aovSeparator", "_")
 
         # Initialize
         self.layer_data = self._get_layer_data()
@@ -296,6 +309,42 @@ class ARenderProducts:
 
         return lib.get_attr_in_layer(plug, layer=self.layer)
 
+    @staticmethod
+    def extract_separator(file_prefix):
+        """Extract AOV separator character from the prefix.
+
+        Default behavior extracts the part between
+        last occurrences of <RenderLayer> and <RenderPass>
+
+        Todo:
+            This code also triggers for V-Ray which overrides it explicitly
+            so this code will invalidly debug log it couldn't extract the
+            AOV separator even though it does set it in RenderProductsVray.
+
+        Args:
+            file_prefix (str): File prefix with tokens.
+
+        Returns:
+            str or None: prefix character if it can be extracted.
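+
+        Example:
+            Illustrative only, using a hypothetical prefix value:
+
+            >>> ARenderProducts.extract_separator(
+            ...     "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>")
+            '_'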
+ """ + layer_tokens = ["", ""] + aov_tokens = ["", ""] + + def match_last(tokens, text): + """regex match the last occurence from a list of tokens""" + pattern = "(?:.*)({})".format("|".join(tokens)) + return re.search(pattern, text, re.IGNORECASE) + + layer_match = match_last(layer_tokens, file_prefix) + aov_match = match_last(aov_tokens, file_prefix) + separator = None + if layer_match and aov_match: + matches = sorted((layer_match, aov_match), + key=lambda match: match.end(1)) + separator = file_prefix[matches[0].end(1):matches[1].start(1)] + return separator + + def _get_layer_data(self): # type: () -> LayerMetadata # ______________________________________________ @@ -304,7 +353,7 @@ class ARenderProducts: # ____________________/ _, scene_basename = os.path.split(cmds.file(q=True, loc=True)) scene_name, _ = os.path.splitext(scene_basename) - + kwargs = {} file_prefix = self.get_renderer_prefix() # If the Render Layer belongs to a Render Setup layer then the @@ -319,6 +368,13 @@ class ARenderProducts: # defaultRenderLayer renders as masterLayer layer_name = "masterLayer" + separator = self.extract_separator(file_prefix) + if separator: + kwargs["aov_separator"] = separator + else: + log.debug("Couldn't extract aov separator from " + "file prefix: {}".format(file_prefix)) + # todo: Support Custom Frames sequences 0,5-10,100-120 # Deadline allows submitting renders with a custom frame list # to support those cases we might want to allow 'custom frames' @@ -335,7 +391,8 @@ class ARenderProducts: layerName=layer_name, renderer=self.renderer, defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"), - filePrefix=file_prefix + filePrefix=file_prefix, + **kwargs ) def _generate_file_sequence( @@ -680,9 +737,17 @@ class RenderProductsVray(ARenderProducts): """ prefix = super(RenderProductsVray, self).get_renderer_prefix() - prefix = "{}{}".format(prefix, self.aov_separator) + aov_separator = self._get_aov_separator() + prefix = "{}{}".format(prefix, aov_separator) return prefix + def _get_aov_separator(self): + # type: () -> str + """Return the V-Ray AOV/Render Elements separator""" + return self._get_attr( + "vraySettings.fileNameRenderElementSeparator" + ) + def _get_layer_data(self): # type: () -> LayerMetadata """Override to get vray specific extension.""" @@ -694,6 +759,8 @@ class RenderProductsVray(ARenderProducts): layer_data.defaultExt = default_ext layer_data.padding = self._get_attr("vraySettings.fileNamePadding") + layer_data.aov_separator = self._get_aov_separator() + return layer_data def get_render_products(self): @@ -913,8 +980,9 @@ class RenderProductsRedshift(ARenderProducts): :func:`ARenderProducts.get_renderer_prefix()` """ - prefix = super(RenderProductsRedshift, self).get_renderer_prefix() - prefix = "{}{}".format(prefix, self.aov_separator) + file_prefix = super(RenderProductsRedshift, self).get_renderer_prefix() + separator = self.extract_separator(file_prefix) + prefix = "{}{}".format(file_prefix, separator or "_") return prefix def get_render_products(self): @@ -1087,7 +1155,7 @@ class RenderProductsRenderman(ARenderProducts): "d_tiff": "tif" } - displays = get_displays()["displays"] + displays = get_displays(override_dst="render")["displays"] for name, display in displays.items(): enabled = display["params"]["enable"]["value"] if not enabled: @@ -1106,9 +1174,33 @@ class RenderProductsRenderman(ARenderProducts): display["driverNode"]["type"], "exr") for camera in cameras: - product = RenderProduct(productName=aov_name, - ext=extensions, - camera=camera) + # 
+                # Create render product and set it as multipart only on
+                # display types supporting it. In all other cases, Renderman
+                # will create separate output per channel.
+                if display["driverNode"]["type"] in ["d_openexr", "d_deepexr", "d_tiff"]:  # noqa
+                    product = RenderProduct(
+                        productName=aov_name,
+                        ext=extensions,
+                        camera=camera,
+                        multipart=True
+                    )
+                else:
+                    # this code should handle the case where no multipart
+                    # capable format is selected. But since it involves
+                    # shady logic to determine what channel become what
+                    # lets not do that as all productions will use exr anyway.
+                    """
+                    for channel in display['params']['displayChannels']['value']:  # noqa
+                        product = RenderProduct(
+                            productName="{}_{}".format(aov_name, channel),
+                            ext=extensions,
+                            camera=camera,
+                            multipart=False
+                        )
+                    """
+                    raise UnsupportedImageFormatException(
+                        "Only exr, deep exr and tiff formats are supported.")
+
+                products.append(product)
 
         return products
 
@@ -1201,3 +1293,7 @@ class UnsupportedRendererException(Exception):
     Raised when requesting data from unsupported renderer.
 
     """
+
+
+class UnsupportedImageFormatException(Exception):
+    """Custom exception to report unsupported output image format."""
diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py
new file mode 100644
index 0000000000..7cd2193086
--- /dev/null
+++ b/openpype/hosts/maya/api/lib_rendersettings.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+"""Class for handling Render Settings."""
+from maya import cmds  # noqa
+import maya.mel as mel
+import six
+import sys
+
+from openpype.api import (
+    get_project_settings,
+    get_current_project_settings
+)
+
+from openpype.pipeline import legacy_io
+from openpype.pipeline import CreatorError
+from openpype.pipeline.context_tools import get_current_project_asset
+from openpype.hosts.maya.api.commands import reset_frame_range
+
+
+class RenderSettings(object):
+
+    _image_prefix_nodes = {
+        'vray': 'vraySettings.fileNamePrefix',
+        'arnold': 'defaultRenderGlobals.imageFilePrefix',
+        'renderman': 'defaultRenderGlobals.imageFilePrefix',
+        'redshift': 'defaultRenderGlobals.imageFilePrefix'
+    }
+
+    _image_prefixes = {
+        'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"],  # noqa
+        'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"],  # noqa
+        'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
+        'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"]  # noqa
+    }
+
+    _aov_chars = {
+        "dot": ".",
+        "dash": "-",
+        "underscore": "_"
+    }
+
+    @classmethod
+    def get_image_prefix_attr(cls, renderer):
+        return cls._image_prefix_nodes[renderer]
+
+    def __init__(self, project_settings=None):
+        self._project_settings = project_settings
+        if not self._project_settings:
+            self._project_settings = get_project_settings(
+                legacy_io.Session["AVALON_PROJECT"]
+            )
+
+    def set_default_renderer_settings(self, renderer=None):
+        """Set basic settings based on renderer."""
+        if not renderer:
+            renderer = cmds.getAttr(
+                'defaultRenderGlobals.currentRenderer').lower()
+
+        asset_doc = get_current_project_asset()
+        # project_settings/maya/RenderSettings/aov_separator
+        try:
+            aov_separator = self._aov_chars[(
+                self._project_settings["maya"]
+                ["RenderSettings"]
+                ["aov_separator"]
+            )]
+        except KeyError:
+            aov_separator = "_"
+        reset_frame = self._project_settings["maya"]["RenderSettings"]["reset_current_frame"]  # noqa
+
+        if reset_frame:
+            start_frame =
cmds.getAttr("defaultRenderGlobals.startFrame") + cmds.currentTime(start_frame, edit=True) + + if renderer in self._image_prefix_nodes: + prefix = self._image_prefixes[renderer] + prefix = prefix.replace("{aov_separator}", aov_separator) + cmds.setAttr(self._image_prefix_nodes[renderer], + prefix, type="string") # noqa + else: + print("{0} isn't a supported renderer to autoset settings.".format(renderer)) # noqa + + # TODO: handle not having res values in the doc + width = asset_doc["data"].get("resolutionWidth") + height = asset_doc["data"].get("resolutionHeight") + + if renderer == "arnold": + # set renderer settings for Arnold from project settings + self._set_arnold_settings(width, height) + + if renderer == "vray": + self._set_vray_settings(aov_separator, width, height) + + if renderer == "redshift": + self._set_redshift_settings(width, height) + + def _set_arnold_settings(self, width, height): + """Sets settings for Arnold.""" + from mtoa.core import createOptions # noqa + from mtoa.aovs import AOVInterface # noqa + createOptions() + arnold_render_presets = self._project_settings["maya"]["RenderSettings"]["arnold_renderer"] # noqa + # Force resetting settings and AOV list to avoid having to deal with + # AOV checking logic, for now. + # This is a work around because the standard + # function to revert render settings does not reset AOVs list in MtoA + # Fetch current aovs in case there's any. + current_aovs = AOVInterface().getAOVs() + # Remove fetched AOVs + AOVInterface().removeAOVs(current_aovs) + mel.eval("unifiedRenderGlobalsRevertToDefault") + img_ext = arnold_render_presets["image_format"] + img_prefix = arnold_render_presets["image_prefix"] + aovs = arnold_render_presets["aov_list"] + img_tiled = arnold_render_presets["tiled"] + multi_exr = arnold_render_presets["multilayer_exr"] + additional_options = arnold_render_presets["additional_options"] + for aov in aovs: + AOVInterface('defaultArnoldRenderOptions').addAOV(aov) + + cmds.setAttr("defaultResolution.width", width) + cmds.setAttr("defaultResolution.height", height) + + self._set_global_output_settings() + + cmds.setAttr( + "defaultRenderGlobals.imageFilePrefix", img_prefix, type="string") + + cmds.setAttr( + "defaultArnoldDriver.ai_translator", img_ext, type="string") + + cmds.setAttr( + "defaultArnoldDriver.exrTiled", img_tiled) + + cmds.setAttr( + "defaultArnoldDriver.mergeAOVs", multi_exr) + # Passes additional options in from the schema as a list + # but converts it to a dictionary because ftrack doesn't + # allow fullstops in custom attributes. Then checks for + # type of MtoA attribute passed to adjust the `setAttr` + # command accordingly. 
+        self._additional_attribs_setter(additional_options)
+        reset_frame_range()
+
+    def _set_redshift_settings(self, width, height):
+        """Sets settings for Redshift."""
+        redshift_render_presets = (
+            self._project_settings
+            ["maya"]
+            ["RenderSettings"]
+            ["redshift_renderer"]
+        )
+        additional_options = redshift_render_presets["additional_options"]
+        ext = redshift_render_presets["image_format"]
+        img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"]
+        img_ext = img_exts.index(ext)
+
+        self._set_global_output_settings()
+        cmds.setAttr("redshiftOptions.imageFormat", img_ext)
+        cmds.setAttr("defaultResolution.width", width)
+        cmds.setAttr("defaultResolution.height", height)
+        self._additional_attribs_setter(additional_options)
+
+    def _set_vray_settings(self, aov_separator, width, height):
+        # type: (str, int, int) -> None
+        """Sets important settings for Vray."""
+        settings = cmds.ls(type="VRaySettingsNode")
+        node = settings[0] if settings else cmds.createNode("VRaySettingsNode")
+        vray_render_presets = (
+            self._project_settings
+            ["maya"]
+            ["RenderSettings"]
+            ["vray_renderer"]
+        )
+        # Set aov separator
+        # First we need to explicitly set the UI items in Render Settings
+        # because that is also what V-Ray updates to when that Render Settings
+        # UI did initialize before and refreshes again.
+        MENU = "vrayRenderElementSeparator"
+        if cmds.optionMenuGrp(MENU, query=True, exists=True):
+            items = cmds.optionMenuGrp(MENU, query=True, ill=True)
+            separators = [cmds.menuItem(i, query=True, label=True) for i in items]  # noqa: E501
+            try:
+                sep_idx = separators.index(aov_separator)
+            except ValueError:
+                six.reraise(
+                    CreatorError,
+                    CreatorError(
+                        "AOV character {} not in {}".format(
+                            aov_separator, separators)),
+                    sys.exc_info()[2])
+
+            cmds.optionMenuGrp(MENU, edit=True, select=sep_idx + 1)
+
+        # Set the render element attribute as string. This is also what
+        # V-Ray sets whenever the `vrayRenderElementSeparator` menu items
+        # switch.
+        cmds.setAttr(
+            "{}.fileNameRenderElementSeparator".format(node),
+            aov_separator,
+            type="string"
+        )
+
+        # Set render file format to exr
+        cmds.setAttr("{}.imageFormatStr".format(node), "exr", type="string")
+
+        # animType
+        cmds.setAttr("{}.animType".format(node), 1)
+
+        # resolution
+        cmds.setAttr("{}.width".format(node), width)
+        cmds.setAttr("{}.height".format(node), height)
+
+        additional_options = vray_render_presets["additional_options"]
+
+        self._additional_attribs_setter(additional_options)
+
+    @staticmethod
+    def _set_global_output_settings():
+        # enable animation
+        cmds.setAttr("defaultRenderGlobals.outFormatControl", 0)
+        cmds.setAttr("defaultRenderGlobals.animation", 1)
+        cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1)
+        cmds.setAttr("defaultRenderGlobals.extensionPadding", 4)
+
+    def _additional_attribs_setter(self, additional_attribs):
+        print(additional_attribs)
+        for item in additional_attribs:
+            attribute, value = item
+            if cmds.getAttr(str(attribute), type=True) == "long":
+                cmds.setAttr(str(attribute), int(value))
+            elif cmds.getAttr(str(attribute), type=True) == "bool":
+                cmds.setAttr(str(attribute), int(value))
+            elif cmds.getAttr(str(attribute), type=True) == "string":
+                cmds.setAttr(str(attribute), str(value), type="string")
diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py
index 97f06c43af..b7ab529a55 100644
--- a/openpype/hosts/maya/api/menu.py
+++ b/openpype/hosts/maya/api/menu.py
@@ -6,11 +6,11 @@ from Qt import QtWidgets, QtGui
 import maya.utils
 import maya.cmds as cmds
 
-from openpype.api import BuildWorkfile
 from openpype.settings import get_project_settings
 from openpype.pipeline import legacy_io
+from openpype.pipeline.workfile import BuildWorkfile
 from openpype.tools.utils import host_tools
-from openpype.hosts.maya.api import lib
+from openpype.hosts.maya.api import lib, lib_rendersettings
 from .lib import get_main_window, IS_HEADLESS
 from .commands import reset_frame_range
 
@@ -44,6 +44,7 @@ def install():
         parent="MayaWindow"
     )
 
+    renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower()
     # Create context menu
     context_label = "{}, {}".format(
         legacy_io.Session["AVALON_ASSET"],
@@ -98,6 +99,13 @@ def install():
 
         cmds.menuItem(divider=True)
 
+        cmds.menuItem(
+            "Set Render Settings",
+            command=lambda *args: lib_rendersettings.RenderSettings().set_default_renderer_settings()  # noqa
+        )
+
+        cmds.menuItem(divider=True)
+
         cmds.menuItem(
             "Work Files...",
             command=lambda *args: host_tools.show_workfiles(
diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py
index d08e8d1926..f565f6a308 100644
--- a/openpype/hosts/maya/api/pipeline.py
+++ b/openpype/hosts/maya/api/pipeline.py
@@ -13,7 +13,6 @@ from openpype.host import HostBase, IWorkfileHost, ILoadHost
 import openpype.hosts.maya
 from openpype.tools.utils import host_tools
 from openpype.lib import (
-    any_outdated,
     register_event_callback,
     emit_event
 )
@@ -28,6 +27,7 @@ from openpype.pipeline import (
     deregister_creator_plugin_path,
     AVALON_CONTAINER_ID,
 )
+from openpype.pipeline.load import any_outdated_containers
 from openpype.hosts.maya.lib import copy_workspace_mel
 from .
import menu, lib from .workio import ( @@ -470,7 +470,7 @@ def on_open(): lib.validate_fps() lib.fix_incompatible_containers() - if any_outdated(): + if any_outdated_containers(): log.warning("Scene has outdated content.") # Find maya main window diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 9280805945..e50ebfccad 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -208,7 +208,8 @@ class ReferenceLoader(Loader): file_type = { "ma": "mayaAscii", "mb": "mayaBinary", - "abc": "Alembic" + "abc": "Alembic", + "fbx": "FBX" }.get(representation["name"]) assert file_type, "Unsupported representation: %s" % representation @@ -234,7 +235,7 @@ class ReferenceLoader(Loader): path = self.prepare_root_value(path, representation["context"] ["project"] - ["code"]) + ["name"]) content = cmds.file(path, loadReference=reference_node, type=file_type, diff --git a/openpype/hosts/maya/api/shader_definition_editor.py b/openpype/hosts/maya/api/shader_definition_editor.py index 911db48ac2..6ea5e1a127 100644 --- a/openpype/hosts/maya/api/shader_definition_editor.py +++ b/openpype/hosts/maya/api/shader_definition_editor.py @@ -6,7 +6,7 @@ Shader names are stored as simple text file over GridFS in mongodb. """ import os from Qt import QtWidgets, QtCore, QtGui -from openpype.lib.mongo import OpenPypeMongoConnection +from openpype.client.mongo import OpenPypeMongoConnection from openpype import resources import gridfs diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py index 5cd1f7090a..e47d4e5b5a 100644 --- a/openpype/hosts/maya/plugins/create/create_animation.py +++ b/openpype/hosts/maya/plugins/create/create_animation.py @@ -11,6 +11,7 @@ class CreateAnimation(plugin.Creator): label = "Animation" family = "animation" icon = "male" + write_color_sets = False def __init__(self, *args, **kwargs): super(CreateAnimation, self).__init__(*args, **kwargs) @@ -22,7 +23,7 @@ class CreateAnimation(plugin.Creator): self.data[key] = value # Write vertex colors with the geometry. - self.data["writeColorSets"] = False + self.data["writeColorSets"] = self.write_color_sets self.data["writeFaceSets"] = False # Include only renderable visible shapes. diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index e876015adb..5516445de8 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -11,6 +11,7 @@ class CreatePointCache(plugin.Creator): label = "Point Cache" family = "pointcache" icon = "gears" + write_color_sets = False def __init__(self, *args, **kwargs): super(CreatePointCache, self).__init__(*args, **kwargs) @@ -18,7 +19,8 @@ class CreatePointCache(plugin.Creator): # Add animation data self.data.update(lib.collect_animation_data()) - self.data["writeColorSets"] = False # Vertex colors with the geometry. + # Vertex colors with the geometry. + self.data["writeColorSets"] = self.write_color_sets self.data["writeFaceSets"] = False # Vertex colors with the geometry. 
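+        # Note: 'write_color_sets' is defined as a class attribute above so
+        # the default can be overridden from project settings (hypothetical
+        # override: {"CreatePointCache": {"write_color_sets": true}}).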
self.data["renderableOnly"] = False # Only renderable visible shapes self.data["visibleOnly"] = False # only nodes that are visible diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 93ee6679e5..fbe670b1ea 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -1,27 +1,34 @@ # -*- coding: utf-8 -*- """Create ``Render`` instance in Maya.""" -import os import json +import os + import appdirs import requests from maya import cmds -import maya.app.renderSetup.model.renderSetup as renderSetup +from maya.app.renderSetup.model import renderSetup +from openpype.api import ( + get_system_settings, + get_project_settings, +) from openpype.hosts.maya.api import ( lib, + lib_rendersettings, plugin ) from openpype.lib import requests_get from openpype.api import ( get_system_settings, - get_project_settings, - get_asset) + get_project_settings) from openpype.modules import ModulesManager +from openpype.pipeline import legacy_io from openpype.pipeline import ( CreatorError, legacy_io, ) +from openpype.pipeline.context_tools import get_current_project_asset class CreateRender(plugin.Creator): @@ -69,35 +76,6 @@ class CreateRender(plugin.Creator): _user = None _password = None - # renderSetup instance - _rs = None - - _image_prefix_nodes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'rmanGlobals.imageFileFormat', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', - } - - _image_prefixes = { - 'mentalray': 'maya///{aov_separator}', # noqa - 'vray': 'maya///', - 'arnold': 'maya///{aov_separator}', # noqa - # this needs `imageOutputDir` - # (/renders/maya/) set separately - 'renderman': '_..', - 'redshift': 'maya///', # noqa - 'mayahardware2': 'maya///', # noqa - } - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - _project_settings = None def __init__(self, *args, **kwargs): @@ -109,18 +87,8 @@ class CreateRender(plugin.Creator): return self._project_settings = get_project_settings( legacy_io.Session["AVALON_PROJECT"]) - - # project_settings/maya/create/CreateRender/aov_separator - try: - self.aov_separator = self._aov_chars[( - self._project_settings["maya"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - self.aov_separator = "_" - + if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]: # noqa + lib_rendersettings.RenderSettings().set_default_renderer_settings() manager = ModulesManager() self.deadline_module = manager.modules_by_name["deadline"] try: @@ -177,13 +145,13 @@ class CreateRender(plugin.Creator): ]) cmds.setAttr("{}.machineList".format(self.instance), lock=True) - self._rs = renderSetup.instance() - layers = self._rs.getRenderLayers() + rs = renderSetup.instance() + layers = rs.getRenderLayers() if use_selection: - print(">>> processing existing layers") + self.log.info("Processing existing layers") sets = [] for layer in layers: - print(" - creating set for {}:{}".format( + self.log.info(" - creating set for {}:{}".format( namespace, layer.name())) render_set = cmds.sets( n="{}:{}".format(namespace, layer.name())) @@ -193,17 +161,10 @@ class CreateRender(plugin.Creator): # if no render layers are present, create default one with # asterisk selector if not layers: - render_layer = 
self._rs.createRenderLayer('Main') + render_layer = rs.createRenderLayer('Main') collection = render_layer.createCollection("defaultCollection") collection.getSelector().setPattern('*') - renderer = cmds.getAttr( - 'defaultRenderGlobals.currentRenderer').lower() - # handle various renderman names - if renderer.startswith('renderman'): - renderer = 'renderman' - - self._set_default_renderer_settings(renderer) return self.instance def _deadline_webservice_changed(self): @@ -237,7 +198,7 @@ class CreateRender(plugin.Creator): def _create_render_settings(self): """Create instance settings.""" - # get pools + # get pools (slave machines of the render farm) pool_names = [] default_priority = 50 @@ -281,7 +242,8 @@ class CreateRender(plugin.Creator): # if 'default' server is not between selected, # use first one for initial list of pools. deadline_url = next(iter(self.deadline_servers.values())) - + # Uses function to get pool machines from the assigned deadline + # url in settings pool_names = self.deadline_module.get_deadline_pools(deadline_url, self.log) maya_submit_dl = self._project_settings.get( @@ -400,102 +362,36 @@ class CreateRender(plugin.Creator): self.log.error("Cannot show login form to Muster") raise Exception("Cannot show login form to Muster") - def _set_default_renderer_settings(self, renderer): - """Set basic settings based on renderer. + def _requests_post(self, *args, **kwargs): + """Wrap request post method. - Args: - renderer (str): Renderer name. + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. """ - prefix = self._image_prefixes[renderer] - prefix = prefix.replace("{aov_separator}", self.aov_separator) - cmds.setAttr(self._image_prefix_nodes[renderer], - prefix, - type="string") + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.post(*args, **kwargs) - asset = get_asset() + def _requests_get(self, *args, **kwargs): + """Wrap request get method. - if renderer == "arnold": - # set format to exr + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. - cmds.setAttr( - "defaultArnoldDriver.ai_translator", "exr", type="string") - self._set_global_output_settings() - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. 
- if renderer == "vray": - self._set_vray_settings(asset) - if renderer == "redshift": - cmds.setAttr("redshiftOptions.imageFormat", 1) - - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) - - self._set_global_output_settings() - - if renderer == "renderman": - cmds.setAttr("rmanGlobals.imageOutputDir", - "maya//", type="string") - - def _set_vray_settings(self, asset): - # type: (dict) -> None - """Sets important settings for Vray.""" - settings = cmds.ls(type="VRaySettingsNode") - node = settings[0] if settings else cmds.createNode("VRaySettingsNode") - - # set separator - # set it in vray menu - if cmds.optionMenuGrp("vrayRenderElementSeparator", exists=True, - q=True): - items = cmds.optionMenuGrp( - "vrayRenderElementSeparator", ill=True, query=True) - - separators = [cmds.menuItem(i, label=True, query=True) for i in items] # noqa: E501 - try: - sep_idx = separators.index(self.aov_separator) - except ValueError: - raise CreatorError( - "AOV character {} not in {}".format( - self.aov_separator, separators)) - - cmds.optionMenuGrp( - "vrayRenderElementSeparator", sl=sep_idx + 1, edit=True) - cmds.setAttr( - "{}.fileNameRenderElementSeparator".format(node), - self.aov_separator, - type="string" - ) - # set format to exr - cmds.setAttr( - "{}.imageFormatStr".format(node), "exr", type="string") - - # animType - cmds.setAttr( - "{}.animType".format(node), 1) - - # resolution - cmds.setAttr( - "{}.width".format(node), - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "{}.height".format(node), - asset["data"].get("resolutionHeight")) - - @staticmethod - def _set_global_output_settings(): - # enable animation - cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) - cmds.setAttr("defaultRenderGlobals.animation", 1) - cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) - cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index ec583bcce7..157be5717b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -551,7 +551,9 @@ class CollectLook(pyblish.api.InstancePlugin): if cmds.getAttr(attribute, type=True) == "message": continue node_attributes[attr] = cmds.getAttr(attribute) - + # Only include if there are any properties we care about + if not node_attributes: + continue attributes.append({"name": node, "uuid": lib.get_id(node), "attributes": node_attributes}) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 8b911a867d..704541fc44 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -72,7 +72,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): def process(self, context): """Entry point to collector.""" render_instance = None - deadline_url = None for instance in context: if "rendering" in instance.data["families"]: @@ -96,23 +95,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] - deadline_settings = ( - context.data - ["system_settings"] - ["modules"] - ["deadline"] 
- ) - - if deadline_settings["enabled"]: - deadline_url = render_instance.data.get("deadlineUrl") - self._rs = renderSetup.instance() - current_layer = self._rs.getVisibleRenderLayer() + # Retrieve render setup layers + rs = renderSetup.instance() maya_render_layers = { - layer.name(): layer for layer in self._rs.getRenderLayers() + layer.name(): layer for layer in rs.getRenderLayers() } - self.maya_layers = maya_render_layers - for layer in collected_render_layers: try: if layer.startswith("LAYER_"): @@ -147,49 +135,34 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.warning(msg) continue - # test if there are sets (subsets) to attach render to + # detect if there are sets (subsets) to attach render to sets = cmds.sets(layer, query=True) or [] attach_to = [] - if sets: - for s in sets: - if "family" not in cmds.listAttr(s): - continue + for s in sets: + if not cmds.attributeQuery("family", node=s, exists=True): + continue - attach_to.append( - { - "version": None, # we need integrator for that - "subset": s, - "family": cmds.getAttr("{}.family".format(s)), - } - ) - self.log.info(" -> attach render to: {}".format(s)) + attach_to.append( + { + "version": None, # we need integrator for that + "subset": s, + "family": cmds.getAttr("{}.family".format(s)), + } + ) + self.log.info(" -> attach render to: {}".format(s)) layer_name = "rs_{}".format(expected_layer_name) # collect all frames we are expecting to be rendered - renderer = cmds.getAttr( - "defaultRenderGlobals.currentRenderer" - ).lower() + renderer = self.get_render_attribute("currentRenderer", + layer=layer_name) # handle various renderman names if renderer.startswith("renderman"): renderer = "renderman" - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - - render_instance.data["aovSeparator"] = aov_separator - # return all expected files for all cameras and aovs in given # frame range - layer_render_products = get_layer_render_products( - layer_name, render_instance) + layer_render_products = get_layer_render_products(layer_name) render_products = layer_render_products.layer_data.products assert render_products, "no render products generated" exp_files = [] @@ -226,13 +199,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin): ) # append full path - full_exp_files = [] aov_dict = {} default_render_file = context.data.get('project_settings')\ .get('maya')\ - .get('create')\ - .get('CreateRender')\ - .get('default_render_image_folder') + .get('RenderSettings')\ + .get('default_render_image_folder') or "" # replace relative paths with absolute. Render products are # returned as list of dictionaries. 
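            # Illustrative shape of the remapped products (hypothetical
            # paths):
            #     aov_dict = {
            #         "beauty": ["/work/renders/maya/rs_main/beauty.1001.exr"]
            #     }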
publish_meta_path = None @@ -246,6 +217,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): full_paths.append(full_path) publish_meta_path = os.path.dirname(full_path) aov_dict[aov_first_key] = full_paths + full_exp_files = [aov_dict] frame_start_render = int(self.get_render_attribute( "startFrame", layer=layer_name)) @@ -269,8 +241,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): frame_start_handle = frame_start_render frame_end_handle = frame_end_render - full_exp_files.append(aov_dict) - # find common path to store metadata # so if image prefix is branching to many directories # metadata file will be located in top-most common @@ -299,16 +269,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - data = { "subset": expected_layer_name, "attachTo": attach_to, @@ -357,11 +317,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "useReferencedAovs": render_instance.data.get( "useReferencedAovs") or render_instance.data.get( "vrayUseReferencedAovs") or False, - "aovSeparator": aov_separator + "aovSeparator": layer_render_products.layer_data.aov_separator # noqa: E501 } - if deadline_url: - data["deadlineUrl"] = deadline_url + # Collect Deadline url if Deadline module is enabled + deadline_settings = ( + context.data["system_settings"]["modules"]["deadline"] + ) + if deadline_settings["enabled"]: + data["deadlineUrl"] = render_instance.data.get("deadlineUrl") if self.sync_workfile_version: data["version"] = context.data["version"] @@ -370,19 +334,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): if instance.data['family'] == "workfile": instance.data["version"] = context.data["version"] - # Apply each user defined attribute as data - for attr in cmds.listAttr(layer, userDefined=True) or list(): - try: - value = cmds.getAttr("{}.{}".format(layer, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. - value = None - - data[attr] = value - # handle standalone renderers if render_instance.data.get("vrayScene") is True: data["families"].append("vrayscene_render") @@ -490,10 +441,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): return pool_a, pool_b - def _get_overrides(self, layer): - rset = self.maya_layers[layer].renderSettingsCollectionInstance() - return rset.getOverrides() - @staticmethod def get_render_attribute(attr, layer): """Get attribute from render options. diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index d35b529c76..8be0c7aae5 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -27,6 +27,29 @@ def escape_space(path): return '"{}"'.format(path) if " " in path else path +def get_ocio_config_path(profile_folder): + """Path to OpenPype vendorized OCIO. + + Vendorized OCIO config file path is grabbed from the specific path + hierarchy specified below. + + "{OPENPYPE_ROOT}/vendor/OpenColorIO-Configs/{profile_folder}/config.ocio" + Args: + profile_folder (str): Name of folder to grab config file from. + + Returns: + str: Path to vendorized config file. 
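+
+    Example:
+        Illustrative only; the resolved path depends on OPENPYPE_ROOT:
+
+        >>> get_ocio_config_path("nuke-default")  # doctest: +SKIP
+        '/openpype/vendor/configs/OpenColorIO-Configs/nuke-default/config.ocio'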
+ """ + return os.path.join( + os.environ["OPENPYPE_ROOT"], + "vendor", + "configs", + "OpenColorIO-Configs", + profile_folder, + "config.ocio" + ) + + def find_paths_by_hash(texture_hash): """Find the texture hash key in the dictionary. @@ -79,10 +102,11 @@ def maketx(source, destination, *args): # use oiio-optimized settings for tile-size, planarconfig, metadata "--oiio", "--filter lanczos3", + escape_space(source) ] cmd.extend(args) - cmd.extend(["-o", escape_space(destination), escape_space(source)]) + cmd.extend(["-o", escape_space(destination)]) cmd = " ".join(cmd) @@ -405,7 +429,19 @@ class ExtractLook(openpype.api.Extractor): # node doesn't have color space attribute color_space = "Raw" else: - if files_metadata[source]["color_space"] == "Raw": + # get the resolved files + metadata = files_metadata.get(source) + # if the files are unresolved from `source` + # assume color space from the first file of + # the resource + if not metadata: + first_file = next(iter(resource.get( + "files", [])), None) + if not first_file: + continue + first_filepath = os.path.normpath(first_file) + metadata = files_metadata[first_filepath] + if metadata["color_space"] == "Raw": # set color space to raw if we linearized it color_space = "Raw" # Remap file node filename to destination @@ -493,6 +529,8 @@ class ExtractLook(openpype.api.Extractor): else: colorconvert = "" + config_path = get_ocio_config_path("nuke-default") + color_config = "--colorconfig {0}".format(config_path) # Ensure folder exists if not os.path.exists(os.path.dirname(converted)): os.makedirs(os.path.dirname(converted)) @@ -502,10 +540,11 @@ class ExtractLook(openpype.api.Extractor): filepath, converted, # Include `source-hash` as string metadata - "-sattrib", + "--sattrib", "sourceHash", escape_space(texture_hash), colorconvert, + color_config ) return converted, COPY, texture_hash diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 233a0b60c2..54ef09e060 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -128,8 +128,10 @@ class ExtractPlayblast(openpype.api.Extractor): # Update preset with current panel setting # if override_viewport_options is turned off if not override_viewport_options: + panel = cmds.getPanel(with_focus=True) panel_preset = capture.parse_active_view() preset.update(panel_preset) + cmds.setFocus(panel) path = capture.capture(**preset) diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 4f28aa167c..01980578cf 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -100,6 +100,13 @@ class ExtractThumbnail(openpype.api.Extractor): # camera. 
if preset.pop("isolate_view", False) and instance.data.get("isolate"): preset["isolate"] = instance.data["setMembers"] + + # Show or Hide Image Plane + image_plane = instance.data.get("imagePlane", True) + if "viewport_options" in preset: + preset["viewport_options"]["imagePlane"] = image_plane + else: + preset["viewport_options"] = {"imagePlane": image_plane} with lib.maintained_time(): # Force viewer to False in call to capture because we have our own @@ -110,14 +117,17 @@ class ExtractThumbnail(openpype.api.Extractor): # Update preset with current panel setting # if override_viewport_options is turned off if not override_viewport_options: + panel = cmds.getPanel(with_focus=True) panel_preset = capture.parse_active_view() preset.update(panel_preset) + cmds.setFocus(panel) path = capture.capture(**preset) playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) + self.log.info("file list {}".format(thumbnail)) if "representations" not in instance.data: diff --git a/openpype/hosts/maya/plugins/publish/validate_look_contents.py b/openpype/hosts/maya/plugins/publish/validate_look_contents.py index 443a0ad719..b1e1d5416b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_contents.py @@ -78,14 +78,13 @@ class ValidateLookContents(pyblish.api.InstancePlugin): # Check if attributes are on a node with an ID, crucial for rebuild! for attr_changes in lookdata["attributes"]: - if not attr_changes["uuid"]: + if not attr_changes["uuid"] and not attr_changes["attributes"]: cls.log.error("Node '%s' has no cbId, please set the " "attributes to its children if it has any" % attr_changes["name"]) invalid.add(instance.name) return list(invalid) - @classmethod def validate_looks(cls, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index d5a8c350d5..5f67adec76 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -2,8 +2,8 @@ import maya.cmds as cmds import pyblish.api import openpype.api -from openpype import lib import openpype.hosts.maya.api.lib as mayalib +from openpype.pipeline.context_tools import get_current_project_asset from math import ceil @@ -41,7 +41,9 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): # now flooring the value? 
        fps = float_round(context.data.get('fps'), 2, ceil)
 
-        asset_fps = lib.get_asset()["data"]["fps"]
+        # TODO replace query with using 'context.data["assetEntity"]'
+        asset_doc = get_current_project_asset()
+        asset_fps = asset_doc["data"]["fps"]
 
         self.log.info('Units (linear): {0}'.format(linearunits))
         self.log.info('Units (angular): {0}'.format(angularunits))
@@ -91,5 +93,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
         cls.log.debug(current_linear)
 
         cls.log.info("Setting time unit to match project")
-        asset_fps = lib.get_asset()["data"]["fps"]
+        # TODO replace query with using 'context.data["assetEntity"]'
+        asset_doc = get_current_project_asset()
+        asset_fps = asset_doc["data"]["fps"]
         mayalib.set_scene_fps(asset_fps)
diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/openpype/hosts/maya/plugins/publish/validate_model_name.py
index 50acf2b8b7..02107d5732 100644
--- a/openpype/hosts/maya/plugins/publish/validate_model_name.py
+++ b/openpype/hosts/maya/plugins/publish/validate_model_name.py
@@ -10,7 +10,7 @@ from openpype.pipeline import legacy_io
 import openpype.hosts.maya.api.action
 from openpype.hosts.maya.api.shader_definition_editor import (
     DEFINITION_FILENAME)
-from openpype.lib.mongo import OpenPypeMongoConnection
+from openpype.client.mongo import OpenPypeMongoConnection
 import gridfs
 
diff --git a/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py b/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py
index 642ca9e25d..4d3796e429 100644
--- a/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py
+++ b/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py
@@ -1,17 +1,15 @@
-import maya.mel as mel
-import pymel.core as pm
+from maya import cmds
 
 import pyblish.api
 import openpype.api
 
 
-def get_file_rule(rule):
-    """Workaround for a bug in python with cmds.workspace"""
-    return mel.eval('workspace -query -fileRuleEntry "{}"'.format(rule))
-
-
 class ValidateRenderImageRule(pyblish.api.InstancePlugin):
-    """Validates "images" file rule is set to "renders/"
+    """Validates Maya Workspace "images" file rule matches project settings.
+
+    This validates against the configured default render image folder:
+        Studio Settings > Project > Maya >
+        Render Settings > Default render image folder.
 
     """
 
@@ -23,24 +21,29 @@ class ValidateRenderImageRule(pyblish.api.InstancePlugin):
 
     def process(self, instance):
 
-        default_render_file = self.get_default_render_image_folder(instance)
+        required_images_rule = self.get_default_render_image_folder(instance)
+        current_images_rule = cmds.workspace(fileRuleEntry="images")
 
-        assert get_file_rule("images") == default_render_file, (
-            "Workspace's `images` file rule must be set to: {}".format(
-                default_render_file
+        assert current_images_rule == required_images_rule, (
+            "Invalid workspace `images` file rule value: '{}'. "
" + "Must be set to: '{}'".format( + current_images_rule, required_images_rule ) ) @classmethod def repair(cls, instance): - default = cls.get_default_render_image_folder(instance) - pm.workspace.fileRules["images"] = default - pm.system.Workspace.save() + + required_images_rule = cls.get_default_render_image_folder(instance) + current_images_rule = cmds.workspace(fileRuleEntry="images") + + if current_images_rule != required_images_rule: + cmds.workspace(fileRule=("images", required_images_rule)) + cmds.workspace(saveWorkspace=True) @staticmethod def get_default_render_image_folder(instance): return instance.context.data.get('project_settings')\ .get('maya') \ - .get('create') \ - .get('CreateRender') \ + .get('RenderSettings') \ .get('default_render_image_folder') diff --git a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py index e6c6ef6c9e..35b87fd0ab 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py @@ -1,20 +1,11 @@ import re import pyblish.api -import openpype.api -import openpype.hosts.maya.api.action - from maya import cmds - -ImagePrefixes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'defaultRenderGlobals.imageFilePrefix', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', -} +import openpype.api +import openpype.hosts.maya.api.action +from openpype.hosts.maya.api.render_settings import RenderSettings class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): @@ -47,7 +38,11 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): # handle various renderman names if renderer.startswith('renderman'): renderer = 'renderman' - file_prefix = cmds.getAttr(ImagePrefixes[renderer]) + + file_prefix = cmds.getAttr( + RenderSettings.get_image_prefix_attr(renderer) + ) + if len(cameras) > 1: if re.search(cls.R_CAMERA_TOKEN, file_prefix): diff --git a/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py b/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py index d70096ee45..04cc9ab5fb 100644 --- a/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py +++ b/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py @@ -6,7 +6,7 @@ from openpype.pipeline import PublishXmlValidationError class ValidateReviewSubsetUniqueness(pyblish.api.ContextPlugin): - """Validates that nodes has common root.""" + """Validates that review subset has unique name.""" order = openpype.api.ValidateContentsOrder hosts = ["maya"] @@ -17,7 +17,7 @@ class ValidateReviewSubsetUniqueness(pyblish.api.ContextPlugin): subset_names = [] for instance in context: - self.log.info("instance:: {}".format(instance.data)) + self.log.debug("Instance: {}".format(instance.data)) if instance.data.get('publish'): subset_names.append(instance.data.get('subset')) diff --git a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py b/openpype/hosts/maya/plugins/publish/validate_setdress_root.py index 0b4842d208..8e23a7c04f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py +++ b/openpype/hosts/maya/plugins/publish/validate_setdress_root.py @@ -4,8 +4,7 @@ import openpype.api class ValidateSetdressRoot(pyblish.api.InstancePlugin): - """ - """ + 
"""Validate if set dress top root node is published.""" order = openpype.api.ValidateContentsOrder label = "SetDress Root" diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 0929415c00..a53d932db1 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -21,10 +21,7 @@ from openpype.client import ( ) from openpype.api import ( Logger, - BuildWorkfile, get_version_from_path, - get_workdir_data, - get_asset, get_current_project_settings, ) from openpype.tools.utils import host_tools @@ -35,11 +32,17 @@ from openpype.settings import ( get_anatomy_settings, ) from openpype.modules import ModulesManager +from openpype.pipeline.template_data import get_template_data_with_names from openpype.pipeline import ( discover_legacy_creator_plugins, legacy_io, Anatomy, ) +from openpype.pipeline.context_tools import ( + get_current_project_asset, + get_custom_workfile_template_from_session +) +from openpype.pipeline.workfile import BuildWorkfile from . import gizmo_menu @@ -910,19 +913,17 @@ def get_render_path(node): ''' Generate Render path from presets regarding avalon knob data ''' avalon_knob_data = read_avalon_data(node) - data = {'avalon': avalon_knob_data} nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) - host_name = os.environ.get("AVALON_APP") - data.update({ - "app": host_name, + data = { + "avalon": avalon_knob_data, "nuke_imageio_writes": nuke_imageio_writes - }) + } anatomy_filled = format_anatomy(data) return anatomy_filled["render"]["path"].replace("\\", "/") @@ -965,12 +966,11 @@ def format_anatomy(data): data["version"] = get_version_from_path(file) project_name = anatomy.project_name - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, data["avalon"]["asset"]) + asset_name = data["avalon"]["asset"] task_name = os.environ["AVALON_TASK"] host_name = os.environ["AVALON_APP"] - context_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name + context_data = get_template_data_with_names( + project_name, asset_name, task_name, host_name ) data.update(context_data) data.update({ @@ -1128,10 +1128,8 @@ def create_write_node( if knob["name"] == "file_type": representation = knob["value"] - host_name = os.environ.get("AVALON_APP") try: data.update({ - "app": host_name, "imageio_writes": imageio_writes, "representation": representation, }) @@ -1766,7 +1764,7 @@ class WorkfileSettings(object): kwargs.get("asset_name") or legacy_io.Session["AVALON_ASSET"] ) - self._asset_entity = get_asset(self._asset) + self._asset_entity = get_current_project_asset(self._asset) self._root_node = root_node or nuke.root() self._nodes = self.get_nodes(nodes=nodes) @@ -1925,7 +1923,7 @@ class WorkfileSettings(object): families.append(avalon_knob_data.get("families")) nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) @@ -2224,7 +2222,7 @@ def get_write_node_template_attr(node): avalon_knob_data = read_avalon_data(node) # get template data nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) @@ -2440,22 +2438,21 @@ def _launch_workfile_app(): if 
starting_up or closing_down:
         return
 
-    from .pipeline import get_main_window
-
-    main_window = get_main_window()
-    host_tools.show_workfiles(parent=main_window)
+    # Make sure on top is enabled on first show so the window is not hidden
+    # under the main Nuke window
+    # - this happened on CentOS 7 because Nuke's focus changes to the main
+    #   window after showing (due to initialization), which moved the
+    #   Workfiles tool under it
+    host_tools.show_workfiles(parent=None, on_top=True)
 
 
 def process_workfile_builder():
-    from openpype.lib import (
-        env_value_to_bool,
-        get_custom_workfile_template
-    )
     # to avoid looping of the callback, remove it!
     nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")
 
     # get state from settings
-    workfile_builder = get_current_project_settings()["nuke"].get(
+    project_settings = get_current_project_settings()
+    workfile_builder = project_settings["nuke"].get(
         "workfile_builder", {})
 
     # get all important settings
@@ -2465,7 +2462,6 @@ def process_workfile_builder():
 
     # get settings
     createfv_on = workfile_builder.get("create_first_version") or None
-    custom_templates = workfile_builder.get("custom_templates") or None
     builder_on = workfile_builder.get("builder_on_start") or None
 
     last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")
@@ -2473,8 +2469,8 @@ def process_workfile_builder():
     # generate first version in file not existing and feature is enabled
     if createfv_on and not os.path.exists(last_workfile_path):
         # get custom template path if any
-        custom_template_path = get_custom_workfile_template(
-            custom_templates
+        custom_template_path = get_custom_workfile_template_from_session(
+            project_settings=project_settings
         )
 
         # if custom template is defined
diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py
index 2e3621ba8f..c1cd8f771a 100644
--- a/openpype/hosts/nuke/api/pipeline.py
+++ b/openpype/hosts/nuke/api/pipeline.py
@@ -9,7 +9,6 @@ import pyblish.api
 import openpype
 from openpype.api import (
     Logger,
-    BuildWorkfile,
     get_current_project_settings
 )
 from openpype.lib import register_event_callback
@@ -22,6 +21,7 @@ from openpype.pipeline import (
     deregister_inventory_action_path,
     AVALON_CONTAINER_ID,
 )
+from openpype.pipeline.workfile import BuildWorkfile
 from openpype.tools.utils import host_tools
 from .command import viewer_update_and_undo_stop
 
@@ -142,6 +142,14 @@ def uninstall():
     _uninstall_menu()
 
 
+def _show_workfiles():
+    # Make sure parent is not set
+    # - this makes the Workfiles tool a separate window, which
+    #   avoids issues with reopening
+    # - it is possible to explicitly change the on top flag of the tool
+    host_tools.show_workfiles(parent=None, on_top=False)
+
+
 def _install_menu():
     # uninstall original avalon menu
     main_window = get_main_window()
@@ -158,7 +166,7 @@ def _install_menu():
     menu.addSeparator()
     menu.addCommand(
         "Work Files...",
-        lambda: host_tools.show_workfiles(parent=main_window)
+        _show_workfiles
     )
 
     menu.addSeparator()
diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py
index 925cab0bef..37ce03dc55 100644
--- a/openpype/hosts/nuke/api/plugin.py
+++ b/openpype/hosts/nuke/api/plugin.py
@@ -181,8 +181,6 @@ class ExporterReview(object):
             # get first and last frame
             self.first_frame = min(self.collection.indexes)
             self.last_frame = max(self.collection.indexes)
-            if "slate" in self.instance.data["families"]:
-                self.first_frame += 1
         else:
             self.fname = os.path.basename(self.path_in)
             self.fhead = os.path.splitext(self.fname)[0] + "."
diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index d177e6ba76..b2dc4a52d7 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -54,20 +54,28 @@ class LoadClip(plugin.NukeLoader): script_start = int(nuke.root()["first_frame"].value()) # option gui - defaults = { - "start_at_workfile": True + options_defaults = { + "start_at_workfile": True, + "add_retime": True } - options = [ - qargparse.Boolean( - "start_at_workfile", - help="Load at workfile start frame", - default=True - ) - ] - node_name_template = "{class_name}_{ext}" + @classmethod + def get_options(cls, *args): + return [ + qargparse.Boolean( + "start_at_workfile", + help="Load at workfile start frame", + default=cls.options_defaults["start_at_workfile"] + ), + qargparse.Boolean( + "add_retime", + help="Load with retime", + default=cls.options_defaults["add_retime"] + ) + ] + @classmethod def get_representations(cls): return ( @@ -86,7 +94,10 @@ class LoadClip(plugin.NukeLoader): file = self.fname.replace("\\", "/") start_at_workfile = options.get( - "start_at_workfile", self.defaults["start_at_workfile"]) + "start_at_workfile", self.options_defaults["start_at_workfile"]) + + add_retime = options.get( + "add_retime", self.options_defaults["add_retime"]) version = context['version'] version_data = version.get("data", {}) @@ -151,7 +162,7 @@ class LoadClip(plugin.NukeLoader): data_imprint = {} for k in add_keys: if k == 'version': - data_imprint.update({k: context["version"]['name']}) + data_imprint[k] = context["version"]['name'] elif k == 'colorspace': colorspace = repre["data"].get(k) colorspace = colorspace or version_data.get(k) @@ -159,10 +170,13 @@ class LoadClip(plugin.NukeLoader): if used_colorspace: data_imprint["used_colorspace"] = used_colorspace else: - data_imprint.update( - {k: context["version"]['data'].get(k, str(None))}) + data_imprint[k] = context["version"]['data'].get( + k, str(None)) - data_imprint.update({"objectName": read_name}) + data_imprint["objectName"] = read_name + + if add_retime and version_data.get("retime", None): + data_imprint["addRetime"] = True read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) @@ -174,7 +188,7 @@ class LoadClip(plugin.NukeLoader): loader=self.__class__.__name__, data=data_imprint) - if version_data.get("retime", None): + if add_retime and version_data.get("retime", None): self._make_retimes(read_node, version_data) self.set_as_member(read_node) @@ -198,7 +212,12 @@ class LoadClip(plugin.NukeLoader): read_node = nuke.toNode(container['objectName']) file = get_representation_path(representation).replace("\\", "/") - start_at_workfile = bool("start at" in read_node['frame_mode'].value()) + start_at_workfile = "start at" in read_node['frame_mode'].value() + + add_retime = [ + key for key in read_node.knobs().keys() + if "addRetime" in key + ] project_name = legacy_io.active_project() version_doc = get_version_by_id(project_name, representation["parent"]) @@ -286,7 +305,7 @@ class LoadClip(plugin.NukeLoader): "updated to version: {}".format(version_doc.get("name")) ) - if version_data.get("retime", None): + if add_retime and version_data.get("retime", None): self._make_retimes(read_node, version_data) else: self.clear_members(read_node) diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py index 4257ed3131..bfe32d8fd1 100644 --- 
a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py +++ b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py @@ -33,6 +33,7 @@ class CollectSlate(pyblish.api.InstancePlugin): if slate_node: instance.data["slateNode"] = slate_node + instance.data["slate"] = True instance.data["families"].append("slate") instance.data["versionData"]["families"].append("slate") self.log.info( diff --git a/openpype/hosts/nuke/plugins/publish/extract_render_local.py b/openpype/hosts/nuke/plugins/publish/extract_render_local.py index 1595fe03fb..8879f0c999 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_render_local.py +++ b/openpype/hosts/nuke/plugins/publish/extract_render_local.py @@ -31,10 +31,6 @@ class NukeRenderLocal(openpype.api.Extractor): first_frame = instance.data.get("frameStartHandle", None) - # exception for slate workflow - if "slate" in families: - first_frame -= 1 - last_frame = instance.data.get("frameEndHandle", None) node_subset_name = instance.data.get("name", None) @@ -68,10 +64,6 @@ class NukeRenderLocal(openpype.api.Extractor): int(last_frame) ) - # exception for slate workflow - if "slate" in families: - first_frame += 1 - ext = node["file_type"].value() if "representations" not in instance.data: @@ -88,8 +80,11 @@ class NukeRenderLocal(openpype.api.Extractor): repre = { 'name': ext, 'ext': ext, - 'frameStart': "%0{}d".format( - len(str(last_frame))) % first_frame, + 'frameStart': ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(first_frame) + ), 'files': filenames, "stagingDir": out_dir } @@ -105,13 +100,16 @@ class NukeRenderLocal(openpype.api.Extractor): instance.data['family'] = 'render' families.remove('render.local') families.insert(0, "render2d") + instance.data["anatomyData"]["family"] = "render" elif "prerender.local" in families: instance.data['family'] = 'prerender' families.remove('prerender.local') families.insert(0, "prerender") + instance.data["anatomyData"]["family"] = "prerender" elif "still.local" in families: instance.data['family'] = 'image' families.remove('still.local') + instance.data["anatomyData"]["family"] = "image" instance.data["families"] = families collections, remainder = clique.assemble(filenames) @@ -123,4 +121,4 @@ class NukeRenderLocal(openpype.api.Extractor): self.log.info('Finished render') - self.log.debug("instance extracted: {}".format(instance.data)) + self.log.debug("_ instance.data: {}".format(instance.data)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index 99ade4cf9b..b5cad143db 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -13,6 +13,7 @@ from openpype.hosts.nuke.api import ( get_view_process_node ) + class ExtractSlateFrame(openpype.api.Extractor): """Extracts movie and thumbnail with baked in luts @@ -236,6 +237,7 @@ class ExtractSlateFrame(openpype.api.Extractor): def _render_slate_to_sequence(self, instance): # set slate frame first_frame = instance.data["frameStartHandle"] + last_frame = instance.data["frameEndHandle"] slate_first_frame = first_frame - 1 # render slate as sequence frame @@ -284,6 +286,13 @@ class ExtractSlateFrame(openpype.api.Extractor): matching_repre["files"] = [first_filename, slate_filename] elif slate_filename not in matching_repre["files"]: matching_repre["files"].insert(0, slate_filename) + matching_repre["frameStart"] = ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(slate_first_frame) + ) 
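+            # Worked example of the padding idiom above (values assumed
+            # for illustration): last_frame=1105 -> len(str(1105))=4 ->
+            # "{{:0>{}}}".format(4) builds "{:0>4}", and
+            # "{:0>4}".format(996) == "0996", so frameStart stores the
+            # slate frame zero-padded to the sequence's frame width.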
+ self.log.debug( + "__ matching_repre: {}".format(pformat(matching_repre))) self.log.warning("Added slate frame to representation files") diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 4b3b70fa12..b396056eb9 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -50,7 +50,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # establish families family = avalon_knob_data["family"] families_ak = avalon_knob_data.get("families", []) - families = list() + families = [] # except disabled nodes but exclude backdrops in test if ("nukenodes" not in family) and (node["disable"].value()): @@ -94,6 +94,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # Farm rendering self.log.info("flagged for farm render") instance.data["transfer"] = False + instance.data["farm"] = True families.append("{}.farm".format(family)) family = families_ak.lower() @@ -110,10 +111,10 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ families: `{}`".format(families)) # Get format - format = root['format'].value() - resolution_width = format.width() - resolution_height = format.height() - pixel_aspect = format.pixelAspect() + format_ = root['format'].value() + resolution_width = format_.width() + resolution_height = format_.height() + pixel_aspect = format_.pixelAspect() # get publish knob value if "publish" not in node.knobs(): @@ -124,8 +125,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ _families_test: `{}`".format(_families_test)) for family_test in _families_test: if family_test in self.sync_workfile_version_on_families: - self.log.debug("Syncing version with workfile for '{}'" - .format(family_test)) + self.log.debug( + "Syncing version with workfile for '{}'".format( + family_test + ) + ) # get version to instance for integration instance.data['version'] = instance.context.data['version'] diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index a97f34b370..e37cc8a80a 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -144,8 +144,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): self.log.debug("colorspace: `{}`".format(colorspace)) version_data = { - "families": [f.replace(".local", "").replace(".farm", "") - for f in _families_test if "write" not in f], + "families": [ + _f.replace(".local", "").replace(".farm", "") + for _f in _families_test if "write" != _f + ], "colorspace": colorspace } diff --git a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py index af5e8e9d27..5f7b1f3806 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py +++ b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py @@ -98,7 +98,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): self.log.error(msg) raise ValidationException(msg) - collected_frames_len = int(len(collection.indexes)) + collected_frames_len = len(collection.indexes) coll_start = min(collection.indexes) coll_end = max(collection.indexes) diff --git a/openpype/hosts/nuke/plugins/publish/validate_script.py b/openpype/hosts/nuke/plugins/publish/validate_script.py index 9bda0da85e..b8d7494b9d 100644 --- 
a/openpype/hosts/nuke/plugins/publish/validate_script.py +++ b/openpype/hosts/nuke/plugins/publish/validate_script.py @@ -1,7 +1,6 @@ import pyblish.api -from openpype.client import get_project, get_asset_by_id -from openpype import lib +from openpype.client import get_project, get_asset_by_id, get_asset_by_name from openpype.pipeline import legacy_io @@ -17,10 +16,11 @@ class ValidateScript(pyblish.api.InstancePlugin): def process(self, instance): ctx_data = instance.context.data - asset_name = ctx_data["asset"] - asset = lib.get_asset(asset_name) - asset_data = asset["data"] project_name = legacy_io.active_project() + asset_name = ctx_data["asset"] + # TODO replace query with 'instance.data["assetEntity"]' + asset = get_asset_by_name(project_name, asset_name) + asset_data = asset["data"] # These attributes will be checked attributes = [ diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index 20a6e3169f..ee150d1808 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,6 +1,5 @@ import os from Qt import QtWidgets -from bson.objectid import ObjectId import pyblish.api @@ -13,8 +12,8 @@ from openpype.pipeline import ( deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, - registered_host, ) +from openpype.pipeline.load import any_outdated_containers import openpype.hosts.photoshop from . import lib @@ -30,7 +29,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") def check_inventory(): - if not lib.any_outdated(): + if not any_outdated_containers(): return # Warn about outdated containers. diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index f15068b031..2cfbfa8778 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,3 +1,5 @@ +import re + from openpype.hosts.photoshop import api from openpype.lib import BoolDef from openpype.pipeline import ( @@ -5,6 +7,8 @@ from openpype.pipeline import ( CreatedInstance, legacy_io ) +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class ImageCreator(Creator): @@ -38,17 +42,24 @@ class ImageCreator(Creator): top_level_selected_items = stub.get_selected_layers() if pre_create_data.get("use_selection"): only_single_item_selected = len(top_level_selected_items) == 1 - for selected_item in top_level_selected_items: - if ( - only_single_item_selected or - pre_create_data.get("create_multiple")): + if ( + only_single_item_selected or + pre_create_data.get("create_multiple")): + for selected_item in top_level_selected_items: if selected_item.group: groups_to_create.append(selected_item) else: top_layers_to_wrap.append(selected_item) - else: - group = stub.group_selected_layers(subset_name_from_ui) - groups_to_create.append(group) + else: + group = stub.group_selected_layers(subset_name_from_ui) + groups_to_create.append(group) + else: + stub.select_layers(stub.get_layers()) + try: + group = stub.group_selected_layers(subset_name_from_ui) + except Exception: + raise ValueError("Cannot group locked Background layer!") + groups_to_create.append(group) if not groups_to_create and not top_layers_to_wrap: group = stub.create_group(subset_name_from_ui) @@ -60,6 +71,7 @@ class ImageCreator(Creator): group = stub.group_selected_layers(layer.name) groups_to_create.append(group) + layer_name = ''
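+        # Sketch of the "{layer}" templating used in the loop below
+        # (example values assumed; assumes SUBSET_NAME_ALLOWED_SYMBOLS
+        # excludes spaces): with subset name "imageMain" and group name
+        # "BG 01", re.sub() keeps only allowed characters ("BG01"),
+        # "{Layer}" is appended ("imageMain{Layer}"), and
+        # prepare_template_data({"layer": "BG01"}) supplies case
+        # variants such as {layer}/{Layer}/{LAYER}, so the final
+        # .format(**layer_fill) yields "imageMainBG01".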
creating_multiple_groups = len(groups_to_create) > 1 for group in groups_to_create: subset_name = subset_name_from_ui # reset to name from creator UI @@ -67,8 +79,16 @@ class ImageCreator(Creator): created_group_name = self._clean_highlights(stub, group.name) if creating_multiple_groups: - # concatenate with layer name to differentiate subsets - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill = prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -143,3 +163,6 @@ class ImageCreator(Creator): def _clean_highlights(self, stub, item): return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, '') + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py index 9736471a26..2792a775e0 100644 --- a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py @@ -1,7 +1,12 @@ +import re + from Qt import QtWidgets from openpype.pipeline import create from openpype.hosts.photoshop import api as photoshop +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS + class CreateImage(create.LegacyCreator): """Image folder for publish.""" @@ -75,6 +80,7 @@ class CreateImage(create.LegacyCreator): groups.append(group) creator_subset_name = self.data["subset"] + layer_name = '' for group in groups: long_names = [] group.name = group.name.replace(stub.PUBLISH_ICON, ''). 
\ @@ -82,7 +88,16 @@ class CreateImage(create.LegacyCreator): subset_name = creator_subset_name if len(groups) > 1: - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill = prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -98,3 +113,7 @@ class CreateImage(create.LegacyCreator): # reusing existing group, need to rename afterwards if not create_group: stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) + + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py index b53f4e8198..8106d6ff16 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py @@ -4,6 +4,7 @@ import pyblish.api import openpype.api from openpype.pipeline import PublishXmlValidationError from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class ValidateNamingRepair(pyblish.api.Action): @@ -50,6 +51,13 @@ class ValidateNamingRepair(pyblish.api.Action): subset_name = re.sub(invalid_chars, replace_char, instance.data["subset"]) + # format from Tool Creator + subset_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + subset_name + ) + layer_meta["subset"] = subset_name stub.imprint(instance_id, layer_meta) diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 49b478fb3b..b03125d502 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -4,11 +4,11 @@ import uuid import qargparse from Qt import QtWidgets, QtCore -import openpype.api as pype from openpype.pipeline import ( LegacyCreator, LoaderPlugin, ) +from openpype.pipeline.context_tools import get_current_project_asset from openpype.hosts import resolve from . 
import lib @@ -375,7 +375,7 @@ class ClipLoader: """ asset_name = self.context["representation"]["context"]["asset"] - self.data["assetData"] = pype.get_asset(asset_name)["data"] + self.data["assetData"] = get_current_project_asset(asset_name)["data"] def load(self): # create project bin for the media to be imported into diff --git a/openpype/hosts/resolve/plugins/publish/precollect_instances.py b/openpype/hosts/resolve/plugins/publish/precollect_instances.py index 8f1a13a4e5..ee51998c0d 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_instances.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_instances.py @@ -70,7 +70,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin): "publish": resolve.get_publish_attribute(timeline_item), "fps": context.data["fps"], "handleStart": handle_start, - "handleEnd": handle_end + "handleEnd": handle_end, + "newAssetPublishing": True }) # otio clip data diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py index 0a1d29ccdc..8633d4bf9d 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py @@ -19,6 +19,7 @@ import os import opentimelineio as otio import pyblish.api from openpype import lib as plib +from openpype.pipeline.context_tools import get_current_project_asset class OTIO_View(pyblish.api.Action): @@ -116,7 +117,7 @@ class CollectEditorial(pyblish.api.InstancePlugin): if extension == ".edl": # EDL has no frame rate embedded so needs explicit # frame rate else 24 is assumed. - kwargs["rate"] = plib.get_asset()["data"]["fps"] + kwargs["rate"] = get_current_project_asset()["data"]["fps"] instance.data["otio_timeline"] = otio.adapters.read_from_file( file_path, **kwargs) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py index d0d36bb717..75c260bad7 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py @@ -1,8 +1,12 @@ import os +from copy import deepcopy + import opentimelineio as otio import pyblish.api + from openpype import lib as plib -from copy import deepcopy +from openpype.pipeline.context_tools import get_current_project_asset + class CollectInstances(pyblish.api.InstancePlugin): """Collect instances from editorial's OTIO sequence""" @@ -48,7 +52,7 @@ class CollectInstances(pyblish.api.InstancePlugin): # get timeline otio data timeline = instance.data["otio_timeline"] - fps = plib.get_asset()["data"]["fps"] + fps = get_current_project_asset()["data"]["fps"] tracks = timeline.each_child( descended_from_type=otio.schema.Track @@ -166,7 +170,8 @@ class CollectInstances(pyblish.api.InstancePlugin): "frameStart": frame_start, "frameEnd": frame_end, "frameStartH": frame_start - handle_start, - "frameEndH": frame_end + handle_end + "frameEndH": frame_end + handle_end, + "newAssetPublishing": True } for data_key in instance_data_filter:
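The EDL rate handling above is worth a standalone sketch (a minimal illustration, assuming an OpenPype session with a current project asset set; file_path is a hypothetical input):

import opentimelineio as otio
from openpype.pipeline.context_tools import get_current_project_asset

def read_timeline(file_path):
    kwargs = {}
    if file_path.endswith(".edl"):
        # EDL carries no embedded frame rate, so pass the asset fps
        # explicitly; otherwise the adapter assumes 24.
        kwargs["rate"] = get_current_project_asset()["data"]["fps"]
    return otio.adapters.read_from_file(file_path, **kwargs)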
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py index 005157af62..ff7f60354e 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py @@ -3,8 +3,8 @@ import re import pyblish.api import openpype.api -from openpype import lib from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.context_tools import get_current_project_asset class ValidateFrameRange(pyblish.api.InstancePlugin): @@ -27,7 +27,8 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): for pattern in self.skip_timelines_check): self.log.info("Skipping for {} task".format(instance.data["task"])) - asset_data = lib.get_asset(instance.data["asset"])["data"] + # TODO replace query with 'instance.data["assetEntity"]' + asset_data = get_current_project_asset(instance.data["asset"])["data"] frame_start = asset_data["frameStart"] frame_end = asset_data["frameEnd"] handle_start = asset_data["handleStart"] diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py index 285fe8f8d6..1e05f336fb 100644 --- a/openpype/hosts/testhost/api/pipeline.py +++ b/openpype/hosts/testhost/api/pipeline.py @@ -1,6 +1,6 @@ import os import json -from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_name class HostContext: @@ -17,10 +17,10 @@ class HostContext: if not asset_name: return project_name - asset_doc = legacy_io.find_one( - {"type": "asset", "name": asset_name}, - {"data.parents": 1} + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.parents"] ) + parents = asset_doc.get("data", {}).get("parents") or [] hierarchy = [project_name] diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py index 06b95375b1..8d59fc3242 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/testhost/plugins/create/auto_creator.py @@ -1,10 +1,11 @@ from openpype.lib import NumberDef -from openpype.hosts.testhost.api import pipeline +from openpype.client import get_asset_by_name from openpype.pipeline import ( legacy_io, AutoCreator, CreatedInstance, ) +from openpype.hosts.testhost.api import pipeline class MyAutoCreator(AutoCreator): @@ -44,10 +45,7 @@ class MyAutoCreator(AutoCreator): host_name = legacy_io.Session["AVALON_APP"] if existing_instance is None: - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) @@ -69,10 +67,7 @@ class MyAutoCreator(AutoCreator): existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) diff --git a/openpype/hosts/traypublisher/api/__init__.py b/openpype/hosts/traypublisher/api/__init__.py index c461c0c526..4e7284b09a 100644 --- a/openpype/hosts/traypublisher/api/__init__.py +++ b/openpype/hosts/traypublisher/api/__init__.py @@ -1,20 +1,8 @@ from .pipeline import ( - install, - ls, - - set_project_name, - get_context_title, - get_context_data, - update_context_data, + TrayPublisherHost, ) __all__ = ( - "install", - "ls", - - "set_project_name", - "get_context_title", - "get_context_data", - "update_context_data", + "TrayPublisherHost", )
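The query-function migration repeated across these hunks follows one pattern; a minimal sketch (project and asset names are assumed example values):

from openpype.client import get_asset_by_name

# Replaces legacy_io.find_one({"type": "asset", "name": asset_name}, ...)
# with an explicit project-scoped query; 'fields' limits the projection.
asset_doc = get_asset_by_name(
    "demo_project", "sh0010", fields=["data.parents"]
)
parents = asset_doc.get("data", {}).get("parents") or []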
diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py new file mode 100644 index 0000000000..7c392ef508 --- /dev/null +++ b/openpype/hosts/traypublisher/api/editorial.py @@ -0,0 +1,331 @@ +import re +from copy import deepcopy + +from openpype.client import get_asset_by_id +from openpype.pipeline.create import CreatorError + + +class ShotMetadataSolver: + """ Solving hierarchical metadata + + Used during editorial publishing. Works with input + clip name and settings defining a Python formattable + template. Settings also define search patterns + and their token keys used for formatting the templates. + """ + + NO_DECOR_PATTERN = re.compile(r"\{([a-z]*?)\}") + + # presets + clip_name_tokenizer = None + shot_rename = True + shot_hierarchy = None + shot_add_tasks = None + + def __init__( + self, + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + logger + ): + self.clip_name_tokenizer = clip_name_tokenizer + self.shot_rename = shot_rename + self.shot_hierarchy = shot_hierarchy + self.shot_add_tasks = shot_add_tasks + self.log = logger + + def _rename_template(self, data): + """Shot renaming function + + Args: + data (dict): formatting data + + Raises: + CreatorError: If missing keys + + Returns: + str: formatted new name + """ + shot_rename_template = self.shot_rename[ + "shot_rename_template"] + try: + # format to new shot name + return shot_rename_template.format(**data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n\n" + f"From template string {shot_rename_template} > " + f"`{_E}` has no equivalent in \n" + f"{list(data.keys())} input formatting keys!" + )) + + def _generate_tokens(self, clip_name, source_data): + """Token generator + + Settings define token key and regex expression pairs. + + Args: + clip_name (str): name of clip in editorial + source_data (dict): data for formatting + + Raises: + CreatorError: if missing key + + Returns: + dict: updated source_data + """ + output_data = deepcopy(source_data["anatomy_data"]) + output_data["clip_name"] = clip_name + + if not self.clip_name_tokenizer: + return output_data + + parent_name = source_data["selected_asset_doc"]["name"] + + search_text = parent_name + clip_name + + for token_key, pattern in self.clip_name_tokenizer.items(): + p = re.compile(pattern) + match = p.findall(search_text) + if not match: + raise CreatorError(( "Make sure regex expression works with your data: \n\n" f"'{token_key}' with regex '{pattern}' in your settings\n" "can't find any match in your clip name " f"'{search_text}'!\n\nLook at: " "'project_settings/traypublisher/editorial_creators" "/editorial_simple/clip_name_tokenizer'\n" "in your project settings..." )) + + # QUESTION: how to refactor `match[-1]` to some better way? + output_data[token_key] = match[-1] + + return output_data
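+    # Worked example of the tokenizer contract above (settings values
+    # assumed for illustration): with clip_name_tokenizer
+    # {"_sequence_": "(sq\d{2})", "_shot_": "(sh\d{4})"} and
+    # search_text "sq01" + "sh0010", the loop adds
+    # {"_sequence_": "sq01", "_shot_": "sh0010"} to the anatomy data;
+    # 'match[-1]' keeps the last occurrence found in parent + clip name.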
+ def _create_parents_from_settings(self, parents, data): + """Formatting parent components. + + Args: + parents (list): list of dict parent components + data (dict): formatting data + + Raises: + CreatorError: missing formatting key + CreatorError: missing token key + KeyError: missing parent token + + Returns: + list: list of dict of parent components + """ + # fill the parents parts from presets + shot_hierarchy = deepcopy(self.shot_hierarchy) + hierarchy_parents = shot_hierarchy["parents"] + + # fill parent keys data template from anatomy data + try: + _parent_tokens_formatting_data = { + parent_token["name"]: parent_token["value"].format(**data) + for parent_token in hierarchy_parents + } + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n" + f"`{_E}` has no equivalent in \n{list(data.keys())}" + )) + + _parent_tokens_type = { + parent_token["name"]: parent_token["type"] + for parent_token in hierarchy_parents + } + for _index, _parent in enumerate( + shot_hierarchy["parents_path"].split("/") + ): + # format parent token with the already formatted value + try: + parent_name = _parent.format( + **_parent_tokens_formatting_data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n\n" + f"`{_E}` from template string " + f"{shot_hierarchy['parents_path']}, " + f" has no equivalent in \n" + f"{list(_parent_tokens_formatting_data.keys())} parents" + )) + + parent_token_name = ( + self.NO_DECOR_PATTERN.findall(_parent).pop()) + + if not parent_token_name: + raise KeyError( + f"Parent token is not found in: `{_parent}`") + + # find parent type + parent_token_type = _parent_tokens_type[parent_token_name] + + # in case selected context is set to the same asset + if ( + _index == 0 + and parents[-1]["entity_name"] == parent_name + ): + self.log.debug(f" skipping : {parent_name}") + continue + + # in case first parent is project then start parents from start + if ( + _index == 0 + and parent_token_type == "Project" + ): + self.log.debug("rebuilding parents from scratch") + project_parent = parents[0] + parents = [project_parent] + continue + + parents.append({ + "entity_type": parent_token_type, + "entity_name": parent_name + }) + + self.log.debug(f"__ parents: {parents}") + + return parents + + def _create_hierarchy_path(self, parents): + """Converting hierarchy path from parents + + Args: + parents (list): list of dict parent components + + Returns: + str: hierarchy path + """ + return "/".join( + [ + p["entity_name"] for p in parents + if p["entity_type"] != "Project" + ] + ) if parents else ""
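+    # Worked example of the settings contract above (assumed values):
+    # with parents = [{"name": "sequence", "type": "Sequence",
+    # "value": "{_sequence_}"}], parents_path = "{sequence}" and
+    # tokenized data {"_sequence_": "sq01"}, the token value formats
+    # to "sq01", the "{sequence}" segment resolves to "sq01" and
+    # {"entity_type": "Sequence", "entity_name": "sq01"} is appended;
+    # _create_hierarchy_path() then joins non-project parents with "/".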
+ def _get_parents_from_selected_asset( + self, + asset_doc, + project_doc + ): + """Returning parents from the context of the selected asset. + + Context is defined in the Traypublisher project tree. + + Args: + asset_doc (db obj): selected asset doc + project_doc (db obj): actual project doc + + Returns: + list: list of dict parent components + """ + project_name = project_doc["name"] + visual_hierarchy = [asset_doc] + current_doc = asset_doc + + # loop through all available visual parents + # break when no more are available + while True: + visual_parent_id = current_doc["data"]["visualParent"] + visual_parent = None + if visual_parent_id: + visual_parent = get_asset_by_id(project_name, visual_parent_id) + + if not visual_parent: + visual_hierarchy.append(project_doc) + break + visual_hierarchy.append(visual_parent) + current_doc = visual_parent + + # add current selection context hierarchy + return [ + { + "entity_type": entity["data"]["entityType"], + "entity_name": entity["name"] + } + for entity in reversed(visual_hierarchy) + ] + + def _generate_tasks_from_settings(self, project_doc): + """Convert settings inputs to task data. + + Args: + project_doc (db obj): actual project doc + + Raises: + KeyError: Missing task type in project doc + + Returns: + dict: tasks data + """ + tasks_to_add = {} + + project_tasks = project_doc["config"]["tasks"] + for task_name, task_data in self.shot_add_tasks.items(): + _task_data = deepcopy(task_data) + + # check if task type in project task types + if _task_data["type"] in project_tasks.keys(): + tasks_to_add[task_name] = _task_data + else: + raise KeyError( + "Task type `{}` for task `{}` does not" + " exist in project task types `{}`".format( + _task_data["type"], + task_name, + list(project_tasks.keys()) + ) + ) + + return tasks_to_add + + def generate_data(self, clip_name, source_data): + """Metadata generator. + + Converts input data to hierarchy metadata. + + Args: + clip_name (str): clip name + source_data (dict): formatting data + + Returns: + (str, dict): shot name and hierarchy data + """ + self.log.info(f"_ source_data: {source_data}") + + tasks = {} + asset_doc = source_data["selected_asset_doc"] + project_doc = source_data["project_doc"] + + # match clip to shot name at start + shot_name = clip_name + + # parse all tokens and generate formatting data + formatting_data = self._generate_tokens(shot_name, source_data) + + # generate parents from selected asset + parents = self._get_parents_from_selected_asset(asset_doc, project_doc) + + if self.shot_rename["enabled"]: + shot_name = self._rename_template(formatting_data) + self.log.info(f"Renamed shot name: {shot_name}") + + if self.shot_hierarchy["enabled"]: + parents = self._create_parents_from_settings( + parents, formatting_data) + + if self.shot_add_tasks: + tasks = self._generate_tasks_from_settings( + project_doc) + + return shot_name, { + "hierarchy": self._create_hierarchy_path(parents), + "parents": parents, + "tasks": tasks + } diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py index 954a0bae47..2d9db7801e 100644 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ b/openpype/hosts/traypublisher/api/pipeline.py @@ -9,6 +9,8 @@ from openpype.pipeline import ( register_creator_plugin_path, legacy_io, ) +from openpype.host import HostBase, INewPublisher + ROOT_DIR = os.path.dirname(os.path.dirname( os.path.abspath(__file__) @@ -17,6 +19,35 @@ PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish") CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create") +class TrayPublisherHost(HostBase, INewPublisher): + name = "traypublisher" + + def install(self): + os.environ["AVALON_APP"] = self.name +
legacy_io.Session["AVALON_APP"] = self.name + + pyblish.api.register_host("traypublisher") + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_creator_plugin_path(CREATE_PATH) + + def get_context_title(self): + return HostContext.get_project_name() + + def get_context_data(self): + return HostContext.get_context_data() + + def update_context_data(self, data, changes): + HostContext.save_context_data(data, changes) + + def set_project_name(self, project_name): + # TODO Deregister project specific plugins and register new project + # plugins + os.environ["AVALON_PROJECT"] = project_name + legacy_io.Session["AVALON_PROJECT"] = project_name + legacy_io.install() + HostContext.set_project_name(project_name) + + class HostContext: _context_json_path = None @@ -150,32 +181,3 @@ def get_context_data(): def update_context_data(data, changes): HostContext.save_context_data(data) - - -def get_context_title(): - return HostContext.get_project_name() - - -def ls(): - """Probably will never return loaded containers.""" - return [] - - -def install(): - """This is called before a project is known. - - Project is defined with 'set_project_name'. - """ - os.environ["AVALON_APP"] = "traypublisher" - - pyblish.api.register_host("traypublisher") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_creator_plugin_path(CREATE_PATH) - - -def set_project_name(project_name): - # TODO Deregister project specific plugins and register new project plugins - os.environ["AVALON_PROJECT"] = project_name - legacy_io.Session["AVALON_PROJECT"] = project_name - legacy_io.install() - HostContext.set_project_name(project_name) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py index 202664cfc6..a3eead51c8 100644 --- a/openpype/hosts/traypublisher/api/plugin.py +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -1,8 +1,9 @@ -from openpype.pipeline import ( +from openpype.lib.attribute_definitions import FileDef +from openpype.pipeline.create import ( Creator, + HiddenCreator, CreatedInstance ) -from openpype.lib import FileDef from .pipeline import ( list_instances, @@ -11,6 +12,64 @@ from .pipeline import ( HostContext, ) +IMAGE_EXTENSIONS = [ + ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal", + ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits", + ".flic", ".flif", ".fpx", ".gif", ".hdri", ".hevc", ".icer", + ".icns", ".ico", ".cur", ".ics", ".ilbm", ".jbig", ".jbig2", + ".jng", ".jpeg", ".jpeg-ls", ".jpeg", ".2000", ".jpg", ".xr", + ".jpeg", ".xt", ".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd", + ".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf", + ".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras", + ".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep", + ".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf", + ".xpm", ".xwd" +] +VIDEO_EXTENSIONS = [ + ".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".f4a", ".f4b", + ".f4p", ".f4v", ".flv", ".gif", ".gifv", ".m2v", ".m4p", ".m4v", + ".mkv", ".mng", ".mov", ".mp2", ".mp4", ".mpe", ".mpeg", ".mpg", + ".mpv", ".mxf", ".nsv", ".ogg", ".ogv", ".qt", ".rm", ".rmvb", + ".roq", ".svi", ".vob", ".webm", ".wmv", ".yuv" +] +REVIEW_EXTENSIONS = IMAGE_EXTENSIONS + VIDEO_EXTENSIONS + + +class HiddenTrayPublishCreator(HiddenCreator): + host_name = "traypublisher" + + def collect_instances(self): + for instance_data in list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + instance = 
CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + update_instances(update_list) + + def remove_instances(self, instances): + remove_instances(instances) + for instance in instances: + self._remove_instance_from_context(instance) + + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into the "workfile" of traypublisher and also added + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. + """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + # Add instance to current context + self._add_instance_to_context(new_instance) + class TrayPublishCreator(Creator): create_allow_context_change = True @@ -33,9 +92,20 @@ class TrayPublishCreator(Creator): for instance in instances: self._remove_instance_from_context(instance) - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attrobites - return self.get_instance_attr_defs() + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into the "workfile" of traypublisher and also added + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. + """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + # Add instance to current context + self._add_instance_to_context(new_instance) class SettingsCreator(TrayPublishCreator): @@ -43,37 +113,40 @@ class SettingsCreator(TrayPublishCreator): extensions = [] - def collect_instances(self): - for instance_data in list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - def create(self, subset_name, data, pre_create_data): # Pass precreate data to creator attributes data["creator_attributes"] = pre_create_data data["settings_creator"] = True # Create new instance new_instance = CreatedInstance(self.family, subset_name, data, self) - # Host implementation of storing metadata about instance - HostContext.add_instance(new_instance.data_to_store()) - # Add instance to current context - self._add_instance_to_context(new_instance) + + self._store_new_instance(new_instance) def get_instance_attr_defs(self): return [ FileDef( - "filepath", + "representation_files", folders=False, extensions=self.extensions, allow_sequences=self.allow_sequences, - label="Filepath", + single_item=not self.allow_multiple_items, + label="Representations", + ), + FileDef( + "reviewable", + folders=False, + extensions=REVIEW_EXTENSIONS, + allow_sequences=True, + single_item=True, + label="Reviewable representations", + extensions_label="Single reviewable item" ) ] + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return self.get_instance_attr_defs() + @classmethod def from_settings(cls, item_data): identifier = item_data["identifier"] @@ -92,6 +165,7 @@ class SettingsCreator(TrayPublishCreator): "detailed_description": item_data["detailed_description"], "extensions": item_data["extensions"], "allow_sequences": item_data["allow_sequences"], + "allow_multiple_items": item_data["allow_multiple_items"], "default_variants": item_data["default_variants"] } )
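The `from_settings` factory above builds one creator class per settings item. A minimal sketch of such an item (only keys visible in this hunk are shown; the values, and any further keys real settings items carry, are assumptions for illustration):

simple_creator_item = {
    "identifier": "settings_movie",
    "detailed_description": "Publish rendered movie files.",
    "extensions": [".mov", ".mp4"],
    "allow_sequences": False,
    "allow_multiple_items": True,
    "default_variants": ["Main"],
}
# Returns a dynamically named SettingsCreator subclass.
DynamicCreator = SettingsCreator.from_settings(simple_creator_item)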
diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py new file mode 100644 index 0000000000..28a115629e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py @@ -0,0 +1,869 @@ +import os +from copy import deepcopy +from pprint import pformat +import opentimelineio as otio +from openpype.client import ( + get_asset_by_name, + get_project +) +from openpype.hosts.traypublisher.api.plugin import ( + TrayPublishCreator, + HiddenTrayPublishCreator +) +from openpype.hosts.traypublisher.api.editorial import ( + ShotMetadataSolver +) + +from openpype.pipeline import CreatedInstance + +from openpype.lib import ( + get_ffprobe_data, + convert_ffprobe_fps_value, + + FileDef, + TextDef, + NumberDef, + EnumDef, + BoolDef, + UISeparatorDef, + UILabelDef +) + + +CLIP_ATTR_DEFS = [ + EnumDef( + "fps", + items={ + "from_selection": "From selection", + 23.976: "23.976", + 24: "24", + 25: "25", + 29.97: "29.97", + 30: "30" + }, + label="FPS" + ), + NumberDef( + "workfile_start_frame", + default=1001, + label="Workfile start frame" + ), + NumberDef( + "handle_start", + default=0, + label="Handle start" + ), + NumberDef( + "handle_end", + default=0, + label="Handle end" + ) +] + + +class EditorialClipInstanceCreatorBase(HiddenTrayPublishCreator): + """ Wrapper class for clip family creators + + Args: + HiddenTrayPublishCreator (BaseCreator): hidden supporting class + """ + host_name = "traypublisher" + + def create(self, instance_data, source_data=None): + self.log.info(f"instance_data: {instance_data}") + subset_name = instance_data["subset"] + + # Create new instance + new_instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + self.log.info(f"instance_data: {pformat(new_instance.data)}") + + self._store_new_instance(new_instance) + + return new_instance + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + +class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase): + """ Shot family class + + The shot metadata instance carrier. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_shot" + family = "shot" + label = "Editorial Shot" + + def get_instance_attr_defs(self): + attr_defs = [ + TextDef( + "asset_name", + label="Asset name", + ) + ] + attr_defs.extend(CLIP_ATTR_DEFS) + return attr_defs + + +class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase): + """ Plate family class + + Plate representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_plate" + family = "plate" + label = "Editorial Plate" + + +class EditorialAudioInstanceCreator(EditorialClipInstanceCreatorBase): + """ Audio family class + + Audio representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_audio" + family = "audio" + label = "Editorial Audio" + + +class EditorialReviewInstanceCreator(EditorialClipInstanceCreatorBase): + """ Review family class + + Review representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_review" + family = "review" + label = "Editorial Review" + + +class EditorialSimpleCreator(TrayPublishCreator): + """ Editorial creator class + + Simple workflow creator.
This creator dissects the input + video file into clip chunks and then converts each to + the format defined in Settings for each subset preset. + + Args: + TrayPublishCreator (Creator): Tray publisher plugin class + """ + + label = "Editorial Simple" + family = "editorial" + identifier = "editorial_simple" + default_variants = [ + "main" + ] + description = "Editorial files to generate shots." + detailed_description = """ +Supports publishing new shots to a project +or updating already created ones. Publishing will create an OTIO file. +""" + icon = "fa.file" + + def __init__( + self, project_settings, *args, **kwargs + ): + super(EditorialSimpleCreator, self).__init__( + project_settings, *args, **kwargs + ) + editorial_creators = deepcopy( + project_settings["traypublisher"]["editorial_creators"] + ) + # get this creator settings by identifier + self._creator_settings = editorial_creators.get(self.identifier) + + clip_name_tokenizer = self._creator_settings["clip_name_tokenizer"] + shot_rename = self._creator_settings["shot_rename"] + shot_hierarchy = self._creator_settings["shot_hierarchy"] + shot_add_tasks = self._creator_settings["shot_add_tasks"] + + self._shot_metadata_solver = ShotMetadataSolver( + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + self.log + ) + + # try to set main attributes from settings + if self._creator_settings.get("default_variants"): + self.default_variants = self._creator_settings["default_variants"] + + def create(self, subset_name, instance_data, pre_create_data): + allowed_family_presets = self._get_allowed_family_presets( + pre_create_data) + + clip_instance_properties = { + k: v for k, v in pre_create_data.items() + if k != "sequence_filepath_data" + if k not in [ + i["family"] for i in self._creator_settings["family_presets"] + ] + } + # Prepare data for the otio editorial instance + asset_name = instance_data["asset"] + asset_doc = get_asset_by_name(self.project_name, asset_name) + + self.log.info(pre_create_data["fps"]) + + if pre_create_data["fps"] == "from_selection": + # get asset doc data attributes + fps = asset_doc["data"]["fps"] + else: + fps = float(pre_create_data["fps"]) + + instance_data.update({ + "fps": fps + }) + + # get path of sequence + sequence_path_data = pre_create_data["sequence_filepath_data"] + media_path_data = pre_create_data["media_filepaths_data"] + + sequence_path = self._get_path_from_file_data(sequence_path_data) + media_path = self._get_path_from_file_data(media_path_data) + + # get otio timeline + otio_timeline = self._create_otio_timeline( + sequence_path, fps) + + # Create all clip instances + clip_instance_properties.update({ + "fps": fps, + "parent_asset_name": asset_name, + "variant": instance_data["variant"] + }) + + # create clip instances + self._get_clip_instances( + otio_timeline, + media_path, + clip_instance_properties, + family_presets=allowed_family_presets + + ) + + # create otio editorial instance + self._create_otio_instance( + subset_name, instance_data, + sequence_path, media_path, + otio_timeline + ) + + def _create_otio_instance( + self, + subset_name, + data, + sequence_path, + media_path, + otio_timeline + ): + """Otio instance creating function + + Args: + subset_name (str): name of subset + data (dict): instance data + sequence_path (str): path to sequence file + media_path (str): path to media file + otio_timeline (otio.Timeline): otio timeline object + """ + # Pass precreate data to creator attributes + data.update({ + "sequenceFilePath": sequence_path, + "editorialSourcePath":
media_path, + "otioTimeline": otio.adapters.write_to_string(otio_timeline) + }) + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._store_new_instance(new_instance) + + def _create_otio_timeline(self, sequence_path, fps): + """Creating otio timeline from sequence path + + Args: + sequence_path (str): path to sequence file + fps (float): frame per second + + Returns: + otio.Timeline: otio timeline object + """ + # get editorial sequence file into otio timeline object + extension = os.path.splitext(sequence_path)[1] + + kwargs = {} + if extension == ".edl": + # EDL has no frame rate embedded so needs explicit + # frame rate else 24 is assumed. + kwargs["rate"] = fps + kwargs["ignore_timecode_mismatch"] = True + + self.log.info(f"kwargs: {kwargs}") + return otio.adapters.read_from_file(sequence_path, **kwargs) + + def _get_path_from_file_data(self, file_path_data): + """Converting creator path data to single path string + + Args: + file_path_data (FileDefItem): creator path data inputs + + Raises: + FileExistsError: in case nothing had been set + + Returns: + str: path string + """ + # TODO: just temporarily solving only one media file + if isinstance(file_path_data, list): + file_path_data = file_path_data.pop() + + if len(file_path_data["filenames"]) == 0: + raise FileExistsError( + f"File path was not added: {file_path_data}") + + return os.path.join( + file_path_data["directory"], file_path_data["filenames"][0]) + + def _get_clip_instances( + self, + otio_timeline, + media_path, + instance_data, + family_presets + ): + """Helper function for creating clip instances + + Args: + otio_timeline (otio.Timeline): otio timeline object + media_path (str): media file path string + instance_data (dict): clip instance data + family_presets (list): list of dict settings subset presets + """ + self.asset_name_check = [] + + tracks = otio_timeline.each_child( + descended_from_type=otio.schema.Track + ) + + # media data for audio stream and reference solving + media_data = self._get_media_source_metadata(media_path) + + for track in tracks: + self.log.debug(f"track.name: {track.name}") + try: + track_start_frame = ( + abs(track.source_range.start_time.value) + ) + self.log.debug(f"track_start_frame: {track_start_frame}") + track_start_frame -= self.timeline_frame_start + except AttributeError: + track_start_frame = 0 + + self.log.debug(f"track_start_frame: {track_start_frame}") + + for clip in track.each_child(): + if not self._validate_clip_for_processing(clip): + continue + + # get available frames info to clip data + self._create_otio_reference(clip, media_path, media_data) + + # convert timeline range to source range + self._restore_otio_source_range(clip) + + base_instance_data = self._get_base_instance_data( + clip, + instance_data, + track_start_frame + ) + + parenting_data = { + "instance_label": None, + "instance_id": None + } + self.log.info(( + "Creating subsets from presets: \n" + f"{pformat(family_presets)}" + )) + + for _fpreset in family_presets: + # exclude audio family if no audio stream + if ( + _fpreset["family"] == "audio" + and not media_data.get("audio") + ): + continue + + instance = self._make_subset_instance( + clip, + _fpreset, + deepcopy(base_instance_data), + parenting_data + ) + self.log.debug(f"{pformat(dict(instance.data))}") + + def _restore_otio_source_range(self, otio_clip): + """Infusing source range. + + Otio clip is missing a proper source clip range so + here we add it from the parent timeline frame range.
+ + Args: + otio_clip (otio.Clip): otio clip object + """ + otio_clip.source_range = otio_clip.range_in_parent() + + def _create_otio_reference( + self, + otio_clip, + media_path, + media_data + ): + """Creating otio reference at otio clip. + + Args: + otio_clip (otio.Clip): otio clip object + media_path (str): media file path string + media_data (dict): media metadata + """ + start_frame = media_data["start_frame"] + frame_duration = media_data["duration"] + fps = media_data["fps"] + + available_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime( + start_frame, fps), + duration=otio.opentime.RationalTime( + frame_duration, fps) + ) + # in case of old OTIO or a video file, create `ExternalReference` + media_reference = otio.schema.ExternalReference( + target_url=media_path, + available_range=available_range + ) + + otio_clip.media_reference = media_reference + + def _get_media_source_metadata(self, path): + """Get all available metadata from file + + Args: + path (str): media file path string + + Raises: + AssertionError: ffprobe couldn't read metadata + + Returns: + dict: media file metadata + """ + return_data = {} + + try: + media_data = get_ffprobe_data( + path, self.log + ) + self.log.debug(f"__ media_data: {pformat(media_data)}") + + # get video stream data + video_stream = media_data["streams"][0] + return_data = { + "video": True, + "start_frame": 0, + "duration": int(video_stream["nb_frames"]), + "fps": float( + convert_ffprobe_fps_value( + video_stream["r_frame_rate"] + ) + ) + } + + # get audio streams data + audio_stream = [ + stream for stream in media_data["streams"] + if stream["codec_type"] == "audio" + ] + + if audio_stream: + return_data["audio"] = True + + except Exception as exc: + raise AssertionError(( + "FFprobe couldn't read information about input file: " + f"\"{path}\". Error message: {exc}" + )) + + return return_data
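+    # Example return value of the method above (assumed ffprobe output
+    # for a 10 s movie at 24 fps with one audio stream):
+    # {"video": True, "start_frame": 0, "duration": 240,
+    #  "fps": 24.0, "audio": True}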
+ def _make_subset_instance( + self, + otio_clip, + preset, + instance_data, + parenting_data + ): + """Making subset instance from input preset + + Args: + otio_clip (otio.Clip): otio clip object + preset (dict): single family preset + instance_data (dict): instance data + parenting_data (dict): shot instance parent data + + Returns: + CreatedInstance: creator instance object + """ + family = preset["family"] + label = self._make_subset_naming( + preset, + instance_data + ) + instance_data["label"] = label + + # shot family is created via the hidden shot creator + # and carries the parenting data for the other families + if family == "shot": + instance_data["otioClip"] = ( + otio.adapters.write_to_string(otio_clip)) + c_instance = self.create_context.creators[ + "editorial_shot"].create( + instance_data) + parenting_data.update({ + "instance_label": label, + "instance_id": c_instance.data["instance_id"] + }) + else: + # add review family if defined + instance_data.update({ + "outputFileType": preset["output_file_type"], + "parent_instance_id": parenting_data["instance_id"], + "creator_attributes": { + "parent_instance": parenting_data["instance_label"], + "add_review_family": preset.get("review") + } + }) + + creator_identifier = f"editorial_{family}" + editorial_clip_creator = self.create_context.creators[ + creator_identifier] + c_instance = editorial_clip_creator.create( + instance_data) + + return c_instance + + def _make_subset_naming( + self, + preset, + instance_data + ): + """ Subset name maker + + Args: + preset (dict): single preset item + instance_data (dict): instance data + + Returns: + str: label string + """ + shot_name = instance_data["shotName"] + variant_name = instance_data["variant"] + family = preset["family"] + + # get variant name from preset or from inheritance + _variant_name = preset.get("variant") or variant_name + + self.log.debug(f"__ family: {family}") + self.log.debug(f"__ preset: {preset}") + + # subset name + subset_name = "{}{}".format( + family, _variant_name.capitalize() + ) + label = "{}_{}".format( + shot_name, + subset_name + ) + + instance_data.update({ + "family": family, + "label": label, + "variant": _variant_name, + "subset": subset_name, + }) + + return label
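+    # Naming example for the method above (assumed inputs): with
+    # shot_name "sh0010", family "plate" and variant "main", the
+    # subset name becomes "plateMain" and the label "sh0010_plateMain".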
+ def _get_base_instance_data( + self, + otio_clip, + instance_data, + track_start_frame, + ): + """ Factoring out the basic set of instance data. + + Args: + otio_clip (otio.Clip): otio clip object + instance_data (dict): precreate instance data + track_start_frame (int): track start frame + + Returns: + dict: instance data + """ + # get clip instance properties + parent_asset_name = instance_data["parent_asset_name"] + handle_start = instance_data["handle_start"] + handle_end = instance_data["handle_end"] + timeline_offset = instance_data["timeline_offset"] + workfile_start_frame = instance_data["workfile_start_frame"] + fps = instance_data["fps"] + variant_name = instance_data["variant"] + + # basic unique asset name + clip_name = os.path.splitext(otio_clip.name)[0].lower() + project_doc = get_project(self.project_name) + + shot_name, shot_metadata = self._shot_metadata_solver.generate_data( + clip_name, + { + "anatomy_data": { + "project": { + "name": self.project_name, + "code": project_doc["data"]["code"] + }, + "parent": parent_asset_name, + "app": self.host_name + }, + "selected_asset_doc": get_asset_by_name( + self.project_name, parent_asset_name), + "project_doc": project_doc + } + ) + + self._validate_name_uniqueness(shot_name) + + timing_data = self._get_timing_data( + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ) + + # create creator attributes + creator_attributes = { + "asset_name": shot_name, + "Parent hierarchy path": shot_metadata["hierarchy"], + "workfile_start_frame": workfile_start_frame, + "fps": fps, + "handle_start": int(handle_start), + "handle_end": int(handle_end) + } + creator_attributes.update(timing_data) + + # create shared new instance data + base_instance_data = { + "shotName": shot_name, + "variant": variant_name, + + # HACK: just a temporary bug workaround + # TODO: should look up shot name for update + "asset": parent_asset_name, + "task": "", + + "newAssetPublishing": True, + + # parent time properties + "trackStartFrame": track_start_frame, + "timelineOffset": timeline_offset, + # creator_attributes + "creator_attributes": creator_attributes + } + # add hierarchy shot metadata + base_instance_data.update(shot_metadata) + + return base_instance_data + + def _get_timing_data( + self, + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ): + """Returning available timing data + + Args: + otio_clip (otio.Clip): otio clip object + timeline_offset (int): offset value + track_start_frame (int): starting frame input + workfile_start_frame (int): start frame for shot's workfiles + + Returns: + dict: timing metadata + """ + # frame ranges data + clip_in = otio_clip.range_in_parent().start_time.value + clip_in += track_start_frame + clip_out = otio_clip.range_in_parent().end_time_inclusive().value + clip_out += track_start_frame + self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}") + + # add offset in case there is any + self.log.debug(f"__ timeline_offset: {timeline_offset}") + if timeline_offset: + clip_in += timeline_offset + clip_out += timeline_offset + + clip_duration = otio_clip.duration().value + self.log.info(f"clip duration: {clip_duration}") + + source_in = otio_clip.trimmed_range().start_time.value + source_out = source_in + clip_duration + + # define starting frame for future shot + frame_start = ( + clip_in if workfile_start_frame is None + else workfile_start_frame + ) + frame_end = frame_start + (clip_duration - 1) + + return { + "frameStart": int(frame_start), + "frameEnd": int(frame_end), + "clipIn": int(clip_in), + "clipOut": int(clip_out), + "clipDuration": int(otio_clip.duration().value), + "sourceIn": int(source_in), + "sourceOut": int(source_out) + }
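+    # Worked timing example (assumed values): a clip whose parent range
+    # is frames 100-147 on a track starting at 0, with timeline_offset 0
+    # and workfile_start_frame 1001: clip_in=100, clip_out=147,
+    # clip_duration=48, frameStart=1001, frameEnd=1001+47=1048.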
"sourceOut": int(source_out) + } + + def _get_allowed_family_presets(self, pre_create_data): + """ Filter out allowed family presets. + + Args: + pre_create_data (dict): precreate attributes inputs + + Returns: + list: lit of dict with preset items + """ + self.log.debug(f"__ pre_create_data: {pre_create_data}") + return [ + {"family": "shot"}, + *[ + preset for preset in self._creator_settings["family_presets"] + if pre_create_data[preset["family"]] + ] + ] + + def _validate_clip_for_processing(self, otio_clip): + """Validate otio clip attribues + + Args: + otio_clip (otio.Clip): otio clip object + + Returns: + bool: True if all passing conditions + """ + if otio_clip.name is None: + return False + + if isinstance(otio_clip, otio.schema.Gap): + return False + + # skip all generators like black empty + if isinstance( + otio_clip.media_reference, + otio.schema.GeneratorReference): + return False + + # Transitions are ignored, because Clips have the full frame + # range. + if isinstance(otio_clip, otio.schema.Transition): + return False + + return True + + def _validate_name_uniqueness(self, name): + """ Validating name uniqueness. + + In context of other clip names in sequence file. + + Args: + name (str): shot name string + """ + if name not in self.asset_name_check: + self.asset_name_check.append(name) + else: + self.log.warning( + f"Duplicate shot name: {name}! " + "Please check names in the input sequence files." + ) + + def get_pre_create_attr_defs(self): + """ Creating pre-create attributes at creator plugin. + + Returns: + list: list of attribute object instances + """ + # Use same attributes as for instance attrobites + attr_defs = [ + FileDef( + "sequence_filepath_data", + folders=False, + extensions=[ + ".edl", + ".xml", + ".aaf", + ".fcpxml" + ], + allow_sequences=False, + single_item=True, + label="Sequence file", + ), + FileDef( + "media_filepaths_data", + folders=False, + extensions=[ + ".mov", + ".mp4", + ".wav" + ], + allow_sequences=False, + single_item=False, + label="Media files", + ), + # TODO: perhpas better would be timecode and fps input + NumberDef( + "timeline_offset", + default=0, + label="Timeline offset" + ), + UISeparatorDef(), + UILabelDef("Clip instance attributes"), + UISeparatorDef() + ] + # add variants swithers + attr_defs.extend( + BoolDef(_var["family"], label=_var["family"]) + for _var in self._creator_settings["family_presets"] + ) + attr_defs.append(UISeparatorDef()) + + attr_defs.extend(CLIP_ATTR_DEFS) + return attr_defs diff --git a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py index baca274ea6..41c1c29bb0 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py +++ b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py @@ -1,6 +1,7 @@ import os +from openpype.api import get_project_settings, Logger -from openpype.api import get_project_settings +log = Logger.get_logger(__name__) def initialize(): @@ -13,6 +14,7 @@ def initialize(): global_variables = globals() for item in simple_creators: + dynamic_plugin = SettingsCreator.from_settings(item) global_variables[dynamic_plugin.__name__] = dynamic_plugin diff --git a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py new file mode 100644 index 0000000000..c5f0d6b75e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py @@ -0,0 +1,216 @@ +import copy +import os 
+import re + +from openpype.client import get_assets, get_asset_by_name +from openpype.lib import ( + FileDef, + BoolDef, + get_subset_name_with_asset_doc, + TaskNotSetError, +) +from openpype.pipeline import ( + CreatedInstance, + CreatorError +) + +from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator + + +class BatchMovieCreator(TrayPublishCreator): + """Creates instances from movie file(s). + + Intended for .mov files, but should work for any video file. + Doesn't handle image sequences though. + """ + identifier = "render_movie_batch" + label = "Batch Movies" + family = "render" + description = "Publish batch of video files" + + create_allow_context_change = False + version_regex = re.compile(r"^(.+)_v([0-9]+)$") + + def __init__(self, project_settings, *args, **kwargs): + super(BatchMovieCreator, self).__init__(project_settings, + *args, **kwargs) + creator_settings = ( + project_settings["traypublisher"]["BatchMovieCreator"] + ) + self.default_variants = creator_settings["default_variants"] + self.default_tasks = creator_settings["default_tasks"] + self.extensions = creator_settings["extensions"] + + def get_icon(self): + return "fa.file" + + def create(self, subset_name, data, pre_create_data): + file_paths = pre_create_data.get("filepath") + if not file_paths: + return + + for file_info in file_paths: + instance_data = copy.deepcopy(data) + file_name = file_info["filenames"][0] + filepath = os.path.join(file_info["directory"], file_name) + instance_data["creator_attributes"] = {"filepath": filepath} + + asset_doc, version = self.get_asset_doc_from_file_name( + file_name, self.project_name) + + subset_name, task_name = self._get_subset_and_task( + asset_doc, data["variant"], self.project_name) + + instance_data["task"] = task_name + instance_data["asset"] = asset_doc["name"] + + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, + instance_data, self) + self._store_new_instance(new_instance) + + def get_asset_doc_from_file_name(self, source_filename, project_name): + """Try to parse out asset name from file name provided. + + Artists might provide various file name formats. + Currently handled: + - chair.mov + - chair_v001.mov + - my_chair_to_upload.mov + """ + version = None + asset_name = os.path.splitext(source_filename)[0] + # Always first check if source filename is in assets + matching_asset_doc = self._get_asset_by_name_case_not_sensitive( + project_name, asset_name) + + if matching_asset_doc is None: + matching_asset_doc, version = ( + self._parse_with_version(project_name, asset_name)) + + if matching_asset_doc is None: + matching_asset_doc = self._parse_containing(project_name, + asset_name) + + if matching_asset_doc is None: + raise CreatorError( + "Cannot guess asset name from {}".format(source_filename)) + + return matching_asset_doc, version + + def _parse_with_version(self, project_name, asset_name): + """Try to parse asset name from a file name containing version too + + Eg. 'chair_v001.mov' >> 'chair', 1 + """ + self.log.debug(( + "Asset doc by \"{}\" was not found, trying version regex." 
+ ).format(asset_name)) + + matching_asset_doc = version_number = None + + regex_result = self.version_regex.findall(asset_name) + if regex_result: + _asset_name, _version_number = regex_result[0] + matching_asset_doc = self._get_asset_by_name_case_not_sensitive( + project_name, _asset_name) + if matching_asset_doc: + version_number = int(_version_number) + + return matching_asset_doc, version_number + + def _parse_containing(self, project_name, asset_name): + """Look if file name contains any existing asset name""" + for asset_doc in get_assets(project_name, fields=["name"]): + if asset_doc["name"].lower() in asset_name.lower(): + return get_asset_by_name(project_name, asset_doc["name"]) + + def _get_subset_and_task(self, asset_doc, variant, project_name): + """Create subset name according to standard template process""" + task_name = self._get_task_name(asset_doc) + + try: + subset_name = get_subset_name_with_asset_doc( + self.family, + variant, + task_name, + asset_doc, + project_name + ) + except TaskNotSetError: + # Create instance with fake task + # - instance will be marked as invalid so it can't be published + # but user have ability to change it + # NOTE: This expect that there is not task 'Undefined' on asset + task_name = "Undefined" + subset_name = get_subset_name_with_asset_doc( + self.family, + variant, + task_name, + asset_doc, + project_name + ) + + return subset_name, task_name + + def _get_task_name(self, asset_doc): + """Get applicable task from 'asset_doc' """ + available_task_names = {} + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + for task_name in asset_tasks.keys(): + available_task_names[task_name.lower()] = task_name + + task_name = None + for _task_name in self.default_tasks: + _task_name_low = _task_name.lower() + if _task_name_low in available_task_names: + task_name = available_task_names[_task_name_low] + break + + return task_name + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return [ + FileDef( + "filepath", + folders=False, + single_item=False, + extensions=self.extensions, + label="Filepath" + ), + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + def get_detail_description(self): + return """# Publish batch of .mov to multiple assets. + + File names must then contain only asset name, or asset name + version. + (eg. 
'chair.mov', 'chair_v001.mov', not really safe `my_chair_v001.mov` + """ + + def _get_asset_by_name_case_not_sensitive(self, project_name, asset_name): + """Handle more cases in file names""" + asset_name = re.compile(asset_name, re.IGNORECASE) + + assets = list(get_assets(project_name, asset_names=[asset_name])) + if assets: + if len(assets) > 1: + self.log.warning("Too many records found for {}".format( + asset_name)) + return + + return assets.pop() diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py new file mode 100644 index 0000000000..bdf7c05f3d --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py @@ -0,0 +1,36 @@ +from pprint import pformat +import pyblish.api + + +class CollectClipInstance(pyblish.api.InstancePlugin): + """Collect clip instances and resolve its parent""" + + label = "Collect Clip Instances" + order = pyblish.api.CollectorOrder - 0.081 + + hosts = ["traypublisher"] + families = ["plate", "review", "audio"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + instance.data["families"].append("clip") + + parent_instance_id = instance.data["parent_instance_id"] + edit_shared_data = instance.context.data["editorialSharedData"] + instance.data.update( + edit_shared_data[parent_instance_id] + ) + + if "editorialSourcePath" in instance.context.data.keys(): + instance.data["editorialSourcePath"] = ( + instance.context.data["editorialSourcePath"]) + instance.data["families"].append("trimming") + + self.log.debug(pformat(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py new file mode 100644 index 0000000000..e181d0abe5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py @@ -0,0 +1,48 @@ +import os +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectEditorialInstance(pyblish.api.InstancePlugin): + """Collect data for instances created by settings creators.""" + + label = "Collect Editorial Instances" + order = pyblish.api.CollectorOrder - 0.1 + + hosts = ["traypublisher"] + families = ["editorial"] + + def process(self, instance): + + if "families" not in instance.data: + instance.data["families"] = [] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + fpath = instance.data["sequenceFilePath"] + otio_timeline_string = instance.data.pop("otioTimeline") + otio_timeline = otio.adapters.read_from_string( + otio_timeline_string) + + instance.context.data["otioTimeline"] = otio_timeline + instance.context.data["editorialSourcePath"] = ( + instance.data["editorialSourcePath"]) + + self.log.info(fpath) + + instance.data["stagingDir"] = os.path.dirname(fpath) + + _, ext = os.path.splitext(fpath) + + instance.data["representations"].append({ + "ext": ext[1:], + "name": ext[1:], + "stagingDir": instance.data["stagingDir"], + "files": os.path.basename(fpath) + }) + + self.log.debug("Created Editorial Instance {}".format( + pformat(instance.data) + )) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py new file mode 100644 
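Stepping back to `BatchMovieCreator` above, its three-stage file name resolution (exact case-insensitive match, then a `_v###` suffix parse, then substring containment) can be sketched without the database; the `known_assets` set below is a hypothetical stand-in for the `get_assets` query:

```python
# Hedged sketch of the asset-name guessing in BatchMovieCreator;
# asset names are made up and the DB lookup is faked with a set.
import os
import re

VERSION_REGEX = re.compile(r"^(.+)_v([0-9]+)$")
known_assets = {"chair", "table"}  # lowercased names, stand-in for DB


def parse_asset(filename):
    stem = os.path.splitext(filename)[0]
    # 1. exact match, case insensitive
    if stem.lower() in known_assets:
        return stem, None
    # 2. strip a trailing version suffix like "_v001"
    match = VERSION_REGEX.match(stem)
    if match:
        name, version = match.groups()
        if name.lower() in known_assets:
            return name, int(version)
    # 3. last resort: substring containment, as in _parse_containing
    for asset in known_assets:
        if asset in stem.lower():
            return asset, None
    return None, None


print(parse_asset("chair_v001.mov"))          # ('chair', 1)
print(parse_asset("my_chair_to_upload.mov"))  # ('chair', None)
```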
index 0000000000..4af4fb94e9 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py @@ -0,0 +1,30 @@ +import pyblish.api + + +class CollectEditorialReviewable(pyblish.api.InstancePlugin): + """ Collect review input from user. + + Adds the input to instance data. + """ + + label = "Collect Editorial Reviewable" + order = pyblish.api.CollectorOrder + + families = ["plate", "review", "audio"] + hosts = ["traypublisher"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + creator_attributes = instance.data["creator_attributes"] + + if creator_attributes["add_review_family"]: + instance.data["families"].append("review") + + self.log.debug("instance.data {}".format(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py new file mode 100644 index 0000000000..f37e04d1c9 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py @@ -0,0 +1,47 @@ +import os + +import pyblish.api +from openpype.pipeline import OpenPypePyblishPluginMixin + + +class CollectMovieBatch( + pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin +): + """Collect file url for batch movies and create representation. + + Adds review on instance and to repre.tags based on value of toggle button + on creator. + """ + + label = "Collect Movie Batch Files" + order = pyblish.api.CollectorOrder + + hosts = ["traypublisher"] + + def process(self, instance): + if instance.data.get("creator_identifier") != "render_movie_batch": + return + + creator_attributes = instance.data["creator_attributes"] + + file_url = creator_attributes["filepath"] + file_name = os.path.basename(file_url) + _, ext = os.path.splitext(file_name) + + repre = { + "name": ext[1:], + "ext": ext[1:], + "files": file_name, + "stagingDir": os.path.dirname(file_url), + "tags": [] + } + + if creator_attributes["add_review_family"]: + repre["tags"].append("review") + instance.data["families"].append("review") + + instance.data["representations"].append(repre) + + instance.data["source"] = file_url + + self.log.debug("instance.data {}".format(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py b/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py deleted file mode 100644 index 965e251527..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py +++ /dev/null @@ -1,31 +0,0 @@ -import pyblish.api -from openpype.lib import BoolDef -from openpype.pipeline import OpenPypePyblishPluginMixin - - -class CollectReviewFamily( - pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin -): - """Add review family.""" - - label = "Collect Review Family" - order = pyblish.api.CollectorOrder - 0.49 - - hosts = ["traypublisher"] - families = [ - "image", - "render", - "plate", - "review" - ] - - def process(self, instance): - values = self.get_attr_values_from_data(instance.data) - if values.get("add_review_family"): - instance.data["families"].append("review") - - @classmethod - def get_attribute_defs(cls): - return [ - BoolDef("add_review_family", label="Review", default=True) - ] diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py new file mode 100644 index 
0000000000..716f73022e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py @@ -0,0 +1,213 @@ +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectShotInstance(pyblish.api.InstancePlugin): + """ Collect shot instances + + Resolving its user inputs from creator attributes + to instance data. + """ + + label = "Collect Shot Instances" + order = pyblish.api.CollectorOrder - 0.09 + + hosts = ["traypublisher"] + families = ["shot"] + + SHARED_KEYS = [ + "asset", + "fps", + "handleStart", + "handleEnd", + "frameStart", + "frameEnd", + "clipIn", + "clipOut", + "clipDuration", + "sourceIn", + "sourceOut", + "otioClip", + "workfileFrameStart" + ] + + def process(self, instance): + self.log.debug(pformat(instance.data)) + + creator_identifier = instance.data["creator_identifier"] + if "editorial" not in creator_identifier: + return + + # get otio clip object + otio_clip = self._get_otio_clip(instance) + instance.data["otioClip"] = otio_clip + + # first solve the inputs from creator attr + data = self._solve_inputs_to_data(instance) + instance.data.update(data) + + # distribute all shared keys to clips instances + self._distribute_shared_data(instance) + self._solve_hierarchy_context(instance) + + self.log.debug(pformat(instance.data)) + + def _get_otio_clip(self, instance): + """ Converts otio string data. + + Convert them to proper otio object + and finds its equivalent at otio timeline. + This process is a hack to support also + resolving parent range. + + Args: + instance (obj): publishing instance + + Returns: + otio.Clip: otio clip object + """ + context = instance.context + # convert otio clip from string to object + otio_clip_string = instance.data.pop("otioClip") + otio_clip = otio.adapters.read_from_string( + otio_clip_string) + + otio_timeline = context.data["otioTimeline"] + + clips = [ + clip for clip in otio_timeline.each_child( + descended_from_type=otio.schema.Clip) + if clip.name == otio_clip.name + ] + + otio_clip = clips.pop() + self.log.debug(f"__ otioclip.parent: {otio_clip.parent}") + + return otio_clip + + def _distribute_shared_data(self, instance): + """ Distribute all defined keys. + + All data are shared between all related + instances in context. + + Args: + instance (obj): publishing instance + """ + context = instance.context + + instance_id = instance.data["instance_id"] + + if not context.data.get("editorialSharedData"): + context.data["editorialSharedData"] = {} + + context.data["editorialSharedData"][instance_id] = { + _k: _v for _k, _v in instance.data.items() + if _k in self.SHARED_KEYS + } + + def _solve_inputs_to_data(self, instance): + """ Resolve all user inputs into instance data. 
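The `SHARED_KEYS` mechanism above is the bridge between a shot instance and its clip instances: the shot collector publishes a slice of its data into the context under its instance id, and the clip collector earlier in this diff copies that slice back. A rough sketch with plain dicts standing in for pyblish objects; ids and values are invented:

```python
# Simplified model of the editorialSharedData handoff.
SHARED_KEYS = ["asset", "fps", "frameStart", "frameEnd"]

context_data = {}
shot_instance = {
    "instance_id": "abc123",
    "asset": "sh010",
    "fps": 25.0,
    "frameStart": 1001,
    "frameEnd": 1050,
}

# Shot collector: publish the shared slice under the instance id.
context_data.setdefault("editorialSharedData", {})
context_data["editorialSharedData"][shot_instance["instance_id"]] = {
    key: value for key, value in shot_instance.items()
    if key in SHARED_KEYS
}

# Clip collector: pull the parent's slice back by the stored id.
clip_instance = {"parent_instance_id": "abc123", "family": "plate"}
clip_instance.update(
    context_data["editorialSharedData"][clip_instance["parent_instance_id"]]
)
print(clip_instance["asset"], clip_instance["frameStart"])  # sh010 1001
```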
+ + Args: + instance (obj): publishing instance + + Returns: + dict: instance data updating data + """ + _cr_attrs = instance.data["creator_attributes"] + workfile_start_frame = _cr_attrs["workfile_start_frame"] + frame_start = _cr_attrs["frameStart"] + frame_end = _cr_attrs["frameEnd"] + frame_dur = frame_end - frame_start + + return { + "asset": _cr_attrs["asset_name"], + "fps": float(_cr_attrs["fps"]), + "handleStart": _cr_attrs["handle_start"], + "handleEnd": _cr_attrs["handle_end"], + "frameStart": workfile_start_frame, + "frameEnd": workfile_start_frame + frame_dur, + "clipIn": _cr_attrs["clipIn"], + "clipOut": _cr_attrs["clipOut"], + "clipDuration": _cr_attrs["clipDuration"], + "sourceIn": _cr_attrs["sourceIn"], + "sourceOut": _cr_attrs["sourceOut"], + "workfileFrameStart": workfile_start_frame + } + + def _solve_hierarchy_context(self, instance): + """ Adding hierarchy data to context shared data. + + Args: + instance (obj): publishing instance + """ + context = instance.context + + final_context = ( + context.data["hierarchyContext"] + if context.data.get("hierarchyContext") + else {} + ) + + name = instance.data["asset"] + + # get handles + handle_start = int(instance.data["handleStart"]) + handle_end = int(instance.data["handleEnd"]) + + in_info = { + "entity_type": "Shot", + "custom_attributes": { + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], + "clipIn": instance.data["clipIn"], + "clipOut": instance.data["clipOut"], + "fps": instance.data["fps"] + }, + "tasks": instance.data["tasks"] + } + + parents = instance.data.get('parents', []) + self.log.debug(f"parents: {pformat(parents)}") + + actual = {name: in_info} + + for parent in reversed(parents): + parent_name = parent["entity_name"] + next_dict = { + parent_name: { + "entity_type": parent["entity_type"], + "childs": actual + } + } + actual = next_dict + + final_context = self._update_dict(final_context, actual) + + # adding hierarchy context to instance + context.data["hierarchyContext"] = final_context + self.log.debug(pformat(final_context)) + + def _update_dict(self, ex_dict, new_dict): + """ Recursion function + + Updating nested data with another nested data. + + Args: + ex_dict (dict): nested data + new_dict (dict): nested data + + Returns: + dict: updated nested data + """ + for key in ex_dict: + if key in new_dict and isinstance(ex_dict[key], dict): + new_dict[key] = self._update_dict(ex_dict[key], new_dict[key]) + elif not ex_dict.get(key) or not new_dict.get(key): + new_dict[key] = ex_dict[key] + + return new_dict diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py index b2be43c701..c0ae694c3c 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py +++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py @@ -1,9 +1,31 @@ import os +import tempfile + +import clique import pyblish.api class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): - """Collect data for instances created by settings creators.""" + """Collect data for instances created by settings creators. + + Plugin create representations for simple instances based + on 'representation_files' attribute stored on instance data. + + There is also possibility to have reviewable representation which can be + stored under 'reviewable' attribute stored on instance data. 
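Also worth unpacking from the shot collector above: `_solve_hierarchy_context` nests the shot under its parents by repeatedly wrapping the current dict in a new "childs" level while iterating the parents in reverse. A condensed sketch with made-up entity names:

```python
# Condensed version of the parent-nesting loop; entity names are
# hypothetical.
def build_hierarchy(shot_name, shot_info, parents):
    actual = {shot_name: shot_info}
    # reversed() makes the first parent in the list the outermost level
    for parent in reversed(parents):
        actual = {
            parent["entity_name"]: {
                "entity_type": parent["entity_type"],
                "childs": actual,
            }
        }
    return actual


parents = [
    {"entity_name": "ep01", "entity_type": "Episode"},
    {"entity_name": "seq01", "entity_type": "Sequence"},
]
print(build_hierarchy("sh010", {"entity_type": "Shot"}, parents))
# {'ep01': {'entity_type': 'Episode', 'childs':
#     {'seq01': {'entity_type': 'Sequence', 'childs':
#         {'sh010': {'entity_type': 'Shot'}}}}}}
```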
If a representation + with the same files as the 'reviewable' input already exists, it is + reused for review instead of creating a duplicate. + + Representations can be marked for review, in which case the 'review' + family is also added to the instance families. Only one representation + can be marked for review, so the **first** representation with an + extension available in '_review_extensions' is used for review. + + The instance 'source' is the path from the last representation created + from 'representation_files'. + + A staging directory is set on the instance. It is probably never used + because each created representation has its own staging dir. + """ label = "Collect Settings Simple Instances" order = pyblish.api.CollectorOrder - 0.49 @@ -14,37 +36,193 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): if not instance.data.get("settings_creator"): return - if "families" not in instance.data: - instance.data["families"] = [] + instance_label = instance.data["name"] + # Create instance's staging dir in temp + tmp_folder = tempfile.mkdtemp(prefix="traypublisher_") + instance.data["stagingDir"] = tmp_folder + instance.context.data["cleanupFullPaths"].append(tmp_folder) - if "representations" not in instance.data: - instance.data["representations"] = [] - repres = instance.data["representations"] + self.log.debug(( + "Created temp staging directory for instance {}. {}" + ).format(instance_label, tmp_folder)) + + # Store filepaths for validation of their existence + source_filepaths = [] + # Make sure there are no representations with same name + repre_names_counter = {} + # Store created names for logging + repre_names = [] + # Store set of filepaths per each representation + representation_files_mapping = [] + source = self._create_main_representations( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + + self._create_review_representation( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + + instance.data["source"] = source + instance.data["sourceFilepaths"] = list(set(source_filepaths)) + + self.log.debug( + ( + "Created Simple Settings instance \"{}\"" + " with {} representations: {}" + ).format( + instance_label, + len(instance.data["representations"]), + ", ".join(repre_names) + ) + ) + + def _create_main_representations( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + creator_attributes = instance.data["creator_attributes"] + filepath_items = creator_attributes["representation_files"] + if not isinstance(filepath_items, list): + filepath_items = [filepath_items] + + source = None + for filepath_item in filepath_items: + # Skip if filepath item does not have filenames + if not filepath_item["filenames"]: + continue + + filepaths = { + os.path.join(filepath_item["directory"], filename) + for filename in filepath_item["filenames"] + } + source_filepaths.extend(filepaths) + + source = self._calculate_source(filepaths) + representation = self._create_representation_data( + filepath_item, repre_names_counter, repre_names + ) + instance.data["representations"].append(representation) + representation_files_mapping.append( + (filepaths, representation, source) + ) + return source + + def _create_review_representation( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + # Skip review representation creation if there are no representations + # created for "main" part + # - review representation 
must not be created in that case so + # validation can care about it + if not representation_files_mapping: + self.log.warning(( + "There are missing source representations." + " Creation of review representation was skipped." + )) + return creator_attributes = instance.data["creator_attributes"] - filepath_item = creator_attributes["filepath"] - self.log.info(filepath_item) - filepaths = [ - os.path.join(filepath_item["directory"], filename) - for filename in filepath_item["filenames"] - ] + review_file_item = creator_attributes["reviewable"] + filenames = review_file_item.get("filenames") + if not filenames: + self.log.debug(( + "Filepath for review is not defined." + " Skipping review representation creation." + )) + return - instance.data["sourceFilepaths"] = filepaths - instance.data["stagingDir"] = filepath_item["directory"] + filepaths = { + os.path.join(review_file_item["directory"], filename) + for filename in filenames + } + source_filepaths.extend(filepaths) + # First try to find out representation with same filepaths + # so it's not needed to create new representation just for review + review_representation = None + # Review path (only for logging) + review_path = None + for item in representation_files_mapping: + _filepaths, representation, repre_path = item + if _filepaths == filepaths: + review_representation = representation + review_path = repre_path + break + + if review_representation is None: + self.log.debug("Creating new review representation") + review_path = self._calculate_source(filepaths) + review_representation = self._create_representation_data( + review_file_item, repre_names_counter, repre_names + ) + instance.data["representations"].append(review_representation) + + if "review" not in instance.data["families"]: + instance.data["families"].append("review") + + review_representation["tags"].append("review") + self.log.debug("Representation {} was marked for review. {}".format( + review_representation["name"], review_path + )) + + def _create_representation_data( + self, filepath_item, repre_names_counter, repre_names + ): + """Create new representation data based on file item. + + Args: + filepath_item (Dict[str, Any]): Item with information about + representation paths. + repre_names_counter (Dict[str, int]): Store count of representation + names. + repre_names (List[str]): All used representation names. For + logging purposes. + + Returns: + Dict: Prepared base representation data. 
+ """ filenames = filepath_item["filenames"] _, ext = os.path.splitext(filenames[0]) - ext = ext[1:] if len(filenames) == 1: filenames = filenames[0] - repres.append({ - "ext": ext, - "name": ext, + repre_name = repre_ext = ext[1:] + if repre_name not in repre_names_counter: + repre_names_counter[repre_name] = 2 + else: + counter = repre_names_counter[repre_name] + repre_names_counter[repre_name] += 1 + repre_name = "{}_{}".format(repre_name, counter) + repre_names.append(repre_name) + return { + "ext": repre_ext, + "name": repre_name, "stagingDir": filepath_item["directory"], - "files": filenames - }) + "files": filenames, + "tags": [] + } - self.log.debug("Created Simple Settings instance {}".format( - instance.data - )) + def _calculate_source(self, filepaths): + cols, rems = clique.assemble(filepaths) + if cols: + source = cols[0].format("{head}{padding}{tail}") + elif rems: + source = rems[0] + return source diff --git a/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml new file mode 100644 index 0000000000..933df1c7c5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml @@ -0,0 +1,15 @@ + + + +Invalid frame range + +## Invalid frame range + +Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames. + +### How to repair? + +Modify configuration in the database or tweak frame range in the workfile. + + + \ No newline at end of file diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py index c7302b1005..749199fbd3 100644 --- a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py +++ b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py @@ -3,8 +3,17 @@ import pyblish.api from openpype.pipeline import PublishValidationError -class ValidateWorkfilePath(pyblish.api.InstancePlugin): - """Validate existence of workfile instance existence.""" +class ValidateFilePath(pyblish.api.InstancePlugin): + """Validate existence of source filepaths on instance. + + Plugins looks into key 'sourceFilepaths' and validate if paths there + actually exist on disk. + + Also validate if the key is filled but is empty. In that case also + crashes so do not fill the key if unfilled value should not cause error. + + This is primarily created for Simple Creator instances. + """ label = "Validate Workfile" order = pyblish.api.ValidatorOrder - 0.49 @@ -14,12 +23,28 @@ class ValidateWorkfilePath(pyblish.api.InstancePlugin): def process(self, instance): if "sourceFilepaths" not in instance.data: self.log.info(( - "Can't validate source filepaths existence." + "Skipped validation of source filepaths existence." " Instance does not have collected 'sourceFilepaths'" )) return - filepaths = instance.data.get("sourceFilepaths") + family = instance.data["family"] + label = instance.data["name"] + filepaths = instance.data["sourceFilepaths"] + if not filepaths: + raise PublishValidationError( + ( + "Source filepaths of '{}' instance \"{}\" are not filled" + ).format(family, label), + "File not filled", + ( + "## Files were not filled" + "\nThis mean that you didn't enter any files into required" + " file input." 
+ "\n- Please refresh publishing and check instance" + " {}" + ).format(label) + ) not_found_files = [ filepath @@ -34,11 +59,7 @@ class ValidateWorkfilePath(pyblish.api.InstancePlugin): raise PublishValidationError( ( "Filepath of '{}' instance \"{}\" does not exist:\n{}" - ).format( - instance.data["family"], - instance.data["name"], - joined_paths - ), + ).format(family, label, joined_paths), "File not found", ( "## Files were not found\nFiles\n{}" diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py new file mode 100644 index 0000000000..947624100a --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py @@ -0,0 +1,75 @@ +import re + +import pyblish.api + +import openpype.api +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) + + +class ValidateFrameRange(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validating frame range of rendered files against state in DB.""" + + label = "Validate Frame Range" + hosts = ["traypublisher"] + families = ["render"] + order = openpype.api.ValidateContentsOrder + + optional = True + # published data might be sequence (.mov, .mp4) in that counting files + # doesnt make sense + check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga", + "gif", "svg"] + skip_timelines_check = [] # skip for specific task names (regex) + + def process(self, instance): + # Skip the instance if is not active by data on the instance + if not self.is_active(instance.data): + return + + if (self.skip_timelines_check and + any(re.search(pattern, instance.data["task"]) + for pattern in self.skip_timelines_check)): + self.log.info("Skipping for {} task".format(instance.data["task"])) + + asset_doc = instance.data["assetEntity"] + asset_data = asset_doc["data"] + frame_start = asset_data["frameStart"] + frame_end = asset_data["frameEnd"] + handle_start = asset_data["handleStart"] + handle_end = asset_data["handleEnd"] + duration = (frame_end - frame_start + 1) + handle_start + handle_end + + repres = instance.data.get("representations") + if not repres: + self.log.info("No representations, skipping.") + return + + first_repre = repres[0] + ext = first_repre['ext'].replace(".", '') + + if not ext or ext.lower() not in self.check_extensions: + self.log.warning("Cannot check for extension {}".format(ext)) + return + + files = first_repre["files"] + if isinstance(files, str): + files = [files] + frames = len(files) + + msg = ( + "Frame duration from DB:'{}' doesn't match number of files:'{}'" + " Please change frame range for Asset or limit no. of files" + ). format(int(duration), frames) + + formatting_data = {"duration": duration, + "found": frames} + if frames != duration: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + self.log.debug("Valid ranges expected '{}' - found '{}'". 
+ format(int(duration), frames)) diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index c6dc765a27..a99b300730 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,17 +1,16 @@ import os -from openpype.client import get_project, get_asset_by_name -from openpype.lib import ( - StringTemplate, - get_workfile_template_key_from_context, - get_workdir_data, - get_last_workfile_with_version, -) +from openpype.lib import StringTemplate from openpype.pipeline import ( registered_host, legacy_io, Anatomy, ) +from openpype.pipeline.workfile import ( + get_workfile_template_key_from_context, + get_last_workfile_with_version, +) +from openpype.pipeline.template_data import get_template_data_with_names from openpype.hosts.tvpaint.api import lib, pipeline, plugin @@ -54,19 +53,17 @@ class LoadWorkfile(plugin.Loader): asset_name = legacy_io.Session["AVALON_ASSET"] task_name = legacy_io.Session["AVALON_TASK"] - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, asset_name) - template_key = get_workfile_template_key_from_context( asset_name, task_name, host_name, - project_name=project_name, - dbcon=legacy_io + project_name=project_name ) anatomy = Anatomy(project_name) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data = get_template_data_with_names( + project_name, asset_name, task_name, host_name + ) data["root"] = anatomy.roots file_template = anatomy.templates[template_key]["file"] diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py index 5be04fc841..50b34bd573 100644 --- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py +++ b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- """Hook to launch Unreal and prepare projects.""" import os +import copy from pathlib import Path from openpype.lib import ( PreLaunchHook, ApplicationLaunchFailed, ApplicationNotFound, - get_workdir_data, get_workfile_template_key ) import openpype.hosts.unreal.lib as unreal_lib @@ -35,18 +35,13 @@ class UnrealPrelaunchHook(PreLaunchHook): return last_workfile.name # Prepare data for fill data and for getting workfile template key - task_name = self.data["task_name"] anatomy = self.data["anatomy"] - asset_doc = self.data["asset_doc"] project_doc = self.data["project_doc"] - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") + # Use already prepared workdir data + workdir_data = copy.deepcopy(self.data["workdir_data"]) + task_type = workdir_data.get("task", {}).get("type") - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, self.host_name - ) # QUESTION raise exception if version is part of filename template? 
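An aside on the template filling that both of these hunks rely on: the TVPaint loader fills `anatomy.templates[template_key]["file"]` with prepared template data, and the Unreal hook does the same with `workdir_data`. The placeholder syntax is close enough to `str.format` to sketch the idea without Anatomy; the template and data below are illustrative, and the real code goes through `openpype.lib.StringTemplate`:

```python
# Simplified stand-in for the StringTemplate fill; template and data
# are made-up examples, not a real project configuration.
file_template = "{asset}_{task[name]}_v{version:0>3}.{ext}"

data = {
    "asset": "sh010",
    "task": {"name": "animation"},
    "version": 7,
    "ext": "tvpp",
}
print(file_template.format(**data))  # sh010_animation_v007.tvpp
```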
workdir_data["version"] = 1 workdir_data["ext"] = "uproject" diff --git a/openpype/hosts/unreal/integration/UE_5.0/Content/__init__.py b/openpype/hosts/unreal/integration/UE_5.0/Content/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py index da2830bc52..1fe0bef462 100644 --- a/openpype/hosts/unreal/plugins/load/load_animation.py +++ b/openpype/hosts/unreal/plugins/load/load_animation.py @@ -8,13 +8,13 @@ from unreal import EditorAssetLibrary from unreal import MovieSceneSkeletalAnimationTrack from unreal import MovieSceneSkeletalAnimationSection +from openpype.pipeline.context_tools import get_current_project_asset from openpype.pipeline import ( get_representation_path, AVALON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline -from openpype.api import get_asset class AnimationFBXLoader(plugin.Loader): @@ -53,6 +53,8 @@ class AnimationFBXLoader(plugin.Loader): if not actor: return None + asset_doc = get_current_project_asset(fields=["data.fps"]) + task.set_editor_property('filename', self.fname) task.set_editor_property('destination_path', asset_dir) task.set_editor_property('destination_name', asset_name) @@ -80,7 +82,7 @@ class AnimationFBXLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'use_default_sample_rate', False) task.options.anim_sequence_import_data.set_editor_property( - 'custom_sample_rate', get_asset()["data"].get("fps")) + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( @@ -246,6 +248,7 @@ class AnimationFBXLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] source_path = get_representation_path(representation) + asset_doc = get_current_project_asset(fields=["data.fps"]) destination_path = container["namespace"] task = unreal.AssetImportTask() @@ -279,7 +282,7 @@ class AnimationFBXLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'use_default_sample_rate', False) task.options.anim_sequence_import_data.set_editor_property( - 'custom_sample_rate', get_asset()["data"].get("fps")) + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index 4fdfac51c8..926c932a85 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -23,7 +23,7 @@ from openpype.pipeline import ( AVALON_CONTAINER_ID, legacy_io, ) -from openpype.api import get_asset +from openpype.pipeline.context_tools import get_current_project_asset from openpype.api import get_current_project_settings from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -232,6 +232,7 @@ class LayoutLoader(plugin.Loader): anim_path = f"{asset_dir}/animations/{anim_file_name}" + asset_doc = get_current_project_asset() # Import animation task = unreal.AssetImportTask() task.options = unreal.FbxImportUI() @@ -266,7 +267,7 @@ class LayoutLoader(plugin.Loader): 
task.options.anim_sequence_import_data.set_editor_property( 'use_default_sample_rate', False) task.options.anim_sequence_import_data.set_editor_property( - 'custom_sample_rate', get_asset()["data"].get("fps")) + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py index 4cb3cee8e1..6444a5191d 100644 --- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py +++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py @@ -10,9 +10,9 @@ from aiohttp.web_response import Response from openpype.client import ( get_projects, get_assets, + OpenPypeMongoConnection, ) from openpype.lib import ( - OpenPypeMongoConnection, PypeLogger, ) from openpype.lib.remote_publish import ( diff --git a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py index 1ed8f22b2c..6620e5d5cf 100644 --- a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py +++ b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py @@ -6,6 +6,7 @@ import requests import json import subprocess +from openpype.client import OpenPypeMongoConnection from openpype.lib import PypeLogger from .webpublish_routes import ( @@ -121,8 +122,6 @@ def run_webserver(*args, **kwargs): def reprocess_failed(upload_dir, webserver_url): # log.info("check_reprocesable_records") - from openpype.lib import OpenPypeMongoConnection - mongo_client = OpenPypeMongoConnection.get_mongo_client() database_name = os.environ["OPENPYPE_DATABASE_NAME"] dbcon = mongo_client[database_name]["webpublishes"] diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index fb52a9aca7..3d3e425a86 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -63,7 +63,10 @@ from .execute import ( path_to_subprocess_arg, CREATE_NO_WINDOW ) -from .log import PypeLogger, timeit +from .log import ( + Logger, + PypeLogger, +) from .path_templates import ( merge_dict, @@ -83,8 +86,9 @@ from .anatomy import ( Anatomy ) -from .config import ( +from .dateutils import ( get_datetime_data, + get_timestamp, get_formatted_current_time ) @@ -111,6 +115,7 @@ from .transcoding import ( get_ffmpeg_codec_args, get_ffmpeg_format_args, convert_ffprobe_fps_value, + convert_ffprobe_fps_to_float, ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, @@ -283,6 +288,7 @@ __all__ = [ "get_ffmpeg_codec_args", "get_ffmpeg_format_args", "convert_ffprobe_fps_value", + "convert_ffprobe_fps_to_float", "CURRENT_DOC_SCHEMAS", "PROJECT_NAME_ALLOWED_SYMBOLS", @@ -370,13 +376,13 @@ __all__ = [ "get_datetime_data", "get_formatted_current_time", + "Logger", "PypeLogger", + "get_default_components", "validate_mongo_connection", "OpenPypeMongoConnection", - "timeit", - "is_overlapping_otio_ranges", "otio_range_with_handles", "convert_to_padded_path", diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index f46197e15f..8c92665366 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -27,12 +27,6 @@ from openpype.settings.constants import ( from . 
import PypeLogger from .profiles_filtering import filter_profiles from .local_settings import get_openpype_username -from .avalon_context import ( - get_workdir_data, - get_workdir_with_workdir_data, - get_workfile_template_key, - get_last_workfile -) from .python_module_tools import ( modules_from_path, @@ -1576,6 +1570,9 @@ def prepare_context_environments(data, env_group=None): data (EnvironmentPrepData): Dictionary where result and intermediate result will be stored. """ + + from openpype.pipeline.template_data import get_template_data + # Context environments log = data["log"] @@ -1596,7 +1593,9 @@ def prepare_context_environments(data, env_group=None): # Load project specific environments project_name = project_doc["name"] project_settings = get_project_settings(project_name) + system_settings = get_system_settings() data["project_settings"] = project_settings + data["system_settings"] = system_settings # Apply project specific environments on current env value apply_project_environments_value( project_name, data["env"], project_settings, env_group @@ -1619,8 +1618,8 @@ def prepare_context_environments(data, env_group=None): if not app.is_host: return - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, app.host_name + workdir_data = get_template_data( + project_doc, asset_doc, task_name, app.host_name, system_settings ) data["workdir_data"] = workdir_data @@ -1631,7 +1630,14 @@ def prepare_context_environments(data, env_group=None): data["task_type"] = task_type try: - workdir = get_workdir_with_workdir_data(workdir_data, anatomy) + from openpype.pipeline.workfile import get_workdir_with_workdir_data + + workdir = get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + project_settings=project_settings + ) except Exception as exc: raise ApplicationLaunchFailed( @@ -1721,11 +1727,19 @@ def _prepare_last_workfile(data, workdir): if not last_workfile_path: extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) if extensions: + from openpype.pipeline.workfile import ( + get_workfile_template_key, + get_last_workfile + ) + anatomy = data["anatomy"] project_settings = data["project_settings"] task_type = workdir_data["task"]["type"] template_key = get_workfile_template_key( - task_type, app.host_name, project_settings=project_settings + task_type, + app.host_name, + project_name, + project_settings=project_settings ) # Find last workfile file_template = str(anatomy.templates[template_key]["file"]) diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index a1f7c1e0f4..17658eef93 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -14,6 +14,7 @@ class AbstractAttrDefMeta(ABCMeta): Each object of `AbtractAttrDef` mus have defined 'key' attribute. """ + def __call__(self, *args, **kwargs): obj = super(AbstractAttrDefMeta, self).__call__(*args, **kwargs) init_class = getattr(obj, "__init__class__", None) @@ -45,6 +46,7 @@ class AbtractAttrDef: is_label_horizontal(bool): UI specific argument. Specify if label is next to value input or ahead. """ + is_value_def = True def __init__( @@ -77,6 +79,7 @@ class AbtractAttrDef: Convert passed value to a valid type. Use default if value can't be converted. """ + pass @@ -113,6 +116,7 @@ class UnknownDef(AbtractAttrDef): This attribute can be used to keep existing data unchanged but does not have known definition of type. 
""" + def __init__(self, key, default=None, **kwargs): kwargs["default"] = default super(UnknownDef, self).__init__(key, **kwargs) @@ -204,6 +208,7 @@ class TextDef(AbtractAttrDef): placeholder(str): UI placeholder for attribute. default(str, None): Default value. Empty string used when not defined. """ + def __init__( self, key, multiline=None, regex=None, placeholder=None, default=None, **kwargs @@ -531,14 +536,15 @@ class FileDef(AbtractAttrDef): Args: single_item(bool): Allow only single path item. folders(bool): Allow folder paths. - extensions(list): Allow files with extensions. Empty list will + extensions(List[str]): Allow files with extensions. Empty list will allow all extensions and None will disable files completely. - default(str, list): Defautl value. + extensions_label(str): Custom label shown instead of extensions in UI. + default(str, List[str]): Default value. """ def __init__( self, key, single_item=True, folders=None, extensions=None, - allow_sequences=True, default=None, **kwargs + allow_sequences=True, extensions_label=None, default=None, **kwargs ): if folders is None and extensions is None: folders = True @@ -578,6 +584,7 @@ class FileDef(AbtractAttrDef): self.folders = folders self.extensions = set(extensions) self.allow_sequences = allow_sequences + self.extensions_label = extensions_label super(FileDef, self).__init__(key, default=default, **kwargs) def __eq__(self, other): diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 76ed6cbbd3..3c7e9b6289 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -13,23 +13,16 @@ from openpype.client import ( get_project, get_assets, get_asset_by_name, - get_subset_by_name, get_subsets, - get_version_by_id, get_last_versions, - get_last_version_by_subset_id, + get_last_version_by_subset_name, get_representations, - get_representation_by_id, get_workfile_info, ) -from openpype.settings import ( - get_project_settings, - get_system_settings -) +from openpype.settings import get_project_settings from .profiles_filtering import filter_profiles from .events import emit_event from .path_templates import StringTemplate -from .local_settings import get_openpype_username legacy_io = None @@ -180,7 +173,7 @@ def with_pipeline_io(func): return wrapped -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.is_representation_from_latest") def is_latest(representation): """Return whether the representation is from latest version @@ -189,54 +182,30 @@ def is_latest(representation): Returns: bool: Whether the representation is of latest version. + + Deprecated: + Function will be removed after release version 3.14.* """ - project_name = legacy_io.active_project() - version = get_version_by_id( - project_name, - representation["parent"], - fields=["_id", "type", "parent"] - ) - if version["type"] == "hero_version": - return True + from openpype.pipeline.context_tools import is_representation_from_latest - # Get highest version under the parent - last_version = get_last_version_by_subset_id( - project_name, version["parent"], fields=["_id"] - ) - - return version["_id"] == last_version["_id"] + return is_representation_from_latest(representation) -@with_pipeline_io +@deprecated("openpype.pipeline.load.any_outdated_containers") def any_outdated(): - """Return whether the current scene has any outdated content""" - from openpype.pipeline import registered_host + """Return whether the current scene has any outdated content. 
- project_name = legacy_io.active_project() - checked = set() - host = registered_host() - for container in host.ls(): - representation = container['representation'] - if representation in checked: - continue + Deprecated: + Function will be removed after release version 3.14.* + """ - representation_doc = get_representation_by_id( - project_name, representation, fields=["parent"] - ) - if representation_doc and not is_latest(representation_doc): - return True - elif not representation_doc: - log.debug("Container '{objectName}' has an invalid " - "representation, it is missing in the " - "database".format(**container)) + from openpype.pipeline.load import any_outdated_containers - checked.add(representation) - - return False + return any_outdated_containers() -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_current_project_asset") def get_asset(asset_name=None): """ Returning asset document from database by its name. @@ -247,30 +216,25 @@ def get_asset(asset_name=None): Returns: (MongoDB document) + + Deprecated: + Function will be removed after release version 3.14.* """ - project_name = legacy_io.active_project() - if not asset_name: - asset_name = legacy_io.Session["AVALON_ASSET"] + from openpype.pipeline.context_tools import get_current_project_asset - asset_document = get_asset_by_name(project_name, asset_name) - if not asset_document: - raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) - - return asset_document + return get_current_project_asset(asset_name=asset_name) +@deprecated("openpype.pipeline.template_data.get_general_template_data") def get_system_general_anatomy_data(system_settings=None): - if not system_settings: - system_settings = get_system_settings() - studio_name = system_settings["general"]["studio_name"] - studio_code = system_settings["general"]["studio_code"] - return { - "studio": { - "name": studio_name, - "code": studio_code - } - } + """ + Deprecated: + Function will be removed after release version 3.14.* + """ + from openpype.pipeline.template_data import get_general_template_data + + return get_general_template_data(system_settings) def get_linked_asset_ids(asset_doc): @@ -319,7 +283,7 @@ def get_linked_assets(asset_doc): return list(get_assets(project_name, link_ids)) -@with_pipeline_io +@deprecated("openpype.client.get_last_version_by_subset_name") def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): """Retrieve latest version from `asset_name`, and `subset_name`. @@ -335,11 +299,16 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): Returns: None: If asset, subset or version were not found. - dict: Last version document for entered . + dict: Last version document for entered. 
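These `avalon_context` rewrites all follow one shim pattern: the old function stays importable, is tagged with `@deprecated("new.location")`, and delegates to its new home through a late import, which also avoids a circular dependency between `openpype.lib` and `openpype.pipeline`. A reduced sketch; the real `deprecated` helper in this codebase may differ in details:

```python
# Skeleton of the deprecation shim pattern; decorator internals are
# simplified, not copied from the real helper.
import functools
import warnings


def deprecated(new_destination):
    def _decorator(func):
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            warnings.warn(
                "'{}' is deprecated, use '{}' instead.".format(
                    func.__name__, new_destination
                ),
                DeprecationWarning,
                stacklevel=2,
            )
            return func(*args, **kwargs)
        return _wrapper
    return _decorator


@deprecated("openpype.pipeline.context_tools.get_current_project_asset")
def get_asset(asset_name=None):
    # Late import keeps openpype.lib importable without pipeline.
    from openpype.pipeline.context_tools import get_current_project_asset
    return get_current_project_asset(asset_name=asset_name)
```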
+ + Deprecated: + Function will be removed after release version 3.14.* """ if not project_name: if not dbcon: + from openpype.pipeline import legacy_io + log.debug("Using `legacy_io` for query.") dbcon = legacy_io # Make sure is installed @@ -347,39 +316,13 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): project_name = dbcon.active_project() - log.debug(( - "Getting latest version for Project: \"{}\" Asset: \"{}\"" - " and Subset: \"{}\"" - ).format(project_name, asset_name, subset_name)) - - # Query asset document id by asset name - asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) - if not asset_doc: - log.info( - "Asset \"{}\" was not found in Database.".format(asset_name) - ) - return None - - subset_doc = get_subset_by_name( - project_name, subset_name, asset_doc["_id"] + return get_last_version_by_subset_name( + project_name, subset_name, asset_name=asset_name ) - if not subset_doc: - log.info( - "Subset \"{}\" was not found in Database.".format(subset_name) - ) - return None - - version_doc = get_last_version_by_subset_id( - project_name, subset_doc["_id"] - ) - if not version_doc: - log.info( - "Subset \"{}\" does not have any version yet.".format(subset_name) - ) - return None - return version_doc +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key_from_context") def get_workfile_template_key_from_context( asset_name, task_name, host_name, project_name=None, dbcon=None, project_settings=None @@ -408,27 +351,26 @@ def get_workfile_template_key_from_context( ValueError: When both 'dbcon' and 'project_name' were not passed. """ + + from openpype.pipeline.workfile import ( + get_workfile_template_key_from_context + ) + if not project_name: if not dbcon: raise ValueError(( "`get_workfile_template_key_from_context` requires to pass" " one of 'dbcon' or 'project_name' arguments." )) - project_name = dbcon.active_project() - asset_doc = get_asset_by_name( - project_name, asset_name, fields=["data.tasks"] - ) - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") - - return get_workfile_template_key( - task_type, host_name, project_name, project_settings + return get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings ) +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key") def get_workfile_template_key( task_type, host_name, project_name=None, project_settings=None ): @@ -452,43 +394,15 @@ def get_workfile_template_key( ValueError: When both 'project_name' and 'project_settings' were not passed. """ - default = "work" - if not task_type or not host_name: - return default - if not project_settings: - if not project_name: - raise ValueError(( - "`get_workfile_template_key` requires to pass" - " one of 'project_name' or 'project_settings' arguments." 
- ))
- project_settings = get_project_settings(project_name)
+ from openpype.pipeline.workfile import get_workfile_template_key

- try:
- profiles = (
- project_settings
- ["global"]
- ["tools"]
- ["Workfiles"]
- ["workfile_template_profiles"]
- )
- except Exception:
- profiles = []
-
- if not profiles:
- return default
-
- profile_filter = {
- "task_types": task_type,
- "hosts": host_name
- }
- profile = filter_profiles(profiles, profile_filter)
- if profile:
- return profile["workfile_template"] or default
- return default
+ return get_workfile_template_key(
+ task_type, host_name, project_name, project_settings
+ )


-# TODO rename function as is not just "work" specific
+@deprecated("openpype.pipeline.template_data.get_template_data")
 def get_workdir_data(project_doc, asset_doc, task_name, host_name):
 """Prepare data for workdir template filling from entered information.

@@ -501,42 +415,19 @@ def get_workdir_data(project_doc, asset_doc, task_name, host_name):

 Returns:
 dict: Data prepared for filling workdir template.
+
+ Deprecated:
+ Function will be removed after release version 3.14.*
 """
- task_type = asset_doc['data']['tasks'].get(task_name, {}).get('type')
- project_task_types = project_doc["config"]["tasks"]
- task_code = project_task_types.get(task_type, {}).get("short_name")
+ from openpype.pipeline.template_data import get_template_data

- asset_parents = asset_doc["data"]["parents"]
- hierarchy = "/".join(asset_parents)
-
- parent_name = project_doc["name"]
- if asset_parents:
- parent_name = asset_parents[-1]
-
- data = {
- "project": {
- "name": project_doc["name"],
- "code": project_doc["data"].get("code")
- },
- "task": {
- "name": task_name,
- "type": task_type,
- "short": task_code,
- },
- "asset": asset_doc["name"],
- "parent": parent_name,
- "app": host_name,
- "user": get_openpype_username(),
- "hierarchy": hierarchy,
- }
-
- system_general_data = get_system_general_anatomy_data()
- data.update(system_general_data)
-
- return data
+ return get_template_data(
+ project_doc, asset_doc, task_name, host_name
+ )


+@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
 def get_workdir_with_workdir_data(
 workdir_data, anatomy=None, project_name=None, template_key=None
 ):
@@ -563,31 +454,24 @@ def get_workdir_with_workdir_data(
 Raises:
 ValueError: When both `anatomy` and `project_name` are set to None.
 """
+
 if not anatomy and not project_name:
 raise ValueError((
 "Missing required arguments one of `project_name` or `anatomy`"
 " must be entered."
 ))
- if not anatomy:
- from openpype.pipeline import Anatomy
- anatomy = Anatomy(project_name)
+ if not project_name:
+ project_name = anatomy.project_name

- if not template_key:
- template_key = get_workfile_template_key(
- workdir_data["task"]["type"],
- workdir_data["app"],
- project_name=workdir_data["project"]["name"]
- )
+ from openpype.pipeline.workfile import get_workdir_with_workdir_data

- anatomy_filled = anatomy.format(workdir_data)
- # Output is TemplateResult object which contain useful data
- path = anatomy_filled[template_key]["folder"]
- if path:
- path = os.path.normpath(path)
- return path
+ return get_workdir_with_workdir_data(
+ workdir_data, project_name, anatomy, template_key
+ )


+@deprecated("openpype.pipeline.workfile.get_workdir")
 def get_workdir(
 project_doc,
 asset_doc,
@@ -616,40 +500,36 @@ def get_workdir(
 TemplateResult: Workdir path.
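Note that the moved get_workdir_with_workdir_data expects the project name as its second positional argument, as the wrapper above shows. A hedged sketch of a direct call (document queries and names are illustrative):

    from openpype.client import get_project, get_asset_by_name
    from openpype.pipeline.template_data import get_template_data
    from openpype.pipeline.workfile import get_workdir_with_workdir_data

    project_doc = get_project("demo_project")
    asset_doc = get_asset_by_name("demo_project", "hero")
    workdir_data = get_template_data(project_doc, asset_doc, "modeling", "maya")
    workdir = get_workdir_with_workdir_data(workdir_data, "demo_project")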
""" - if not anatomy: - from openpype.pipeline import Anatomy - anatomy = Anatomy(project_doc["name"]) - - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name - ) + from openpype.pipeline.workfile import get_workdir # Output is TemplateResult object which contain useful data - return get_workdir_with_workdir_data( - workdir_data, anatomy, template_key=template_key + return get_workdir( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + template_key ) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_template_data_from_session") def template_data_from_session(session=None): """ Return dictionary with template from session keys. Args: session (dict, Optional): The Session to use. If not provided use the currently active global Session. + Returns: dict: All available data from session. + + Deprecated: + Function will be removed after release version 3.14.* """ - if session is None: - session = legacy_io.Session + from openpype.pipeline.context_tools import get_template_data_from_session - project_name = session["AVALON_PROJECT"] - asset_name = session["AVALON_ASSET"] - task_name = session["AVALON_TASK"] - host_name = session["AVALON_APP"] - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, asset_name) - return get_workdir_data(project_doc, asset_doc, task_name, host_name) + return get_template_data_from_session(session) @with_pipeline_io @@ -675,6 +555,8 @@ def compute_session_changes( dict: The required changes in the Session dictionary. """ + from openpype.pipeline.context_tools import get_workdir_from_session + changes = dict() # If no changes, return directly @@ -721,29 +603,11 @@ def compute_session_changes( return changes -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_workdir_from_session") def get_workdir_from_session(session=None, template_key=None): - from openpype.pipeline import Anatomy + from openpype.pipeline.context_tools import get_workdir_from_session - if session is None: - session = legacy_io.Session - project_name = session["AVALON_PROJECT"] - host_name = session["AVALON_APP"] - anatomy = Anatomy(project_name) - template_data = template_data_from_session(session) - anatomy_filled = anatomy.format(template_data) - - if not template_key: - task_type = template_data["task"]["type"] - template_key = get_workfile_template_key( - task_type, - host_name, - project_name=project_name - ) - path = anatomy_filled[template_key]["folder"] - if path: - path = os.path.normpath(path) - return path + return get_workdir_from_session(session, template_key) @with_pipeline_io @@ -759,8 +623,8 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): Returns: dict: The changed key, values in the current Session. - """ + changes = compute_session_changes( legacy_io.Session, task=task, @@ -790,7 +654,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return changes -@with_pipeline_io @deprecated("openpype.client.get_workfile_info") def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. 
@@ -811,13 +674,14 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io project_name = dbcon.active_project() return get_workfile_info(project_name, asset_id, task_name, filename) -@with_pipeline_io +@deprecated def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): """Creates or replace workfile document in mongo. @@ -832,10 +696,13 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and `legacy_io` is used if not entered. """ + from openpype.pipeline import Anatomy + from openpype.pipeline.template_data import get_template_data # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io # Filter of workfile document @@ -851,7 +718,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): # Prepare project for workdir data project_name = dbcon.active_project() project_doc = get_project(project_name) - workdir_data = get_workdir_data( + workdir_data = get_template_data( project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] ) # Prepare anatomy @@ -882,7 +749,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): ) -@with_pipeline_io +@deprecated def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): if not workfile_doc: # TODO add log message @@ -893,6 +760,7 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io # Convert data to mongo modification keys/values @@ -910,661 +778,11 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): ) -class BuildWorkfile: - """Wrapper for build workfile process. +@deprecated("openpype.pipeline.workfile.BuildWorkfile") +def BuildWorkfile(): + from openpype.pipeline.workfile import BuildWorkfile - Load representations for current context by build presets. Build presets - are host related, since each host has it's loaders. - """ - - log = logging.getLogger("BuildWorkfile") - - @staticmethod - def map_subsets_by_family(subsets): - subsets_by_family = collections.defaultdict(list) - for subset in subsets: - family = subset["data"].get("family") - if not family: - families = subset["data"].get("families") - if not families: - continue - family = families[0] - - subsets_by_family[family].append(subset) - return subsets_by_family - - def process(self): - """Main method of this wrapper. - - Building of workfile is triggered and is possible to implement - post processing of loaded containers if necessary. - """ - containers = self.build_workfile() - - return containers - - @with_pipeline_io - def build_workfile(self): - """Prepares and load containers into workfile. - - Loads latest versions of current and linked assets to workfile by logic - stored in Workfile profiles from presets. Profiles are set by host, - filtered by current task name and used by families. - - Each family can specify representation names and loaders for - representations and first available and successful loaded - representation is returned as container. - - At the end you'll get list of loaded containers per each asset. - - loaded_containers [{ - "asset_entity": , - "containers": [, , ...] - }, { - "asset_entity": , - "containers": [, ...] - }, { - ... 
- }] - """ - from openpype.pipeline import discover_loader_plugins - - # Get current asset name and entity - project_name = legacy_io.active_project() - current_asset_name = legacy_io.Session["AVALON_ASSET"] - current_asset_entity = get_asset_by_name( - project_name, current_asset_name - ) - # Skip if asset was not found - if not current_asset_entity: - print("Asset entity with name `{}` was not found".format( - current_asset_name - )) - return - - # Prepare available loaders - loaders_by_name = {} - for loader in discover_loader_plugins(): - loader_name = loader.__name__ - if loader_name in loaders_by_name: - raise KeyError( - "Duplicated loader name {0}!".format(loader_name) - ) - loaders_by_name[loader_name] = loader - - # Skip if there are any loaders - if not loaders_by_name: - self.log.warning("There are no registered loaders.") - return - - # Get current task name - current_task_name = legacy_io.Session["AVALON_TASK"] - - # Load workfile presets for task - self.build_presets = self.get_build_presets( - current_task_name, current_asset_entity - ) - - # Skip if there are any presets for task - if not self.build_presets: - self.log.warning( - "Current task `{}` does not have any loading preset.".format( - current_task_name - ) - ) - return - - # Get presets for loading current asset - current_context_profiles = self.build_presets.get("current_context") - # Get presets for loading linked assets - link_context_profiles = self.build_presets.get("linked_assets") - # Skip if both are missing - if not current_context_profiles and not link_context_profiles: - self.log.warning( - "Current task `{}` has empty loading preset.".format( - current_task_name - ) - ) - return - - elif not current_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any loading" - " preset for it's context." - ).format(current_task_name)) - - elif not link_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any" - "loading preset for it's linked assets." - ).format(current_task_name)) - - # Prepare assets to process by workfile presets - assets = [] - current_asset_id = None - if current_context_profiles: - # Add current asset entity if preset has current context set - assets.append(current_asset_entity) - current_asset_id = current_asset_entity["_id"] - - if link_context_profiles: - # Find and append linked assets if preset has set linked mapping - link_assets = get_linked_assets(current_asset_entity) - if link_assets: - assets.extend(link_assets) - - # Skip if there are no assets. This can happen if only linked mapping - # is set and there are no links for his asset. - if not assets: - self.log.warning( - "Asset does not have linked assets. Nothing to process." 
- ) - return - - # Prepare entities from database for assets - prepared_entities = self._collect_last_version_repres(assets) - - # Load containers by prepared entities and presets - loaded_containers = [] - # - Current asset containers - if current_asset_id and current_asset_id in prepared_entities: - current_context_data = prepared_entities.pop(current_asset_id) - loaded_data = self.load_containers_by_asset_data( - current_context_data, current_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # - Linked assets container - for linked_asset_data in prepared_entities.values(): - loaded_data = self.load_containers_by_asset_data( - linked_asset_data, link_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # Return list of loaded containers - return loaded_containers - - @with_pipeline_io - def get_build_presets(self, task_name, asset_doc): - """ Returns presets to build workfile for task name. - - Presets are loaded for current project set in - io.Session["AVALON_PROJECT"], filtered by registered host - and entered task name. - - Args: - task_name (str): Task name used for filtering build presets. - - Returns: - (dict): preset per entered task name - """ - host_name = os.environ["AVALON_APP"] - project_settings = get_project_settings( - legacy_io.Session["AVALON_PROJECT"] - ) - - host_settings = project_settings.get(host_name) or {} - # Get presets for host - wb_settings = host_settings.get("workfile_builder") - if not wb_settings: - # backward compatibility - wb_settings = host_settings.get("workfile_build") or {} - - builder_profiles = wb_settings.get("profiles") - if not builder_profiles: - return None - - task_type = ( - asset_doc - .get("data", {}) - .get("tasks", {}) - .get(task_name, {}) - .get("type") - ) - filter_data = { - "task_types": task_type, - "tasks": task_name - } - return filter_profiles(builder_profiles, filter_data) - - def _filter_build_profiles(self, build_profiles, loaders_by_name): - """ Filter build profiles by loaders and prepare process data. - - Valid profile must have "loaders", "families" and "repre_names" keys - with valid values. - - "loaders" expects list of strings representing possible loaders. - - "families" expects list of strings for filtering - by main subset family. - - "repre_names" expects list of strings for filtering by - representation name. - - Lowered "families" and "repre_names" are prepared for each profile with - all required keys. - - Args: - build_profiles (dict): Profiles for building workfile. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list): Filtered and prepared profiles. 
- """ - valid_profiles = [] - for profile in build_profiles: - # Check loaders - profile_loaders = profile.get("loaders") - if not profile_loaders: - self.log.warning(( - "Build profile has missing loaders configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check if any loader is available - loaders_match = False - for loader_name in profile_loaders: - if loader_name in loaders_by_name: - loaders_match = True - break - - if not loaders_match: - self.log.warning(( - "All loaders from Build profile are not available: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check families - profile_families = profile.get("families") - if not profile_families: - self.log.warning(( - "Build profile is missing families configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check representation names - profile_repre_names = profile.get("repre_names") - if not profile_repre_names: - self.log.warning(( - "Build profile is missing" - " representation names filtering: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Prepare lowered families and representation names - profile["families_lowered"] = [ - fam.lower() for fam in profile_families - ] - profile["repre_names_lowered"] = [ - name.lower() for name in profile_repre_names - ] - - valid_profiles.append(profile) - - return valid_profiles - - def _prepare_profile_for_subsets(self, subsets, profiles): - """Select profile for each subset by it's data. - - Profiles are filtered for each subset individually. - Profile is filtered by subset's family, optionally by name regex and - representation names set in profile. - It is possible to not find matching profile for subset, in that case - subset is skipped and it is possible that none of subsets have - matching profile. - - Args: - subsets (list): Subset documents. - profiles (dict): Build profiles. - - Returns: - (dict) Profile by subset's id. - """ - # Prepare subsets - subsets_by_family = self.map_subsets_by_family(subsets) - - profiles_per_subset_id = {} - for family, subsets in subsets_by_family.items(): - family_low = family.lower() - for profile in profiles: - # Skip profile if does not contain family - if family_low not in profile["families_lowered"]: - continue - - # Precompile name filters as regexes - profile_regexes = profile.get("subset_name_filters") - if profile_regexes: - _profile_regexes = [] - for regex in profile_regexes: - _profile_regexes.append(re.compile(regex)) - profile_regexes = _profile_regexes - - # TODO prepare regex compilation - for subset in subsets: - # Verify regex filtering (optional) - if profile_regexes: - valid = False - for pattern in profile_regexes: - if re.match(pattern, subset["name"]): - valid = True - break - - if not valid: - continue - - profiles_per_subset_id[subset["_id"]] = profile - - # break profiles loop on finding the first matching profile - break - return profiles_per_subset_id - - def load_containers_by_asset_data( - self, asset_entity_data, build_profiles, loaders_by_name - ): - """Load containers for entered asset entity by Build profiles. - - Args: - asset_entity_data (dict): Prepared data with subsets, last version - and representations for specific asset. - build_profiles (dict): Build profiles. - loaders_by_name (dict): Available loaders per name. - - Returns: - (dict) Output contains asset document and loaded containers. 
- """ - - # Make sure all data are not empty - if not asset_entity_data or not build_profiles or not loaders_by_name: - return - - asset_entity = asset_entity_data["asset_entity"] - - valid_profiles = self._filter_build_profiles( - build_profiles, loaders_by_name - ) - if not valid_profiles: - self.log.warning( - "There are not valid Workfile profiles. Skipping process." - ) - return - - self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) - - subsets_by_id = {} - version_by_subset_id = {} - repres_by_version_id = {} - for subset_id, in_data in asset_entity_data["subsets"].items(): - subset_entity = in_data["subset_entity"] - subsets_by_id[subset_entity["_id"]] = subset_entity - - version_data = in_data["version"] - version_entity = version_data["version_entity"] - version_by_subset_id[subset_id] = version_entity - repres_by_version_id[version_entity["_id"]] = ( - version_data["repres"] - ) - - if not subsets_by_id: - self.log.warning("There are not subsets for asset {0}".format( - asset_entity["name"] - )) - return - - profiles_per_subset_id = self._prepare_profile_for_subsets( - subsets_by_id.values(), valid_profiles - ) - if not profiles_per_subset_id: - self.log.warning("There are not valid subsets.") - return - - valid_repres_by_subset_id = collections.defaultdict(list) - for subset_id, profile in profiles_per_subset_id.items(): - profile_repre_names = profile["repre_names_lowered"] - - version_entity = version_by_subset_id[subset_id] - version_id = version_entity["_id"] - repres = repres_by_version_id[version_id] - for repre in repres: - repre_name_low = repre["name"].lower() - if repre_name_low in profile_repre_names: - valid_repres_by_subset_id[subset_id].append(repre) - - # DEBUG message - msg = "Valid representations for Asset: `{}`".format( - asset_entity["name"] - ) - for subset_id, repres in valid_repres_by_subset_id.items(): - subset = subsets_by_id[subset_id] - msg += "\n# Subset Name/ID: `{}`/{}".format( - subset["name"], subset_id - ) - for repre in repres: - msg += "\n## Repre name: `{}`".format(repre["name"]) - - self.log.debug(msg) - - containers = self._load_containers( - valid_repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ) - - return { - "asset_entity": asset_entity, - "containers": containers - } - - @with_pipeline_io - def _load_containers( - self, repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ): - """Real load by collected data happens here. - - Loading of representations per subset happens here. Each subset can - loads one representation. Loading is tried in specific order. - Representations are tried to load by names defined in configuration. - If subset has representation matching representation name each loader - is tried to load it until any is successful. If none of them was - successful then next representation name is tried. - Subset process loop ends when any representation is loaded or - all matching representations were already tried. - - Args: - repres_by_subset_id (dict): Available representations mapped - by their parent (subset) id. - subsets_by_id (dict): Subset documents mapped by their id. - profiles_per_subset_id (dict): Build profiles mapped by subset id. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list) Objects of loaded containers. - """ - from openpype.pipeline import ( - IncompatibleLoaderError, - load_container, - ) - - loaded_containers = [] - - # Get subset id order from build presets. 
- build_presets = self.build_presets.get("current_context", []) - build_presets += self.build_presets.get("linked_assets", []) - subset_ids_ordered = [] - for preset in build_presets: - for preset_family in preset["families"]: - for id, subset in subsets_by_id.items(): - if preset_family not in subset["data"].get("families", []): - continue - - subset_ids_ordered.append(id) - - # Order representations from subsets. - print("repres_by_subset_id", repres_by_subset_id) - representations_ordered = [] - representations = [] - for id in subset_ids_ordered: - for subset_id, repres in repres_by_subset_id.items(): - if repres in representations: - continue - - if id == subset_id: - representations_ordered.append((subset_id, repres)) - representations.append(repres) - - print("representations", representations) - - # Load ordered representations. - for subset_id, repres in representations_ordered: - subset_name = subsets_by_id[subset_id]["name"] - - profile = profiles_per_subset_id[subset_id] - loaders_last_idx = len(profile["loaders"]) - 1 - repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 - - repre_by_low_name = { - repre["name"].lower(): repre for repre in repres - } - - is_loaded = False - for repre_name_idx, profile_repre_name in enumerate( - profile["repre_names_lowered"] - ): - # Break iteration if representation was already loaded - if is_loaded: - break - - repre = repre_by_low_name.get(profile_repre_name) - if not repre: - continue - - for loader_idx, loader_name in enumerate(profile["loaders"]): - if is_loaded: - break - - loader = loaders_by_name.get(loader_name) - if not loader: - continue - try: - container = load_container( - loader, - repre["_id"], - name=subset_name - ) - loaded_containers.append(container) - is_loaded = True - - except Exception as exc: - if exc == IncompatibleLoaderError: - self.log.info(( - "Loader `{}` is not compatible with" - " representation `{}`" - ).format(loader_name, repre["name"])) - - else: - self.log.error( - "Unexpected error happened during loading", - exc_info=True - ) - - msg = "Loading failed." - if loader_idx < loaders_last_idx: - msg += " Trying next loader." - elif repre_name_idx < repre_names_last_idx: - msg += ( - " Loading of subset `{}` was not successful." - ).format(subset_name) - else: - msg += " Trying next representation." - self.log.info(msg) - - return loaded_containers - - @with_pipeline_io - def _collect_last_version_repres(self, asset_docs): - """Collect subsets, versions and representations for asset_entities. - - Args: - asset_entities (list): Asset entities for which want to find data - - Returns: - (dict): collected entities - - Example output: - ``` - { - {Asset ID}: { - "asset_entity": , - "subsets": { - {Subset ID}: { - "subset_entity": , - "version": { - "version_entity": , - "repres": [ - , , ... - ] - } - }, - ... - } - }, - ... 
- } - output[asset_id]["subsets"][subset_id]["version"]["repres"] - ``` - """ - - output = {} - if not asset_docs: - return output - - asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs} - - project_name = legacy_io.active_project() - subsets = list(get_subsets( - project_name, asset_ids=asset_docs_by_ids.keys() - )) - subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - - last_version_by_subset_id = get_last_versions( - project_name, subset_entity_by_ids.keys() - ) - last_version_docs_by_id = { - version["_id"]: version - for version in last_version_by_subset_id.values() - } - repre_docs = get_representations( - project_name, version_ids=last_version_docs_by_id.keys() - ) - - for repre_doc in repre_docs: - version_id = repre_doc["parent"] - version_doc = last_version_docs_by_id[version_id] - - subset_id = version_doc["parent"] - subset_doc = subset_entity_by_ids[subset_id] - - asset_id = subset_doc["parent"] - asset_doc = asset_docs_by_ids[asset_id] - - if asset_id not in output: - output[asset_id] = { - "asset_entity": asset_doc, - "subsets": {} - } - - if subset_id not in output[asset_id]["subsets"]: - output[asset_id]["subsets"][subset_id] = { - "subset_entity": subset_doc, - "version": { - "version_entity": version_doc, - "repres": [] - } - } - - output[asset_id]["subsets"][subset_id]["version"]["repres"].append( - repre_doc - ) - - return output + return BuildWorkfile() @with_pipeline_io @@ -1597,13 +815,21 @@ def get_creator_by_name(creator_name, case_sensitive=False): return None -@with_pipeline_io +@deprecated def change_timer_to_current_context(): """Called after context change to change timers. + Deprecated: + This method is specific for TimersManager module so please use the + functionality from there. Function will be removed after release + version 3.14.* + TODO: - use TimersManager's static method instead of reimplementing it here """ + + from openpype.pipeline import legacy_io + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: log.warning("Couldn't find webserver url") @@ -1691,6 +917,8 @@ def _get_task_context_data_for_anatomy( return data +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_context") def get_custom_workfile_template_by_context( template_profiles, project_doc, asset_doc, task_name, anatomy=None ): @@ -1744,6 +972,9 @@ def get_custom_workfile_template_by_context( return None +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_string_context" +) def get_custom_workfile_template_by_string_context( template_profiles, project_name, asset_name, task_name, dbcon=None, anatomy=None @@ -1788,7 +1019,7 @@ def get_custom_workfile_template_by_string_context( ) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_custom_workfile_template") def get_custom_workfile_template(template_profiles): """Filter and fill workfile template profiles by current context. @@ -1803,6 +1034,8 @@ def get_custom_workfile_template(template_profiles): context. (Existence of formatted path is not validated.) 
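Existing imports of BuildWorkfile keep working through the factory above, and new code can import the class from its pipeline home directly. A minimal sketch, assuming the moved class keeps the process() entry point of the removed implementation:

    from openpype.pipeline.workfile import BuildWorkfile

    # Loads the latest versions for the current and linked assets
    loaded_containers = BuildWorkfile().process()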
""" + from openpype.pipeline import legacy_io + return get_custom_workfile_template_by_string_context( template_profiles, legacy_io.Session["AVALON_PROJECT"], @@ -1812,6 +1045,7 @@ def get_custom_workfile_template(template_profiles): ) +@deprecated("openpype.pipeline.workfile.get_last_workfile_with_version") def get_last_workfile_with_version( workdir, file_template, fill_data, extensions ): @@ -1827,78 +1061,15 @@ def get_last_workfile_with_version( tuple: Last workfile with version if there is any otherwise returns (None, None). """ - if not os.path.exists(workdir): - return None, None - # Fast match on extension - filenames = [ - filename - for filename in os.listdir(workdir) - if os.path.splitext(filename)[1] in extensions - ] + from openpype.pipeline.workfile import get_last_workfile_with_version - # Build template without optionals, version to digits only regex - # and comment to any definable value. - _ext = [] - for ext in extensions: - if not ext.startswith("."): - ext = "." + ext - # Escape dot for regex - ext = "\\" + ext - _ext.append(ext) - ext_expression = "(?:" + "|".join(_ext) + ")" - - # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end - file_template = re.sub(r"\.?{ext}", ext_expression, file_template) - # Replace optional keys with optional content regex - file_template = re.sub(r"<.*?>", r".*?", file_template) - # Replace `{version}` with group regex - file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) - file_template = re.sub(r"{comment.*?}", r".+?", file_template) - file_template = StringTemplate.format_strict_template( - file_template, fill_data + return get_last_workfile_with_version( + workdir, file_template, fill_data, extensions ) - # Match with ignore case on Windows due to the Windows - # OS not being case-sensitive. This avoids later running - # into the error that the file did exist if it existed - # with a different upper/lower-case. - kwargs = {} - if platform.system().lower() == "windows": - kwargs["flags"] = re.IGNORECASE - - # Get highest version among existing matching files - version = None - output_filenames = [] - for filename in sorted(filenames): - match = re.match(file_template, filename, **kwargs) - if not match: - continue - - file_version = int(match.group(1)) - if version is None or file_version > version: - output_filenames[:] = [] - version = file_version - - if file_version == version: - output_filenames.append(filename) - - output_filename = None - if output_filenames: - if len(output_filenames) == 1: - output_filename = output_filenames[0] - else: - last_time = None - for _output_filename in output_filenames: - full_path = os.path.join(workdir, _output_filename) - mod_time = os.path.getmtime(full_path) - if last_time is None or last_time < mod_time: - output_filename = _output_filename - last_time = mod_time - - return output_filename, version - +@deprecated("openpype.pipeline.workfile.get_last_workfile") def get_last_workfile( workdir, file_template, fill_data, extensions, full_path=False ): @@ -1916,22 +1087,12 @@ def get_last_workfile( Returns: str: Last or first workfile as filename of full path to filename. 
""" - filename, version = get_last_workfile_with_version( - workdir, file_template, fill_data, extensions + + from openpype.pipeline.workfile import get_last_workfile + + return get_last_workfile( + workdir, file_template, fill_data, extensions, full_path ) - if filename is None: - data = copy.deepcopy(fill_data) - data["version"] = 1 - data.pop("comment", None) - if not data.get("ext"): - data["ext"] = extensions[0] - data["ext"] = data["ext"].replace('.', '') - filename = StringTemplate.format_strict_template(file_template, data) - - if full_path: - return os.path.normpath(os.path.join(workdir, filename)) - - return filename @with_pipeline_io diff --git a/openpype/lib/config.py b/openpype/lib/config.py index 57e8efa57d..26822649e4 100644 --- a/openpype/lib/config.py +++ b/openpype/lib/config.py @@ -1,82 +1,41 @@ -# -*- coding: utf-8 -*- -"""Get configuration data.""" -import datetime +import warnings +import functools -def get_datetime_data(datetime_obj=None): - """Returns current datetime data as dictionary. +class ConfigDeprecatedWarning(DeprecationWarning): + pass - Args: - datetime_obj (datetime): Specific datetime object - Returns: - dict: prepared date & time data +def deprecated(func): + """Mark functions as deprecated. - Available keys: - "d" - in shortest possible way. - "dd" - with 2 digits. - "ddd" - shortened week day. e.g.: `Mon`, ... - "dddd" - full name of week day. e.g.: `Monday`, ... - "m" - in shortest possible way. e.g.: `1` if January - "mm" - with 2 digits. - "mmm" - shortened month name. e.g.: `Jan`, ... - "mmmm" - full month name. e.g.: `January`, ... - "yy" - shortened year. e.g.: `19`, `20`, ... - "yyyy" - full year. e.g.: `2019`, `2020`, ... - "H" - shortened hours. - "HH" - with 2 digits. - "h" - shortened hours. - "hh" - with 2 digits. - "ht" - AM or PM. - "M" - shortened minutes. - "MM" - with 2 digits. - "S" - shortened seconds. - "SS" - with 2 digits. + It will result in a warning being emitted when the function is used. """ - if not datetime_obj: - datetime_obj = datetime.datetime.now() - - year = datetime_obj.strftime("%Y") - - month = datetime_obj.strftime("%m") - month_name_full = datetime_obj.strftime("%B") - month_name_short = datetime_obj.strftime("%b") - day = datetime_obj.strftime("%d") - - weekday_full = datetime_obj.strftime("%A") - weekday_short = datetime_obj.strftime("%a") - - hours = datetime_obj.strftime("%H") - hours_midday = datetime_obj.strftime("%I") - hour_midday_type = datetime_obj.strftime("%p") - minutes = datetime_obj.strftime("%M") - seconds = datetime_obj.strftime("%S") - - return { - "d": str(int(day)), - "dd": str(day), - "ddd": weekday_short, - "dddd": weekday_full, - "m": str(int(month)), - "mm": str(month), - "mmm": month_name_short, - "mmmm": month_name_full, - "yy": str(year[2:]), - "yyyy": str(year), - "H": str(int(hours)), - "HH": str(hours), - "h": str(int(hours_midday)), - "hh": str(hours_midday), - "ht": hour_midday_type, - "M": str(int(minutes)), - "MM": str(minutes), - "S": str(int(seconds)), - "SS": str(seconds), - } + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", ConfigDeprecatedWarning) + warnings.warn( + ( + "Deprecated import of function '{}'." + " Class was moved to 'openpype.lib.dateutils.{}'." + " Please change your imports." 
+ ).format(func.__name__), + category=ConfigDeprecatedWarning + ) + return func(*args, **kwargs) + return new_func +@deprecated +def get_datetime_data(datetime_obj=None): + from .dateutils import get_datetime_data + + return get_datetime_data(datetime_obj) + + +@deprecated def get_formatted_current_time(): - return datetime.datetime.now().strftime( - "%Y%m%dT%H%M%SZ" - ) + from .dateutils import get_formatted_current_time + + return get_formatted_current_time() diff --git a/openpype/lib/dateutils.py b/openpype/lib/dateutils.py new file mode 100644 index 0000000000..68cd1d1c5b --- /dev/null +++ b/openpype/lib/dateutils.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +"""Get configuration data.""" +import datetime + + +def get_datetime_data(datetime_obj=None): + """Returns current datetime data as dictionary. + + Args: + datetime_obj (datetime): Specific datetime object + + Returns: + dict: prepared date & time data + + Available keys: + "d" - in shortest possible way. + "dd" - with 2 digits. + "ddd" - shortened week day. e.g.: `Mon`, ... + "dddd" - full name of week day. e.g.: `Monday`, ... + "m" - in shortest possible way. e.g.: `1` if January + "mm" - with 2 digits. + "mmm" - shortened month name. e.g.: `Jan`, ... + "mmmm" - full month name. e.g.: `January`, ... + "yy" - shortened year. e.g.: `19`, `20`, ... + "yyyy" - full year. e.g.: `2019`, `2020`, ... + "H" - shortened hours. + "HH" - with 2 digits. + "h" - shortened hours. + "hh" - with 2 digits. + "ht" - AM or PM. + "M" - shortened minutes. + "MM" - with 2 digits. + "S" - shortened seconds. + "SS" - with 2 digits. + """ + + if not datetime_obj: + datetime_obj = datetime.datetime.now() + + year = datetime_obj.strftime("%Y") + + month = datetime_obj.strftime("%m") + month_name_full = datetime_obj.strftime("%B") + month_name_short = datetime_obj.strftime("%b") + day = datetime_obj.strftime("%d") + + weekday_full = datetime_obj.strftime("%A") + weekday_short = datetime_obj.strftime("%a") + + hours = datetime_obj.strftime("%H") + hours_midday = datetime_obj.strftime("%I") + hour_midday_type = datetime_obj.strftime("%p") + minutes = datetime_obj.strftime("%M") + seconds = datetime_obj.strftime("%S") + + return { + "d": str(int(day)), + "dd": str(day), + "ddd": weekday_short, + "dddd": weekday_full, + "m": str(int(month)), + "mm": str(month), + "mmm": month_name_short, + "mmmm": month_name_full, + "yy": str(year[2:]), + "yyyy": str(year), + "H": str(int(hours)), + "HH": str(hours), + "h": str(int(hours_midday)), + "hh": str(hours_midday), + "ht": hour_midday_type, + "M": str(int(minutes)), + "MM": str(minutes), + "S": str(int(seconds)), + "SS": str(seconds), + } + + +def get_timestamp(datetime_obj=None): + """Get standardized timestamp from datetime object. + + Args: + datetime_obj (datetime.datetime): Object of datetime. Current time + is used if not passed. + """ + + if datetime_obj is None: + datetime_obj = datetime.datetime.now() + return datetime_obj.strftime( + "%Y%m%dT%H%M%SZ" + ) + + +def get_formatted_current_time(): + return get_timestamp() diff --git a/openpype/lib/events.py b/openpype/lib/events.py index 7bec6ee30d..301d62e2a6 100644 --- a/openpype/lib/events.py +++ b/openpype/lib/events.py @@ -11,6 +11,10 @@ except Exception: from openpype.lib.python_2_comp import WeakMethod +class MissingEventSystem(Exception): + pass + + class EventCallback(object): """Callback registered to a topic. @@ -176,16 +180,20 @@ class Event(object): topic (str): Identifier of event. data (Any): Data specific for event. Dictionary is recommended. 
 source (str): Identifier of source.
+ event_system (EventSystem): Event system in which the event can be
+ triggered.
 """
+
 _data = {}

- def __init__(self, topic, data=None, source=None):
+ def __init__(self, topic, data=None, source=None, event_system=None):
 self._id = str(uuid4())
 self._topic = topic
 if data is None:
 data = {}
 self._data = data
 self._source = source
+ self._event_system = event_system

 def __getitem__(self, key):
 return self._data[key]
@@ -211,28 +219,118 @@ class Event(object):

 def emit(self):
 """Emit event and trigger callbacks."""
- StoredCallbacks.emit_event(self)
+ if self._event_system is None:
+ raise MissingEventSystem(
+ "Can't emit event {}. Event system is not set.".format(
+ str(repr(self))
+ )
+ )
+ self._event_system.emit_event(self)


-class StoredCallbacks:
- _registered_callbacks = []
+class EventSystem(object):
+ """Encapsulate event handling into an object.
+
+ The system wraps registered callbacks and triggered events into a single
+ object, so it is possible to create multiple independent systems, each
+ with its own topics and callbacks.
+ """
+
+ def __init__(self):
+ self._registered_callbacks = []
+
+ def add_callback(self, topic, callback):
+ """Register callback in event system.
+
+ Args:
+ topic (str): Topic for EventCallback.
+ callback (Callable): Function or method that will be called
+ when topic is triggered.
+
+ Returns:
+ EventCallback: Created callback object which can be used to
+ stop listening.
+ """

- @classmethod
- def add_callback(cls, topic, callback):
 callback = EventCallback(topic, callback)
- cls._registered_callbacks.append(callback)
+ self._registered_callbacks.append(callback)
 return callback

- @classmethod
- def emit_event(cls, event):
+ def create_event(self, topic, data, source):
+ """Create new event bound to this event system.
+
+ Args:
+ topic (str): Event topic.
+ data (dict): Data related to event.
+ source (str): Source of event.
+
+ Returns:
+ Event: Object of event.
+ """
+
+ return Event(topic, data, source, self)
+
+ def emit(self, topic, data, source):
+ """Create event based on passed data and emit it.
+
+ This is the easiest way to trigger an event in an event system.
+
+ Args:
+ topic (str): Event topic.
+ data (dict): Data related to event.
+ source (str): Source of event.
+
+ Returns:
+ Event: Created and emitted event.
+ """
+
+ event = self.create_event(topic, data, source)
+ event.emit()
+ return event
+
+ def emit_event(self, event):
+ """Emit event object.
+
+ Args:
+ event (Event): Prepared event with topic and data.
+ """
+
 invalid_callbacks = []
- for callback in cls._registered_callbacks:
+ for callback in self._registered_callbacks:
 callback.process_event(event)
 if not callback.is_ref_valid:
 invalid_callbacks.append(callback)

 for callback in invalid_callbacks:
- cls._registered_callbacks.remove(callback)
+ self._registered_callbacks.remove(callback)
+
+
+class GlobalEventSystem:
+ """Event system living in the global scope of the process.
+
+ This is primarily used in host implementations to trigger events
+ related to DCC changes or context changes.
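A minimal sketch of the new object-based API (the topic, data and source values are made up; the callback receives the Event object, whose data is reachable through the __getitem__ defined above):

    from openpype.lib.events import EventSystem

    event_system = EventSystem()

    def on_workfile_saved(event):
        print("Saved:", event["filepath"])

    event_system.add_callback("workfile.saved", on_workfile_saved)
    event_system.emit(
        "workfile.saved", {"filepath": "/tmp/scene_v001.ma"}, "example"
    )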
+ """ + + _global_event_system = None + + @classmethod + def get_global_event_system(cls): + if cls._global_event_system is None: + cls._global_event_system = EventSystem() + return cls._global_event_system + + @classmethod + def add_callback(cls, topic, callback): + event_system = cls.get_global_event_system() + return event_system.add_callback(topic, callback) + + @classmethod + def emit(cls, topic, data, source): + event_system = cls.get_global_event_system() + return event_system.emit(topic, data, source) def register_event_callback(topic, callback): @@ -249,7 +347,8 @@ def register_event_callback(topic, callback): enable/disable listening to a topic or remove the callback from the topic completely. """ - return StoredCallbacks.add_callback(topic, callback) + + return GlobalEventSystem.add_callback(topic, callback) def emit_event(topic, data=None, source=None): @@ -263,6 +362,5 @@ def emit_event(topic, data=None, source=None): Returns: Event: Object of event that was emitted. """ - event = Event(topic, data, source) - event.emit() - return event + + return GlobalEventSystem.emit(topic, data, source) diff --git a/openpype/lib/file_transaction.py b/openpype/lib/file_transaction.py new file mode 100644 index 0000000000..1626bec6b6 --- /dev/null +++ b/openpype/lib/file_transaction.py @@ -0,0 +1,171 @@ +import os +import logging +import sys +import errno +import six + +from openpype.lib import create_hard_link + +# this is needed until speedcopy for linux is fixed +if sys.platform == "win32": + from speedcopy import copyfile +else: + from shutil import copyfile + + +class FileTransaction(object): + """ + + The file transaction is a three step process. + + 1) Rename any existing files to a "temporary backup" during `process()` + 2) Copy the files to final destination during `process()` + 3) Remove any backed up files (*no rollback possible!) during `finalize()` + + Step 3 is done during `finalize()`. If not called the .bak files will + remain on disk. + + These steps try to ensure that we don't overwrite half of any existing + files e.g. if they are currently in use. + + Note: + A regular filesystem is *not* a transactional file system and even + though this implementation tries to produce a 'safe copy' with a + potential rollback do keep in mind that it's inherently unsafe due + to how filesystem works and a myriad of things could happen during + the transaction that break the logic. A file storage could go down, + permissions could be changed, other machines could be moving or writing + files. A lot can happen. + + Warning: + Any folders created during the transfer will not be removed. + + """ + + MODE_COPY = 0 + MODE_HARDLINK = 1 + + def __init__(self, log=None): + + if log is None: + log = logging.getLogger("FileTransaction") + + self.log = log + + # The transfer queue + # todo: make this an actual FIFO queue? 
+ self._transfers = {} + + # Destination file paths that a file was transferred to + self._transferred = [] + + # Backup file location mapping to original locations + self._backup_to_original = {} + + def add(self, src, dst, mode=MODE_COPY): + """Add a new file to transfer queue""" + opts = {"mode": mode} + + src = os.path.abspath(src) + dst = os.path.abspath(dst) + + if dst in self._transfers: + queued_src = self._transfers[dst][0] + if src == queued_src: + self.log.debug("File transfer was already " + "in queue: {} -> {}".format(src, dst)) + return + else: + self.log.warning("File transfer in queue replaced..") + self.log.debug("Removed from queue: " + "{} -> {}".format(queued_src, dst)) + self.log.debug("Added to queue: {} -> {}".format(src, dst)) + + self._transfers[dst] = (src, opts) + + def process(self): + + # Backup any existing files + for dst in self._transfers.keys(): + if os.path.exists(dst): + # Backup original file + # todo: add timestamp or uuid to ensure unique + backup = dst + ".bak" + self._backup_to_original[backup] = dst + self.log.debug("Backup existing file: " + "{} -> {}".format(dst, backup)) + os.rename(dst, backup) + + # Copy the files to transfer + for dst, (src, opts) in self._transfers.items(): + self._create_folder_for_file(dst) + + if opts["mode"] == self.MODE_COPY: + self.log.debug("Copying file ... {} -> {}".format(src, dst)) + copyfile(src, dst) + elif opts["mode"] == self.MODE_HARDLINK: + self.log.debug("Hardlinking file ... {} -> {}".format(src, + dst)) + create_hard_link(src, dst) + + self._transferred.append(dst) + + def finalize(self): + # Delete any backed up files + for backup in self._backup_to_original.keys(): + try: + os.remove(backup) + except OSError: + self.log.error("Failed to remove backup file: " + "{}".format(backup), + exc_info=True) + + def rollback(self): + + errors = 0 + + # Rollback any transferred files + for path in self._transferred: + try: + os.remove(path) + except OSError: + errors += 1 + self.log.error("Failed to rollback created file: " + "{}".format(path), + exc_info=True) + + # Rollback the backups + for backup, original in self._backup_to_original.items(): + try: + os.rename(backup, original) + except OSError: + errors += 1 + self.log.error("Failed to restore original file: " + "{} -> {}".format(backup, original), + exc_info=True) + + if errors: + self.log.error("{} errors occurred during " + "rollback.".format(errors), exc_info=True) + six.reraise(*sys.exc_info()) + + @property + def transferred(self): + """Return the processed transfers destination paths""" + return list(self._transferred) + + @property + def backups(self): + """Return the backup file paths""" + return list(self._backup_to_original.keys()) + + def _create_folder_for_file(self, path): + dirname = os.path.dirname(path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + self.log.critical("An unexpected error occurred.") + six.reraise(*sys.exc_info()) diff --git a/openpype/lib/git_progress.py b/openpype/lib/git_progress.py deleted file mode 100644 index 331b7b6745..0000000000 --- a/openpype/lib/git_progress.py +++ /dev/null @@ -1,86 +0,0 @@ -import git -from tqdm import tqdm - - -class _GitProgress(git.remote.RemoteProgress): - """ Class handling displaying progress during git operations. - - This is using **tqdm** for showing progress bars. 
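A usage sketch for the new FileTransaction (paths are hypothetical): process() backs up colliding files and transfers the queue, finalize() drops the backups, and rollback() restores the pre-transaction state on failure:

    from openpype.lib.file_transaction import FileTransaction

    transaction = FileTransaction()
    transaction.add("/tmp/render_v001.exr", "/publish/render_v001.exr")
    transaction.add(
        "/tmp/render_v001.mov",
        "/publish/render_v001.mov",
        mode=FileTransaction.MODE_HARDLINK
    )
    try:
        transaction.process()
    except Exception:
        transaction.rollback()
        raise
    transaction.finalize()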
As **GitPython** - is parsing progress directly from git command, it is somehow unreliable - as in some operations it is difficult to get total count of iterations - to display meaningful progress bar. - - """ - _t = None - _code = 0 - _current_status = '' - _current_max = '' - - _description = { - 256: "Checking out files", - 4: "Counting objects", - 128: "Finding sources", - 32: "Receiving objects", - 64: "Resolving deltas", - 16: "Writing objects" - } - - def __init__(self): - super().__init__() - - def __del__(self): - if self._t is not None: - self._t.close() - - def _detroy_tqdm(self): - """ Used to close tqdm when operation ended. - - """ - if self._t is not None: - self._t.close() - self._t = None - - def _check_mask(self, opcode: int) -> bool: - """" Add meaningful description to **GitPython** opcodes. - - :param opcode: OP_MASK opcode - :type opcode: int - :return: String description of opcode - :rtype: str - - .. seealso:: For opcodes look at :class:`git.RemoteProgress` - - """ - if opcode & self.COUNTING: - return self._description.get(self.COUNTING) - elif opcode & self.CHECKING_OUT: - return self._description.get(self.CHECKING_OUT) - elif opcode & self.WRITING: - return self._description.get(self.WRITING) - elif opcode & self.RECEIVING: - return self._description.get(self.RECEIVING) - elif opcode & self.RESOLVING: - return self._description.get(self.RESOLVING) - elif opcode & self.FINDING_SOURCES: - return self._description.get(self.FINDING_SOURCES) - else: - return "Processing" - - def update(self, op_code, cur_count, max_count=None, message=''): - """ Called when git operation update progress. - - .. seealso:: For more details see - :func:`git.objects.submodule.base.Submodule.update` - `Documentation `_ - - """ - code = self._check_mask(op_code) - if self._current_status != code or self._current_max != max_count: - self._current_max = max_count - self._current_status = code - self._detroy_tqdm() - self._t = tqdm(total=max_count) - self._t.set_description(" . {}".format(code)) - - self._t.update(cur_count) diff --git a/openpype/lib/local_settings.py b/openpype/lib/local_settings.py index 97e99b4b5a..c6c9699240 100644 --- a/openpype/lib/local_settings.py +++ b/openpype/lib/local_settings.py @@ -34,7 +34,7 @@ from openpype.settings import ( get_system_settings ) -from .import validate_mongo_connection +from openpype.client.mongo import validate_mongo_connection _PLACEHOLDER = object() diff --git a/openpype/lib/log.py b/openpype/lib/log.py index 2cdb7ec8e4..e77edea0e9 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -24,12 +24,13 @@ import traceback import threading import copy -from . import Terminal -from .mongo import ( +from openpype.client.mongo import ( MongoEnvNotSet, get_default_components, - OpenPypeMongoConnection + OpenPypeMongoConnection, ) +from . import Terminal + try: import log4mongo from log4mongo.handlers import MongoHandler @@ -41,13 +42,13 @@ except ImportError: USE_UNICODE = hasattr(__builtins__, "unicode") -class PypeStreamHandler(logging.StreamHandler): +class LogStreamHandler(logging.StreamHandler): """ StreamHandler class designed to handle utf errors in python 2.x hosts. 
""" def __init__(self, stream=None): - super(PypeStreamHandler, self).__init__(stream) + super(LogStreamHandler, self).__init__(stream) self.enabled = True def enable(self): @@ -56,7 +57,6 @@ class PypeStreamHandler(logging.StreamHandler): Used to silence output """ self.enabled = True - pass def disable(self): """ Disable StreamHandler @@ -107,13 +107,13 @@ class PypeStreamHandler(logging.StreamHandler): self.handleError(record) -class PypeFormatter(logging.Formatter): +class LogFormatter(logging.Formatter): DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ]' default_formatter = logging.Formatter(DFT) def __init__(self, formats): - super(PypeFormatter, self).__init__() + super(LogFormatter, self).__init__() self.formatters = {} for loglevel in formats: self.formatters[loglevel] = logging.Formatter(formats[loglevel]) @@ -141,7 +141,7 @@ class PypeFormatter(logging.Formatter): return out -class PypeMongoFormatter(logging.Formatter): +class MongoFormatter(logging.Formatter): DEFAULT_PROPERTIES = logging.LogRecord( '', '', '', '', '', '', '', '').__dict__.keys() @@ -161,7 +161,7 @@ class PypeMongoFormatter(logging.Formatter): 'method': record.funcName, 'lineNumber': record.lineno } - document.update(PypeLogger.get_process_data()) + document.update(Logger.get_process_data()) # Standard document decorated with exception info if record.exc_info is not None: @@ -181,7 +181,7 @@ class PypeMongoFormatter(logging.Formatter): return document -class PypeLogger: +class Logger: DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] ' DBG = " - { %(name)s }: [ %(message)s ] " INF = ">>> [ %(message)s ] " @@ -239,7 +239,7 @@ class PypeLogger: for handler in logger.handlers: if isinstance(handler, MongoHandler): add_mongo_handler = False - elif isinstance(handler, PypeStreamHandler): + elif isinstance(handler, LogStreamHandler): add_console_handler = False if add_console_handler: @@ -292,7 +292,7 @@ class PypeLogger: "username": components["username"], "password": components["password"], "capped": True, - "formatter": PypeMongoFormatter() + "formatter": MongoFormatter() } if components["port"] is not None: kwargs["port"] = int(components["port"]) @@ -303,10 +303,10 @@ class PypeLogger: @classmethod def _get_console_handler(cls): - formatter = PypeFormatter(cls.FORMAT_FILE) - console_handler = PypeStreamHandler() + formatter = LogFormatter(cls.FORMAT_FILE) + console_handler = LogStreamHandler() - console_handler.set_name("PypeStreamHandler") + console_handler.set_name("LogStreamHandler") console_handler.setFormatter(formatter) return console_handler @@ -417,9 +417,9 @@ class PypeLogger: def get_process_name(cls): """Process name that is like "label" of a process. - Pype's logging can be used from pype itseld of from hosts. Even in Pype - it's good to know if logs are from Pype tray or from pype's event - server. This should help to identify that information. + OpenPype's logging can be used from OpenPyppe itself of from hosts. + Even in OpenPype process it's good to know if logs are from tray or + from other cli commands. This should help to identify that information. """ if cls._process_name is not None: return cls._process_name @@ -485,23 +485,13 @@ class PypeLogger: return OpenPypeMongoConnection.get_mongo_client() -def timeit(method): - """Print time in function. - - For debugging. 
- - """ - log = logging.getLogger() - - def timed(*args, **kw): - ts = time.time() - result = method(*args, **kw) - te = time.time() - if 'log_time' in kw: - name = kw.get('log_name', method.__name__.upper()) - kw['log_time'][name] = int((te - ts) * 1000) - else: - log.debug('%r %2.2f ms' % (method.__name__, (te - ts) * 1000)) - print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000)) - return result - return timed +class PypeLogger(Logger): + @classmethod + def get_logger(cls, *args, **kwargs): + logger = Logger.get_logger(*args, **kwargs) + # TODO uncomment when replaced most of places + # logger.warning(( + # "'openpype.lib.PypeLogger' is deprecated class." + # " Please use 'openpype.lib.Logger' instead." + # )) + return logger diff --git a/openpype/lib/mongo.py b/openpype/lib/mongo.py index c08e76c75c..bb2ee6016a 100644 --- a/openpype/lib/mongo.py +++ b/openpype/lib/mongo.py @@ -1,206 +1,61 @@ -import os -import sys -import time -import logging -import pymongo -import certifi - -if sys.version_info[0] == 2: - from urlparse import urlparse, parse_qs -else: - from urllib.parse import urlparse, parse_qs +import warnings +import functools +from openpype.client.mongo import ( + MongoEnvNotSet, + OpenPypeMongoConnection, +) -class MongoEnvNotSet(Exception): +class MongoDeprecatedWarning(DeprecationWarning): pass -def _decompose_url(url): - """Decompose mongo url to basic components. +def mongo_deprecated(func): + """Mark functions as deprecated. - Used for creation of MongoHandler which expect mongo url components as - separated kwargs. Components are at the end not used as we're setting - connection directly this is just a dumb components for MongoHandler - validation pass. + It will result in a warning being emitted when the function is used. """ - # Use first url from passed url - # - this is because it is possible to pass multiple urls for multiple - # replica sets which would crash on urlparse otherwise - # - please don't use comma in username of password - url = url.split(",")[0] - components = { - "scheme": None, - "host": None, - "port": None, - "username": None, - "password": None, - "auth_db": None - } - result = urlparse(url) - if result.scheme is None: - _url = "mongodb://{}".format(url) - result = urlparse(_url) - - components["scheme"] = result.scheme - components["host"] = result.hostname - try: - components["port"] = result.port - except ValueError: - raise RuntimeError("invalid port specified") - components["username"] = result.username - components["password"] = result.password - - try: - components["auth_db"] = parse_qs(result.query)['authSource'][0] - except KeyError: - # no auth db provided, mongo will use the one we are connecting to - pass - - return components - - -def get_default_components(): - mongo_url = os.environ.get("OPENPYPE_MONGO") - if mongo_url is None: - raise MongoEnvNotSet( - "URL for Mongo logging connection is not set." + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", MongoDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'." + " Function was moved to 'openpype.client.mongo'." 
+ ).format(func.__name__), + category=MongoDeprecatedWarning, + stacklevel=2 ) - return _decompose_url(mongo_url) + return func(*args, **kwargs) + return new_func +@mongo_deprecated +def get_default_components(): + from openpype.client.mongo import get_default_components + + return get_default_components() + + +@mongo_deprecated def should_add_certificate_path_to_mongo_url(mongo_url): - """Check if should add ca certificate to mongo url. + from openpype.client.mongo import should_add_certificate_path_to_mongo_url - Since 30.9.2021 cloud mongo requires newer certificates that are not - available on most of workstation. This adds path to certifi certificate - which is valid for it. To add the certificate path url must have scheme - 'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query. - """ - parsed = urlparse(mongo_url) - query = parse_qs(parsed.query) - lowered_query_keys = set(key.lower() for key in query.keys()) - add_certificate = False - # Check if url 'ssl' or 'tls' are set to 'true' - for key in ("ssl", "tls"): - if key in query and "true" in query["ssl"]: - add_certificate = True - break - - # Check if url contains 'mongodb+srv' - if not add_certificate and parsed.scheme == "mongodb+srv": - add_certificate = True - - # Check if url does already contain certificate path - if add_certificate and "tlscafile" in lowered_query_keys: - add_certificate = False - - return add_certificate + return should_add_certificate_path_to_mongo_url(mongo_url) +@mongo_deprecated def validate_mongo_connection(mongo_uri): - """Check if provided mongodb URL is valid. + from openpype.client.mongo import validate_mongo_connection - Args: - mongo_uri (str): URL to validate. - - Raises: - ValueError: When port in mongo uri is not valid. - pymongo.errors.InvalidURI: If passed mongo is invalid. - pymongo.errors.ServerSelectionTimeoutError: If connection timeout - passed so probably couldn't connect to mongo server. - - """ - client = OpenPypeMongoConnection.create_connection( - mongo_uri, retry_attempts=1 - ) - client.close() + return validate_mongo_connection(mongo_uri) -class OpenPypeMongoConnection: - """Singleton MongoDB connection. - - Keeps MongoDB connections by url. 
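
Each wrapper here keeps the public name alive in openpype.lib.mongo while the implementation lives in openpype.client.mongo; a call emits MongoDeprecatedWarning and then delegates. A small usage sketch of the observable behaviour (assumes OPENPYPE_MONGO is set, otherwise the delegated call raises MongoEnvNotSet):

import warnings

from openpype.lib.mongo import get_default_components  # deprecated alias

with warnings.catch_warnings(record=True) as caught:
    # the wrapper warns, then returns the real result
    components = get_default_components()

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
# keys as produced by the relocated '_decompose_url' helper
print(components["scheme"], components["host"])
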
- """ - mongo_clients = {} - log = logging.getLogger("OpenPypeMongoConnection") - - @staticmethod - def get_default_mongo_url(): - return os.environ["OPENPYPE_MONGO"] - - @classmethod - def get_mongo_client(cls, mongo_url=None): - if mongo_url is None: - mongo_url = cls.get_default_mongo_url() - - connection = cls.mongo_clients.get(mongo_url) - if connection: - # Naive validation of existing connection - try: - connection.server_info() - with connection.start_session(): - pass - except Exception: - connection = None - - if not connection: - cls.log.debug("Creating mongo connection to {}".format(mongo_url)) - connection = cls.create_connection(mongo_url) - cls.mongo_clients[mongo_url] = connection - - return connection - - @classmethod - def create_connection(cls, mongo_url, timeout=None, retry_attempts=None): - parsed = urlparse(mongo_url) - # Force validation of scheme - if parsed.scheme not in ["mongodb", "mongodb+srv"]: - raise pymongo.errors.InvalidURI(( - "Invalid URI scheme:" - " URI must begin with 'mongodb://' or 'mongodb+srv://'" - )) - - if timeout is None: - timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000) - - kwargs = { - "serverSelectionTimeoutMS": timeout - } - if should_add_certificate_path_to_mongo_url(mongo_url): - kwargs["ssl_ca_certs"] = certifi.where() - - mongo_client = pymongo.MongoClient(mongo_url, **kwargs) - - if retry_attempts is None: - retry_attempts = 3 - - elif not retry_attempts: - retry_attempts = 1 - - last_exc = None - valid = False - t1 = time.time() - for attempt in range(1, retry_attempts + 1): - try: - mongo_client.server_info() - with mongo_client.start_session(): - pass - valid = True - break - - except Exception as exc: - last_exc = exc - if attempt < retry_attempts: - cls.log.warning( - "Attempt {} failed. Retrying... ".format(attempt) - ) - time.sleep(1) - - if not valid: - raise last_exc - - cls.log.info("Connected to {}, delay {:.3f}s".format( - mongo_url, time.time() - t1 - )) - return mongo_client +__all__ = ( + "MongoEnvNotSet", + "OpenPypeMongoConnection", + "get_default_components", + "should_add_certificate_path_to_mongo_url", + "validate_mongo_connection", +) diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index 5c40aa4549..e4b18ec258 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -211,15 +211,28 @@ class StringTemplate(object): if counted_symb > -1: parts = tmp_parts.pop(counted_symb) counted_symb -= 1 + # If part contains only single string keep value + # unchanged if parts: # Remove optional start char parts.pop(0) - if counted_symb < 0: - out_parts = new_parts - else: - out_parts = tmp_parts[counted_symb] - # Store temp parts - out_parts.append(OptionalPart(parts)) + + if not parts: + value = "<>" + elif ( + len(parts) == 1 + and isinstance(parts[0], six.string_types) + ): + value = "<{}>".format(parts[0]) + else: + value = OptionalPart(parts) + + if counted_symb < 0: + out_parts = new_parts + else: + out_parts = tmp_parts[counted_symb] + # Store value + out_parts.append(value) continue if counted_symb < 0: @@ -409,6 +422,19 @@ class TemplateResult(str): self.invalid_types ) + def normalized(self): + """Convert to normalized path.""" + + cls = self.__class__ + return cls( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + class TemplatesResultDict(dict): """Holds and wrap TemplateResults for easy bug report.""" @@ -780,6 +806,7 @@ class OptionalPart: parts(list): Parts of template. 
Can contain 'str', 'OptionalPart' or 'FormattingPart'. """ + def __init__(self, parts): self._parts = parts diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 1d3c1eec6b..060db94ae0 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- """Avalon/Pyblish plugin tools.""" import os -import inspect import logging import re import json +import warnings +import functools + from openpype.client import get_asset_by_id from openpype.settings import get_project_settings @@ -17,6 +19,51 @@ log = logging.getLogger(__name__) DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" +class PluginToolsDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", PluginToolsDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=PluginToolsDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + class TaskNotSetError(KeyError): def __init__(self, msg=None): if not msg: @@ -197,6 +244,7 @@ def prepare_template_data(fill_pairs): return fill_data +@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins") def filter_pyblish_plugins(plugins): """Filter pyblish plugins by presets. @@ -206,57 +254,14 @@ def filter_pyblish_plugins(plugins): Args: plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` `discover()` method. 
- """ - from pyblish import api - host = api.current_host() + from openpype.pipeline.publish.lib import filter_pyblish_plugins - presets = get_project_settings(os.environ['AVALON_PROJECT']) or {} - # skip if there are no presets to process - if not presets: - return - - # iterate over plugins - for plugin in plugins[:]: - - try: - config_data = presets[host]["publish"][plugin.__name__] - except KeyError: - # host determined from path - file = os.path.normpath(inspect.getsourcefile(plugin)) - file = os.path.normpath(file) - - split_path = file.split(os.path.sep) - if len(split_path) < 4: - log.warning( - 'plugin path too short to extract host {}'.format(file) - ) - continue - - host_from_file = split_path[-4] - plugin_kind = split_path[-2] - - # TODO: change after all plugins are moved one level up - if host_from_file == "openpype": - host_from_file = "global" - - try: - config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501 - except KeyError: - continue - - for option, value in config_data.items(): - if option == "enabled" and value is False: - log.info('removing plugin {}'.format(plugin.__name__)) - plugins.remove(plugin) - else: - log.info('setting {}:{} on plugin {}'.format( - option, value, plugin.__name__)) - - setattr(plugin, option, value) + filter_pyblish_plugins(plugins) +@deprecated def set_plugin_attributes_from_settings( plugins, superclass, host_name=None, project_name=None ): @@ -273,6 +278,8 @@ def set_plugin_attributes_from_settings( project_name (str): Name of project for which settings will be loaded. Value from environment `AVALON_PROJECT` is used if not entered. """ + + # Function is not used anymore from openpype.pipeline import LegacyCreator, LoaderPlugin # determine host application to use for finding presets diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index f0188e6765..ff2f1d4b88 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -53,7 +53,7 @@ def pack_project(project_name, destination_dir=None): Args: project_name(str): Project that should be packaged. - destination_dir(str): Optinal path where zip will be stored. Project's + destination_dir(str): Optional path where zip will be stored. Project's root is used if not passed. """ print("Creating package of project \"{}\"".format(project_name)) diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index d7884d0200..b4b05c053b 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -7,8 +7,10 @@ from bson.objectid import ObjectId import pyblish.util import pyblish.api -from openpype.lib.mongo import OpenPypeMongoConnection +from openpype.client.mongo import OpenPypeMongoConnection from openpype.lib.plugin_tools import parse_json +from openpype.lib.profiles_filtering import filter_profiles +from openpype.api import get_project_settings ERROR_STATUS = "error" IN_PROGRESS_STATUS = "in_progress" @@ -175,14 +177,12 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): ) -def fail_batch(_id, batches_in_progress, dbcon): - """Set current batch as failed as there are some stuck batches.""" - running_batches = [str(batch["_id"]) - for batch in batches_in_progress - if batch["_id"] != _id] - msg = "There are still running batches {}\n". \ - format("\n".join(running_batches)) - msg += "Ask admin to check them and reprocess current batch" +def fail_batch(_id, dbcon, msg): + """Set current batch as failed as there is some problem. 
+ + Raises: + ValueError + """ dbcon.update_one( {"_id": _id}, {"$set": @@ -259,3 +259,19 @@ def get_task_data(batch_dir): "Cannot parse batch meta in {} folder".format(task_data)) return task_data + + +def get_timeout(project_name, host_name, task_type): + """Returns timeout(seconds) from Setting profile.""" + filter_data = { + "task_types": task_type, + "hosts": host_name + } + timeout_profiles = (get_project_settings(project_name)["webpublisher"] + ["timeout_profiles"]) + matching_item = filter_profiles(timeout_profiles, filter_data) + timeout = 3600 + if matching_item: + timeout = matching_item["timeout"] + + return timeout diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index ee9a0f08de..60d5d3ed4a 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -938,3 +938,40 @@ def convert_ffprobe_fps_value(str_value): fps = int(fps) return str(fps) + + +def convert_ffprobe_fps_to_float(value): + """Convert string value of frame rate to float. + + Copy of 'convert_ffprobe_fps_value' which raises exceptions on invalid + value, does not convert value to string and does not return "Unknown" + string. + + Args: + value (str): Value to be converted. + + Returns: + Float: Converted frame rate in float. If divisor in value is '0' then + '0.0' is returned. + + Raises: + ValueError: Passed value is invalid for conversion. + """ + + if not value: + raise ValueError("Got empty value.") + + items = value.split("/") + if len(items) == 1: + return float(items[0]) + + if len(items) > 2: + raise ValueError(( + "FPS expression contains multiple dividers \"{}\"." + ).format(value)) + + dividend = float(items.pop(0)) + divisor = float(items.pop(0)) + if divisor == 0.0: + return 0.0 + return dividend / divisor diff --git a/openpype/modules/base.py b/openpype/modules/base.py index b9ccec13cc..1bd343fd07 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -49,6 +49,7 @@ class _ModuleClass(object): Object of this class can be stored to `sys.modules` and used for storing dynamically imported modules. 
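
The conversion rules of convert_ffprobe_fps_to_float above can be pinned down with a few checks (assuming the function is importable from openpype.lib.transcoding once the patch is applied):

from openpype.lib.transcoding import convert_ffprobe_fps_to_float

assert convert_ffprobe_fps_to_float("25") == 25.0
assert abs(convert_ffprobe_fps_to_float("24000/1001") - 23.976) < 1e-3
assert convert_ffprobe_fps_to_float("30/0") == 0.0  # zero divisor
for invalid in ("", "1/2/3"):
    try:
        convert_ffprobe_fps_to_float(invalid)
    except ValueError:
        pass  # empty values and multiple dividers are rejected
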
""" + def __init__(self, name): # Call setattr on super class super(_ModuleClass, self).__setattr__("name", name) @@ -116,12 +117,13 @@ class _InterfacesClass(_ModuleClass): - this is because interfaces must be available even if are missing implementation """ + def __getattr__(self, attr_name): if attr_name not in self.__attributes__: if attr_name in ("__path__", "__file__"): return None - raise ImportError(( + raise AttributeError(( "cannot import name '{}' from 'openpype_interfaces'" ).format(attr_name)) diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index de8df3dd9e..c55f85c8da 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -80,7 +80,8 @@ class AfterEffectsSubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index a1ee5e0957..3f9c09b592 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -274,7 +274,8 @@ class HarmonySubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index f834ae7e92..95856137e2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -55,7 +55,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): scenename = os.path.basename(scene) # Get project code - project = legacy_io.find_one({"type": "project"}) + project = context.data["projectEntity"] code = project["data"].get("code", project["name"]) job_name = "{scene} [PUBLISH]".format(scene=scenename) @@ -130,6 +130,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. "houdini17.5;pluginx2.3" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index aca88c7440..beda753723 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -101,6 +101,7 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. 
"maya2018;vray4.x;yeti3.1.9" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 3707c5709f..13dfc0183a 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -413,8 +413,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Gather needed data ------------------------------------------------ default_render_file = instance.context.data.get('project_settings')\ .get('maya')\ - .get('create')\ - .get('CreateRender')\ + .get('RenderSettings')\ .get('default_render_image_folder') filename = os.path.basename(filepath) comment = context.data.get("comment", "") @@ -519,12 +518,14 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", + "OPENPYPE_SG_USER", "AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index 57572fcb24..38ae5d2f7f 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -5,7 +5,6 @@ from maya import cmds from openpype.pipeline import legacy_io, PublishXmlValidationError from openpype.settings import get_project_settings -import openpype.api import pyblish.api @@ -34,7 +33,9 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): targets = ["local"] def process(self, instance): - settings = get_project_settings(os.getenv("AVALON_PROJECT")) + project_name = instance.context.data["projectName"] + # TODO settings can be received from 'context.data["project_settings"]' + settings = get_project_settings(project_name) # use setting for publish job on farm, no reason to have it separately deadline_publish_job_sett = (settings["deadline"] ["publish"] @@ -53,9 +54,6 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): scene = instance.context.data["currentFile"] scenename = os.path.basename(scene) - # Get project code - project_name = legacy_io.Session["AVALON_PROJECT"] - job_name = "{scene} [PUBLISH]".format(scene=scenename) batch_name = "{code} - {scene}".format(code=project_name, scene=scenename) @@ -102,13 +100,14 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", - "FTRACK_SERVER" + "FTRACK_SERVER", + "OPENPYPE_VERSION" ] environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) - # TODO replace legacy_io with context.data ? 
- environment["AVALON_PROJECT"] = legacy_io.Session["AVALON_PROJECT"] + # TODO replace legacy_io with context.data + environment["AVALON_PROJECT"] = project_name environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 93fb511a34..336a56ec45 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -80,10 +80,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Using published scene for render {}".format(script_path) ) - # exception for slate workflow - if "slate" in instance.data["families"]: - submit_frame_start -= 1 - response = self.payload_submit( instance, script_path, @@ -99,10 +95,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["publishJobState"] = "Suspended" if instance.data.get("bakingNukeScripts"): - # exception for slate workflow - if "slate" in instance.data["families"]: - submit_frame_start += 1 - for baking_script in instance.data["bakingNukeScripts"]: render_path = baking_script["bakeRenderPath"] script_path = baking_script["bakeScriptPath"] @@ -261,7 +253,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "PYBLISHPLUGINPATH", "NUKE_PATH", "TOOL_ENV", - "FOUNDRY_LICENSE" + "FOUNDRY_LICENSE", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): @@ -365,7 +358,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): if not instance.data.get("expectedFiles"): instance.data["expectedFiles"] = [] - dir = os.path.dirname(path) + dirname = os.path.dirname(path) file = os.path.basename(path) if "#" in file: @@ -377,9 +370,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["expectedFiles"].append(path) return + if instance.data.get("slate"): + start_frame -= 1 + for i in range(start_frame, (end_frame + 1)): instance.data["expectedFiles"].append( - os.path.join(dir, (file % i)).replace("\\", "/")) + os.path.join(dirname, (file % i)).replace("\\", "/")) def get_limit_groups(self): """Search for limit group nodes and return group name. diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 9dd1428a63..379953c9e4 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -10,7 +10,10 @@ import clique import pyblish.api -import openpype.api +from openpype.client import ( + get_last_version_by_subset_name, + get_representations, +) from openpype.pipeline import ( get_representation_path, legacy_io, @@ -18,15 +21,23 @@ from openpype.pipeline import ( from openpype.pipeline.farm.patterning import match_aov_pattern -def get_resources(version, extension=None): +def get_resources(project_name, version, extension=None): """Get the files from the specific version.""" - query = {"type": "representation", "parent": version["_id"]} + + # TODO this functions seems to be weird + # - it's looking for representation with one extension or first (any) + # representation from a version? 
+ # - not sure how this should work, maybe it does for specific use cases + # but probably can't be used for all resources from 2D workflows + extensions = None if extension: - query["name"] = extension - - representation = legacy_io.find_one(query) - assert representation, "This is a bug" + extensions = [extension] + repre_docs = list(get_representations( + project_name, version_ids=[version["_id"]], extensions=extensions + )) + assert repre_docs, "This is a bug" + representation = repre_docs[0] directory = get_representation_path(representation) print("Source: ", directory) resources = sorted( @@ -130,7 +141,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_USERNAME", "OPENPYPE_RENDER_JOB", "OPENPYPE_PUBLISH_JOB", - "OPENPYPE_MONGO" + "OPENPYPE_MONGO", + "OPENPYPE_VERSION" ] # custom deadline attributes @@ -147,7 +159,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # mapping of instance properties to be transfered to new instance for every # specified family instance_transfer = { - "slate": ["slateFrames"], + "slate": ["slateFrames", "slate"], "review": ["lutPath"], "render2d": ["bakingNukeScripts", "version"], "renderlayer": ["convertToScanline"] @@ -330,13 +342,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.log.info("Preparing to copy ...") start = instance.data.get("frameStart") end = instance.data.get("frameEnd") + project_name = legacy_io.active_project() # get latest version of subset # this will stop if subset wasn't published yet - version = openpype.api.get_latest_version(instance.data.get("asset"), - instance.data.get("subset")) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + instance.data.get("subset"), + asset_name=instance.data.get("asset") + ) + # get its files based on extension - subset_resources = get_resources(version, representation.get("ext")) + subset_resources = get_resources( + project_name, version, representation.get("ext") + ) r_col, _ = clique.assemble(subset_resources) # if override remove all frames we are expecting to be rendered @@ -566,11 +586,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues on farm." 
).format(staging)) + frame_start = int(instance.get("frameStartHandle")) + if instance.get("slate"): + frame_start -= 1 + rep = { "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(collection)], - "frameStart": int(instance.get("frameStartHandle")), + "frameStart": frame_start, "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, @@ -1013,9 +1037,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): prev_start = None prev_end = None - version = openpype.api.get_latest_version(asset_name=asset, - subset_name=subset - ) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) # Set prev start / end frames for comparison if not prev_start and not prev_end: @@ -1060,7 +1087,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): based on 'publish' template """ if not version: - version = openpype.api.get_latest_version(asset, subset) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) if version: version = int(version["name"]) + 1 else: diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index bcd853f374..61b95cf06d 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -6,13 +6,52 @@ import subprocess import json import platform import uuid -from Deadline.Scripting import RepositoryUtils, FileUtils +import re +from Deadline.Scripting import RepositoryUtils, FileUtils, DirectoryUtils + + +def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. + + """ + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(" ! 
path is not a build: {}".format(path))
+        return None
+
+    version = {}
+    with open(version_file, "r") as vf:
+        exec(vf.read(), version)
+
+    version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"])
+    return version_match[1]


 def get_openpype_executable():
     """Return OpenPype Executable from Event Plug-in Settings"""
     config = RepositoryUtils.GetPluginConfig("OpenPype")
-    return config.GetConfigEntryWithDefault("OpenPypeExecutable", "")
+    exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "")
+    dir_list = config.GetConfigEntryWithDefault(
+        "OpenPypeInstallationDirs", "")
+    return exe_list, dir_list


 def inject_openpype_environment(deadlinePlugin):
@@ -25,16 +64,94 @@ def inject_openpype_environment(deadlinePlugin):
     print(">>> Injecting OpenPype environments ...")
     try:
         print(">>> Getting OpenPype executable ...")
-        exe_list = get_openpype_executable()
-        openpype_app = FileUtils.SearchFileList(exe_list)
-        if openpype_app == "":
+        exe_list, dir_list = get_openpype_executable()
+        openpype_versions = []
+        # if the job requires specific OpenPype version,
+        # lets go over all available and find compatible build.
+        requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION")
+        if requested_version:
+            print((
+                ">>> Scanning for compatible requested version {}"
+            ).format(requested_version))
+            install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
+            if install_dir:
+                print("--- Looking for OpenPype at: {}".format(install_dir))
+                sub_dirs = [
+                    f.path for f in os.scandir(install_dir)
+                    if f.is_dir()
+                ]
+                for subdir in sub_dirs:
+                    version = get_openpype_version_from_path(subdir)
+                    if not version:
+                        continue
+                    print("  - found: {} - {}".format(version, subdir))
+                    openpype_versions.append((version, subdir))
+
+        exe = FileUtils.SearchFileList(exe_list)
+        if openpype_versions:
+            # if looking for requested compatible version,
+            # add the implicitly specified to the list too.
+            print("Looking for OpenPype at: {}".format(os.path.dirname(exe)))
+            version = get_openpype_version_from_path(
+                os.path.dirname(exe))
+            if version:
+                print("  - found: {} - {}".format(
+                    version, os.path.dirname(exe)
+                ))
+                openpype_versions.append((version, os.path.dirname(exe)))
+
+        if requested_version:
+            # sort detected versions
+            if openpype_versions:
+                # use natural sorting
+                openpype_versions.sort(
+                    key=lambda ver: [
+                        int(t) if t.isdigit() else t.lower()
+                        for t in re.split(r"(\d+)", ver[0])
+                    ])
+                print((
+                    "*** Latest available version found is {}"
+                ).format(openpype_versions[-1][0]))
+            requested_major, requested_minor, _ = requested_version.split(".")[:3]  # noqa: E501
+            compatible_versions = []
+            for version in openpype_versions:
+                v = version[0].split(".")[:3]
+                if v[0] == requested_major and v[1] == requested_minor:
+                    compatible_versions.append(version)
+            if not compatible_versions:
+                raise RuntimeError(
+                    ("Cannot find compatible version available "
                     "for version {} requested by the job. "
                     "Please add it through plugin configuration "
                     "in Deadline or install it to configured "
                     "directory.").format(requested_version))
+            # sort compatible versions and pick the last one
+            compatible_versions.sort(
+                key=lambda ver: [
+                    int(t) if t.isdigit() else t.lower()
+                    for t in re.split(r"(\d+)", ver[0])
+                ])
+            print((
+                "*** Latest compatible version found is {}"
+            ).format(compatible_versions[-1][0]))
+            # create list of executables for different platform and let
+            # Deadline decide.
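
The version matching above pairs a natural sort with a major.minor compatibility filter; a self-contained sketch of both pieces:

import re

def version_sort_key(version_str):
    """Natural sort key as used above, so '3.12.10' orders after '3.12.9'."""
    return [
        int(part) if part.isdigit() else part.lower()
        for part in re.split(r"(\d+)", version_str)
    ]

found = ["3.12.9", "3.13.0", "3.12.10"]
requested = "3.12.5"

# only builds sharing the requested major.minor are compatible
major, minor = requested.split(".")[:2]
compatible = [v for v in found if v.split(".")[:2] == [major, minor]]
compatible.sort(key=version_sort_key)
print(compatible[-1])  # 3.12.10, the latest compatible build
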
+ exe_list = [ + os.path.join( + compatible_versions[-1][1], "openpype_console.exe"), + os.path.join( + compatible_versions[-1][1], "openpype_console") + ] + exe = FileUtils.SearchFileList(";".join(exe_list)) + if exe == "": raise RuntimeError( "OpenPype executable was not found " + - "in the semicolon separated list \"" + exe_list + "\". " + + "in the semicolon separated list " + + "\"" + ";".join(exe_list) + "\". " + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") - print("--- OpenPype executable: {}".format(openpype_app)) + print("--- OpenPype executable: {}".format(exe)) # tempfile.TemporaryFile cannot be used because of locking temp_file_name = "{}_{}.json".format( @@ -45,7 +162,7 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Temporary path: {}".format(export_url)) args = [ - openpype_app, + exe, "--headless", 'extractenvironments', export_url @@ -75,9 +192,9 @@ def inject_openpype_environment(deadlinePlugin): env["OPENPYPE_HEADLESS_MODE"] = "1" env["AVALON_TIMEOUT"] = "5000" - print(">>> Executing: {}".format(args)) + print(">>> Executing: {}".format(" ".join(args))) std_output = subprocess.check_output(args, - cwd=os.path.dirname(openpype_app), + cwd=os.path.dirname(exe), env=env) print(">>> Process result {}".format(std_output)) @@ -122,78 +239,6 @@ def inject_render_job_id(deadlinePlugin): print(">>> Injection end.") -def pype_command_line(executable, arguments, workingDirectory): - """Remap paths in comand line argument string. - - Using Deadline rempper it will remap all path found in command-line. - - Args: - executable (str): path to executable - arguments (str): arguments passed to executable - workingDirectory (str): working directory path - - Returns: - Tuple(executable, arguments, workingDirectory) - - """ - print("-" * 40) - print("executable: {}".format(executable)) - print("arguments: {}".format(arguments)) - print("workingDirectory: {}".format(workingDirectory)) - print("-" * 40) - print("Remapping arguments ...") - arguments = RepositoryUtils.CheckPathMapping(arguments) - print("* {}".format(arguments)) - print("-" * 40) - return executable, arguments, workingDirectory - - -def pype(deadlinePlugin): - """Remaps `PYPE_METADATA_FILE` and `PYPE_PYTHON_EXE` environment vars. - - `PYPE_METADATA_FILE` is used on farm to point to rendered data. This path - originates on platform from which this job was published. To be able to - publish on different platform, this path needs to be remapped. - - `PYPE_PYTHON_EXE` can be used to specify custom location of python - interpreter to use for Pype. This is remappeda also if present even - though it probably doesn't make much sense. - - Arguments: - deadlinePlugin: Deadline job plugin passed by Deadline - - """ - print(">>> Getting job ...") - job = deadlinePlugin.GetJob() - # PYPE should be here, not OPENPYPE - backward compatibility!! - pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE") - pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE") - print(">>> Having backward compatible env vars {}/{}".format(pype_metadata, - pype_python)) - # test if it is pype publish job. 
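
The extractenvironments round-trip above is worth spelling out as a standalone sketch; 'exe' here is a placeholder for the build resolved by the plugin configuration:

import json
import os
import subprocess
import tempfile
import uuid

exe = "/opt/openpype/openpype_console"  # placeholder path

# Let the executable write the fully resolved environment to a temp JSON
# file, then read it back. The real plugin applies each key through
# deadlinePlugin.SetProcessEnvironmentVariable rather than os.environ.
export_url = os.path.join(
    tempfile.gettempdir(), "{}.json".format(uuid.uuid4().hex))
args = [exe, "--headless", "extractenvironments", export_url]
subprocess.check_output(args, cwd=os.path.dirname(exe))

with open(export_url) as stream:
    for key, value in json.load(stream).items():
        os.environ[key] = value
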
- if pype_metadata: - pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata) - if platform.system().lower() == "linux": - pype_metadata = pype_metadata.replace("\\", "/") - - print("- remapping PYPE_METADATA_FILE: {}".format(pype_metadata)) - job.SetJobEnvironmentKeyValue("PYPE_METADATA_FILE", pype_metadata) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_METADATA_FILE", pype_metadata) - - if pype_python: - pype_python = RepositoryUtils.CheckPathMapping(pype_python) - if platform.system().lower() == "linux": - pype_python = pype_python.replace("\\", "/") - - print("- remapping PYPE_PYTHON_EXE: {}".format(pype_python)) - job.SetJobEnvironmentKeyValue("PYPE_PYTHON_EXE", pype_python) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_PYTHON_EXE", pype_python) - - deadlinePlugin.ModifyCommandLineCallback += pype_command_line - - def __main__(deadlinePlugin): print("*** GlobalJobPreload start ...") print(">>> Getting job ...") @@ -217,5 +262,3 @@ def __main__(deadlinePlugin): inject_render_job_id(deadlinePlugin) elif openpype_render_job == '1' or openpype_remote_job == '1': inject_openpype_environment(deadlinePlugin) - else: - pype(deadlinePlugin) # backward compatibility with Pype2 diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param index 8bd6dce12d..b3ac18e20c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param @@ -7,11 +7,20 @@ Index=0 Default=OpenPype Plugin for Deadline Description=Not configurable +[OpenPypeInstallationDirs] +Type=multilinemultifolder +Label=Directories where OpenPype versions are installed +Category=OpenPype Installation Directories +CategoryOrder=0 +Index=0 +Default=C:\Program Files (x86)\OpenPype +Description=Path or paths to directories where multiple versions of OpenPype might be installed. Enter every such path on separate lines. + [OpenPypeExecutable] Type=multilinemultifilename Label=OpenPype Executable Category=OpenPype Executables -CategoryOrder=0 +CategoryOrder=1 Index=0 Default= Description=The path to the OpenPype executable. Enter alternative paths on separate lines. diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index 451d71fb63..6b0f69d98f 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -1,10 +1,19 @@ +#!/usr/bin/env python3 + from System.IO import Path from System.Text.RegularExpressions import Regex from Deadline.Plugins import PluginType, DeadlinePlugin -from Deadline.Scripting import StringUtils, FileUtils, RepositoryUtils +from Deadline.Scripting import ( + StringUtils, + FileUtils, + DirectoryUtils, + RepositoryUtils +) import re +import os +import platform ###################################################################### @@ -52,13 +61,115 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): self.AddStdoutHandlerCallback( ".*Progress: (\d+)%.*").HandleCallback += self.HandleProgress + @staticmethod + def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. 
+
+        """
+        # fix path for application bundle on macos
+        if platform.system().lower() == "darwin":
+            path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
+
+        version_file = os.path.join(path, "openpype", "version.py")
+        if not os.path.isfile(version_file):
+            return None
+
+        # skip if the version is not build
+        exe = os.path.join(path, "openpype_console.exe")
+        if platform.system().lower() in ["linux", "darwin"]:
+            exe = os.path.join(path, "openpype_console")
+
+        # if only builds are requested
+        if build and not os.path.isfile(exe):  # noqa: E501
+            print(f"  ! path is not a build: {path}")
+            return None
+
+        version = {}
+        with open(version_file, "r") as vf:
+            exec(vf.read(), version)
+
+        version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"])
+        return version_match[1]
+
     def RenderExecutable(self):
-        exeList = self.GetConfigEntry("OpenPypeExecutable")
-        exe = FileUtils.SearchFileList(exeList)
+        job = self.GetJob()
+        openpype_versions = []
+        # if the job requires specific OpenPype version,
+        # lets go over all available and find compatible build.
+        requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION")
+        if requested_version:
+            self.LogInfo((
+                "Scanning for compatible requested "
+                f"version {requested_version}"))
+            dir_list = self.GetConfigEntry("OpenPypeInstallationDirs")
+            install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
+            if install_dir:
+                sub_dirs = [
+                    f.path for f in os.scandir(install_dir)
+                    if f.is_dir()
+                ]
+                for subdir in sub_dirs:
+                    version = self.get_openpype_version_from_path(subdir)
+                    if not version:
+                        continue
+                    openpype_versions.append((version, subdir))
+
+        exe_list = self.GetConfigEntry("OpenPypeExecutable")
+        exe = FileUtils.SearchFileList(exe_list)
+        if openpype_versions:
+            # if looking for requested compatible version,
+            # add the implicitly specified to the list too.
+            version = self.get_openpype_version_from_path(
+                os.path.dirname(exe))
+            if version:
+                openpype_versions.append((version, os.path.dirname(exe)))
+
+        if requested_version:
+            # sort detected versions
+            if openpype_versions:
+                openpype_versions.sort(
+                    key=lambda ver: [
+                        int(t) if t.isdigit() else t.lower()
+                        for t in re.split(r"(\d+)", ver[0])
+                    ])
+            requested_major, requested_minor, _ = requested_version.split(".")[:3]  # noqa: E501
+            compatible_versions = []
+            for version in openpype_versions:
+                v = version[0].split(".")[:3]
+                if v[0] == requested_major and v[1] == requested_minor:
+                    compatible_versions.append(version)
+            if not compatible_versions:
+                self.FailRender(("Cannot find compatible version available "
+                                 "for version {} requested by the job. "
+                                 "Please add it through plugin configuration "
+                                 "in Deadline or install it to configured "
+                                 "directory.").format(requested_version))
+            # sort compatible versions and pick the last one
+            compatible_versions.sort(
+                key=lambda ver: [
+                    int(t) if t.isdigit() else t.lower()
+                    for t in re.split(r"(\d+)", ver[0])
+                ])
+            # create list of executables for different platform and let
+            # Deadline decide.
+            exe_list = [
+                os.path.join(
+                    compatible_versions[-1][1], "openpype_console.exe"),
+                os.path.join(
+                    compatible_versions[-1][1], "openpype_console")
+            ]
+            exe = FileUtils.SearchFileList(";".join(exe_list))
+
         if exe == "":
             self.FailRender(
                 "OpenPype executable was not found " +
-                "in the semicolon separated list \"" + exeList + "\". " +
+                "in the semicolon separated list " +
+                "\"" + ";".join(exe_list) + "\". 
" + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") return exe diff --git a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py index 8a8e86e7b9..21382007a0 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py +++ b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py @@ -6,7 +6,10 @@ import collections import ftrack_api from openpype.lib import get_datetime_data -from openpype.api import get_project_settings +from openpype.settings.lib import ( + get_project_settings, + get_default_project_settings +) from openpype_modules.ftrack.lib import ServerAction @@ -79,6 +82,35 @@ class CreateDailyReviewSessionServerAction(ServerAction): ) return True + def _calculate_next_cycle_delta(self): + studio_default_settings = get_default_project_settings() + action_settings = ( + studio_default_settings + ["ftrack"] + [self.settings_frack_subkey] + [self.settings_key] + ) + cycle_hour_start = action_settings.get("cycle_hour_start") + if not cycle_hour_start: + h = m = s = 0 + else: + h, m, s = cycle_hour_start + + # Create threading timer which will trigger creation of report + # at the 00:00:01 of next day + # - callback will trigger another timer which will have 1 day offset + now = datetime.datetime.now() + # Create object of today morning + expected_next_trigger = datetime.datetime( + now.year, now.month, now.day, h, m, s + ) + if expected_next_trigger > now: + seconds = (expected_next_trigger - now).total_seconds() + else: + expected_next_trigger += self._day_delta + seconds = (expected_next_trigger - now).total_seconds() + return seconds, expected_next_trigger + def register(self, *args, **kwargs): """Override register to be able trigger """ # Register server action as would be normally @@ -86,22 +118,14 @@ class CreateDailyReviewSessionServerAction(ServerAction): *args, **kwargs ) - # Create threading timer which will trigger creation of report - # at the 00:00:01 of next day - # - callback will trigger another timer which will have 1 day offset - now = datetime.datetime.now() - # Create object of today morning - today_morning = datetime.datetime( - now.year, now.month, now.day, 0, 0, 1 - ) - # Add a day delta (to calculate next day date) - next_day_morning = today_morning + self._day_delta - # Calculate first delta in seconds for first threading timer - first_delta = (next_day_morning - now).total_seconds() + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + # Store cycle time which will be used to create next timer - self._last_cyle_time = next_day_morning + self._last_cyle_time = cycle_time # Create timer thread - self._cycle_timer = threading.Timer(first_delta, self._timer_callback) + self._cycle_timer = threading.Timer( + seconds_delta, self._timer_callback + ) self._cycle_timer.start() self._check_review_session() @@ -111,13 +135,12 @@ class CreateDailyReviewSessionServerAction(ServerAction): self._cycle_timer is not None and self._last_cyle_time is not None ): - now = datetime.datetime.now() - while self._last_cyle_time < now: - self._last_cyle_time = self._last_cyle_time + self._day_delta + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + self._last_cyle_time = cycle_time - delay = (self._last_cyle_time - now).total_seconds() - - self._cycle_timer = threading.Timer(delay, self._timer_callback) + self._cycle_timer = 
threading.Timer( + seconds_delta, self._timer_callback + ) self._cycle_timer.start() self._check_review_session() diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py index 0914933de4..dc76920a57 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py @@ -1,10 +1,11 @@ import collections import datetime +import copy import ftrack_api from openpype_modules.ftrack.lib import ( BaseEvent, - query_custom_attributes + query_custom_attributes, ) @@ -124,10 +125,15 @@ class PushFrameValuesToTaskEvent(BaseEvent): # Separate value changes and task parent changes _entities_info = [] + added_entities = [] + added_entity_ids = set() task_parent_changes = [] for entity_info in entities_info: if entity_info["entity_type"].lower() == "task": task_parent_changes.append(entity_info) + elif entity_info.get("action") == "add": + added_entities.append(entity_info) + added_entity_ids.add(entity_info["entityId"]) else: _entities_info.append(entity_info) entities_info = _entities_info @@ -136,6 +142,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data, changed_keys_by_object_id = self.filter_changes( session, event, entities_info, interest_attributes ) + self.interesting_data_for_added( + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ) if not interesting_data and not task_parent_changes: return @@ -151,9 +164,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): # - it is a complex way how to find out if interesting_data: self.process_attribute_changes( - session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ) if task_parent_changes: @@ -163,8 +180,12 @@ class PushFrameValuesToTaskEvent(BaseEvent): ) def process_task_parent_change( - self, session, object_types_by_name, task_parent_changes, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + task_parent_changes, + interest_entity_types, + interest_attributes ): """Push custom attribute values if task parent has changed. @@ -176,6 +197,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): real hierarchical value and non hierarchical custom attribute value should be set to hierarchical value. """ + # Store task ids which were created or moved under parent with entity # type defined in settings (interest_entity_types). 
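
The change batching below records an explicit ftrack operation per attribute value, chosen by whether a previous value exists; roughly, with 'session', 'attr_id', 'entity_id', 'old_value' and 'new_value' assumed from the surrounding handler:

import collections

import ftrack_api
import ftrack_api.symbol

entity_key = collections.OrderedDict((
    ("configuration_id", attr_id),
    ("entity_id", entity_id),
))
old_value_is_set = (
    old_value is not ftrack_api.symbol.NOT_SET
    and old_value is not None
)
op = None
if new_value is None:
    if old_value_is_set:
        # value removed: delete the existing attribute value row
        op = ftrack_api.operation.DeleteEntityOperation(
            "CustomAttributeValue", entity_key)
elif old_value_is_set:
    # row exists: plain update from old to new value
    op = ftrack_api.operation.UpdateEntityOperation(
        "CustomAttributeValue", entity_key, "value", old_value, new_value)
else:
    # no previous value: the row must be created, an update would fail
    op = ftrack_api.operation.CreateEntityOperation(
        "CustomAttributeValue", entity_key, {"value": new_value})

if op is not None:
    session.recorded_operations.push(op)
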
task_ids = set() @@ -380,33 +402,49 @@ class PushFrameValuesToTaskEvent(BaseEvent): uncommited_changes = False for idx, item in enumerate(changes): new_value = item["new_value"] + old_value = item["old_value"] attr_id = item["attr_id"] entity_id = item["entity_id"] attr_key = item["attr_key"] - entity_key = collections.OrderedDict() - entity_key["configuration_id"] = attr_id - entity_key["entity_id"] = entity_id + entity_key = collections.OrderedDict(( + ("configuration_id", attr_id), + ("entity_id", entity_id) + )) self._cached_changes.append({ "attr_key": attr_key, "entity_id": entity_id, "value": new_value, "time": datetime.datetime.now() }) + old_value_is_set = ( + old_value is not ftrack_api.symbol.NOT_SET + and old_value is not None + ) if new_value is None: + if not old_value_is_set: + continue op = ftrack_api.operation.DeleteEntityOperation( "CustomAttributeValue", entity_key ) - else: + + elif old_value_is_set: op = ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + "CustomAttributeValue", entity_key, "value", - ftrack_api.symbol.NOT_SET, + old_value, new_value ) + else: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": new_value} + ) + session.recorded_operations.push(op) self.log.info(( "Changing Custom Attribute \"{}\" to value" @@ -432,9 +470,14 @@ class PushFrameValuesToTaskEvent(BaseEvent): self.log.warning("Changing of values failed.", exc_info=True) def process_attribute_changes( - self, session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ): # Prepare task object id task_object_id = object_types_by_name["task"]["id"] @@ -522,15 +565,26 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id_by_task_id[task_id] = task_entity["parent_id"] self.finalize_attribute_changes( - session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ) def finalize_attribute_changes( - self, session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + self, + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ): attr_id_to_key = {} for attr_confs in attrs_by_obj_id.values(): @@ -550,7 +604,11 @@ class PushFrameValuesToTaskEvent(BaseEvent): attr_ids = set(attr_id_to_key.keys()) current_values_by_id = self.get_current_values( - session, attr_ids, entity_ids, task_entity_ids, hier_attrs + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ) changes = [] @@ -560,14 +618,25 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id = entity_id values = interesting_data[parent_id] + added_entity = entity_id in added_entity_ids for attr_id, old_value in current_values.items(): + if added_entity and attr_id in hier_attrs: + continue + attr_key = attr_id_to_key.get(attr_id) if not attr_key: continue # Convert new value from string new_value = values.get(attr_key) - if new_value is not None and old_value is not None: + new_value_is_valid = ( + old_value is not ftrack_api.symbol.NOT_SET + and new_value is not None + ) + if added_entity and not 
new_value_is_valid: + continue + + if new_value is not None and new_value_is_valid: try: new_value = type(old_value)(new_value) except Exception: @@ -581,6 +650,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): changes.append({ "new_value": new_value, "attr_id": attr_id, + "old_value": old_value, "entity_id": entity_id, "attr_key": attr_key }) @@ -599,6 +669,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data = {} changed_keys_by_object_id = {} + for entity_info in entities_info: # Care only about changes if specific keys entity_changes = {} @@ -644,16 +715,123 @@ class PushFrameValuesToTaskEvent(BaseEvent): return interesting_data, changed_keys_by_object_id + def interesting_data_for_added( + self, + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ): + if not added_entities or not interest_attributes: + return + + object_type_ids = set() + entity_ids = set() + all_entity_ids = set() + object_id_by_entity_id = {} + project_id = None + entity_ids_by_parent_id = collections.defaultdict(set) + for entity_info in added_entities: + object_id = entity_info["objectTypeId"] + entity_id = entity_info["entityId"] + object_type_ids.add(object_id) + entity_ids.add(entity_id) + object_id_by_entity_id[entity_id] = object_id + + for item in entity_info["parents"]: + entity_id = item["entityId"] + all_entity_ids.add(entity_id) + parent_id = item["parentId"] + if not parent_id: + project_id = entity_id + else: + entity_ids_by_parent_id[parent_id].add(entity_id) + + hier_attrs = self.get_hierarchical_configurations( + session, interest_attributes + ) + if not hier_attrs: + return + + hier_attrs_key_by_id = { + attr_conf["id"]: attr_conf["key"] + for attr_conf in hier_attrs + } + default_values_by_key = { + attr_conf["key"]: attr_conf["default"] + for attr_conf in hier_attrs + } + + values = query_custom_attributes( + session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + ) + values_per_entity_id = {} + for entity_id in all_entity_ids: + values_per_entity_id[entity_id] = {} + for attr_name in interest_attributes: + values_per_entity_id[entity_id][attr_name] = None + + for item in values: + entity_id = item["entity_id"] + key = hier_attrs_key_by_id[item["configuration_id"]] + values_per_entity_id[entity_id][key] = item["value"] + + fill_queue = collections.deque() + fill_queue.append((project_id, default_values_by_key)) + while fill_queue: + item = fill_queue.popleft() + entity_id, values_by_key = item + entity_values = values_per_entity_id[entity_id] + new_values_by_key = copy.deepcopy(values_by_key) + for key, value in values_by_key.items(): + current_value = entity_values[key] + if current_value is None: + entity_values[key] = value + else: + new_values_by_key[key] = current_value + + for child_id in entity_ids_by_parent_id[entity_id]: + fill_queue.append((child_id, new_values_by_key)) + + for entity_id in entity_ids: + entity_changes = {} + for key, value in values_per_entity_id[entity_id].items(): + if value is not None: + entity_changes[key] = value + + if not entity_changes: + continue + + interesting_data[entity_id] = entity_changes + object_id = object_id_by_entity_id[entity_id] + if object_id not in changed_keys_by_object_id: + changed_keys_by_object_id[object_id] = set() + changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) + def get_current_values( - self, session, attr_ids, entity_ids, task_entity_ids, hier_attrs + self, + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ): 
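
The fill_queue walk above is a breadth-first propagation of hierarchical attribute defaults: starting from the project, every child inherits a value unless it carries its own, and overrides flow down to its children. A self-contained sketch of that traversal (illustrative names):

import collections
import copy

def fill_inherited_values(project_id, children_by_parent, values, defaults):
    """Breadth-first default fill, mirroring the 'fill_queue' logic above."""
    queue = collections.deque()
    queue.append((project_id, defaults))
    while queue:
        entity_id, inherited = queue.popleft()
        own_values = values.setdefault(entity_id, {})
        passed_down = copy.deepcopy(inherited)
        for key, value in inherited.items():
            if own_values.get(key) is None:
                own_values[key] = value          # inherit from parent
            else:
                passed_down[key] = own_values[key]  # override for children
        for child_id in children_by_parent.get(entity_id, ()):
            queue.append((child_id, passed_down))

values = {"proj": {}, "shot": {"fps": 24}, "task": {}}
fill_inherited_values(
    "proj", {"proj": ["shot"], "shot": ["task"]}, values, {"fps": 25})
print(values)
# {'proj': {'fps': 25}, 'shot': {'fps': 24}, 'task': {'fps': 24}}
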
current_values_by_id = {} if not attr_ids or not entity_ids: return current_values_by_id + for entity_id in entity_ids: + current_values_by_id[entity_id] = {} + for attr_id in attr_ids: + current_values_by_id[entity_id][attr_id] = ( + ftrack_api.symbol.NOT_SET + ) + values = query_custom_attributes( session, attr_ids, entity_ids, True ) + for item in values: entity_id = item["entity_id"] attr_id = item["configuration_id"] @@ -699,6 +877,18 @@ class PushFrameValuesToTaskEvent(BaseEvent): output[obj_id][attr["key"]] = attr["id"] return output, hiearchical + def get_hierarchical_configurations(self, session, interest_attributes): + hier_attr_query = ( + "select id, key, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key in ({}) and is_hierarchical is true" + ) + if not interest_attributes: + return [] + return list(session.query(hier_attr_query.format( + self.join_query_keys(interest_attributes), + )).all()) + def register(session): PushFrameValuesToTaskEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py index ebea8872f9..df914de854 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py @@ -84,6 +84,11 @@ class CreateProjectFolders(BaseAction): create_project_folders(basic_paths, project_name) self.create_ftrack_entities(basic_paths, project_entity) + self.trigger_event( + "openpype.project.structure.created", + {"project_name": project_name} + ) + except Exception as exc: self.log.warning("Creating of structure crashed.", exc_info=True) session.rollback() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index ad82af39a3..eec245070c 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -16,7 +16,7 @@ from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype_modules.ftrack.lib.custom_attributes import ( query_custom_attributes ) -from openpype.lib import config +from openpype.lib.dateutils import get_datetime_data from openpype.lib.delivery import ( path_from_representation, get_format_dict, @@ -555,7 +555,7 @@ class Delivery(BaseAction): format_dict = get_format_dict(anatomy, location_path) - datetime_data = config.get_datetime_data() + datetime_data = get_datetime_data() for repre in repres_to_deliver: source_path = repre.get("data", {}).get("path") debug_msg = "Processing representation {}".format(repre["_id"]) diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index d91649d7ba..fb1cdf340e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -11,13 +11,11 @@ from openpype.client import ( get_project, get_assets, ) -from openpype.settings import get_project_settings -from openpype.lib import ( - get_workfile_template_key, - get_workdir_data, - StringTemplate, -) +from openpype.settings import get_project_settings, get_system_settings +from openpype.lib import StringTemplate from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import 
get_template_data +from openpype.pipeline.workfile import get_workfile_template_key from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks @@ -279,14 +277,19 @@ class FillWorkfileAttributeAction(BaseAction): extension = "{ext}" project_doc = get_project(project_name) project_settings = get_project_settings(project_name) + system_settings = get_system_settings() anatomy = Anatomy(project_name) templates_by_key = {} operations = [] for asset_doc, task_entities in asset_docs_with_task_entities: for task_entity in task_entities: - workfile_data = get_workdir_data( - project_doc, asset_doc, task_entity["name"], host_name + workfile_data = get_template_data( + project_doc, + asset_doc, + task_entity["name"], + host_name, + system_settings ) # Use version 1 for each workfile workfile_data["version"] = 1 @@ -294,7 +297,10 @@ class FillWorkfileAttributeAction(BaseAction): task_type = workfile_data["task"]["type"] template_key = get_workfile_template_key( - task_type, host_name, project_settings=project_settings + task_type, + host_name, + project_name, + project_settings=project_settings ) if template_key in templates_by_key: template = templates_by_key[template_key] diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py index 90ce757242..3ef7c8270a 100644 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/openpype/modules/ftrack/ftrack_server/event_server_cli.py @@ -1,11 +1,9 @@ import os -import sys import signal import datetime import subprocess import socket import json -import platform import getpass import atexit import time @@ -13,12 +11,14 @@ import uuid import ftrack_api import pymongo +from openpype.client.mongo import ( + OpenPypeMongoConnection, + validate_mongo_connection, +) from openpype.lib import ( get_openpype_execute_args, - OpenPypeMongoConnection, get_openpype_version, get_build_version, - validate_mongo_connection ) from openpype_modules.ftrack import FTRACK_MODULE_DIR from openpype_modules.ftrack.lib import credentials diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index 5c6d6352d2..947dacf917 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -7,6 +7,7 @@ import threading import datetime import time import queue +import collections import appdirs import pymongo @@ -24,7 +25,7 @@ except ImportError: from ftrack_api._weakref import WeakMethod from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info -from openpype.lib import OpenPypeMongoConnection +from openpype.client import OpenPypeMongoConnection from openpype.api import Logger TOPIC_STATUS_SERVER = "openpype.event.server.status" @@ -309,7 +310,20 @@ class CustomEventHubSession(ftrack_api.session.Session): # Currently pending operations. 
self.recorded_operations = ftrack_api.operation.Operations() - self.record_operations = True + + # OpenPype change - in the new API, operations are properties + new_api = hasattr(self.__class__, "record_operations") + + if new_api: + self._record_operations = collections.defaultdict( + lambda: True + ) + self._auto_populate = collections.defaultdict( + lambda: auto_populate + ) + else: + self.record_operations = True + self.auto_populate = auto_populate self.cache_key_maker = cache_key_maker if self.cache_key_maker is None: @@ -328,6 +342,9 @@ class CustomEventHubSession(ftrack_api.session.Session): if cache is not None: self.cache.caches.append(cache) + if new_api: + self.merge_lock = threading.RLock() + self._managed_request = None self._request = requests.Session() self._request.auth = ftrack_api.session.SessionAuthentication( @@ -335,8 +352,6 @@ class CustomEventHubSession(ftrack_api.session.Session): ) self.request_timeout = timeout - self.auto_populate = auto_populate - # Fetch server information and in doing so also check credentials. self._server_information = self._fetch_server_information() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index c4f7b1f05d..20a69e060c 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -26,8 +26,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): families = ["ftrack"] def process(self, instance): - session = instance.context.data["ftrackSession"] - context = instance.context component_list = instance.data.get("ftrackComponentsList") if not component_list: self.log.info( @@ -36,8 +34,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): ) return - session = instance.context.data["ftrackSession"] context = instance.context + session = context.data["ftrackSession"] parent_entity = None default_asset_name = None @@ -89,6 +87,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_versions_data_by_id = {} used_asset_versions = [] + # Iterate over components and publish for data in component_list: self.log.debug("data: {}".format(data)) @@ -118,9 +117,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_version_status_ids_by_name ) - # Component - self.create_component(session, asset_version_entity, data) - # Store asset version and components items that were version_id = asset_version_entity["id"] if version_id not in asset_versions_data_by_id: @@ -137,6 +133,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): if asset_version_entity not in used_asset_versions: used_asset_versions.append(asset_version_entity) + self._create_components(session, asset_versions_data_by_id) + instance.data["ftrackIntegratedAssetVersionsData"] = ( asset_versions_data_by_id ) @@ -625,3 +623,40 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session.rollback() session._configure_locations() six.reraise(tp, value, tb) + + def _create_components(self, session, asset_versions_data_by_id): + for item in asset_versions_data_by_id.values(): + asset_version_entity = item["asset_version"] + component_items = item["component_items"] + + component_entities = session.query( + ( + "select id, name from Component where version_id is \"{}\"" + ).format(asset_version_entity["id"]) + ).all() + + existing_component_names = { + component["name"] + for component in component_entities + } + + contain_review = "ftrackreview-mp4" in existing_component_names + 
thumbnail_component_item = None + for component_item in component_items: + component_data = component_item.get("component_data") or {} + component_name = component_data.get("name") + if component_name == "ftrackreview-mp4": + contain_review = True + elif component_name == "ftrackreview-image": + thumbnail_component_item = component_item + + if contain_review and thumbnail_component_item: + thumbnail_component_item["component_data"]["name"] = ( + "thumbnail" + ) + + # Component + for component_item in component_items: + self.create_component( + session, asset_version_entity, component_item + ) diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py index 047fd8462c..8cb2336391 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py @@ -13,7 +13,10 @@ class IntegrateFtrackComponentOverwrite(pyblish.api.InstancePlugin): active = False def process(self, instance): - component_list = instance.data['ftrackComponentsList'] + component_list = instance.data.get('ftrackComponentsList') + if not component_list: + self.log.info("No component to overwrite...") + return for cl in component_list: cl['component_overwrite'] = True diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py index c6a3d47f66..e7c265988e 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py @@ -6,9 +6,11 @@ Requires: """ import sys +import json import six import pyblish.api +from openpype.lib import StringTemplate class IntegrateFtrackDescription(pyblish.api.InstancePlugin): @@ -25,6 +27,10 @@ class IntegrateFtrackDescription(pyblish.api.InstancePlugin): description_template = "{comment}" def process(self, instance): + if not self.description_template: + self.log.info("Skipping. Description template is not set.") + return + # Check if there are any integrated AssetVersion entities asset_versions_key = "ftrackIntegratedAssetVersionsData" asset_versions_data_by_id = instance.data.get(asset_versions_key) @@ -38,39 +44,62 @@ class IntegrateFtrackDescription(pyblish.api.InstancePlugin): else: self.log.debug("Comment is set to `{}`".format(comment)) - session = instance.context.data["ftrackSession"] - intent = instance.context.data.get("intent") - intent_label = None - if intent and isinstance(intent, dict): - intent_val = intent.get("value") - intent_label = intent.get("label") - else: - intent_val = intent + if intent and "{intent}" in self.description_template: + value = intent.get("value") + if value: + intent = intent.get("label") or value - if not intent_label: - intent_label = intent_val or "" + if not intent and not comment: + self.log.info("Skipping. 
Intent and comment are empty.") + return # if intent label is set then format comment # - it is possible that intent_label is equal to "" (empty string) - if intent_label: - self.log.debug( - "Intent label is set to `{}`.".format(intent_label) - ) - + if intent: + self.log.debug("Intent is set to `{}`.".format(intent)) else: self.log.debug("Intent is not set.") + # If we would like to use more "optional" possibilities we would have + # to come up with some expressions in templates or specific templates + # for all 3 possible combinations when comment and intent are + # set or not (when both are not set then description does not + # make sense). + fill_data = {} + if comment: + fill_data["comment"] = comment + if intent: + fill_data["intent"] = intent + + description = StringTemplate.format_template( + self.description_template, fill_data + ) + if not description.solved: + self.log.warning(( + "Couldn't solve template \"{}\" with data {}" + ).format( + self.description_template, json.dumps(fill_data, indent=4) + )) + return + + if not description: + self.log.debug(( + "Skipping. Result of template is empty string." + " Template \"{}\" Fill data: {}" + ).format( + self.description_template, json.dumps(fill_data, indent=4) + )) + return + + session = instance.context.data["ftrackSession"] for asset_version_data in asset_versions_data_by_id.values(): asset_version = asset_version_data["asset_version"] # Backwards compatibility for older settings using # attribute 'note_with_intent_template' - comment = self.description_template.format(**{ - "intent": intent_label, - "comment": comment - }) - asset_version["comment"] = comment + + asset_version["comment"] = description try: session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index c8d9e4117d..a1e5922730 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -3,7 +3,10 @@ import json import copy import pyblish.api -from openpype.lib import get_ffprobe_streams +from openpype.lib.transcoding import ( + get_ffprobe_streams, + convert_ffprobe_fps_to_float, +) from openpype.lib.profiles_filtering import filter_profiles @@ -58,7 +61,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): version_number = int(instance_version) family = instance.data["family"] - family_low = instance.data["family"].lower() + family_low = family.lower() asset_type = instance.data.get("ftrackFamily") if not asset_type and family_low in self.family_mapping: @@ -79,11 +82,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ).format(family)) return - # Prepare FPS - instance_fps = instance.data.get("fps") - if instance_fps is None: - instance_fps = instance.context.data["fps"] - status_name = self._get_asset_version_status_name(instance) # Base of component item data @@ -140,24 +138,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component = None first_thumbnail_component_repre = None for repre in thumbnail_representations: - published_path = repre.get("published_path") - if not published_path: - comp_files = repre["files"] - if isinstance(comp_files, (tuple, list, set)): - filename = comp_files[0] - else: - filename = comp_files - - published_path = os.path.join( - repre["stagingDir"], filename + repre_path = self._get_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + 
"Published path is not set and source was removed." ) - if not os.path.exists(published_path): - continue - repre["published_path"] = published_path + continue # Create copy of base comp item and append it thumbnail_item = copy.deepcopy(base_component_item) - thumbnail_item["component_path"] = repre["published_path"] + thumbnail_item["component_path"] = repre_path thumbnail_item["component_data"] = { "name": "thumbnail" } @@ -176,10 +166,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add item to component list component_list.append(thumbnail_item) - if ( - not review_representations - and first_thumbnail_component is not None - ): + if first_thumbnail_component is not None: width = first_thumbnail_component_repre.get("width") height = first_thumbnail_component_repre.get("height") if not width or not height: @@ -216,6 +203,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): extended_asset_name = "" multiple_reviewable = len(review_representations) > 1 for repre in review_representations: + repre_path = self._get_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published path is not set and source was removed." + ) + continue + # Create copy of base comp item and append it review_item = copy.deepcopy(base_component_item) @@ -254,33 +248,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component[ "asset_data"]["name"] = extended_asset_name - frame_start = repre.get("frameStartFtrack") - frame_end = repre.get("frameEndFtrack") - if frame_start is None or frame_end is None: - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - - # Frame end of uploaded video file should be duration in frames - # - frame start is always 0 - # - frame end is duration in frames - duration = frame_end - frame_start + 1 - - fps = repre.get("fps") - if fps is None: - fps = instance_fps + component_meta = self._prepare_component_metadata( + instance, repre, repre_path, True + ) # Change location - review_item["component_path"] = repre["published_path"] + review_item["component_path"] = repre_path # Change component data review_item["component_data"] = { # Default component name is "main". 
"name": "ftrackreview-mp4", "metadata": { - "ftr_meta": json.dumps({ - "frameIn": 0, - "frameOut": int(duration), - "frameRate": float(fps) - }) + "ftr_meta": json.dumps(component_meta) } } @@ -323,11 +302,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): component_data = copy_src_item["component_data"] component_name = component_data["name"] component_data["name"] = component_name + "_src" + component_meta = self._prepare_component_metadata( + instance, repre, copy_src_item["component_path"], False + ) + if component_meta: + component_data["metadata"] = { + "ftr_meta": json.dumps(component_meta) + } component_list.append(copy_src_item) # Add others representations as component for repre in other_representations: - published_path = repre.get("published_path") + published_path = self._get_repre_path(instance, repre, True) if not published_path: continue # Create copy of base comp item and append it @@ -340,9 +326,17 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ): other_item["asset_data"]["name"] = extended_asset_name - other_item["component_data"] = { + component_meta = self._prepare_component_metadata( + instance, repre, published_path, False + ) + component_data = { "name": repre["name"] } + if component_meta: + component_data["metadata"] = { + "ftr_meta": json.dumps(component_meta) + } + other_item["component_data"] = component_data other_item["component_location_name"] = unmanaged_location_name other_item["component_path"] = published_path component_list.append(other_item) @@ -360,6 +354,51 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): )) instance.data["ftrackComponentsList"] = component_list + def _get_repre_path(self, instance, repre, only_published): + """Get representation path that can be used for integration. + + When 'only_published' is set to true the validation of path is not + relevant. In that case we just need what is set in 'published_path' + as "reference". The reference is not used to get or upload the file but + for reference where the file was published. + + Args: + instance (pyblish.Instance): Processed instance object. Used + for source of staging dir if representation does not have + filled it. + repre (dict): Representation on instance which could be and + could not be integrated with main integrator. + only_published (bool): Care only about published paths and + ignore if filepath is not existing anymore. + + Returns: + str: Path to representation file. + None: Path is not filled or does not exists. 
+ """ + + published_path = repre.get("published_path") + if published_path: + published_path = os.path.normpath(published_path) + if os.path.exists(published_path): + return published_path + + if only_published: + return published_path + + comp_files = repre["files"] + if isinstance(comp_files, (tuple, list, set)): + filename = comp_files[0] + else: + filename = comp_files + + staging_dir = repre.get("stagingDir") + if not staging_dir: + staging_dir = instance.data["stagingDir"] + src_path = os.path.normpath(os.path.join(staging_dir, filename)) + if os.path.exists(src_path): + return src_path + return None + def _get_asset_version_status_name(self, instance): if not self.asset_versions_status_profiles: return None @@ -380,3 +419,107 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): return None return matching_profile["status"] or None + + def _prepare_component_metadata( + self, instance, repre, component_path, is_review + ): + extension = os.path.splitext(component_path)[-1] + streams = [] + try: + streams = get_ffprobe_streams(component_path) + except Exception: + self.log.debug(( + "Failed to retrieve information about intput {}" + ).format(component_path)) + + # Find video streams + video_streams = [ + stream + for stream in streams + if stream["codec_type"] == "video" + ] + # Skip if there are not video streams + # - exr is special case which can have issues with reading through + # ffmpegh but we want to set fps for it + if not video_streams and extension not in [".exr"]: + return {} + + stream_width = None + stream_height = None + stream_fps = None + frame_out = None + for video_stream in video_streams: + tmp_width = video_stream.get("width") + tmp_height = video_stream.get("height") + if tmp_width and tmp_height: + stream_width = tmp_width + stream_height = tmp_height + + input_framerate = video_stream.get("r_frame_rate") + duration = video_stream.get("duration") + if input_framerate is None or duration is None: + continue + try: + stream_fps = convert_ffprobe_fps_to_float( + input_framerate + ) + except ValueError: + self.log.warning(( + "Could not convert ffprobe fps to float \"{}\"" + ).format(input_framerate)) + continue + + stream_width = tmp_width + stream_height = tmp_height + + self.log.debug("FPS from stream is {} and duration is {}".format( + input_framerate, duration + )) + frame_out = float(duration) * stream_fps + break + + # Prepare FPS + instance_fps = instance.data.get("fps") + if instance_fps is None: + instance_fps = instance.context.data["fps"] + + if not is_review: + output = {} + fps = stream_fps or instance_fps + if fps: + output["frameRate"] = fps + + if stream_width and stream_height: + output["width"] = int(stream_width) + output["height"] = int(stream_height) + return output + + frame_start = repre.get("frameStartFtrack") + frame_end = repre.get("frameEndFtrack") + if frame_start is None or frame_end is None: + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + + fps = None + repre_fps = repre.get("fps") + if repre_fps is not None: + repre_fps = float(repre_fps) + + fps = stream_fps or repre_fps or instance_fps + + # Frame end of uploaded video file should be duration in frames + # - frame start is always 0 + # - frame end is duration in frames + if not frame_out: + frame_out = frame_end - frame_start + 1 + + # Ftrack documentation says that it is required to have + # 'width' and 'height' in review component. But with those values + # review video does not play. 
+ component_meta = { + "frameIn": 0, + "frameOut": frame_out, + "frameRate": float(fps) + } + + return component_meta diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py index 952b21546d..ac3fa874e0 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py @@ -9,9 +9,11 @@ Requires: """ import sys +import copy import six import pyblish.api +from openpype.lib import StringTemplate class IntegrateFtrackNote(pyblish.api.InstancePlugin): @@ -53,14 +55,10 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): intent = instance.context.data.get("intent") intent_label = None - if intent and isinstance(intent, dict): - intent_val = intent.get("value") - intent_label = intent.get("label") - else: - intent_val = intent - - if not intent_label: - intent_label = intent_val or "" + if intent: + value = intent["value"] + if value: + intent_label = intent["label"] or value # if intent label is set then format comment # - it is possible that intent_label is equal to "" (empty string) @@ -96,6 +94,14 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): labels.append(label) + base_format_data = { + "host_name": host_name, + "app_name": app_name, + "app_label": app_label, + "source": instance.data.get("source", '') + } + if comment: + base_format_data["comment"] = comment for asset_version_data in asset_versions_data_by_id.values(): asset_version = asset_version_data["asset_version"] component_items = asset_version_data["component_items"] @@ -109,22 +115,31 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): template = self.note_template if template is None: template = self.note_with_intent_template - format_data = { - "intent": intent_label, - "comment": comment, - "host_name": host_name, - "app_name": app_name, - "app_label": app_label, - "published_paths": "
".join(sorted(published_paths)), - } - comment = template.format(**format_data) - if not comment: + format_data = copy.deepcopy(base_format_data) + format_data["published_paths"] = "
".join( + sorted(published_paths) + ) + if intent: + if "{intent}" in template: + format_data["intent"] = intent_label + else: + format_data["intent"] = intent + + note_text = StringTemplate.format_template(template, format_data) + if not note_text.solved: + self.log.warning(( + "Note template require more keys then can be provided." + "\nTemplate: {}\nData: {}" + ).format(template, format_data)) + continue + + if not note_text: self.log.info(( "Note for AssetVersion {} would be empty. Skipping." "\nTemplate: {}\nData: {}" ).format(asset_version["id"], template, format_data)) continue - asset_version.create_note(comment, author=user, labels=labels) + asset_version.create_note(note_text, author=user, labels=labels) try: session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py index 1a5d74bf26..b8855ee2bd 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py @@ -65,7 +65,13 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder - 0.04 label = 'Integrate Hierarchy To Ftrack' families = ["shot"] - hosts = ["hiero", "resolve", "standalonepublisher", "flame"] + hosts = [ + "hiero", + "resolve", + "standalonepublisher", + "flame", + "traypublisher" + ] optional = False def process(self, context): diff --git a/openpype/modules/ftrack/scripts/sub_event_storer.py b/openpype/modules/ftrack/scripts/sub_event_storer.py index 946ecbff79..204cce89e8 100644 --- a/openpype/modules/ftrack/scripts/sub_event_storer.py +++ b/openpype/modules/ftrack/scripts/sub_event_storer.py @@ -6,6 +6,8 @@ import socket import pymongo import ftrack_api + +from openpype.client import OpenPypeMongoConnection from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, @@ -15,7 +17,6 @@ from openpype_modules.ftrack.ftrack_server.lib import ( ) from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info from openpype.lib import ( - OpenPypeMongoConnection, get_openpype_version, get_build_version ) diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py index d28ded06c7..c9e78b59eb 100644 --- a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py @@ -39,10 +39,12 @@ class CollectKitsuEntities(pyblish.api.ContextPlugin): kitsu_entity = gazu.asset.get_asset(zou_asset_data["id"]) if not kitsu_entity: - raise AssertionError(f"{entity_type} not found in kitsu!") + raise AssertionError("{} not found in kitsu!".format(entity_type)) context.data["kitsu_entity"] = kitsu_entity - self.log.debug(f"Collect kitsu {entity_type}: {kitsu_entity}") + self.log.debug( + "Collect kitsu {}: {}".format(entity_type, kitsu_entity) + ) if zou_task_data: kitsu_task = gazu.task.get_task(zou_task_data["id"]) diff --git a/openpype/modules/kitsu/utils/sync_service.py b/openpype/modules/kitsu/utils/sync_service.py index 577050c5af..441b95a7ec 100644 --- a/openpype/modules/kitsu/utils/sync_service.py +++ b/openpype/modules/kitsu/utils/sync_service.py @@ -2,11 +2,17 @@ import os import gazu +from openpype.client import ( + get_project, + get_assets, + get_asset_by_name +) from openpype.pipeline import AvalonMongoDB from .credentials import 
validate_credentials from .update_op_with_zou import ( create_op_asset, set_op_project, + get_kitsu_project_name, write_project_to_op, update_op_assets, ) @@ -119,17 +125,16 @@ class Listener: # Write into DB if update_project: - self.dbcon = self.dbcon.database[project_name] + self.dbcon.Session["AVALON_PROJECT"] = project_name self.dbcon.bulk_write([update_project]) def _delete_project(self, data): """Delete project.""" - project_doc = self.dbcon.find_one( - {"type": "project", "data.zou_id": data["project_id"]} - ) + + project_name = get_kitsu_project_name(data["project_id"]) # Delete project collection - self.dbcon.database[project_doc["name"]].drop() + self.dbcon.database[project_name].drop() # == Asset == @@ -150,7 +155,8 @@ class Listener: def _update_asset(self, data): """Update asset into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity asset = gazu.asset.get_asset(data["asset_id"]) @@ -159,7 +165,7 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[asset["project_id"]] = project_doc @@ -199,7 +205,8 @@ class Listener: def _update_episode(self, data): """Update episode into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity episode = gazu.shot.get_episode(data["episode_id"]) @@ -208,7 +215,7 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[episode["project_id"]] = project_doc @@ -249,7 +256,8 @@ class Listener: def _update_sequence(self, data): """Update sequence into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity sequence = gazu.shot.get_sequence(data["sequence_id"]) @@ -258,7 +266,7 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[sequence["project_id"]] = project_doc @@ -299,7 +307,8 @@ class Listener: def _update_shot(self, data): """Update shot into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity shot = gazu.shot.get_shot(data["shot_id"]) @@ -308,7 +317,7 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[shot["project_id"]] = project_doc 
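The hunks above repeat one pattern in every `_update_*` handler: resolve the active project name once, then go through the `openpype.client` query helpers instead of raw Mongo `find` calls. A condensed sketch of that shared pattern (the helper name `_get_project_context` is hypothetical, not part of the module):

```python
# Illustrative sketch of the recurring lookup pattern; not actual module code.
from openpype.client import get_project, get_assets
from openpype.modules.kitsu.utils.update_op_with_zou import set_op_project

def _get_project_context(self, data):
    # Point the DB connection at the project the event belongs to.
    set_op_project(self.dbcon, data["project_id"])
    project_name = self.dbcon.active_project()
    project_doc = get_project(project_name)
    # Map zou ids to asset docs of the active project only.
    zou_ids_and_asset_docs = {
        asset_doc["data"]["zou"]["id"]: asset_doc
        for asset_doc in get_assets(project_name)
        if asset_doc["data"].get("zou", {}).get("id")
    }
    return project_doc, zou_ids_and_asset_docs
```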
@@ -335,14 +344,15 @@ class Listener: """Create new task into OP DB.""" # Get project entity set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() # Get gazu entity task = gazu.task.get_task(data["task_id"]) # Find asset doc - asset_doc = self.dbcon.find_one( - {"type": "asset", "data.zou.id": task["entity"]["id"]} - ) + parent_name = task["entity"]["name"] + + asset_doc = get_asset_by_name(project_name, parent_name) # Update asset tasks with new one asset_tasks = asset_doc["data"].get("tasks") @@ -359,10 +369,11 @@ class Listener: def _delete_task(self, data): """Delete task of OP DB.""" - set_op_project(self.dbcon, data["project_id"]) + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() # Find asset doc - asset_docs = [doc for doc in self.dbcon.find({"type": "asset"})] + asset_docs = list(get_assets(project_name)) for doc in asset_docs: # Match task for name, task in doc["data"]["tasks"].items(): diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py index de74b0c677..e03cf2b30e 100644 --- a/openpype/modules/kitsu/utils/update_op_with_zou.py +++ b/openpype/modules/kitsu/utils/update_op_with_zou.py @@ -10,6 +10,12 @@ from gazu.task import ( all_tasks_for_shot, ) +from openpype.client import ( + get_project, + get_assets, + get_asset_by_id, + get_asset_by_name, +) from openpype.pipeline import AvalonMongoDB from openpype.api import get_project_settings from openpype.lib import create_project @@ -33,6 +39,20 @@ def create_op_asset(gazu_entity: dict) -> dict: } +def get_kitsu_project_name(project_id: str) -> str: + """Get project name based on project id in kitsu. + + Args: + project_id (str): UUID of project in Kitsu. + + Returns: + str: Name of Kitsu project. + """ + + project = gazu.project.get_project(project_id) + return project["name"] + + def set_op_project(dbcon: AvalonMongoDB, project_id: str): """Set project context. 
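The new `get_kitsu_project_name` helper keeps the gazu project lookup in one place. A minimal usage sketch, assuming `gazu` is already authenticated (the UUID and printed name below are placeholders):

```python
# Minimal usage sketch; the project id and output are placeholders.
from openpype.modules.kitsu.utils.update_op_with_zou import (
    get_kitsu_project_name,
)

project_name = get_kitsu_project_name("a0b1c2d3-0000-0000-0000-000000000000")
print(project_name)  # e.g. "my_kitsu_project"
```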
@@ -40,9 +60,8 @@ def set_op_project(dbcon: AvalonMongoDB, project_id: str): dbcon (AvalonMongoDB): Connection to DB project_id (str): Project zou ID """ - project = gazu.project.get_project(project_id) - project_name = project["name"] - dbcon.Session["AVALON_PROJECT"] = project_name + + dbcon.Session["AVALON_PROJECT"] = get_kitsu_project_name(project_id) def update_op_assets( @@ -72,9 +91,7 @@ def update_op_assets( if not item_doc: # Create asset op_asset = create_op_asset(item) insert_result = dbcon.insert_one(op_asset) - item_doc = dbcon.find_one( - {"type": "asset", "_id": insert_result.inserted_id} - ) + item_doc = get_asset_by_id(project_name, insert_result.inserted_id) # Update asset item_data = deepcopy(item_doc["data"]) @@ -137,17 +154,23 @@ def update_op_assets( parent_zou_id = substitute_parent_item["parent_id"] else: parent_zou_id = ( - item.get("parent_id") + # For Asset, put under asset type directory + item.get("entity_type_id") + if item_type == "Asset" + else None + # Else, fallback on usual hierarchy + or item.get("parent_id") or item.get("episode_id") or item.get("source_id") - ) # TODO check consistency + ) - # Substitute Episode and Sequence by Shot - substitute_item_type = ( - "shots" - if item_type in ["Episode", "Sequence"] - else f"{item_type.lower()}s" - ) + # Substitute item type for general classification (assets or shots) + if item_type in ["Asset", "AssetType"]: + substitute_item_type = "assets" + elif item_type in ["Episode", "Sequence"]: + substitute_item_type = "shots" + else: + substitute_item_type = f"{item_type.lower()}s" entity_parent_folders = [ f for f in project_module_settings["entities_root"] @@ -161,15 +184,33 @@ def update_op_assets( asset_doc_ids[parent_zou_id]["_id"] if parent_zou_id else None ) if visual_parent_doc_id is None: - # Find root folder doc - root_folder_doc = dbcon.find_one( - { - "type": "asset", - "name": entity_parent_folders[-1], - "data.root_of": substitute_item_type, - }, - ["_id"], + # Find root folder docs + root_folder_docs = get_assets( + project_name, + asset_names=[entity_parent_folders[-1]], + fields=["_id", "data.root_of"], ) + # NOTE: Not sure why it's checking for entity type? + # OP3 does not support multiple assets with the same name so type + # filtering is irrelevant. 
+ # This way mimics previous implementation: + # ``` + # root_folder_doc = dbcon.find_one( + # { + # "type": "asset", + # "name": entity_parent_folders[-1], + # "data.root_of": substitute_item_type, + # }, + # ["_id"], + # ) + # ``` + root_folder_doc = None + for folder_doc in root_folder_docs: + root_of = folder_doc.get("data", {}).get("root_of") + if root_of == substitute_item_type: + root_folder_doc = folder_doc + break + if root_folder_doc: visual_parent_doc_id = root_folder_doc["_id"] @@ -178,13 +219,25 @@ def update_op_assets( # Add parents for hierarchy item_data["parents"] = [] - while parent_zou_id is not None: - parent_doc = asset_doc_ids[parent_zou_id] + ancestor_id = parent_zou_id + while ancestor_id is not None: + parent_doc = asset_doc_ids[ancestor_id] item_data["parents"].insert(0, parent_doc["name"]) # Get parent entity parent_entity = parent_doc["data"]["zou"] - parent_zou_id = parent_entity["parent_id"] + ancestor_id = parent_entity.get("parent_id") + + # Build OpenPype compatible name + if item_type in ["Shot", "Sequence"] and parent_zou_id is not None: + # Name with parents hierarchy "({episode}_){sequence}_{shot}" + # to avoid duplicate name issue + item_name = f"{item_data['parents'][-1]}_{item['name']}" + + # Update doc name + asset_doc_ids[item["id"]]["name"] = item_name + else: + item_name = item["name"] # Set root folders parents item_data["parents"] = entity_parent_folders + item_data["parents"] @@ -199,9 +252,9 @@ def update_op_assets( item_doc["_id"], { "$set": { - "name": item["name"], + "name": item_name, "data": item_data, - "parent": asset_doc_ids[item["project_id"]]["_id"], + "parent": project_doc["_id"], } }, ) @@ -222,13 +275,13 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne: UpdateOne: Update instance for the project """ project_name = project["name"] - project_doc = dbcon.database[project_name].find_one({"type": "project"}) + project_doc = get_project(project_name) if not project_doc: print(f"Creating project '{project_name}'") project_doc = create_project(project_name, project_name, dbcon=dbcon) # Project data and tasks - project_data = project["data"] or {} + project_data = project_doc["data"] or {} # Build project code and update Kitsu project_code = project.get("code") @@ -257,6 +310,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne: "config.tasks": { t["name"]: {"short_name": t.get("short_name", t["name"])} for t in gazu.task.all_task_types_for_project(project) + or gazu.task.all_task_types() }, "data": project_data, } @@ -292,6 +346,11 @@ def sync_all_projects(login: str, password: str): def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): """Update OP project in DB with Zou data. + `root_of` is meant to sort entities by type for better readability in + the data tree. It puts all shot-like entities (Shot, Episode, Sequence) + and asset entities under two different root folders or hierarchies, + defined in settings. + Args: dbcon (AvalonMongoDB): MongoDB connection project (dict): Project dict obtained using gazu. 
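To make the `root_of` note above concrete: the `entities_root` project setting maps each entity class to a root folder path (split on "/" for nested roots), and synced entities are parented under the matching root. The values below are illustrative, not the shipped defaults:

```python
# Illustrative mapping only; real values come from the Kitsu project settings.
entities_root = {
    "assets": "Assets",  # Asset and AssetType entities land here
    "shots": "Shots",    # Episode, Sequence and Shot entities land here
}
```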
@@ -306,12 +365,17 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): # Get all assets from zou all_assets = gazu.asset.all_assets_for_project(project) + all_asset_types = gazu.asset.all_asset_types_for_project(project) all_episodes = gazu.shot.all_episodes_for_project(project) all_seqs = gazu.shot.all_sequences_for_project(project) all_shots = gazu.shot.all_shots_for_project(project) all_entities = [ item - for item in all_assets + all_episodes + all_seqs + all_shots + for item in all_assets + + all_asset_types + + all_episodes + + all_seqs + + all_shots if naming_pattern.match(item["name"]) ] @@ -319,26 +383,44 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): bulk_writes.append(write_project_to_op(project, dbcon)) # Try to find project document - dbcon.Session["AVALON_PROJECT"] = project["name"] - project_doc = dbcon.find_one({"type": "project"}) + project_name = project["name"] + dbcon.Session["AVALON_PROJECT"] = project_name + project_doc = get_project(project_name) # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[project["id"]] = project_doc # Create entities root folders - project_module_settings = get_project_settings(project["name"])["kitsu"] + project_module_settings = get_project_settings(project_name)["kitsu"] for entity_type, root in project_module_settings["entities_root"].items(): parent_folders = root.split("/") direct_parent_doc = None for i, folder in enumerate(parent_folders, 1): - parent_doc = dbcon.find_one( - {"type": "asset", "name": folder, "data.root_of": entity_type} + parent_doc = get_asset_by_name( + project_name, folder, fields=["_id", "data.root_of"] ) + # NOTE: Not sure why it's checking for entity type? + # OP3 does not support multiple assets with the same name so type + # filtering is irrelevant. + # Also all of the entities could be queried at once using + # 'get_assets'. 
+ # This way mimics previous implementation: + # ``` + # parent_doc = dbcon.find_one( + # {"type": "asset", "name": folder, "data.root_of": entity_type} + # ) + # ``` + if ( + parent_doc + and parent_doc.get("data", {}).get("root_of") != entity_type + ): + parent_doc = None + if not parent_doc: direct_parent_doc = dbcon.insert_one( { @@ -348,21 +430,20 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): "data": { "root_of": entity_type, "parents": parent_folders[:i], - "visualParent": direct_parent_doc, + "visualParent": direct_parent_doc.inserted_id + if direct_parent_doc + else None, "tasks": {}, }, } ) # Create - to_insert = [] - to_insert.extend( - [ - create_op_asset(item) - for item in all_entities - if item["id"] not in zou_ids_and_asset_docs.keys() - ] - ) + to_insert = [ + create_op_asset(item) + for item in all_entities + if item["id"] not in zou_ids_and_asset_docs.keys() + ] if to_insert: # Insert doc in DB dbcon.insert_many(to_insert) @@ -371,7 +452,7 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): zou_ids_and_asset_docs.update( { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou") } ) diff --git a/openpype/modules/kitsu/utils/update_zou_with_op.py b/openpype/modules/kitsu/utils/update_zou_with_op.py index 81d421206f..57d7094e95 100644 --- a/openpype/modules/kitsu/utils/update_zou_with_op.py +++ b/openpype/modules/kitsu/utils/update_zou_with_op.py @@ -6,6 +6,7 @@ from typing import List import gazu from pymongo import UpdateOne +from openpype.client import get_project, get_assets from openpype.pipeline import AvalonMongoDB from openpype.api import get_project_settings from openpype.modules.kitsu.utils.credentials import validate_credentials @@ -53,9 +54,7 @@ def sync_zou_from_op_project( """ # Get project doc if not provided if not project_doc: - project_doc = dbcon.database[project_name].find_one( - {"type": "project"} - ) + project_doc = get_project(project_name) # Get all entities from zou print(f"Synchronizing {project_name}...") @@ -96,7 +95,7 @@ def sync_zou_from_op_project( dbcon.Session["AVALON_PROJECT"] = project_name asset_docs = { asset_doc["_id"]: asset_doc - for asset_doc in dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) } # Create new assets diff --git a/openpype/modules/shotgrid/README.md b/openpype/modules/shotgrid/README.md new file mode 100644 index 0000000000..cbee0e9bf4 --- /dev/null +++ b/openpype/modules/shotgrid/README.md @@ -0,0 +1,19 @@ +## Shotgrid Module + +### Pre-requisites + +Install and launch a [shotgrid leecher](https://github.com/Ellipsanime/shotgrid-leecher) server + +### Quickstart + +The goal of this tutorial is to synchronize an already existing shotgrid project with OpenPype. 
+ +- Activate the shotgrid module in the **system settings** and enter the shotgrid leecher server API URL + +- Create a new OpenPype project with the **project manager** + +- Enter the shotgrid authentication info (URL, script name, API key) and the shotgrid project ID related to this OpenPype project in the **project settings** + +- Use the batch interface (Tray > shotgrid > Launch batch), select your project and click "batch" + +- You can now access your shotgrid entities within the **avalon launcher** and publish information to shotgrid with **pyblish** diff --git a/openpype/modules/shotgrid/__init__.py b/openpype/modules/shotgrid/__init__.py new file mode 100644 index 0000000000..f1337a9492 --- /dev/null +++ b/openpype/modules/shotgrid/__init__.py @@ -0,0 +1,5 @@ +from .shotgrid_module import ( + ShotgridModule, +) + +__all__ = ("ShotgridModule",) diff --git a/openpype/hosts/unreal/integration/UE_5.0/Content/Python/__init__.py b/openpype/modules/shotgrid/lib/__init__.py similarity index 100% rename from openpype/hosts/unreal/integration/UE_5.0/Content/Python/__init__.py rename to openpype/modules/shotgrid/lib/__init__.py diff --git a/openpype/modules/shotgrid/lib/const.py b/openpype/modules/shotgrid/lib/const.py new file mode 100644 index 0000000000..2a34800fac --- /dev/null +++ b/openpype/modules/shotgrid/lib/const.py @@ -0,0 +1 @@ +MODULE_NAME = "shotgrid" diff --git a/openpype/modules/shotgrid/lib/credentials.py b/openpype/modules/shotgrid/lib/credentials.py new file mode 100644 index 0000000000..337c4f6ecb --- /dev/null +++ b/openpype/modules/shotgrid/lib/credentials.py @@ -0,0 +1,125 @@ + +from urllib.parse import urlparse + +import shotgun_api3 +from shotgun_api3.shotgun import AuthenticationFault + +from openpype.lib import OpenPypeSecureRegistry, OpenPypeSettingsRegistry +from openpype.modules.shotgrid.lib.record import Credentials + + +def _get_shotgrid_secure_key(hostname, key): + """Secure item key for entered hostname.""" + return f"shotgrid/{hostname}/{key}" + + +def _get_secure_value_and_registry( + hostname, + name, +): + key = _get_shotgrid_secure_key(hostname, name) + registry = OpenPypeSecureRegistry(key) + return registry.get_item(name, None), registry + + +def get_shotgrid_hostname(shotgrid_url): + + if not shotgrid_url: + raise Exception("Shotgrid url cannot be null") + valid_shotgrid_url = ( + f"//{shotgrid_url}" if "//" not in shotgrid_url else shotgrid_url + ) + return urlparse(valid_shotgrid_url).hostname + + +# Credentials storing function (using keyring) + + +def get_credentials(shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + if not hostname: + return None + login_value, _ = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + password_value, _ = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + return Credentials(login_value, password_value) + + +def save_credentials(login, password, shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + _, login_registry = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + _, password_registry = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + clear_credentials(shotgrid_url) + login_registry.set_item(Credentials.login_key_prefix(), login) + password_registry.set_item(Credentials.password_key_prefix(), password) + + +def clear_credentials(shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + login_value, login_registry = 
_get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + password_value, password_registry = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + + if login_value is not None: + login_registry.delete_item(Credentials.login_key_prefix()) + + if password_value is not None: + password_registry.delete_item(Credentials.password_key_prefix()) + + +# Login storing function (using json) + + +def get_local_login(): + reg = OpenPypeSettingsRegistry() + try: + return str(reg.get_item("shotgrid_login")) + except Exception: + return None + + +def save_local_login(login): + reg = OpenPypeSettingsRegistry() + reg.set_item("shotgrid_login", login) + + +def clear_local_login(): + reg = OpenPypeSettingsRegistry() + reg.delete_item("shotgrid_login") + + +def check_credentials( + login, + password, + shotgrid_url, +): + + if not shotgrid_url or not login or not password: + return False + try: + session = shotgun_api3.Shotgun( + shotgrid_url, + login=login, + password=password, + ) + session.preferences_read() + session.close() + except AuthenticationFault: + return False + return True diff --git a/openpype/modules/shotgrid/lib/record.py b/openpype/modules/shotgrid/lib/record.py new file mode 100644 index 0000000000..f62f4855d5 --- /dev/null +++ b/openpype/modules/shotgrid/lib/record.py @@ -0,0 +1,20 @@ + +class Credentials: + login = None + password = None + + def __init__(self, login, password) -> None: + super().__init__() + self.login = login + self.password = password + + def is_empty(self): + return not (self.login and self.password) + + @staticmethod + def login_key_prefix(): + return "login" + + @staticmethod + def password_key_prefix(): + return "password" diff --git a/openpype/modules/shotgrid/lib/settings.py b/openpype/modules/shotgrid/lib/settings.py new file mode 100644 index 0000000000..924099f04b --- /dev/null +++ b/openpype/modules/shotgrid/lib/settings.py @@ -0,0 +1,18 @@ +from openpype.api import get_system_settings, get_project_settings +from openpype.modules.shotgrid.lib.const import MODULE_NAME + + +def get_shotgrid_project_settings(project): + return get_project_settings(project).get(MODULE_NAME, {}) + + +def get_shotgrid_settings(): + return get_system_settings().get("modules", {}).get(MODULE_NAME, {}) + + +def get_shotgrid_servers(): + return get_shotgrid_settings().get("shotgrid_settings", {}) + + +def get_leecher_backend_url(): + return get_shotgrid_settings().get("leecher_backend_url") diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py new file mode 100644 index 0000000000..0b03ac2e5d --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py @@ -0,0 +1,100 @@ +import os + +import pyblish.api +from openpype.lib.mongo import OpenPypeMongoConnection + + +class CollectShotgridEntities(pyblish.api.ContextPlugin): + """Collect shotgrid entities according to the current context""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Shotgrid entities" + + def process(self, context): + + avalon_project = context.data.get("projectEntity") + avalon_asset = context.data.get("assetEntity") + avalon_task_name = os.getenv("AVALON_TASK") + + self.log.info(avalon_project) + self.log.info(avalon_asset) + + sg_project = _get_shotgrid_project(context) + sg_task = _get_shotgrid_task( + avalon_project, + avalon_asset, + avalon_task_name + ) + sg_entity = _get_shotgrid_entity(avalon_project, 
avalon_asset) + + if sg_project: + context.data["shotgridProject"] = sg_project + self.log.info( + "Collected corresponding shotgrid project: {}".format( + sg_project + ) + ) + + if sg_task: + context.data["shotgridTask"] = sg_task + self.log.info( + "Collected corresponding shotgrid task: {}".format(sg_task) + ) + + if sg_entity: + context.data["shotgridEntity"] = sg_entity + self.log.info( + "Collected corresponding shotgrid entity: {}".format(sg_entity) + ) + + def _find_existing_version(self, code, context): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["sg_task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["code", "is", code], + ] + + sg = context.data.get("shotgridSession") + return sg.find_one("Version", filters, []) + + +def _get_shotgrid_collection(project): + client = OpenPypeMongoConnection.get_mongo_client() + return client.get_database("shotgrid_openpype").get_collection(project) + + +def _get_shotgrid_project(context): + shotgrid_project_id = context.data["project_settings"].get( + "shotgrid_project_id") + if shotgrid_project_id: + return {"type": "Project", "id": shotgrid_project_id} + return {} + + +def _get_shotgrid_task(avalon_project, avalon_asset, avalon_task): + sg_col = _get_shotgrid_collection(avalon_project["name"]) + shotgrid_task_hierarchy_row = sg_col.find_one( + { + "type": "Task", + "_id": {"$regex": "^" + avalon_task + "_[0-9]*"}, + "parent": {"$regex": ".*," + avalon_asset["name"] + ","}, + } + ) + if shotgrid_task_hierarchy_row: + return {"type": "Task", "id": shotgrid_task_hierarchy_row["src_id"]} + return {} + + +def _get_shotgrid_entity(avalon_project, avalon_asset): + sg_col = _get_shotgrid_collection(avalon_project["name"]) + shotgrid_entity_hierarchy_row = sg_col.find_one( + {"_id": avalon_asset["name"]} + ) + if shotgrid_entity_hierarchy_row: + return { + "type": shotgrid_entity_hierarchy_row["type"], + "id": shotgrid_entity_hierarchy_row["src_id"], + } + return {} diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py new file mode 100644 index 0000000000..9d5d2271bf --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py @@ -0,0 +1,123 @@ +import os + +import pyblish.api +import shotgun_api3 +from shotgun_api3.shotgun import AuthenticationFault + +from openpype.lib import OpenPypeSettingsRegistry +from openpype.modules.shotgrid.lib.settings import ( + get_shotgrid_servers, + get_shotgrid_project_settings, +) + + +class CollectShotgridSession(pyblish.api.ContextPlugin): + """Collect shotgrid session using user credentials""" + + order = pyblish.api.CollectorOrder + label = "Shotgrid user session" + + def process(self, context): + + certificate_path = os.getenv("SHOTGUN_API_CACERTS") + if certificate_path is None or not os.path.exists(certificate_path): + self.log.info( + "SHOTGUN_API_CACERTS does not contain a valid \ path: {}".format( + certificate_path + ) + ) + certificate_path = get_shotgrid_certificate() + self.log.info("Get Certificate from shotgrid_api") + + if not os.path.exists(certificate_path): + self.log.error( + "Could not find certificate in shotgun_api3: \ {}".format( + certificate_path + ) + ) + return + + set_shotgrid_certificate(certificate_path) + self.log.info("Set Certificate: {}".format(certificate_path)) + + avalon_project = os.getenv("AVALON_PROJECT") + + shotgrid_settings = 
get_shotgrid_project_settings(avalon_project) + self.log.info("shotgrid settings: {}".format(shotgrid_settings)) + shotgrid_servers_settings = get_shotgrid_servers() + self.log.info( + "shotgrid_servers_settings: {}".format(shotgrid_servers_settings) + ) + + shotgrid_server = shotgrid_settings.get("shotgrid_server", "") + if not shotgrid_server: + self.log.error( + "No Shotgrid server found, please choose a server " + "in OpenPype settings" + ) + + shotgrid_server_setting = shotgrid_servers_settings.get( + shotgrid_server, {} + ) + shotgrid_url = shotgrid_server_setting.get("shotgrid_url", "") + + shotgrid_script_name = shotgrid_server_setting.get( + "shotgrid_script_name", "" + ) + shotgrid_script_key = shotgrid_server_setting.get( + "shotgrid_script_key", "" + ) + if not shotgrid_script_name or not shotgrid_script_key: + self.log.error( + "No Shotgrid api credential found, please enter " + "script name and script key in OpenPype settings" + ) + + login = get_login() or os.getenv("OPENPYPE_SG_USER") + + if not login: + self.log.error( + "No Shotgrid login found, please " + "log in to shotgrid within the openpype Tray" + ) + + session = shotgun_api3.Shotgun( + base_url=shotgrid_url, + script_name=shotgrid_script_name, + api_key=shotgrid_script_key, + sudo_as_login=login, + ) + + try: + session.preferences_read() + except AuthenticationFault: + raise ValueError( + "Could not connect to shotgrid {} with user {}".format( + shotgrid_url, login + ) + ) + + self.log.info( + "Logged in to shotgrid {} with user {}".format(shotgrid_url, login) + ) + context.data["shotgridSession"] = session + context.data["shotgridUser"] = login + + +def get_shotgrid_certificate(): + shotgun_api_path = os.path.dirname(shotgun_api3.__file__) + return os.path.join(shotgun_api_path, "lib", "certifi", "cacert.pem") + + +def set_shotgrid_certificate(certificate): + os.environ["SHOTGUN_API_CACERTS"] = certificate + + +def get_login(): + reg = OpenPypeSettingsRegistry() + try: + return str(reg.get_item("shotgrid_login")) + except Exception: + return None diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py new file mode 100644 index 0000000000..cfd2d10fd9 --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py @@ -0,0 +1,77 @@ +import os +import pyblish.api + + +class IntegrateShotgridPublish(pyblish.api.InstancePlugin): + """ + Create PublishedFile entities from representations and add them to the + version. If a representation is tagged for shotgrid review, its path is + also set as path to movie for a movie file or path to frames for an + image sequence. 
+ """ + + order = pyblish.api.IntegratorOrder + 0.499 + label = "Shotgrid Published Files" + + def process(self, instance): + + context = instance.context + + self.sg = context.data.get("shotgridSession") + + shotgrid_version = instance.data.get("shotgridVersion") + + for representation in instance.data.get("representations", []): + + local_path = representation.get("published_path") + code = os.path.basename(local_path) + + if representation.get("tags", []): + continue + + published_file = self._find_existing_publish( + code, context, shotgrid_version + ) + + published_file_data = { + "project": context.data.get("shotgridProject"), + "code": code, + "entity": context.data.get("shotgridEntity"), + "task": context.data.get("shotgridTask"), + "version": shotgrid_version, + "path": {"local_path": local_path}, + } + if not published_file: + published_file = self._create_published(published_file_data) + self.log.info( + "Create Shotgrid PublishedFile: {}".format(published_file) + ) + else: + self.sg.update( + published_file["type"], + published_file["id"], + published_file_data, + ) + self.log.info( + "Update Shotgrid PublishedFile: {}".format(published_file) + ) + + if instance.data["family"] == "image": + self.sg.upload_thumbnail( + published_file["type"], published_file["id"], local_path + ) + instance.data["shotgridPublishedFile"] = published_file + + def _find_existing_publish(self, code, context, shotgrid_version): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["version", "is", shotgrid_version], + ["code", "is", code], + ] + return self.sg.find_one("PublishedFile", filters, []) + + def _create_published(self, published_file_data): + + return self.sg.create("PublishedFile", published_file_data) diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py new file mode 100644 index 0000000000..a1b7140e22 --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py @@ -0,0 +1,92 @@ +import os +import pyblish.api + + +class IntegrateShotgridVersion(pyblish.api.InstancePlugin): + """Integrate Shotgrid Version""" + + order = pyblish.api.IntegratorOrder + 0.497 + label = "Shotgrid Version" + + sg = None + + def process(self, instance): + + context = instance.context + self.sg = context.data.get("shotgridSession") + + # TODO: Use path template solver to build version code from settings + anatomy = instance.data.get("anatomyData", {}) + code = "_".join( + [ + anatomy["project"]["code"], + anatomy["parent"], + anatomy["asset"], + anatomy["task"]["name"], + "v{:03}".format(int(anatomy["version"])), + ] + ) + + version = self._find_existing_version(code, context) + + if not version: + version = self._create_version(code, context) + self.log.info("Create Shotgrid version: {}".format(version)) + else: + self.log.info("Use existing Shotgrid version: {}".format(version)) + + data_to_update = {} + status = context.data.get("intent", {}).get("value") + if status: + data_to_update["sg_status_list"] = status + + for representation in instance.data.get("representations", []): + local_path = representation.get("published_path") + code = os.path.basename(local_path) + + if "shotgridreview" in representation.get("tags", []): + + if representation["ext"] in ["mov", "avi"]: + self.log.info( + "Upload review: {} for version shotgrid {}".format( + 
+                            local_path, version.get("id")
+                        )
+                    )
+                    self.sg.upload(
+                        "Version",
+                        version.get("id"),
+                        local_path,
+                        field_name="sg_uploaded_movie",
+                    )
+
+                    data_to_update["sg_path_to_movie"] = local_path
+
+                elif representation["ext"] in ["jpg", "png", "exr", "tga"]:
+                    path_to_frame = local_path.replace("0000", "#")
+                    data_to_update["sg_path_to_frames"] = path_to_frame
+
+        self.log.info(
+            "Updating Shotgrid version with {}".format(data_to_update)
+        )
+        self.sg.update("Version", version["id"], data_to_update)
+
+        instance.data["shotgridVersion"] = version
+
+    def _find_existing_version(self, code, context):
+
+        filters = [
+            ["project", "is", context.data.get("shotgridProject")],
+            ["sg_task", "is", context.data.get("shotgridTask")],
+            ["entity", "is", context.data.get("shotgridEntity")],
+            ["code", "is", code],
+        ]
+        return self.sg.find_one("Version", filters, [])
+
+    def _create_version(self, code, context):
+
+        version_data = {
+            "project": context.data.get("shotgridProject"),
+            "sg_task": context.data.get("shotgridTask"),
+            "entity": context.data.get("shotgridEntity"),
+            "code": code,
+        }
+
+        return self.sg.create("Version", version_data)
diff --git a/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py
new file mode 100644
index 0000000000..c14c980e2a
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py
@@ -0,0 +1,38 @@
+import pyblish.api
+import openpype.api
+
+
+class ValidateShotgridUser(pyblish.api.ContextPlugin):
+    """
+    Check that the user is valid and has access to the project.
+    """
+
+    label = "Validate Shotgrid User"
+    order = openpype.api.ValidateContentsOrder
+
+    def process(self, context):
+        sg = context.data.get("shotgridSession")
+
+        login = context.data.get("shotgridUser")
+        self.log.info("Shotgrid login set in OpenPype is {}".format(login))
+        project = context.data.get("shotgridProject")
+        self.log.info("Current Shotgrid project is {}".format(project))
+
+        if not (login and sg and project):
+            raise KeyError(
+                "Missing Shotgrid session, user login or project in context"
+            )
+
+        user = sg.find_one("HumanUser", [["login", "is", login]], ["projects"])
+
+        self.log.info(user)
+        self.log.info(login)
+        user_projects_id = [p["id"] for p in user.get("projects", [])]
+        if not project.get("id") in user_projects_id:
+            raise PermissionError(
+                "Login {} doesn't have access to the project {}".format(
+                    login, project
+                )
+            )
+
+        self.log.info(
+            "Login {} has access to the project {}".format(login, project)
+        )
diff --git a/openpype/modules/shotgrid/server/README.md b/openpype/modules/shotgrid/server/README.md
new file mode 100644
index 0000000000..15e056ff3e
--- /dev/null
+++ b/openpype/modules/shotgrid/server/README.md
@@ -0,0 +1,5 @@
+
+### Shotgrid server
+
+Please refer to the external project that covers OpenPype/Shotgrid communication:
+ - https://github.com/Ellipsanime/shotgrid-leecher
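A note for reviewers on the plugin ordering used throughout this module: the integrators rely on pyblish's ascending `order` scheduling, so `IntegrateShotgridVersion` (+0.497) always runs before `IntegrateShotgridPublish` (+0.499), which then reads `instance.data["shotgridVersion"]`. A minimal sketch of that contract (class bodies are placeholders, not the real plugins):

```python
import pyblish.api


class IntegrateShotgridVersion(pyblish.api.InstancePlugin):
    # +0.497 sorts before +0.499, so the version exists first.
    order = pyblish.api.IntegratorOrder + 0.497

    def process(self, instance):
        instance.data["shotgridVersion"] = {"type": "Version", "id": 1}


class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
    order = pyblish.api.IntegratorOrder + 0.499

    def process(self, instance):
        # Relies on IntegrateShotgridVersion having run already.
        version = instance.data["shotgridVersion"]
        assert version["type"] == "Version"
```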
diff --git a/openpype/modules/shotgrid/shotgrid_module.py b/openpype/modules/shotgrid/shotgrid_module.py
new file mode 100644
index 0000000000..5644f0c35f
--- /dev/null
+++ b/openpype/modules/shotgrid/shotgrid_module.py
@@ -0,0 +1,58 @@
+import os
+
+from openpype_interfaces import (
+    ITrayModule,
+    IPluginPaths,
+    ILaunchHookPaths,
+)
+
+from openpype.modules import OpenPypeModule
+
+SHOTGRID_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class ShotgridModule(
+    OpenPypeModule, ITrayModule, IPluginPaths, ILaunchHookPaths
+):
+    leecher_manager_url = None
+    name = "shotgrid"
+    enabled = False
+    project_id = None
+    tray_wrapper = None
+
+    def initialize(self, modules_settings):
+        shotgrid_settings = modules_settings.get(self.name, dict())
+        self.enabled = shotgrid_settings.get("enabled", False)
+        self.leecher_manager_url = shotgrid_settings.get(
+            "leecher_manager_url", ""
+        )
+
+    def connect_with_modules(self, enabled_modules):
+        pass
+
+    def get_global_environments(self):
+        return {"PROJECT_ID": self.project_id}
+
+    def get_plugin_paths(self):
+        return {
+            "publish": [
+                os.path.join(SHOTGRID_MODULE_DIR, "plugins", "publish")
+            ]
+        }
+
+    def get_launch_hook_paths(self):
+        return os.path.join(SHOTGRID_MODULE_DIR, "hooks")
+
+    def tray_init(self):
+        from .tray.shotgrid_tray import ShotgridTrayWrapper
+
+        self.tray_wrapper = ShotgridTrayWrapper(self)
+
+    def tray_start(self):
+        return self.tray_wrapper.validate()
+
+    def tray_exit(self, *args, **kwargs):
+        return self.tray_wrapper
+
+    def tray_menu(self, tray_menu):
+        return self.tray_wrapper.tray_menu(tray_menu)
diff --git a/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py
new file mode 100644
index 0000000000..1f78cf77c9
--- /dev/null
+++ b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py
@@ -0,0 +1,34 @@
+import pytest
+from assertpy import assert_that
+
+import openpype.modules.shotgrid.lib.credentials as sut
+
+
+def test_missing_shotgrid_url():
+    with pytest.raises(Exception) as ex:
+        # arrange
+        url = ""
+        # act
+        sut.get_shotgrid_hostname(url)
+        # assert
+        assert_that(ex).is_equal_to("Shotgrid url cannot be a null")
+
+
+def test_full_shotgrid_url():
+    # arrange
+    url = "https://shotgrid.com/myinstance"
+    # act
+    actual = sut.get_shotgrid_hostname(url)
+    # assert
+    assert_that(actual).is_not_empty()
+    assert_that(actual).is_equal_to("shotgrid.com")
+
+
+def test_incomplete_shotgrid_url():
+    # arrange
+    url = "shotgrid.com/myinstance"
+    # act
+    actual = sut.get_shotgrid_hostname(url)
+    # assert
+    assert_that(actual).is_not_empty()
+    assert_that(actual).is_equal_to("shotgrid.com")
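The tests above pin down `get_shotgrid_hostname` for empty, full and scheme-less URLs. The `credentials` module itself is not part of this diff, so purely as a sketch, an implementation consistent with those tests could look like this:

```python
from urllib.parse import urlparse


def get_shotgrid_hostname(shotgrid_url):
    # The tests expect a bare hostname for both
    # "https://shotgrid.com/myinstance" and "shotgrid.com/myinstance".
    if not shotgrid_url:
        raise Exception("Shotgrid url cannot be a null")

    if "//" not in shotgrid_url:
        # Scheme-less input: prepend "//" so urlparse fills "netloc".
        shotgrid_url = "//" + shotgrid_url

    return urlparse(shotgrid_url).netloc
```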
QtWidgets.QLabel("Login:") + self.password_label = QtWidgets.QLabel("Password:") + + self.url_input = QtWidgets.QComboBox() + # self.url_input.setReadOnly(True) + + self.login_input = QtWidgets.QLineEdit() + self.login_input.setPlaceholderText("login") + + self.password_input = QtWidgets.QLineEdit() + self.password_input.setPlaceholderText("password") + self.password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + self.error_label = QtWidgets.QLabel("") + self.error_label.setStyleSheet("color: red;") + self.error_label.setWordWrap(True) + self.error_label.hide() + + self.input_layout = QtWidgets.QFormLayout() + self.input_layout.setContentsMargins(10, 15, 10, 5) + + self.input_layout.addRow(self.url_label, self.url_input) + self.input_layout.addRow(self.login_label, self.login_input) + self.input_layout.addRow(self.password_label, self.password_input) + self.input_layout.addRow(self.error_label) + + self.login_button = QtWidgets.QPushButton("Login") + self.login_button.setToolTip("Log in shotgrid instance") + self.login_button.clicked.connect(self._on_shotgrid_login_clicked) + + self.logout_button = QtWidgets.QPushButton("Logout") + self.logout_button.setToolTip("Log out shotgrid instance") + self.logout_button.clicked.connect(self._on_shotgrid_logout_clicked) + + self.buttons_layout = QtWidgets.QHBoxLayout() + self.buttons_layout.addWidget(self.logout_button) + self.buttons_layout.addWidget(self.login_button) + + self.main_widget = QtWidgets.QVBoxLayout(self) + self.main_widget.addLayout(self.input_layout) + self.main_widget.addLayout(self.buttons_layout) + self.setLayout(self.main_widget) + + def show(self, *args, **kwargs): + super(CredentialsDialog, self).show(*args, **kwargs) + self._fill_shotgrid_url() + self._fill_shotgrid_login() + + def _fill_shotgrid_url(self): + servers = settings.get_shotgrid_servers() + + if servers: + for _, v in servers.items(): + self.url_input.addItem("{}".format(v.get('shotgrid_url'))) + self._valid_input(self.url_input) + self.login_button.show() + self.logout_button.show() + enabled = True + else: + self.set_error("Ask your admin to add shotgrid server in settings") + self._invalid_input(self.url_input) + self.login_button.hide() + self.logout_button.hide() + enabled = False + + self.login_input.setEnabled(enabled) + self.password_input.setEnabled(enabled) + + def _fill_shotgrid_login(self): + login = credentials.get_local_login() + + if login: + self.login_input.setText(login) + + def _clear_shotgrid_login(self): + self.login_input.setText("") + self.password_input.setText("") + + def _on_shotgrid_login_clicked(self): + login = self.login_input.text().strip() + password = self.password_input.text().strip() + missing = [] + + if login == "": + missing.append("login") + self._invalid_input(self.login_input) + + if password == "": + missing.append("password") + self._invalid_input(self.password_input) + + url = self.url_input.currentText() + if url == "": + missing.append("url") + self._invalid_input(self.url_input) + + if len(missing) > 0: + self.set_error("You didn't enter {}".format(" and ".join(missing))) + return + + # if credentials.check_credentials( + # login=login, + # password=password, + # shotgrid_url=url, + # ): + credentials.save_local_login( + login=login + ) + os.environ['OPENPYPE_SG_USER'] = login + self._on_login() + + self.set_error("CANT LOGIN") + + def _on_shotgrid_logout_clicked(self): + credentials.clear_local_login() + del os.environ['OPENPYPE_SG_USER'] + self._clear_shotgrid_login() + self._on_logout() + + def set_error(self, 
diff --git a/openpype/modules/shotgrid/tray/shotgrid_tray.py b/openpype/modules/shotgrid/tray/shotgrid_tray.py
new file mode 100644
index 0000000000..4038d77b03
--- /dev/null
+++ b/openpype/modules/shotgrid/tray/shotgrid_tray.py
@@ -0,0 +1,75 @@
+import os
+import webbrowser
+
+from Qt import QtWidgets
+
+from openpype.modules.shotgrid.lib import credentials
+from openpype.modules.shotgrid.tray.credential_dialog import (
+    CredentialsDialog,
+)
+
+
+class ShotgridTrayWrapper:
+    module = None
+    credentials_dialog = None
+    logged_user_label = None
+
+    def __init__(self, module):
+        self.module = module
+        self.credentials_dialog = CredentialsDialog(module)
+        self.credentials_dialog.login_changed.connect(self.set_login_label)
+        self.logged_user_label = QtWidgets.QAction("")
+        self.logged_user_label.setDisabled(True)
+        self.set_login_label()
+
+    def show_batch_dialog(self):
+        if self.module.leecher_manager_url:
+            webbrowser.open(self.module.leecher_manager_url)
+
+    def show_connect_dialog(self):
+        self.show_credential_dialog()
+
+    def show_credential_dialog(self):
+        self.credentials_dialog.show()
+        self.credentials_dialog.activateWindow()
+        self.credentials_dialog.raise_()
+
+    def set_login_label(self):
+        login = credentials.get_local_login()
+        if login:
+            self.logged_user_label.setText("{}".format(login))
+        else:
+            self.logged_user_label.setText("No user logged in")
+
+    def tray_menu(self, tray_menu):
+        # Add login to user menu
+        menu = QtWidgets.QMenu("Shotgrid", tray_menu)
+        show_connect_action = QtWidgets.QAction("Connect to Shotgrid", menu)
+        show_connect_action.triggered.connect(self.show_connect_dialog)
+        menu.addAction(self.logged_user_label)
+        menu.addSeparator()
+        menu.addAction(show_connect_action)
+        tray_menu.addMenu(menu)
+
+        # Add manager to Admin menu
+        for m in tray_menu.findChildren(QtWidgets.QMenu):
+            if m.title() == "Admin":
+                shotgrid_manager_action = QtWidgets.QAction(
+                    "Shotgrid manager", menu
+                )
+                shotgrid_manager_action.triggered.connect(
+                    self.show_batch_dialog
+                )
+                m.addAction(shotgrid_manager_action)
+
+    def validate(self):
+        login = credentials.get_local_login()
+
+        if not login:
+            self.show_credential_dialog()
+        else:
+            os.environ["OPENPYPE_SG_USER"] = login
+
+        return True
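Between the tray code above and the session collector earlier in this diff, the active Shotgrid user is agreed on through the `OPENPYPE_SG_USER` environment variable. A self-contained sketch of that fallback chain (`get_login` is stubbed here; the real one reads `OpenPypeSettingsRegistry`):

```python
import os


def get_login():
    # Stand-in for the registry lookup used by collect_shotgrid_session;
    # returns None when nothing was stored by the tray login dialog.
    return None


# The tray (CredentialsDialog / ShotgridTrayWrapper.validate) mirrors
# the stored login into the environment; the value is illustrative.
os.environ["OPENPYPE_SG_USER"] = "jane.doe"

# The collector then resolves the user with the same fallback chain:
login = get_login() or os.getenv("OPENPYPE_SG_USER")
assert login == "jane.doe"
```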
diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py
index 10bde7d4c0..c3b288f0cd 100644
--- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py
+++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py
@@ -4,8 +4,8 @@ import pyblish.api
 import copy
 from datetime import datetime
 
+from openpype.client import OpenPypeMongoConnection
 from openpype.lib.plugin_tools import prepare_template_data
-from openpype.lib import OpenPypeMongoConnection
 
 
 class IntegrateSlackAPI(pyblish.api.InstancePlugin):
diff --git a/openpype/modules/sync_server/providers/abstract_provider.py b/openpype/modules/sync_server/providers/abstract_provider.py
index 688a17f14f..8c2fe1cad9 100644
--- a/openpype/modules/sync_server/providers/abstract_provider.py
+++ b/openpype/modules/sync_server/providers/abstract_provider.py
@@ -62,7 +62,7 @@ class AbstractProvider:
 
     @abc.abstractmethod
     def upload_file(self, source_path, path,
-                    server, collection, file, representation, site,
+                    server, project_name, file, representation, site,
                     overwrite=False):
         """
             Copy file from 'source_path' to 'target_path' on provider.
@@ -75,7 +75,7 @@ class AbstractProvider:
            arguments for saving progress:
                server (SyncServer): server instance to call update_db on
-               collection (str): name of collection
+               project_name (str): name of project
                file (dict): info about uploaded file (matches structure from db)
                representation (dict): complete repre containing 'file'
                site (str): site name
@@ -87,7 +87,7 @@ class AbstractProvider:
 
     @abc.abstractmethod
     def download_file(self, source_path, local_path,
-                      server, collection, file, representation, site,
+                      server, project_name, file, representation, site,
                       overwrite=False):
         """
            Download file from provider into local system
@@ -99,7 +99,7 @@ class AbstractProvider:
            arguments for saving progress:
                server (SyncServer): server instance to call update_db on
-               collection (str): name of collection
+               project_name (str): name of project
                file (dict): info about uploaded file (matches structure from db)
                representation (dict): complete repre containing 'file'
                site (str): site name
diff --git a/openpype/modules/sync_server/providers/dropbox.py b/openpype/modules/sync_server/providers/dropbox.py
index dfc42fed75..89d6990841 100644
--- a/openpype/modules/sync_server/providers/dropbox.py
+++ b/openpype/modules/sync_server/providers/dropbox.py
@@ -224,7 +224,7 @@ class DropboxHandler(AbstractProvider):
         return False
 
     def upload_file(self, source_path, path,
-                    server, collection, file, representation, site,
+                    server, project_name, file, representation, site,
                     overwrite=False):
         """
             Copy file from 'source_path' to 'target_path' on provider.
@@ -237,7 +237,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -290,7 +290,7 @@ class DropboxHandler(AbstractProvider): cursor.offset = f.tell() server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -301,7 +301,7 @@ class DropboxHandler(AbstractProvider): return path def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download file from provider into local system @@ -313,7 +313,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -337,7 +337,7 @@ class DropboxHandler(AbstractProvider): self.dbx.files_download_to_file(local_path, source_path) server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py index aa7329b104..bef707788b 100644 --- a/openpype/modules/sync_server/providers/gdrive.py +++ b/openpype/modules/sync_server/providers/gdrive.py @@ -251,7 +251,7 @@ class GDriveHandler(AbstractProvider): return folder_id def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -264,7 +264,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -324,7 +324,7 @@ class GDriveHandler(AbstractProvider): while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, - project_name=collection): + project_name=project_name): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) @@ -333,7 +333,7 @@ class GDriveHandler(AbstractProvider): last_tick = time.time() log.debug("Uploaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -358,7 +358,7 @@ class GDriveHandler(AbstractProvider): return response['id'] def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'local_path'. 
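The same `collection` to `project_name` rename threads through every provider below (gdrive, local drive, sftp). The shared contract is that long transfers periodically persist progress through `server.update_db(project_name=...)`. Sketched here under the assumption of a chunked transfer loop, not the real `GDriveHandler` code:

```python
import time


def _chunks():
    # Stand-in for a chunked transfer; yields fractional progress.
    for step in range(1, 5):
        yield step / 4.0


def upload_file(source_path, path, server, project_name, file,
                representation, site, overwrite=False):
    last_tick = None
    for status_val in _chunks():
        if last_tick is None or time.time() - last_tick >= 5:
            last_tick = time.time()
            # Progress is persisted per project, which is why every
            # handler now receives project_name instead of "collection".
            server.update_db(project_name=project_name,
                             new_file_id=None,
                             file=file,
                             representation=representation,
                             site=site,
                             progress=status_val)
```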
@@ -372,7 +372,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -410,7 +410,7 @@ class GDriveHandler(AbstractProvider): while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, - project_name=collection): + project_name=project_name): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) @@ -419,7 +419,7 @@ class GDriveHandler(AbstractProvider): last_tick = time.time() log.debug("Downloaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/sync_server/providers/local_drive.py index 172cb338cf..01bc891d08 100644 --- a/openpype/modules/sync_server/providers/local_drive.py +++ b/openpype/modules/sync_server/providers/local_drive.py @@ -82,7 +82,7 @@ class LocalDriveHandler(AbstractProvider): return editable def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False, direction="Upload"): """ Copies file from 'source_path' to 'target_path' @@ -95,7 +95,7 @@ class LocalDriveHandler(AbstractProvider): thread = threading.Thread(target=self._copy, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, direction) else: if os.path.exists(target_path): @@ -105,13 +105,14 @@ class LocalDriveHandler(AbstractProvider): return os.path.basename(target_path) def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download a file form 'source_path' to 'local_path' """ return self.upload_file(source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, + representation, site, overwrite, direction="Download") def delete_file(self, path): @@ -188,7 +189,7 @@ class LocalDriveHandler(AbstractProvider): except shutil.SameFileError: print("same files, skipping") - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -204,7 +205,7 @@ class LocalDriveHandler(AbstractProvider): status_val = target_file_size / source_file_size last_tick = time.time() log.debug(direction + "ed %d%%." 
% int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/providers/sftp.py b/openpype/modules/sync_server/providers/sftp.py index 49b87b14ec..302ffae3e6 100644 --- a/openpype/modules/sync_server/providers/sftp.py +++ b/openpype/modules/sync_server/providers/sftp.py @@ -222,7 +222,7 @@ class SFTPHandler(AbstractProvider): return os.path.basename(path) def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -235,7 +235,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -256,7 +256,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._upload, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "upload") return os.path.basename(target_path) @@ -267,7 +267,7 @@ class SFTPHandler(AbstractProvider): conn.put(source_path, target_path) def download_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'target_path'. @@ -281,7 +281,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -302,7 +302,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._download, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "download") return os.path.basename(target_path) @@ -425,7 +425,7 @@ class SFTPHandler(AbstractProvider): pysftp.exceptions.ConnectionException): log.warning("Couldn't connect", exc_info=True) - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -446,7 +446,7 @@ class SFTPHandler(AbstractProvider): status_val = target_file_size / source_file_size last_tick = time.time() log.debug(direction + "ed %d%%." 
% int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py index 356a75f99d..97538fcd4e 100644 --- a/openpype/modules/sync_server/sync_server.py +++ b/openpype/modules/sync_server/sync_server.py @@ -14,7 +14,7 @@ from .utils import SyncStatus, ResumableError log = PypeLogger().get_logger("SyncServer") -async def upload(module, collection, file, representation, provider_name, +async def upload(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Upload single 'file' of a 'representation' to 'provider'. @@ -31,7 +31,7 @@ async def upload(module, collection, file, representation, provider_name, Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source db file (dictionary): of file from representation in Mongo representation (dictionary): of representation provider_name (string): gdrive, gdc etc. @@ -47,15 +47,16 @@ async def upload(module, collection, file, representation, provider_name, # thread can do that at a time, upload/download to prepared # structure should be run in parallel remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") try: - local_file_path, remote_file_path = resolve_paths(module, - file_path, collection, remote_site_name, remote_handler + local_file_path, remote_file_path = resolve_paths( + module, file_path, project_name, + remote_site_name, remote_handler ) except Exception as exp: print(exp) @@ -74,27 +75,28 @@ async def upload(module, collection, file, representation, provider_name, local_file_path, remote_file_path, module, - collection, + project_name, file, representation, remote_site_name, True ) - module.handle_alternate_site(collection, representation, remote_site_name, + module.handle_alternate_site(project_name, representation, + remote_site_name, file["_id"], file_id) return file_id -async def download(module, collection, file, representation, provider_name, +async def download(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Downloads file to local folder denoted in representation.Context. 
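+        (The local target is resolved through resolve_paths below, i.e.
+        the representation path with {root} expanded for the active site.)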
Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source file (dictionary) : info about processed file representation (dictionary): repr that 'file' belongs to provider_name (string): 'gdrive' etc @@ -108,20 +110,20 @@ async def download(module, collection, file, representation, provider_name, """ with module.lock: remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") local_file_path, remote_file_path = resolve_paths( - module, file_path, collection, remote_site_name, remote_handler + module, file_path, project_name, remote_site_name, remote_handler ) local_folder = os.path.dirname(local_file_path) os.makedirs(local_folder, exist_ok=True) - local_site = module.get_active_site(collection) + local_site = module.get_active_site(project_name) loop = asyncio.get_running_loop() file_id = await loop.run_in_executor(None, @@ -129,20 +131,20 @@ async def download(module, collection, file, representation, provider_name, remote_file_path, local_file_path, module, - collection, + project_name, file, representation, local_site, True ) - module.handle_alternate_site(collection, representation, local_site, + module.handle_alternate_site(project_name, representation, local_site, file["_id"], file_id) return file_id -def resolve_paths(module, file_path, collection, +def resolve_paths(module, file_path, project_name, remote_site_name=None, remote_handler=None): """ Returns tuple of local and remote file paths with {root} @@ -153,7 +155,7 @@ def resolve_paths(module, file_path, collection, Args: module(SyncServerModule): object to run SyncServerModule API file_path(string): path with {root} - collection(string): project name + project_name(string): project name remote_site_name(string): remote site remote_handler(AbstractProvider): implementation Returns: @@ -164,7 +166,7 @@ def resolve_paths(module, file_path, collection, remote_file_path = remote_handler.resolve_path(file_path) local_handler = lib.factory.get_provider( - 'local_drive', collection, module.get_active_site(collection)) + 'local_drive', project_name, module.get_active_site(project_name)) local_file_path = local_handler.resolve_path(file_path) return local_file_path, remote_file_path @@ -269,8 +271,8 @@ class SyncServerThread(threading.Thread): - gets list of collections in DB - gets list of active remote providers (has configuration, credentials) - - for each collection it looks for representations that should - be synced + - for each project_name it looks for representations that + should be synced - synchronize found collections - update representations - fills error messages for exceptions - waits X seconds and repeat @@ -282,17 +284,17 @@ class SyncServerThread(threading.Thread): import time start_time = time.time() self.module.set_sync_project_settings() # clean cache - collection = None + project_name = None enabled_projects = self.module.get_enabled_projects() - for collection in enabled_projects: - preset = self.module.sync_project_settings[collection] + for project_name in enabled_projects: + preset = self.module.sync_project_settings[project_name] - local_site, remote_site = self._working_sites(collection) + local_site, remote_site = self._working_sites(project_name) if not all([local_site, remote_site]): continue sync_repres = self.module.get_sync_representations( - collection, + project_name, local_site, remote_site ) @@ -310,7 +312,7 @@ class 
SyncServerThread(threading.Thread): remote_provider = \ self.module.get_provider_for_site(site=remote_site) handler = lib.factory.get_provider(remote_provider, - collection, + project_name, remote_site, presets=site_preset) limit = lib.factory.get_provider_batch_limit( @@ -341,7 +343,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( upload(self.module, - collection, + project_name, file, sync, remote_provider, @@ -353,7 +355,7 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, remote_site, - collection + project_name )) processed_file_path.add(file_path) if status == SyncStatus.DO_DOWNLOAD: @@ -361,7 +363,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( download(self.module, - collection, + project_name, file, sync, remote_provider, @@ -373,7 +375,7 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, local_site, - collection + project_name )) processed_file_path.add(file_path) @@ -384,12 +386,12 @@ class SyncServerThread(threading.Thread): return_exceptions=True) for file_id, info in zip(files_created, files_processed_info): - file, representation, site, collection = info + file, representation, site, project_name = info error = None if isinstance(file_id, BaseException): error = str(file_id) file_id = None - self.module.update_db(collection, + self.module.update_db(project_name, file_id, file, representation, @@ -399,7 +401,7 @@ class SyncServerThread(threading.Thread): duration = time.time() - start_time log.debug("One loop took {:.2f}s".format(duration)) - delay = self.module.get_loop_delay(collection) + delay = self.module.get_loop_delay(project_name) log.debug("Waiting for {} seconds to new loop".format(delay)) self.timer = asyncio.create_task(self.run_timer(delay)) await asyncio.gather(self.timer) @@ -458,19 +460,19 @@ class SyncServerThread(threading.Thread): self.timer.cancel() self.timer = None - def _working_sites(self, collection): - if self.module.is_project_paused(collection): + def _working_sites(self, project_name): + if self.module.is_project_paused(project_name): log.debug("Both sites same, skipping") return None, None - local_site = self.module.get_active_site(collection) - remote_site = self.module.get_remote_site(collection) + local_site = self.module.get_active_site(project_name) + remote_site = self.module.get_remote_site(project_name) if local_site == remote_site: log.debug("{}-{} sites same, skipping".format(local_site, remote_site)) return None, None - configured_sites = _get_configured_sites(self.module, collection) + configured_sites = _get_configured_sites(self.module, project_name) if not all([local_site in configured_sites, remote_site in configured_sites]): log.debug("Some of the sites {} - {} is not ".format(local_site, diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index 4027561d22..8fdfab9c2e 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -25,6 +25,8 @@ from .providers import lib from .utils import time_function, SyncStatus, SiteAlreadyPresentError +from openpype.client import get_representations, get_representation_by_id + log = PypeLogger.get_logger("SyncServer") @@ -128,12 +130,12 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.projects_processed = set() """ Start of Public API """ - def add_site(self, collection, representation_id, site_name=None, + def 
add_site(self, project_name, representation_id, site_name=None, force=False): """ Adds new site to representation to be synced. - 'collection' must have synchronization enabled (globally or + 'project_name' must have synchronization enabled (globally or project only) Used as a API endpoint from outside applications (Loader etc). @@ -141,7 +143,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Use 'force' to reset existing site. Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site force (bool): reset site if exists @@ -151,25 +153,25 @@ class SyncServerModule(OpenPypeModule, ITrayModule): not 'force' ValueError - other errors (repre not found, misconfiguration) """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") if not site_name: site_name = self.DEFAULT_SITE - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, force=force) - def remove_site(self, collection, representation_id, site_name, + def remove_site(self, project_name, representation_id, site_name, remove_local_files=False): """ Removes 'site_name' for particular 'representation_id' on - 'collection' + 'project_name' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site remove_local_files (bool): remove only files for 'local_id' @@ -178,15 +180,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: throws ValueError if any issue """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, remove=True) if remove_local_files: - self._remove_local_file(collection, representation_id, site_name) + self._remove_local_file(project_name, representation_id, site_name) def compute_resource_sync_sites(self, project_name): """Get available resource sync sites state for publish process. @@ -333,9 +335,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return alt_site_pairs - def clear_project(self, collection, site_name): + def clear_project(self, project_name, site_name): """ - Clear 'collection' of 'site_name' and its local files + Clear 'project_name' of 'site_name' and its local files Works only on real local sites, not on 'studio' """ @@ -344,16 +346,17 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "files.sites.name": site_name } + # TODO currently not possible to replace with get_representations representations = list( - self.connection.database[collection].find(query)) + self.connection.database[project_name].find(query)) if not representations: self.log.debug("No repre found") return for repre in representations: - self.remove_site(collection, repre.get("_id"), site_name, True) + self.remove_site(project_name, repre.get("_id"), site_name, True) - def create_validate_project_task(self, collection, site_name): + def create_validate_project_task(self, project_name, site_name): """Adds metadata about project files validation on a queue. 
This process will loop through all representation and check if @@ -370,33 +373,28 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ task = { "type": "validate", - "project_name": collection, - "func": lambda: self.validate_project(collection, site_name, + "project_name": project_name, + "func": lambda: self.validate_project(project_name, site_name, reset_missing=True) } - self.projects_processed.add(collection) + self.projects_processed.add(project_name) self.long_running_tasks.append(task) - def validate_project(self, collection, site_name, reset_missing=False): - """Validate 'collection' of 'site_name' and its local files + def validate_project(self, project_name, site_name, reset_missing=False): + """Validate 'project_name' of 'site_name' and its local files If file present and not marked with a 'site_name' in DB, DB is updated with site name and file modified date. Args: - collection (string): project name + project_name (string): project name site_name (string): active site name reset_missing (bool): if True reset site in DB if missing physically """ - self.log.debug("Validation of {} for {} started".format(collection, + self.log.debug("Validation of {} for {} started".format(project_name, site_name)) - query = { - "type": "representation" - } - - representations = list( - self.connection.database[collection].find(query)) + representations = list(get_representations(project_name)) if not representations: self.log.debug("No repre found") return @@ -416,7 +414,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): continue file_path = repre_file.get("path", "") - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file_path) @@ -428,14 +426,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "Adding site {} for {}".format(site_name, repre_id)) - query = { - "_id": repre_id - } created_dt = datetime.fromtimestamp( os.path.getmtime(local_file_path)) elem = {"name": site_name, "created_dt": created_dt} - self._add_site(collection, query, repre, elem, + self._add_site(project_name, repre, elem, site_name=site_name, file_id=repre_file["_id"], force=True) @@ -445,41 +440,42 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.log.debug("Resetting site {} for {}". format(site_name, repre_id)) self.reset_site_on_representation( - collection, repre_id, site_name=site_name, + project_name, repre_id, site_name=site_name, file_id=repre_file["_id"]) sites_reset += 1 if sites_added % 100 == 0: self.log.debug("Sites added {}".format(sites_added)) - self.log.debug("Validation of {} for {} ended".format(collection, + self.log.debug("Validation of {} for {} ended".format(project_name, site_name)) self.log.info("Sites added {}, sites reset {}".format(sites_added, reset_missing)) - def pause_representation(self, collection, representation_id, site_name): + def pause_representation(self, project_name, representation_id, site_name): """ Sets 'representation_id' as paused, eg. no syncing should be happening on it. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. 
""" log.info("Pausing SyncServer for {}".format(representation_id)) self._paused_representations.add(representation_id) - self.reset_site_on_representation(collection, representation_id, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=True) - def unpause_representation(self, collection, representation_id, site_name): + def unpause_representation(self, project_name, + representation_id, site_name): """ Sets 'representation_id' as unpaused. Does not fail or warn if repre wasn't paused. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. """ @@ -489,7 +485,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): except KeyError: pass # self.paused_representations is not persistent - self.reset_site_on_representation(collection, representation_id, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=False) def is_representation_paused(self, representation_id, @@ -520,7 +516,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): happening on all representation inside. Args: - project_name (string): collection name + project_name (string): project_name name """ log.info("Pausing SyncServer for {}".format(project_name)) self._paused_projects.add(project_name) @@ -532,7 +528,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Does not fail or warn if project wasn't paused. Args: - project_name (string): collection name + project_name (string): """ log.info("Unpausing SyncServer for {}".format(project_name)) try: @@ -545,7 +541,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns if 'project_name' is paused or not. Args: - project_name (string): collection name + project_name (string): check_parents (bool): check if server itself is not paused Returns: @@ -944,8 +940,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return True return False - def handle_alternate_site(self, collection, representation, processed_site, - file_id, synced_file_id): + def handle_alternate_site(self, project_name, representation, + processed_site, file_id, synced_file_id): """ For special use cases where one site vendors another. @@ -958,7 +954,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): same location >> file is accesible on 'sftp' site right away. 
Args: - collection (str): name of project + project_name (str): name of project representation (dict) processed_site (str): real site_name of published/uploaded file file_id (ObjectId): DB id of file handled @@ -982,26 +978,112 @@ class SyncServerModule(OpenPypeModule, ITrayModule): alternate_sites = set(alternate_sites) for alt_site in alternate_sites: - query = { - "_id": representation["_id"] - } elem = {"name": alt_site, "created_dt": datetime.now(), "id": synced_file_id} self.log.debug("Adding alternate {} to {}".format( alt_site, representation["_id"])) - self._add_site(collection, query, + self._add_site(project_name, representation, elem, alt_site, file_id=file_id, force=True) + def get_repre_info_for_versions(self, project_name, version_ids, + active_site, remote_site): + """Returns representation documents for versions and sites combi + + Args: + project_name (str) + version_ids (list): of version[_id] + active_site (string): 'local', 'studio' etc + remote_site (string): dtto + Returns: + + """ + self.connection.Session["AVALON_PROJECT"] = project_name + query = [ + {"$match": {"parent": {"$in": version_ids}, + "type": "representation", + "files.sites.name": {"$exists": 1}}}, + {"$unwind": "$files"}, + {'$addFields': { + 'order_local': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', active_site]} + } + } + }}, + {'$addFields': { + 'order_remote': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', remote_site]} + } + } + }}, + {'$addFields': { + 'progress_local': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_local.progress"}, + "$order_local.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': "$order_local.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$addFields': { + 'progress_remote': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_remote.progress"}, + "$order_remote.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': "$order_remote.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$group': { # first group by repre + '_id': '$_id', + 'parent': {'$first': '$parent'}, + 'avail_ratio_local': { + '$first': { + '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] + } + }, + 'avail_ratio_remote': { + '$first': { + '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] + } + } + }}, + {'$group': { # second group by parent, eg version_id + '_id': '$parent', + 'repre_count': {'$sum': 1}, # total representations + # fully available representation for site + 'avail_repre_local': {'$sum': "$avail_ratio_local"}, + 'avail_repre_remote': {'$sum': "$avail_ratio_remote"}, + }}, + ] + # docs = list(self.connection.aggregate(query)) + return self.connection.aggregate(query) + """ End of Public API """ - def get_local_file_path(self, collection, site_name, file_path): + def get_local_file_path(self, project_name, site_name, file_path): """ Externalized for app """ - handler = LocalDriveHandler(collection, site_name) + handler = LocalDriveHandler(project_name, site_name) local_file_path = handler.resolve_path(file_path) return local_file_path @@ -1288,7 +1370,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return sites.get(site, 'N/A') @time_function - def get_sync_representations(self, collection, active_site, remote_site): + def get_sync_representations(self, project_name, active_site, remote_site): """ Get representations that should be synced, these could be recognised by presence of document in 'files.sites', where key 
is @@ -1299,8 +1381,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): better performance. Goal is to get as few representations as possible. Args: - collection (string): name of collection (in most cases matches - project name + project_name (string): active_site (string): identifier of current active site (could be 'local_0' when working from home, 'studio' when working in the studio (default) @@ -1309,10 +1390,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: (list) of dictionaries """ - log.debug("Check representations for : {}".format(collection)) - self.connection.Session["AVALON_PROJECT"] = collection + log.debug("Check representations for : {}".format(project_name)) + self.connection.Session["AVALON_PROJECT"] = project_name # retry_cnt - number of attempts to sync specific file before giving up - retries_arr = self._get_retries_arr(collection) + retries_arr = self._get_retries_arr(project_name) match = { "type": "representation", "$or": [ @@ -1449,14 +1530,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return SyncStatus.DO_NOTHING - def update_db(self, collection, new_file_id, file, representation, + def update_db(self, project_name, new_file_id, file, representation, site, error=None, progress=None, priority=None): """ Update 'provider' portion of records in DB with success (file_id) or error (exception) Args: - collection (string): name of project - force to db connection as + project_name (string): name of project - force to db connection as each file might come from different collection new_file_id (string): file (dictionary): info about processed file (pulled from DB) @@ -1499,7 +1580,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if file_id: arr_filter.append({'f._id': ObjectId(file_id)}) - self.connection.database[collection].update_one( + self.connection.database[project_name].update_one( query, update, upsert=True, @@ -1562,7 +1643,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return -1, None - def reset_site_on_representation(self, collection, representation_id, + def reset_site_on_representation(self, project_name, representation_id, side=None, file_id=None, site_name=None, remove=False, pause=None, force=False): """ @@ -1579,7 +1660,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Should be used when repre should be synced to new site. Args: - collection (string): name of project (eg. collection) in DB + project_name (string): name of project (eg. collection) in DB representation_id(string): _id of representation file_id (string): file _id in representation side (string): local or remote side @@ -1593,20 +1674,18 @@ class SyncServerModule(OpenPypeModule, ITrayModule): not 'force' ValueError - other errors (repre not found, misconfiguration) """ - query = { - "_id": ObjectId(representation_id) - } - - representation = self.connection.database[collection].find_one(query) + representation = get_representation_by_id(project_name, + representation_id) if not representation: raise ValueError("Representation {} not found in {}". 
- format(representation_id, collection)) + format(representation_id, project_name)) + if side and site_name: raise ValueError("Misconfiguration, only one of side and " + "site_name arguments should be passed.") - local_site = self.get_active_site(collection) - remote_site = self.get_remote_site(collection) + local_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) if side: if side == 'local': @@ -1617,37 +1696,43 @@ class SyncServerModule(OpenPypeModule, ITrayModule): elem = {"name": site_name} if file_id: # reset site for particular file - self._reset_site_for_file(collection, query, + self._reset_site_for_file(project_name, representation_id, elem, file_id, site_name) elif side: # reset site for whole representation - self._reset_site(collection, query, elem, site_name) + self._reset_site(project_name, representation_id, elem, site_name) elif remove: # remove site for whole representation - self._remove_site(collection, query, representation, site_name) + self._remove_site(project_name, + representation, site_name) elif pause is not None: - self._pause_unpause_site(collection, query, + self._pause_unpause_site(project_name, representation, site_name, pause) else: # add new site to all files for representation - self._add_site(collection, query, representation, elem, site_name, + self._add_site(project_name, representation, elem, site_name, force=force) - def _update_site(self, collection, query, update, arr_filter): + def _update_site(self, project_name, representation_id, + update, arr_filter): """ Auxiliary method to call update_one function on DB Used for refactoring ugly reset_provider_for_file """ - self.connection.database[collection].update_one( + query = { + "_id": ObjectId(representation_id) + } + + self.connection.database[project_name].update_one( query, update, upsert=True, array_filters=arr_filter ) - def _reset_site_for_file(self, collection, query, + def _reset_site_for_file(self, project_name, representation_id, elem, file_id, site_name): """ Resets 'site_name' for 'file_id' on representation in 'query' on - 'collection' + 'project_name' """ update = { "$set": {"files.$[f].sites.$[s]": elem} @@ -1660,9 +1745,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _reset_site(self, collection, query, elem, site_name): + def _reset_site(self, project_name, representation_id, elem, site_name): """ Resets 'site_name' for all files of representation in 'query' """ @@ -1674,9 +1759,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _remove_site(self, collection, query, representation, site_name): + def _remove_site(self, project_name, representation, site_name): """ Removes 'site_name' for 'representation' in 'query' @@ -1698,10 +1783,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): } arr_filter = [] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) - def _pause_unpause_site(self, collection, query, - representation, site_name, pause): + def _pause_unpause_site(self, project_name, representation, + site_name, pause): """ Pauses/unpauses all files for 'representation' based on 'pause' @@ -1733,12 +1819,13 @@ class 
SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) - def _add_site(self, collection, query, representation, elem, site_name, + def _add_site(self, project_name, representation, elem, site_name, force=False, file_id=None): """ - Adds 'site_name' to 'representation' on 'collection' + Adds 'site_name' to 'representation' on 'project_name' Args: representation (dict) @@ -1746,10 +1833,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Use 'force' to remove existing or raises ValueError """ + representation_id = representation["_id"] reset_existing = False files = representation.get("files", []) if not files: - log.debug("No files for {}".format(representation["_id"])) + log.debug("No files for {}".format(representation_id)) return for repre_file in files: @@ -1759,7 +1847,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): for site in repre_file.get("sites"): if site["name"] == site_name: if force or site.get("error"): - self._reset_site_for_file(collection, query, + self._reset_site_for_file(project_name, + representation_id, elem, repre_file["_id"], site_name) reset_existing = True @@ -1785,14 +1874,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, + update, arr_filter) - def _remove_local_file(self, collection, representation_id, site_name): + def _remove_local_file(self, project_name, representation_id, site_name): """ Removes all local files for 'site_name' of 'representation_id' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site @@ -1808,21 +1898,17 @@ class SyncServerModule(OpenPypeModule, ITrayModule): provider_name = self.get_provider_for_site(site=site_name) if provider_name == 'local_drive': - query = { - "_id": ObjectId(representation_id) - } - - representation = list( - self.connection.database[collection].find(query)) + representation = get_representation_by_id(project_name, + representation_id, + fields=["files"]) if not representation: self.log.debug("No repre {} found".format( representation_id)) return - representation = representation.pop() local_file_path = '' for file in representation.get("files"): - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file.get("path", "") ) diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py index 6d1e85c17a..629c4cbbf1 100644 --- a/openpype/modules/sync_server/tray/models.py +++ b/openpype/modules/sync_server/tray/models.py @@ -11,6 +11,7 @@ from openpype.tools.utils.delegates import pretty_timestamp from openpype.lib import PypeLogger from openpype.api import get_local_site_id +from openpype.client import get_representation_by_id from . import lib @@ -440,7 +441,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): full text filtering. Allows pagination, most of heavy lifting is being done on DB side. - Single model matches to single collection. When project is changed, + Single model matches to single project. When project is changed, model is reset and refreshed. 
Args: @@ -919,11 +920,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): repre_id = self.data(index, Qt.UserRole) - representation = list(self.dbcon.find({"type": "representation", - "_id": repre_id})) + representation = get_representation_by_id(self.project, repre_id) if representation: self.sync_server.update_db(self.project, None, None, - representation.pop(), + representation, get_local_site_id(), priority=value) self.is_editing = False @@ -1357,11 +1357,10 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): file_id = self.data(index, Qt.UserRole) updated_file = None - # conversion from cursor to list - representations = list(self.dbcon.find({"type": "representation", - "_id": self._id})) + representation = get_representation_by_id(self.project, self._id) + if not representation: + return - representation = representations.pop() for repre_file in representation["files"]: if repre_file["_id"] == file_id: updated_file = repre_file diff --git a/openpype/modules/timers_manager/plugins/publish/start_timer.py b/openpype/modules/timers_manager/plugins/publish/start_timer.py new file mode 100644 index 0000000000..6408327ca1 --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/start_timer.py @@ -0,0 +1,39 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + +import pyblish.api + +from openpype.pipeline import legacy_io + + +class StartTimer(pyblish.api.ContextPlugin): + label = "Start Timer" + order = pyblish.api.IntegratorOrder + 1 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + project_name = legacy_io.active_project() + asset_name = legacy_io.Session.get("AVALON_ASSET") + task_name = legacy_io.Session.get("AVALON_TASK") + if not project_name or not asset_name or not task_name: + self.log.info(( + "Current context does not contain all" + " required information to start a timer." 
+ )) + return + timers_manager.start_timer_with_webserver( + project_name, asset_name, task_name, self.log + ) diff --git a/openpype/modules/timers_manager/plugins/publish/stop_timer.py b/openpype/modules/timers_manager/plugins/publish/stop_timer.py new file mode 100644 index 0000000000..a8674ff2ca --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/stop_timer.py @@ -0,0 +1,27 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + + +import pyblish.api + + +class StopTimer(pyblish.api.ContextPlugin): + label = "Stop Timer" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + timers_manager.stop_timer_with_webserver(self.log) diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 3cf1614316..93332ace4f 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -2,16 +2,19 @@ import os import platform +from openpype.client import get_asset_by_name from openpype.modules import OpenPypeModule from openpype_interfaces import ( ITrayService, - ILaunchHookPaths + ILaunchHookPaths, + IPluginPaths ) from openpype.lib.events import register_event_callback -from openpype.pipeline import AvalonMongoDB from .exceptions import InvalidContextError +TIMER_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + class ExampleTimersManagerConnector: """Timers manager can handle timers of multiple modules/addons. @@ -33,6 +36,7 @@ class ExampleTimersManagerConnector: } ``` """ + # Not needed at all def __init__(self, module): # Store timer manager module to be able call it's methods when needed @@ -72,7 +76,12 @@ class ExampleTimersManagerConnector: self._timers_manager_module.timer_stopped(self._module.id) -class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): +class TimersManager( + OpenPypeModule, + ITrayService, + ILaunchHookPaths, + IPluginPaths +): """ Handles about Timers. Should be able to start/stop all timers at once. 
@@ -177,11 +186,19 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): def get_launch_hook_paths(self): """Implementation of `ILaunchHookPaths`.""" + return os.path.join( - os.path.dirname(os.path.abspath(__file__)), + TIMER_MODULE_DIR, "launch_hooks" ) + def get_plugin_paths(self): + """Implementation of `IPluginPaths`.""" + + return { + "publish": [os.path.join(TIMER_MODULE_DIR, "plugins", "publish")] + } + @staticmethod def get_timer_data_for_context( project_name, asset_name, task_name, logger=None @@ -197,22 +214,13 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): " Project: \"{}\" Asset: \"{}\" Task: \"{}\"" ).format(str(project_name), str(asset_name), str(task_name))) - dbconn = AvalonMongoDB() - dbconn.install() - dbconn.Session["AVALON_PROJECT"] = project_name - - asset_doc = dbconn.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": True, - "data.parents": True - } + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["_id", "name", "data.tasks", "data.parents"] ) + if not asset_doc: - dbconn.uninstall() raise InvalidContextError(( "Asset \"{}\" not found in project \"{}\"" ).format(asset_name, project_name)) @@ -220,7 +228,6 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): asset_data = asset_doc.get("data") or {} asset_tasks = asset_data.get("tasks") or {} if task_name not in asset_tasks: - dbconn.uninstall() raise InvalidContextError(( "Task \"{}\" not found on asset \"{}\" in project \"{}\"" ).format(task_name, asset_name, project_name)) @@ -238,9 +245,10 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): hierarchy_items = asset_data.get("parents") or [] hierarchy_items.append(asset_name) - dbconn.uninstall() return { "project_name": project_name, + "asset_id": str(asset_doc["_id"]), + "asset_name": asset_doc["name"], "task_name": task_name, "task_type": task_type, "hierarchy": hierarchy_items @@ -397,6 +405,7 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): logger (logging.Logger): Logger object. Using 'print' if not passed. """ + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: msg = "Couldn't find webserver url" @@ -424,6 +433,36 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): return requests.post(rest_api_url, json=data) + @staticmethod + def stop_timer_with_webserver(logger=None): + """Prepared method for calling stop timers on REST api. + + Args: + logger (logging.Logger): Logger used for logging messages. 
+ """ + + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + if not webserver_url: + msg = "Couldn't find webserver url" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) + try: + import requests + except Exception: + msg = "Couldn't start timer ('requests' is not available)" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + return requests.post(rest_api_url) + def on_host_install(self, host, host_name, project_name): self.log.debug("Installing task changed callback") register_event_callback("taskChanged", self._on_host_task_change) diff --git a/openpype/pipeline/anatomy.py b/openpype/pipeline/anatomy.py index 73081f18fb..08db4749b3 100644 --- a/openpype/pipeline/anatomy.py +++ b/openpype/pipeline/anatomy.py @@ -380,6 +380,19 @@ class AnatomyTemplateResult(TemplateResult): ) return self.__class__(tmp, self.rootless) + def normalized(self): + """Convert to normalized path.""" + + tmp = TemplateResult( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + class AnatomyTemplates(TemplatesDict): inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})") diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index e719e46514..5f763cd249 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -10,10 +10,22 @@ import pyblish.api from pyblish.lib import MessageHandler import openpype +from openpype.client import ( + get_project, + get_asset_by_id, + get_asset_by_name, + version_is_latest, +) from openpype.modules import load_modules, ModulesManager from openpype.settings import get_project_settings -from openpype.lib import filter_pyblish_plugins + +from .publish.lib import filter_pyblish_plugins from .anatomy import Anatomy +from .template_data import get_template_data_with_names +from .workfile import ( + get_workfile_template_key, + get_custom_workfile_template_by_string_context, +) from . import ( legacy_io, register_loader_plugin_path, @@ -240,29 +252,7 @@ def registered_host(): def deregister_host(): - _registered_host["_"] = default_host() - - -def default_host(): - """A default host, in place of anything better - - This may be considered as reference for the - interface a host must implement. It also ensures - that the system runs, even when nothing is there - to support it. - - """ - - host = types.ModuleType("defaultHost") - - def ls(): - return list() - - host.__dict__.update({ - "ls": ls - }) - - return host + _registered_host["_"] = None def debug_host(): @@ -304,3 +294,154 @@ def debug_host(): }) return host + + +def get_current_project(fields=None): + """Helper function to get project document based on global Session. + + This function should be called only in process where host is installed. + + Returns: + dict: Project document. + None: Project is not set. + """ + + project_name = legacy_io.active_project() + return get_project(project_name, fields=fields) + + +def get_current_project_asset(asset_name=None, asset_id=None, fields=None): + """Helper function to get asset document based on global Session. + + This function should be called only in process where host is installed. + + Asset is found out based on passed asset name or id (not both). Asset name + is not used for filtering if asset id is passed. 
diff --git a/openpype/pipeline/anatomy.py b/openpype/pipeline/anatomy.py index 73081f18fb..08db4749b3 100644 --- a/openpype/pipeline/anatomy.py +++ b/openpype/pipeline/anatomy.py @@ -380,6 +380,19 @@ class AnatomyTemplateResult(TemplateResult): ) return self.__class__(tmp, self.rootless) + def normalized(self): + """Convert to normalized path.""" + + tmp = TemplateResult( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + class AnatomyTemplates(TemplatesDict): inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})") diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index e719e46514..5f763cd249 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -10,10 +10,22 @@ import pyblish.api from pyblish.lib import MessageHandler import openpype +from openpype.client import ( + get_project, + get_asset_by_id, + get_asset_by_name, + version_is_latest, +) from openpype.modules import load_modules, ModulesManager from openpype.settings import get_project_settings -from openpype.lib import filter_pyblish_plugins + +from .publish.lib import filter_pyblish_plugins from .anatomy import Anatomy +from .template_data import get_template_data_with_names +from .workfile import ( + get_workfile_template_key, + get_custom_workfile_template_by_string_context, +) from . import ( legacy_io, register_loader_plugin_path, @@ -240,29 +252,7 @@ def registered_host(): def deregister_host(): - _registered_host["_"] = default_host() - - -def default_host(): - """A default host, in place of anything better - - This may be considered as reference for the - interface a host must implement. It also ensures - that the system runs, even when nothing is there - to support it. - - """ - - host = types.ModuleType("defaultHost") - - def ls(): - return list() - - host.__dict__.update({ - "ls": ls - }) - - return host + _registered_host["_"] = None def debug_host(): @@ -304,3 +294,154 @@ def debug_host(): }) return host + + +def get_current_project(fields=None): + """Helper function to get project document based on global Session. + + This function should be called only in a process where a host is installed. + + Returns: + dict: Project document. + None: Project is not set. + """ + + project_name = legacy_io.active_project() + return get_project(project_name, fields=fields) + + +def get_current_project_asset(asset_name=None, asset_id=None, fields=None): + """Helper function to get asset document based on global Session. + + This function should be called only in a process where a host is installed. + + The asset is found based on the passed asset name or id (not both). The + asset name is not used for filtering if the asset id is passed. When both + asset name and id are missing, the asset name from the current process is used. + + Args: + asset_name (str): Name of asset used for filter. + asset_id (Union[str, ObjectId]): Asset document id. If entered then + is used as the only filter. + fields (Union[List[str], None]): Limit returned data of asset documents + to specific keys. + + Returns: + dict: Asset document. + None: Asset is not set or does not exist. + """ + + project_name = legacy_io.active_project() + if asset_id: + return get_asset_by_id(project_name, asset_id, fields=fields) + + if not asset_name: + asset_name = legacy_io.Session.get("AVALON_ASSET") + # Skip if is not set even on context + if not asset_name: + return None + return get_asset_by_name(project_name, asset_name, fields=fields) + + +def is_representation_from_latest(representation): + """Return whether the representation is from latest version + + Args: + representation (dict): The representation document from the database. + + Returns: + bool: Whether the representation is of latest version. + """ + + project_name = legacy_io.active_project() + return version_is_latest(project_name, representation["parent"]) + + +def get_template_data_from_session(session=None, system_settings=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + system_settings (Union[Dict[str, Any], Any]): Prepared system settings. + Queried automatically if not passed. + + Returns: + Dict[str, Any]: All available data from session. + """ + + if session is None: + session = legacy_io.Session + + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] + + return get_template_data_with_names( + project_name, asset_name, task_name, host_name, system_settings + ) + + +def get_workdir_from_session(session=None, template_key=None): + """Workdir path calculated from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + template_key (str): Prepared template key from which workdir is + calculated. + + Returns: + str: Workdir path. + """ + + if session is None: + session = legacy_io.Session + project_name = session["AVALON_PROJECT"] + host_name = session["AVALON_APP"] + anatomy = Anatomy(project_name) + template_data = get_template_data_from_session(session) + anatomy_filled = anatomy.format(template_data) + + if not template_key: + task_type = template_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, + host_name, + project_name=project_name + ) + path = anatomy_filled[template_key]["folder"] + if path: + path = os.path.normpath(path) + return path + + +def get_custom_workfile_template_from_session( + session=None, project_settings=None +): + """Filter and fill workfile template profiles by current context. + + The current context is defined by `legacy_io.Session`. That's why this + function should be used only inside a host where the context is set and + stable. + + Args: + session (Union[None, Dict[str, str]]): Session from which are taken + data. + project_settings(Dict[str, Any]): Template profiles from settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.)
+ """ + + if session is None: + session = legacy_io.Session + + return get_custom_workfile_template_by_string_context( + session["AVALON_PROJECT"], + session["AVALON_ASSET"], + session["AVALON_TASK"], + session["AVALON_APP"], + project_settings=project_settings + ) diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 1beeb4267b..bd196ccfd1 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -7,6 +7,7 @@ from .creator_plugins import ( BaseCreator, Creator, AutoCreator, + HiddenCreator, discover_creator_plugins, discover_legacy_creator_plugins, @@ -35,6 +36,7 @@ __all__ = ( "BaseCreator", "Creator", "AutoCreator", + "HiddenCreator", "discover_creator_plugins", "discover_legacy_creator_plugins", diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index aecdb04635..eaaed39357 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -6,6 +6,7 @@ import inspect from uuid import uuid4 from contextlib import contextmanager +from openpype.client import get_assets from openpype.host import INewPublisher from openpype.pipeline import legacy_io from openpype.pipeline.mongodb import ( @@ -29,6 +30,7 @@ UpdateData = collections.namedtuple("UpdateData", ["instance", "changes"]) class ImmutableKeyError(TypeError): """Accessed key is immutable so does not allow changes or removements.""" + def __init__(self, key, msg=None): self.immutable_key = key if not msg: @@ -40,6 +42,7 @@ class ImmutableKeyError(TypeError): class HostMissRequiredMethod(Exception): """Host does not have implemented required functions for creation.""" + def __init__(self, host, missing_methods): self.missing_methods = missing_methods self.host = host @@ -66,6 +69,7 @@ class InstanceMember: TODO: Implement and use! """ + def __init__(self, instance, name): self.instance = instance @@ -94,6 +98,7 @@ class AttributeValues: values(dict): Values after possible conversion. origin_data(dict): Values loaded from host before conversion. """ + def __init__(self, attr_defs, values, origin_data=None): from openpype.lib.attribute_definitions import UnknownDef @@ -174,6 +179,10 @@ class AttributeValues: output = {} for key in self._data: output[key] = self[key] + + for key, attr_def in self._attr_defs_by_key.items(): + if key not in output: + output[key] = attr_def.default return output @staticmethod @@ -196,6 +205,7 @@ class CreatorAttributeValues(AttributeValues): Args: instance (CreatedInstance): Instance for which are values hold. """ + def __init__(self, instance, *args, **kwargs): self.instance = instance super(CreatorAttributeValues, self).__init__(*args, **kwargs) @@ -211,6 +221,7 @@ class PublishAttributeValues(AttributeValues): publish_attributes(PublishAttributes): Wrapper for multiple publish attributes is used as parent object. """ + def __init__(self, publish_attributes, *args, **kwargs): self.publish_attributes = publish_attributes super(PublishAttributeValues, self).__init__(*args, **kwargs) @@ -232,6 +243,7 @@ class PublishAttributes: attr_plugins(list): List of publish plugins that may have defined attribute definitions. """ + def __init__(self, parent, origin_data, attr_plugins=None): self.parent = parent self._origin_data = copy.deepcopy(origin_data) @@ -270,6 +282,7 @@ class PublishAttributes: key(str): Plugin name. default: Default value if plugin was not found. 
""" + if key not in self._data: return default @@ -287,11 +300,13 @@ class PublishAttributes: def plugin_names_order(self): """Plugin names order by their 'order' attribute.""" + for name in self._plugin_names_order: yield name def data_to_store(self): """Convert attribute values to "data to store".""" + output = {} for key, attr_value in self._data.items(): output[key] = attr_value.data_to_store() @@ -299,6 +314,7 @@ class PublishAttributes: def changes(self): """Return changes per each key.""" + changes = {} for key, attr_val in self._data.items(): attr_changes = attr_val.changes() @@ -314,6 +330,7 @@ class PublishAttributes: def set_publish_plugins(self, attr_plugins): """Set publish plugins attribute definitions.""" + self._plugin_names_order = [] self._missing_plugins = [] self.attr_plugins = attr_plugins or [] @@ -365,6 +382,7 @@ class CreatedInstance: `openpype.pipeline.registered_host`. new(bool): Is instance new. """ + # Keys that can't be changed or removed from data after loading using # creator. # - 'creator_attributes' and 'publish_attributes' can change values of @@ -566,6 +584,7 @@ class CreatedInstance: @property def id(self): """Instance identifier.""" + return self._data["instance_id"] @property @@ -574,10 +593,12 @@ class CreatedInstance: Access to data is needed to modify values. """ + return self def changes(self): """Calculate and return changes.""" + changes = {} new_keys = set() for key, new_value in self._data.items(): @@ -716,6 +737,7 @@ class CreateContext: self.manual_creators = {} self.publish_discover_result = None + self.publish_plugins_mismatch_targets = [] self.publish_plugins = [] self.plugins_with_defs = [] self._attr_plugins_by_family = {} @@ -838,6 +860,7 @@ class CreateContext: discover_result = DiscoverResult() plugins_with_defs = [] plugins_by_targets = [] + plugins_mismatch_targets = [] if discover_publish_plugins: discover_result = publish_plugins_discover() publish_plugins = discover_result.plugins @@ -847,11 +870,19 @@ class CreateContext: plugins_by_targets = pyblish.logic.plugins_by_targets( publish_plugins, list(targets) ) + # Collect plugins that can have attribute definitions for plugin in publish_plugins: if OpenPypePyblishPluginMixin in inspect.getmro(plugin): plugins_with_defs.append(plugin) + plugins_mismatch_targets = [ + plugin + for plugin in publish_plugins + if plugin not in plugins_by_targets + ] + + self.publish_plugins_mismatch_targets = plugins_mismatch_targets self.publish_discover_result = discover_result self.publish_plugins = plugins_by_targets self.plugins_with_defs = plugins_with_defs @@ -1052,15 +1083,10 @@ class CreateContext: for asset_name in task_names_by_asset_name.keys() if asset_name is not None ] - asset_docs = list(self.dbcon.find( - { - "type": "asset", - "name": {"$in": asset_names} - }, - { - "name": True, - "data.tasks": True - } + asset_docs = list(get_assets( + self.project_name, + asset_names=asset_names, + fields=["name", "data.tasks"] )) task_names_by_asset_name = {} diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 91b9d80234..9a5d559774 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -1,3 +1,4 @@ +import os import copy from abc import ( @@ -7,10 +8,8 @@ from abc import ( ) import six -from openpype.lib import ( - get_subset_name_with_asset_doc, - set_plugin_attributes_from_settings, -) +from openpype.settings import get_system_settings, get_project_settings +from openpype.lib import 
get_subset_name_with_asset_doc from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -102,6 +101,10 @@ class BaseCreator: return self.create_context.project_name + @property + def host(self): + return self.create_context.host + def get_group_label(self): """Group label under which are instances grouped in UI. @@ -412,6 +415,12 @@ class Creator(BaseCreator): return self.pre_create_attr_defs +class HiddenCreator(BaseCreator): + @abstractmethod + def create(self, instance_data, source_data): + pass + + class AutoCreator(BaseCreator): """Creator which is automatically triggered without user interaction. @@ -428,8 +437,24 @@ def discover_creator_plugins(): def discover_legacy_creator_plugins(): + from openpype.lib import Logger + + log = Logger.get_logger("CreatorDiscover") + plugins = discover(LegacyCreator) - set_plugin_attributes_from_settings(plugins, LegacyCreator) + project_name = os.environ.get("AVALON_PROJECT") + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to creator {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index 46e0e3d663..2764b3cb95 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -5,6 +5,7 @@ Renamed classes and functions - 'create' -> 'legacy_create' """ +import os import logging import collections @@ -37,6 +38,48 @@ class LegacyCreator(object): self.data.update(data or {}) + @classmethod + def apply_settings(cls, project_settings, system_settings): + """Apply OpenPype settings to a plugin class.""" + + host_name = os.environ.get("AVALON_APP") + plugin_type = "create" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + def process(self): pass
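The new 'apply_settings' classmethods replace the removed 'set_plugin_attributes_from_settings' helper: each plugin class now pulls its own options from the settings dictionaries. A minimal sketch of the effect, assuming 'AVALON_APP' is set to "maya" and using a hypothetical plugin class and settings payload (both invented for illustration):

import os

from openpype.pipeline.create.legacy_create import LegacyCreator


class CreateExample(LegacyCreator):
    # Hypothetical plugin; 'defaults' is an arbitrary class attribute
    defaults = ["Main"]


os.environ["AVALON_APP"] = "maya"
project_settings = {
    "maya": {
        "create": {
            "CreateExample": {"enabled": False, "defaults": ["Key"]}
        }
    }
}
CreateExample.apply_settings(project_settings, system_settings={})
assert CreateExample.active is False  # 'enabled': False disables the plugin
assert CreateExample.defaults == ["Key"]  # other options become attributes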
diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index a30a2188a4..8cba8d8217 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -1,6 +1,8 @@ +import os import logging -from openpype.lib import set_plugin_attributes_from_settings +from openpype.settings import get_system_settings, get_project_settings +from openpype.pipeline import legacy_io from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -37,6 +39,46 @@ class LoaderPlugin(list): def __init__(self, context): self.fname = self.filepath_from_context(context) + @classmethod + def apply_settings(cls, project_settings, system_settings): + host_name = os.environ.get("AVALON_APP") + plugin_type = "load" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + @classmethod def get_representations(cls): return cls.representations @@ -110,9 +152,25 @@ class SubsetLoaderPlugin(LoaderPlugin): pass -def discover_loader_plugins(): +def discover_loader_plugins(project_name=None): + from openpype.lib import Logger + + log = Logger.get_logger("LoaderDiscover") plugins = discover(LoaderPlugin) - set_plugin_attributes_from_settings(plugins, LoaderPlugin) + if not project_name: + project_name = legacy_io.active_project() + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to loader {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 2c213aff6f..fe5102353d 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -4,8 +4,10 @@ import copy import getpass import logging import inspect +import collections import numbers +from openpype.host import ILoadHost from openpype.client import ( get_project, get_assets, @@ -15,6 +17,7 @@ get_last_version_by_subset_id, get_hero_version_by_subset_id, get_version_by_name, + get_last_versions, get_representations, get_representation_by_id, get_representation_by_name, @@ -28,6 +31,11 @@ from openpype.pipeline import ( log = logging.getLogger(__name__) +ContainersFilterResult = collections.namedtuple( + "ContainersFilterResult", + ["latest", "outdated", "not_found", "invalid"] +) + class HeroVersionType(object): def __init__(self, version): @@ -685,3 +693,164 @@ def loaders_from_representation(loaders, representation): context = get_representation_context(representation) return
loaders_from_repre_context(loaders, context) + + +def any_outdated_containers(host=None, project_name=None): + """Check if there are any outdated containers in scene.""" + + if get_outdated_containers(host, project_name): + return True + return False + + +def get_outdated_containers(host=None, project_name=None): + """Collect outdated containers from host scene. + + Currently registered host and project in global session are used if + arguments are not passed. + + Args: + host (ModuleType): Host implementation with 'ls' function available. + project_name (str): Name of project in which context we are. + """ + + if host is None: + from openpype.pipeline import registered_host + host = registered_host() + + if project_name is None: + project_name = legacy_io.active_project() + + if isinstance(host, ILoadHost): + containers = host.get_containers() + else: + containers = host.ls() + return filter_containers(containers, project_name).outdated + + +def filter_containers(containers, project_name): + """Filter containers and split them into 4 categories. + + Categories are 'latest', 'outdated', 'invalid' and 'not_found'. + The 'latest' containers are from the last version, 'outdated' are not, + 'invalid' are containers with invalid content and 'not_found' are missing + some entity in the database. + + Args: + containers (Iterable[dict]): List of containers referenced in the scene. + project_name (str): Name of project in which context should look for + versions. + + Returns: + ContainersFilterResult: Named tuple with 'latest', 'outdated', + 'invalid' and 'not_found' containers. + """ + + # Make sure containers is a list that won't change + containers = list(containers) + + outdated_containers = [] + uptodate_containers = [] + not_found_containers = [] + invalid_containers = [] + output = ContainersFilterResult( + uptodate_containers, + outdated_containers, + not_found_containers, + invalid_containers + ) + # Query representation docs to get their version ids + repre_ids = { + container["representation"] + for container in containers + if container["representation"] + } + if not repre_ids: + if containers: + invalid_containers.extend(containers) + return output + + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["_id", "parent"] + ) + # Store representations by stringified representation id + repre_docs_by_str_id = {} + repre_docs_by_version_id = collections.defaultdict(list) + for repre_doc in repre_docs: + repre_id = str(repre_doc["_id"]) + version_id = repre_doc["parent"] + repre_docs_by_str_id[repre_id] = repre_doc + repre_docs_by_version_id[version_id].append(repre_doc) + + # Query version docs to get their subset ids + # - also query hero versions to be able to identify if a representation + # belongs to an existing version + version_docs = get_versions( + project_name, + version_ids=repre_docs_by_version_id.keys(), + hero=True, + fields=["_id", "parent", "type"] + ) + versions_by_id = {} + versions_by_subset_id = collections.defaultdict(list) + hero_version_ids = set() + for version_doc in version_docs: + version_id = version_doc["_id"] + # Store versions by their ids + versions_by_id[version_id] = version_doc + # There's no need to query subsets for hero versions + # - they are considered as latest?
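+ # Hero version documents are still stored in 'versions_by_id', so + # containers pointing to them fall into the up to date group below + # instead of being marked outdated.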
+ if version_doc["type"] == "hero_version": + hero_version_ids.add(version_id) + continue + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + last_versions = get_last_versions( + project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id"] + ) + # Figure out which versions are outdated + outdated_version_ids = set() + for subset_id, last_version_doc in last_versions.items(): + for version_doc in versions_by_subset_id[subset_id]: + version_id = version_doc["_id"] + if version_id != last_version_doc["_id"]: + outdated_version_ids.add(version_id) + + # Based on all collected data figure out which containers are outdated + # - log out if there are missing representation or version documents + for container in containers: + container_name = container["objectName"] + repre_id = container["representation"] + if not repre_id: + invalid_containers.append(container) + continue + + repre_doc = repre_docs_by_str_id.get(repre_id) + if not repre_doc: + log.debug(( + "Container '{}' has an invalid representation." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + continue + + version_id = repre_doc["parent"] + if version_id in outdated_version_ids: + outdated_containers.append(container) + + elif version_id not in verisons_by_id: + log.debug(( + "Representation on container '{}' has an invalid version." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + + else: + uptodate_containers.append(container) + + return output diff --git a/openpype/pipeline/mongodb.py b/openpype/pipeline/mongodb.py index dab5bb9e13..be2b67a5e7 100644 --- a/openpype/pipeline/mongodb.py +++ b/openpype/pipeline/mongodb.py @@ -5,6 +5,8 @@ import logging import pymongo from uuid import uuid4 +from openpype.client import OpenPypeMongoConnection + from . import schema @@ -156,8 +158,6 @@ class AvalonMongoDB: @property def mongo_client(self): - from openpype.lib import OpenPypeMongoConnection - return OpenPypeMongoConnection.get_mongo_client() @property diff --git a/openpype/pipeline/publish/abstract_collect_render.py b/openpype/pipeline/publish/abstract_collect_render.py index 2e537227c3..ccb2415346 100644 --- a/openpype/pipeline/publish/abstract_collect_render.py +++ b/openpype/pipeline/publish/abstract_collect_render.py @@ -63,6 +63,8 @@ class RenderInstance(object): family = attr.ib(default="renderlayer") families = attr.ib(default=["renderlayer"]) # list of families + # True if should be rendered on farm, eg not integrate + farm = attr.ib(default=False) # format settings multipartExr = attr.ib(default=False) # flag for multipart exrs diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index 739b2c8806..d5494cd8a4 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -6,6 +6,10 @@ import xml.etree.ElementTree import six import pyblish.plugin +import pyblish.api + +from openpype.lib import Logger +from openpype.settings import get_project_settings, get_system_settings class DiscoverResult: @@ -180,3 +184,92 @@ def publish_plugins_discover(paths=None): result.plugins = plugins return result + + +def filter_pyblish_plugins(plugins): + """Pyblish plugin filter which applies OpenPype settings. + + Apply OpenPype settings on discovered plugins. On plugin with implemented + class method 'def apply_settings(cls, project_settings, system_settings)' + is called the method. 
Default behavior looks for plugin name and current + host name to look for + + Args: + plugins (List[pyblish.plugin.Plugin]): Discovered plugins on which + are applied settings. + """ + + log = Logger.get_logger("filter_pyblish_plugins") + + # TODO: Don't use host from 'pyblish.api' but from defined host by us. + # - kept becau on farm is probably used host 'shell' which propably + # affect how settings are applied there + host = pyblish.api.current_host() + project_name = os.environ.get("AVALON_PROJECT") + + project_setting = get_project_settings(project_name) + system_settings = get_system_settings() + + # iterate over plugins + for plugin in plugins[:]: + if hasattr(plugin, "apply_settings"): + try: + # Use classmethod 'apply_settings' + # - can be used to target settings from custom settings place + # - skip default behavior when successful + plugin.apply_settings(project_setting, system_settings) + continue + + except Exception: + log.warning( + ( + "Failed to apply settings on plugin {}" + ).format(plugin.__name__), + exc_info=True + ) + + try: + config_data = ( + project_setting + [host] + ["publish"] + [plugin.__name__] + ) + except KeyError: + # host determined from path + file = os.path.normpath(inspect.getsourcefile(plugin)) + file = os.path.normpath(file) + + split_path = file.split(os.path.sep) + if len(split_path) < 4: + log.warning( + 'plugin path too short to extract host {}'.format(file) + ) + continue + + host_from_file = split_path[-4] + plugin_kind = split_path[-2] + + # TODO: change after all plugins are moved one level up + if host_from_file == "openpype": + host_from_file = "global" + + try: + config_data = ( + project_setting + [host_from_file] + [plugin_kind] + [plugin.__name__] + ) + except KeyError: + continue + + for option, value in config_data.items(): + if option == "enabled" and value is False: + log.info('removing plugin {}'.format(plugin.__name__)) + plugins.remove(plugin) + else: + log.info('setting {}:{} on plugin {}'.format( + option, value, plugin.__name__)) + + setattr(plugin, option, value) diff --git a/openpype/pipeline/template_data.py b/openpype/pipeline/template_data.py new file mode 100644 index 0000000000..824a25127c --- /dev/null +++ b/openpype/pipeline/template_data.py @@ -0,0 +1,228 @@ +from openpype.client import get_project, get_asset_by_name +from openpype.settings import get_system_settings +from openpype.lib.local_settings import get_openpype_username + + +def get_general_template_data(system_settings=None): + """General template data based on system settings or machine. + + Output contains formatting keys: + - 'studio[name]' - Studio name filled from system settings + - 'studio[code]' - Studio code filled from system settings + - 'user' - User's name using 'get_openpype_username' + + Args: + system_settings (Dict[str, Any]): System settings. + """ + + if not system_settings: + system_settings = get_system_settings() + studio_name = system_settings["general"]["studio_name"] + studio_code = system_settings["general"]["studio_code"] + return { + "studio": { + "name": studio_name, + "code": studio_code + }, + "user": get_openpype_username() + } + + +def get_project_template_data(project_doc): + """Extract data from project document that are used in templates. + + Project document must have 'name' and (at this moment) optional + key 'data.code'. + + Output contains formatting keys: + - 'project[name]' - Project name + - 'project[code]' - Project code + + Args: + project_doc (Dict[str, Any]): Queried project document. 
+ + Returns: + Dict[str, Dict[str, str]]: Template data based on project document. + """ + + project_code = project_doc.get("data", {}).get("code") + return { + "project": { + "name": project_doc["name"], + "code": project_code + } + } + + +def get_asset_template_data(asset_doc, project_name): + """Extract data from asset document that are used in templates. + + Output dictionary contains keys: + - 'asset' - asset name + - 'hierarchy' - parent asset names joined with '/' + - 'parent' - direct parent name; project name is used if the asset is + directly under the project + + Required document fields: + Asset: 'name', 'data.parents' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + project_name (str): Is used for 'parent' key if asset doc does not have + any parents. + + Returns: + Dict[str, str]: Data that are based on asset document and can be used + in templates. + """ + + asset_parents = asset_doc["data"]["parents"] + hierarchy = "/".join(asset_parents) + if asset_parents: + parent_name = asset_parents[-1] + else: + parent_name = project_name + + return { + "asset": asset_doc["name"], + "hierarchy": hierarchy, + "parent": parent_name + } + + +def get_task_type(asset_doc, task_name): + """Get task type based on asset document and task name. + + Required document fields: + Asset: 'data.tasks' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + task_name (str): Task name which is under asset. + + Returns: + str: Task type name. + None: Task was not found on asset document. + """ + + asset_tasks_info = asset_doc["data"]["tasks"] + return asset_tasks_info.get(task_name, {}).get("type") + + +def get_task_template_data(project_doc, asset_doc, task_name): + """Extract task specific data from project and asset documents. + + Required document fields: + Project: 'config.tasks' + Asset: 'data.tasks'. + + Args: + project_doc (Dict[str, Any]): Queried project document. + asset_doc (Dict[str, Any]): Queried asset document. + task_name (str): Name of task for which data should be returned. + + Returns: + Dict[str, Dict[str, str]]: Template data + """ + + project_task_types = project_doc["config"]["tasks"] + task_type = get_task_type(asset_doc, task_name) + task_code = project_task_types.get(task_type, {}).get("short_name") + + return { + "task": { + "name": task_name, + "type": task_type, + "short": task_code, + } + } + + +def get_template_data( + project_doc, + asset_doc=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered documents and info. + + This function does not "auto fill" any values except system settings, and + that is on purpose. + + Universal function to receive template data from passed arguments. The + only required argument is the project document; all other arguments are + optional and their values won't be added to template data if they are not + passed. + + Required document fields: + Project: 'name', 'data.code', 'config.tasks' + Asset: 'name', 'data.parents', 'data.tasks' + + Args: + project_doc (Dict[str, Any]): Mongo document of project from MongoDB. + asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]): Used to fill '{app}' key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed (may be slower). + + Returns: + Dict[str, Any]: Data prepared for filling workdir template.
+ """ + + template_data = get_general_template_data(system_settings) + template_data.update(get_project_template_data(project_doc)) + if asset_doc: + template_data.update(get_asset_template_data( + asset_doc, project_doc["name"] + )) + if task_name: + template_data.update(get_task_template_data( + project_doc, asset_doc, task_name + )) + + if host_name: + template_data["app"] = host_name + + return template_data + + +def get_template_data_with_names( + project_name, + asset_name=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered entity names and info. + + Copy of 'get_template_data' but based on entity names instead of documents. + Only difference is that documents are queried. + + Args: + project_name (str): Project name for which template data are + calculated. + asset_name (Union[str, None]): Asset name for which template data are + calculated. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]):Used to fill '{app}' key. + because workdir template may contain `{app}` key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed. + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. + """ + + project_doc = get_project( + project_name, fields=["name", "data.code", "config.tasks"] + ) + asset_doc = None + if asset_name: + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["name", "data.parents", "data.tasks"] + ) + return get_template_data( + project_doc, asset_doc, task_name, host_name, system_settings + ) diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index ec97b36954..eb383b16d9 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,6 +2,7 @@ import os import copy import logging +from openpype.client import get_project from . 
diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index ec97b36954..eb383b16d9 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,6 +2,7 @@ import os import copy import logging +from openpype.client import get_project from . import legacy_io from .plugin_discover import ( discover, @@ -85,13 +86,8 @@ class TemplateResolver(ThumbnailResolver): self.log.debug("Thumbnail entity does not have set template") return - project = self.dbcon.find_one( - {"type": "project"}, - { - "name": True, - "data.code": True - } - ) + project_name = self.dbcon.active_project() + project = get_project(project_name, fields=["name", "data.code"]) template_data = copy.deepcopy( thumbnail_entity["data"].get("template_data") or {} ) diff --git a/openpype/pipeline/workfile/__init__.py b/openpype/pipeline/workfile/__init__.py new file mode 100644 index 0000000000..0aad29b6f9 --- /dev/null +++ b/openpype/pipeline/workfile/__init__.py @@ -0,0 +1,30 @@ +from .path_resolving import ( + get_workfile_template_key_from_context, + get_workfile_template_key, + get_workdir_with_workdir_data, + get_workdir, + + get_last_workfile_with_version, + get_last_workfile, + + get_custom_workfile_template, + get_custom_workfile_template_by_string_context, +) + +from .build_workfile import BuildWorkfile + + +__all__ = ( + "get_workfile_template_key_from_context", + "get_workfile_template_key", + "get_workdir_with_workdir_data", + "get_workdir", + + "get_last_workfile_with_version", + "get_last_workfile", + + "get_custom_workfile_template", + "get_custom_workfile_template_by_string_context", + + "BuildWorkfile", +) diff --git a/openpype/pipeline/workfile/build_workfile.py b/openpype/pipeline/workfile/build_workfile.py new file mode 100644 index 0000000000..bb6fcb4189 --- /dev/null +++ b/openpype/pipeline/workfile/build_workfile.py @@ -0,0 +1,693 @@ +import os +import re +import collections +import json + +from openpype.client import ( + get_asset_by_name, + get_subsets, + get_last_versions, + get_representations, +) +from openpype.settings import get_project_settings +from openpype.lib import ( + get_linked_assets, + filter_profiles, + Logger, +) +from openpype.pipeline import legacy_io +from openpype.pipeline.load import ( + discover_loader_plugins, + IncompatibleLoaderError, + load_container, +) + + +class BuildWorkfile: + """Wrapper for the build workfile process. + + Load representations for the current context by build presets. Build + presets are host related, since each host has its own loaders. + """ + + _log = None + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @staticmethod + def map_subsets_by_family(subsets): + subsets_by_family = collections.defaultdict(list) + for subset in subsets: + family = subset["data"].get("family") + if not family: + families = subset["data"].get("families") + if not families: + continue + family = families[0] + + subsets_by_family[family].append(subset) + return subsets_by_family + + def process(self): + """Main method of this wrapper. + + Building of the workfile is triggered here, and it is possible to + implement post processing of loaded containers if necessary. + + Returns: + List[Dict[str, Any]]: Loaded containers during build. + """ + + return self.build_workfile() + + def build_workfile(self): + """Prepares and loads containers into the workfile. + + Loads latest versions of the current and linked assets into the + workfile by logic stored in Workfile profiles from presets. Profiles + are set by host, filtered by current task name and used by families. + + Each family can specify representation names and loaders for + representations, and the first representation that loads + successfully is returned as a container.
+ + At the end you'll get a list of loaded containers per asset. + + loaded_containers [{ + "asset_entity": , + "containers": [, , ...] + }, { + "asset_entity": , + "containers": [, ...] + }, { + ... + }] + + Returns: + List[Dict[str, Any]]: Loaded containers during build. + """ + + loaded_containers = [] + + # Get current asset name and entity + project_name = legacy_io.active_project() + current_asset_name = legacy_io.Session["AVALON_ASSET"] + current_asset_entity = get_asset_by_name( + project_name, current_asset_name + ) + # Skip if asset was not found + if not current_asset_entity: + print("Asset entity with name `{}` was not found".format( + current_asset_name + )) + return loaded_containers + + # Prepare available loaders + loaders_by_name = {} + for loader in discover_loader_plugins(): + loader_name = loader.__name__ + if loader_name in loaders_by_name: + raise KeyError( + "Duplicated loader name {0}!".format(loader_name) + ) + loaders_by_name[loader_name] = loader + + # Skip if there are no loaders + if not loaders_by_name: + self.log.warning("There are no registered loaders.") + return loaded_containers + + # Get current task name + current_task_name = legacy_io.Session["AVALON_TASK"] + + # Load workfile presets for task + self.build_presets = self.get_build_presets( + current_task_name, current_asset_entity + ) + + # Skip if there are no presets for the task + if not self.build_presets: + self.log.warning( + "Current task `{}` does not have any loading preset.".format( + current_task_name + ) + ) + return loaded_containers + + # Get presets for loading current asset + current_context_profiles = self.build_presets.get("current_context") + # Get presets for loading linked assets + link_context_profiles = self.build_presets.get("linked_assets") + # Skip if both are missing + if not current_context_profiles and not link_context_profiles: + self.log.warning( + "Current task `{}` has empty loading preset.".format( + current_task_name + ) + ) + return loaded_containers + + elif not current_context_profiles: + self.log.warning(( + "Current task `{}` doesn't have any loading" + " preset for its context." + ).format(current_task_name)) + + elif not link_context_profiles: + self.log.warning(( + "Current task `{}` doesn't have any" + " loading preset for its linked assets." + ).format(current_task_name)) + + # Prepare assets to process by workfile presets + assets = [] + current_asset_id = None + if current_context_profiles: + # Add current asset entity if preset has current context set + assets.append(current_asset_entity) + current_asset_id = current_asset_entity["_id"] + + if link_context_profiles: + # Find and append linked assets if preset has set linked mapping + link_assets = get_linked_assets(current_asset_entity) + if link_assets: + assets.extend(link_assets) + + # Skip if there are no assets. This can happen if only linked mapping + # is set and there are no links for this asset. + if not assets: + self.log.warning( + "Asset does not have linked assets. Nothing to process."
+ ) + return loaded_containers + + # Prepare entities from database for assets + prepared_entities = self._collect_last_version_repres(assets) + + # Load containers by prepared entities and presets + # - Current asset containers + if current_asset_id and current_asset_id in prepared_entities: + current_context_data = prepared_entities.pop(current_asset_id) + loaded_data = self.load_containers_by_asset_data( + current_context_data, current_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # - Linked assets container + for linked_asset_data in prepared_entities.values(): + loaded_data = self.load_containers_by_asset_data( + linked_asset_data, link_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # Return list of loaded containers + return loaded_containers + + def get_build_presets(self, task_name, asset_doc): + """ Returns presets to build workfile for task name. + + Presets are loaded for current project set in + io.Session["AVALON_PROJECT"], filtered by registered host + and entered task name. + + Args: + task_name (str): Task name used for filtering build presets. + + Returns: + Dict[str, Any]: preset per entered task name + """ + + host_name = os.environ["AVALON_APP"] + project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + host_settings = project_settings.get(host_name) or {} + # Get presets for host + wb_settings = host_settings.get("workfile_builder") + if not wb_settings: + # backward compatibility + wb_settings = host_settings.get("workfile_build") or {} + + builder_profiles = wb_settings.get("profiles") + if not builder_profiles: + return None + + task_type = ( + asset_doc + .get("data", {}) + .get("tasks", {}) + .get(task_name, {}) + .get("type") + ) + filter_data = { + "task_types": task_type, + "tasks": task_name + } + return filter_profiles(builder_profiles, filter_data) + + def _filter_build_profiles(self, build_profiles, loaders_by_name): + """ Filter build profiles by loaders and prepare process data. + + Valid profile must have "loaders", "families" and "repre_names" keys + with valid values. + - "loaders" expects list of strings representing possible loaders. + - "families" expects list of strings for filtering + by main subset family. + - "repre_names" expects list of strings for filtering by + representation name. + + Lowered "families" and "repre_names" are prepared for each profile with + all required keys. + + Args: + build_profiles (Dict[str, Any]): Profiles for building workfile. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + List[Dict[str, Any]]: Filtered and prepared profiles. 
+ """ + + valid_profiles = [] + for profile in build_profiles: + # Check loaders + profile_loaders = profile.get("loaders") + if not profile_loaders: + self.log.warning(( + "Build profile has missing loaders configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check if any loader is available + loaders_match = False + for loader_name in profile_loaders: + if loader_name in loaders_by_name: + loaders_match = True + break + + if not loaders_match: + self.log.warning(( + "All loaders from Build profile are not available: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check families + profile_families = profile.get("families") + if not profile_families: + self.log.warning(( + "Build profile is missing families configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check representation names + profile_repre_names = profile.get("repre_names") + if not profile_repre_names: + self.log.warning(( + "Build profile is missing" + " representation names filtering: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Prepare lowered families and representation names + profile["families_lowered"] = [ + fam.lower() for fam in profile_families + ] + profile["repre_names_lowered"] = [ + name.lower() for name in profile_repre_names + ] + + valid_profiles.append(profile) + + return valid_profiles + + def _prepare_profile_for_subsets(self, subsets, profiles): + """Select profile for each subset by it's data. + + Profiles are filtered for each subset individually. + Profile is filtered by subset's family, optionally by name regex and + representation names set in profile. + It is possible to not find matching profile for subset, in that case + subset is skipped and it is possible that none of subsets have + matching profile. + + Args: + subsets (List[Dict[str, Any]]): Subset documents. + profiles (List[Dict[str, Any]]): Build profiles. + + Returns: + Dict[str, Any]: Profile by subset's id. + """ + + # Prepare subsets + subsets_by_family = self.map_subsets_by_family(subsets) + + profiles_per_subset_id = {} + for family, subsets in subsets_by_family.items(): + family_low = family.lower() + for profile in profiles: + # Skip profile if does not contain family + if family_low not in profile["families_lowered"]: + continue + + # Precompile name filters as regexes + profile_regexes = profile.get("subset_name_filters") + if profile_regexes: + _profile_regexes = [] + for regex in profile_regexes: + _profile_regexes.append(re.compile(regex)) + profile_regexes = _profile_regexes + + # TODO prepare regex compilation + for subset in subsets: + # Verify regex filtering (optional) + if profile_regexes: + valid = False + for pattern in profile_regexes: + if re.match(pattern, subset["name"]): + valid = True + break + + if not valid: + continue + + profiles_per_subset_id[subset["_id"]] = profile + + # break profiles loop on finding the first matching profile + break + return profiles_per_subset_id + + def load_containers_by_asset_data( + self, asset_entity_data, build_profiles, loaders_by_name + ): + """Load containers for entered asset entity by Build profiles. + + Args: + asset_entity_data (Dict[str, Any]): Prepared data with subsets, + last versions and representations for specific asset. + build_profiles (Dict[str, Any]): Build profiles. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + Dict[str, Any]: Output contains asset document + and loaded containers. 
+ """ + + # Make sure all data are not empty + if not asset_entity_data or not build_profiles or not loaders_by_name: + return + + asset_entity = asset_entity_data["asset_entity"] + + valid_profiles = self._filter_build_profiles( + build_profiles, loaders_by_name + ) + if not valid_profiles: + self.log.warning( + "There are not valid Workfile profiles. Skipping process." + ) + return + + self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) + + subsets_by_id = {} + version_by_subset_id = {} + repres_by_version_id = {} + for subset_id, in_data in asset_entity_data["subsets"].items(): + subset_entity = in_data["subset_entity"] + subsets_by_id[subset_entity["_id"]] = subset_entity + + version_data = in_data["version"] + version_entity = version_data["version_entity"] + version_by_subset_id[subset_id] = version_entity + repres_by_version_id[version_entity["_id"]] = ( + version_data["repres"] + ) + + if not subsets_by_id: + self.log.warning("There are not subsets for asset {0}".format( + asset_entity["name"] + )) + return + + profiles_per_subset_id = self._prepare_profile_for_subsets( + subsets_by_id.values(), valid_profiles + ) + if not profiles_per_subset_id: + self.log.warning("There are not valid subsets.") + return + + valid_repres_by_subset_id = collections.defaultdict(list) + for subset_id, profile in profiles_per_subset_id.items(): + profile_repre_names = profile["repre_names_lowered"] + + version_entity = version_by_subset_id[subset_id] + version_id = version_entity["_id"] + repres = repres_by_version_id[version_id] + for repre in repres: + repre_name_low = repre["name"].lower() + if repre_name_low in profile_repre_names: + valid_repres_by_subset_id[subset_id].append(repre) + + # DEBUG message + msg = "Valid representations for Asset: `{}`".format( + asset_entity["name"] + ) + for subset_id, repres in valid_repres_by_subset_id.items(): + subset = subsets_by_id[subset_id] + msg += "\n# Subset Name/ID: `{}`/{}".format( + subset["name"], subset_id + ) + for repre in repres: + msg += "\n## Repre name: `{}`".format(repre["name"]) + + self.log.debug(msg) + + containers = self._load_containers( + valid_repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ) + + return { + "asset_entity": asset_entity, + "containers": containers + } + + def _load_containers( + self, repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ): + """Real load by collected data happens here. + + Loading of representations per subset happens here. Each subset can + loads one representation. Loading is tried in specific order. + Representations are tried to load by names defined in configuration. + If subset has representation matching representation name each loader + is tried to load it until any is successful. If none of them was + successful then next representation name is tried. + Subset process loop ends when any representation is loaded or + all matching representations were already tried. + + Args: + repres_by_subset_id (Dict[str, Dict[str, Any]]): Available + representations mapped by their parent (subset) id. + subsets_by_id (Dict[str, Dict[str, Any]]): Subset documents + mapped by their id. + profiles_per_subset_id (Dict[str, Dict[str, Any]]): Build profiles + mapped by subset id. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + List[Dict[str, Any]]: Objects of loaded containers. + """ + + loaded_containers = [] + + # Get subset id order from build presets. 
+ build_presets = self.build_presets.get("current_context", []) + build_presets += self.build_presets.get("linked_assets", []) + subset_ids_ordered = [] + for preset in build_presets: + for preset_family in preset["families"]: + for id, subset in subsets_by_id.items(): + if preset_family not in subset["data"].get("families", []): + continue + + subset_ids_ordered.append(id) + + # Order representations from subsets. + print("repres_by_subset_id", repres_by_subset_id) + representations_ordered = [] + representations = [] + for id in subset_ids_ordered: + for subset_id, repres in repres_by_subset_id.items(): + if repres in representations: + continue + + if id == subset_id: + representations_ordered.append((subset_id, repres)) + representations.append(repres) + + print("representations", representations) + + # Load ordered representations. + for subset_id, repres in representations_ordered: + subset_name = subsets_by_id[subset_id]["name"] + + profile = profiles_per_subset_id[subset_id] + loaders_last_idx = len(profile["loaders"]) - 1 + repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 + + repre_by_low_name = { + repre["name"].lower(): repre for repre in repres + } + + is_loaded = False + for repre_name_idx, profile_repre_name in enumerate( + profile["repre_names_lowered"] + ): + # Break iteration if representation was already loaded + if is_loaded: + break + + repre = repre_by_low_name.get(profile_repre_name) + if not repre: + continue + + for loader_idx, loader_name in enumerate(profile["loaders"]): + if is_loaded: + break + + loader = loaders_by_name.get(loader_name) + if not loader: + continue + try: + container = load_container( + loader, + repre["_id"], + name=subset_name + ) + loaded_containers.append(container) + is_loaded = True + + except Exception as exc: + if exc == IncompatibleLoaderError: + self.log.info(( + "Loader `{}` is not compatible with" + " representation `{}`" + ).format(loader_name, repre["name"])) + + else: + self.log.error( + "Unexpected error happened during loading", + exc_info=True + ) + + msg = "Loading failed." + if loader_idx < loaders_last_idx: + msg += " Trying next loader." + elif repre_name_idx < repre_names_last_idx: + msg += ( + " Loading of subset `{}` was not successful." + ).format(subset_name) + else: + msg += " Trying next representation." + self.log.info(msg) + + return loaded_containers + + def _collect_last_version_repres(self, asset_docs): + """Collect subsets, versions and representations for asset_entities. + + Args: + asset_docs (List[Dict[str, Any]]): Asset entities for which + want to find data. + + Returns: + Dict[str, Any]: collected entities + + Example output: + ``` + { + {Asset ID}: { + "asset_entity": , + "subsets": { + {Subset ID}: { + "subset_entity": , + "version": { + "version_entity": , + "repres": [ + , , ... + ] + } + }, + ... + } + }, + ... 
+ } + output[asset_id]["subsets"][subset_id]["version"]["repres"] + ``` + """ + + output = {} + if not asset_docs: + return output + + asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs} + + project_name = legacy_io.active_project() + subsets = list(get_subsets( + project_name, asset_ids=asset_docs_by_ids.keys() + )) + subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} + + last_version_by_subset_id = get_last_versions( + project_name, subset_entity_by_ids.keys() + ) + last_version_docs_by_id = { + version["_id"]: version + for version in last_version_by_subset_id.values() + } + repre_docs = get_representations( + project_name, version_ids=last_version_docs_by_id.keys() + ) + + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = last_version_docs_by_id[version_id] + + subset_id = version_doc["parent"] + subset_doc = subset_entity_by_ids[subset_id] + + asset_id = subset_doc["parent"] + asset_doc = asset_docs_by_ids[asset_id] + + if asset_id not in output: + output[asset_id] = { + "asset_entity": asset_doc, + "subsets": {} + } + + if subset_id not in output[asset_id]["subsets"]: + output[asset_id]["subsets"][subset_id] = { + "subset_entity": subset_doc, + "version": { + "version_entity": version_doc, + "repres": [] + } + } + + output[asset_id]["subsets"][subset_id]["version"]["repres"].append( + repre_doc + ) + + return output diff --git a/openpype/pipeline/workfile/path_resolving.py b/openpype/pipeline/workfile/path_resolving.py new file mode 100644 index 0000000000..ed1d1d793e --- /dev/null +++ b/openpype/pipeline/workfile/path_resolving.py @@ -0,0 +1,464 @@ +import os +import re +import copy +import platform + +from openpype.client import get_project, get_asset_by_name +from openpype.settings import get_project_settings +from openpype.lib import ( + filter_profiles, + Logger, + StringTemplate, +) +from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data + + +def get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings=None +): + """Helper function to get template key for workfile template. + + Do the same as `get_workfile_template_key` but returns value for "session + context". + + Args: + asset_name(str): Name of asset document. + task_name(str): Task name for which is template key retrieved. + Must be available on asset document under `data.tasks`. + host_name(str): Name of host implementation for which is workfile + used. + project_name(str): Project name where asset and task is. + project_settings(Dict[str, Any]): Project settings for passed + 'project_name'. Not required at all but makes function faster. + """ + + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.tasks"] + ) + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + task_info = asset_tasks.get(task_name) or {} + task_type = task_info.get("type") + + return get_workfile_template_key( + task_type, host_name, project_name, project_settings + ) + + +def get_workfile_template_key( + task_type, host_name, project_name, project_settings=None +): + """Workfile template key which should be used to get workfile template. + + Function is using profiles from project settings to return right template + for passet task type and host name. + + Args: + task_type(str): Name of task type. + host_name(str): Name of host implementation (e.g. "maya", "nuke", ...) + project_name(str): Name of project in which context should look for + settings. 
+        project_settings(Dict[str, Any]): Prepared project settings for
+            project name. Optional to make processing faster.
+    """
+
+    default = "work"
+    if not task_type or not host_name:
+        return default
+
+    if not project_settings:
+        project_settings = get_project_settings(project_name)
+
+    try:
+        profiles = (
+            project_settings
+            ["global"]
+            ["tools"]
+            ["Workfiles"]
+            ["workfile_template_profiles"]
+        )
+    except Exception:
+        profiles = []
+
+    if not profiles:
+        return default
+
+    profile_filter = {
+        "task_types": task_type,
+        "hosts": host_name
+    }
+    profile = filter_profiles(profiles, profile_filter)
+    if profile:
+        return profile["workfile_template"] or default
+    return default
+
+
+def get_workdir_with_workdir_data(
+    workdir_data,
+    project_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill workdir path from entered data and project's anatomy.
+
+    It is possible to pass only the project's name instead of its anatomy,
+    but one of them **must** be entered. It is preferred to pass the anatomy
+    if it is available, as initialization of a new Anatomy object may be
+    time consuming.
+
+    Args:
+        workdir_data (Dict[str, Any]): Data to fill workdir template.
+        project_name (str): Project's name.
+        anatomy (Anatomy): Anatomy object for specific project. Faster
+            processing if passed.
+        template_key (str): Key of work templates in anatomy templates. If not
+            passed `get_workfile_template_key_from_context` is used to get it.
+        project_settings (Dict[str, Any]): Prepared project settings for
+            project name. Optional to make processing faster. It is used only
+            if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
+    """
+
+    if not anatomy:
+        anatomy = Anatomy(project_name)
+
+    if not template_key:
+        template_key = get_workfile_template_key(
+            workdir_data["task"]["type"],
+            workdir_data["app"],
+            workdir_data["project"]["name"],
+            project_settings
+        )
+
+    anatomy_filled = anatomy.format(workdir_data)
+    # Output is a TemplateResult object which contains useful data
+    output = anatomy_filled[template_key]["folder"]
+    if output:
+        return output.normalized()
+    return output
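For orientation, here is a compressed sketch of how the template-key lookup above behaves. The profile list is made up and `_simple_filter` is an illustrative stand-in for `openpype.lib.filter_profiles` (which has richer matching rules):

```python
# Hypothetical profiles mirroring "workfile_template_profiles" settings
profiles = [
    {"task_types": ["Compositing"], "hosts": ["nuke"],
     "workfile_template": "work_comp"},
    {"task_types": [], "hosts": ["maya"], "workfile_template": "work_maya"},
]

def _simple_filter(profiles, task_type, host_name):
    # Stand-in for filter_profiles: an empty list means "matches any"
    for profile in profiles:
        task_ok = not profile["task_types"] or task_type in profile["task_types"]
        host_ok = not profile["hosts"] or host_name in profile["hosts"]
        if task_ok and host_ok:
            return profile
    return None

profile = _simple_filter(profiles, "Modeling", "maya")
template_key = (profile or {}).get("workfile_template") or "work"
print(template_key)  # work_maya
```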
+
+
+def get_workdir(
+    project_doc,
+    asset_doc,
+    task_name,
+    host_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill workdir path from entered data and project's anatomy.
+
+    Args:
+        project_doc (Dict[str, Any]): Mongo document of project from MongoDB.
+        asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB.
+        task_name (str): Task name for which are workdir data prepared.
+        host_name (str): Host which is used for workdir. This is required
+            because the workdir template may contain the `{app}` key. In
+            `Session` it is stored under the `AVALON_APP` key.
+        anatomy (Anatomy): Optional argument. Anatomy object is created using
+            project name from `project_doc`. It is preferred to pass this
+            argument as initialization of a new Anatomy object may be time
+            consuming.
+        template_key (str): Key of work templates in anatomy templates. Default
+            value is defined in `get_workdir_with_workdir_data`.
+        project_settings (Dict[str, Any]): Prepared project settings for
+            project name. Optional to make processing faster. It is used only
+            if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
+    """
+
+    if not anatomy:
+        anatomy = Anatomy(project_doc["name"])
+
+    workdir_data = get_template_data(
+        project_doc, asset_doc, task_name, host_name
+    )
+    # Output is a TemplateResult object which contains useful data
+    return get_workdir_with_workdir_data(
+        workdir_data,
+        anatomy.project_name,
+        anatomy,
+        template_key,
+        project_settings
+    )
+
+
+def get_last_workfile_with_version(
+    workdir, file_template, fill_data, extensions
+):
+    """Return last workfile version.
+
+    Using the workfile template and its fill data, find the most likely
+    last version of a workfile created for the context.
+
+    The logic relies on knowing which template keys are optional and what
+    values are expected for each key.
+
+    If multiple files qualify as the last workfile, the most recently
+    modified one is used.
+
+    Args:
+        workdir (str): Path to dir where workfiles are stored.
+        file_template (str): Template of file name.
+        fill_data (Dict[str, Any]): Data for filling template.
+        extensions (Iterable[str]): All allowed file extensions of workfile.
+
+    Returns:
+        Tuple[Union[str, None], Union[int, None]]: Last workfile with version
+            if there is any workfile, otherwise None for both.
+    """
+
+    if not os.path.exists(workdir):
+        return None, None
+
+    dotted_extensions = set()
+    for ext in extensions:
+        if not ext.startswith("."):
+            ext = ".{}".format(ext)
+        dotted_extensions.add(ext)
+
+    # Fast match on extension
+    filenames = [
+        filename
+        for filename in os.listdir(workdir)
+        if os.path.splitext(filename)[-1] in dotted_extensions
+    ]
+
+    # Build the template without optionals, with version as a digits-only
+    # regex group and comment as any definable value.
+    # Escape the extension dot for regex
+    regex_exts = [
+        "\\" + ext
+        for ext in dotted_extensions
+    ]
+    ext_expression = "(?:" + "|".join(regex_exts) + ")"
+
+    # Replace optional `.{ext}` with the extension regex so a doubled dot
+    # cannot end up at the end
+    file_template = re.sub(r"\.?{ext}", ext_expression, file_template)
+    # Replace optional keys with optional content regex
+    file_template = re.sub(r"<.*?>", r".*?", file_template)
+    # Replace `{version}` with group regex
+    file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template)
+    file_template = re.sub(r"{comment.*?}", r".+?", file_template)
+    file_template = StringTemplate.format_strict_template(
+        file_template, fill_data
+    )
+
+    # Match with ignore case on Windows because the Windows file system is
+    # not case-sensitive. This avoids the file seeming to not exist later
+    # only because it exists with a different upper/lower-case.
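As an aside before the matching loop below, here is the template-to-regex conversion above applied to one concrete, made-up template. `str.format` stands in for `StringTemplate.format_strict_template`:

```python
import re

# Made-up workfile template; real templates come from project anatomy
file_template = "{asset}_{task}_v{version:0>3}<_{comment}>.{ext}"
ext_expression = "(?:\\.ma|\\.mb)"

# Same substitutions as above, on one concrete template
file_template = re.sub(r"\.?{ext}", ext_expression, file_template)
file_template = re.sub(r"<.*?>", r".*?", file_template)
file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template)
file_template = re.sub(r"{comment.*?}", r".+?", file_template)

print(file_template)
# {asset}_{task}_v([0-9]+).*?(?:\.ma|\.mb)
match = re.match(file_template.format(asset="sh010", task="anim"),
                 "sh010_anim_v003_wip.ma")
print(match.group(1))  # 003
```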
+ kwargs = {} + if platform.system().lower() == "windows": + kwargs["flags"] = re.IGNORECASE + + # Get highest version among existing matching files + version = None + output_filenames = [] + for filename in sorted(filenames): + match = re.match(file_template, filename, **kwargs) + if not match: + continue + + file_version = int(match.group(1)) + if version is None or file_version > version: + output_filenames[:] = [] + version = file_version + + if file_version == version: + output_filenames.append(filename) + + output_filename = None + if output_filenames: + if len(output_filenames) == 1: + output_filename = output_filenames[0] + else: + last_time = None + for _output_filename in output_filenames: + full_path = os.path.join(workdir, _output_filename) + mod_time = os.path.getmtime(full_path) + if last_time is None or last_time < mod_time: + output_filename = _output_filename + last_time = mod_time + + return output_filename, version + + +def get_last_workfile( + workdir, file_template, fill_data, extensions, full_path=False +): + """Return last workfile filename. + + Returns file with version 1 if there is not workfile yet. + + Args: + workdir(str): Path to dir where workfiles are stored. + file_template(str): Template of file name. + fill_data(Dict[str, Any]): Data for filling template. + extensions(Iterable[str]): All allowed file extensions of workfile. + full_path(bool): Full path to file is returned if set to True. + + Returns: + str: Last or first workfile as filename of full path to filename. + """ + + filename, version = get_last_workfile_with_version( + workdir, file_template, fill_data, extensions + ) + if filename is None: + data = copy.deepcopy(fill_data) + data["version"] = 1 + data.pop("comment", None) + if not data.get("ext"): + data["ext"] = extensions[0] + data["ext"] = data["ext"].replace('.', '') + filename = StringTemplate.format_strict_template(file_template, data) + + if full_path: + return os.path.normpath(os.path.join(workdir, filename)) + + return filename + + +def get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy=None, + project_settings=None +): + """Filter and fill workfile template profiles by passed context. + + Custom workfile template can be used as first version of workfiles. + Template is a file on a disk which is set in settings. Expected settings + structure to have this feature enabled is: + project settings + |- + |- workfile_builder + |- create_first_version - a bool which must be set to 'True' + |- custom_templates - profiles based on task name/type which + points to a file which is copied as + first workfile + + It is expected that passed argument are already queried documents of + project and asset as parents of processing task name. + + Args: + project_doc (Dict[str, Any]): Project document from MongoDB. + asset_doc (Dict[str, Any]): Asset document from MongoDB. + task_name (str): Name of task for which templates are filtered. + host_name (str): Name of host. + anatomy (Anatomy): Optionally passed anatomy object for passed project + name. + project_settings(Dict[str, Any]): Preloaded project settings. + + Returns: + str: Path to template or None if none of profiles match current + context. Existence of formatted path is not validated. + None: If no profile is matching context. 
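Aside on `get_last_workfile` above: when no workfile exists yet, the fallback builds a first-version name. A minimal sketch with made-up fill data and a simplified template (again `str.format` in place of `StringTemplate`):

```python
# Hypothetical fill data and allowed extensions
fill_data = {"asset": "sh010", "task": "anim"}
extensions = [".ma", ".mb"]

data = dict(fill_data)
data["version"] = 1           # first version
data.pop("comment", None)     # no comment for a fresh file
data["ext"] = extensions[0].replace(".", "")

filename = "{asset}_{task}_v{version:0>3}.{ext}".format(**data)
print(filename)  # sh010_anim_v001.ma
```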
+ """ + + log = Logger.get_logger("CustomWorkfileResolve") + + project_name = project_doc["name"] + if project_settings is None: + project_settings = get_project_settings(project_name) + + host_settings = project_settings.get(host_name) + if not host_settings: + log.info("Host \"{}\" doesn't have settings".format(host_name)) + return None + + workfile_builder_settings = host_settings.get("workfile_builder") + if not workfile_builder_settings: + log.info(( + "Seems like old version of settings is used." + " Can't access custom templates in host \"{}\"." + ).format(host_name)) + return + + if not workfile_builder_settings["create_first_version"]: + log.info(( + "Project \"{}\" has turned off to create first workfile for" + " host \"{}\"" + ).format(project_name, host_name)) + return + + # Backwards compatibility + template_profiles = workfile_builder_settings.get("custom_templates") + if not template_profiles: + log.info( + "Custom templates are not filled. Skipping template copy." + ) + return + + if anatomy is None: + anatomy = Anatomy(project_name) + + # get project, asset, task anatomy context data + anatomy_context_data = get_template_data( + project_doc, asset_doc, task_name, host_name + ) + # add root dict + anatomy_context_data["root"] = anatomy.roots + + # get task type for the task in context + current_task_type = anatomy_context_data["task"]["type"] + + # get path from matching profile + matching_item = filter_profiles( + template_profiles, + {"task_types": current_task_type} + ) + # when path is available try to format it in case + # there are some anatomy template strings + if matching_item: + template = matching_item["path"][platform.system().lower()] + return StringTemplate.format_strict_template( + template, anatomy_context_data + ).normalized() + + return None + + +def get_custom_workfile_template_by_string_context( + project_name, + asset_name, + task_name, + host_name, + anatomy=None, + project_settings=None +): + """Filter and fill workfile template profiles by passed context. + + Passed context are string representations of project, asset and task. + Function will query documents of project and asset to be able use + `get_custom_workfile_template` for rest of logic. + + Args: + project_name(str): Project name. + asset_name(str): Asset name. + task_name(str): Task name. + host_name (str): Name of host. + anatomy(Anatomy): Optionally prepared anatomy object for passed + project. + project_settings(Dict[str, Any]): Preloaded project settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.) + None: If no profile is matching context. 
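Aside on the per-platform template selection in `get_custom_workfile_template` above: matched profiles carry one path per platform, formatted with anatomy context data. A minimal sketch with a made-up profile and `str.format` standing in for `StringTemplate`:

```python
import platform

# Hypothetical profile, shaped like a "custom_templates" settings entry
matching_item = {
    "task_types": ["Compositing"],
    "path": {
        "windows": "{root[work]}/{project[name]}/templates/comp_v001.nk",
        "linux": "{root[work]}/{project[name]}/templates/comp_v001.nk",
        "darwin": "{root[work]}/{project[name]}/templates/comp_v001.nk",
    },
}

template = matching_item["path"][platform.system().lower()]
data = {"root": {"work": "/mnt/work"}, "project": {"name": "demo"}}
print(template.format(**data))
# /mnt/work/demo/templates/comp_v001.nk
```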
+ """ + + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) + + return get_custom_workfile_template( + project_doc, asset_doc, task_name, host_name, anatomy, project_settings + ) diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 7585ea4c59..f6e1d4f06b 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -4,10 +4,10 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui from openpype.client import get_representations -from openpype.lib import config from openpype.pipeline import load, Anatomy from openpype import resources, style +from openpype.lib.dateutils import get_datetime_data from openpype.lib.delivery import ( sizeof_fmt, path_from_representation, @@ -160,7 +160,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): selected_repres = self._get_selected_repres() - datetime_data = config.get_datetime_data() + datetime_data = get_datetime_data() template_name = self.dropdown.currentText() format_dict = get_format_dict(self.anatomy, self.root_line_edit.text()) for repre in self._representations: diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index 0794adfb67..8433816908 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -15,10 +15,8 @@ Provides: import json import pyblish.api -from openpype.lib import ( - get_system_general_anatomy_data -) from openpype.pipeline import legacy_io +from openpype.pipeline.template_data import get_template_data class CollectAnatomyContextData(pyblish.api.ContextPlugin): @@ -33,11 +31,15 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): "asset": "AssetName", "hierarchy": "path/to/asset", "task": "Working", + "user": "MeDespicable", + # Duplicated entry "username": "MeDespicable", + # Current host name + "app": "maya" + *** OPTIONAL *** - "app": "maya" # Current application base name - + mutliple keys from `datetimeData` # see it's collector + + mutliple keys from `datetimeData` (See it's collector) } """ @@ -45,52 +47,26 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): label = "Collect Anatomy Context Data" def process(self, context): + host_name = context.data["hostName"] + system_settings = context.data["system_settings"] project_entity = context.data["projectEntity"] - context_data = { - "project": { - "name": project_entity["name"], - "code": project_entity["data"].get("code") - }, - "username": context.data["user"], - "app": context.data["hostName"] - } - - context.data["anatomyData"] = context_data - - # add system general settings anatomy data - system_general_data = get_system_general_anatomy_data() - context_data.update(system_general_data) - - datetime_data = context.data.get("datetimeData") or {} - context_data.update(datetime_data) - asset_entity = context.data.get("assetEntity") + task_name = None if asset_entity: task_name = legacy_io.Session["AVALON_TASK"] - asset_tasks = asset_entity["data"]["tasks"] - task_type = asset_tasks.get(task_name, {}).get("type") + anatomy_data = get_template_data( + project_entity, asset_entity, task_name, host_name, system_settings + ) + anatomy_data.update(context.data.get("datetimeData") or {}) - project_task_types = project_entity["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") + username = context.data["user"] + anatomy_data["user"] = 
username + # Backwards compatibility for 'username' key + anatomy_data["username"] = username - asset_parents = asset_entity["data"]["parents"] - hierarchy = "/".join(asset_parents) - - parent_name = project_entity["name"] - if asset_parents: - parent_name = asset_parents[-1] - - context_data.update({ - "asset": asset_entity["name"], - "parent": parent_name, - "hierarchy": hierarchy, - "task": { - "name": task_name, - "type": task_type, - "short": task_code, - } - }) + # Store + context.data["anatomyData"] = anatomy_data self.log.info("Global anatomy Data collected") - self.log.debug(json.dumps(context_data, indent=4)) + self.log.debug(json.dumps(anatomy_data, indent=4)) diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index c75534cf83..f67d3373d9 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -51,6 +51,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): project_name = legacy_io.active_project() self.fill_missing_asset_docs(context, project_name) + self.fill_instance_data_from_asset(context) self.fill_latest_versions(context, project_name) self.fill_anatomy_data(context) @@ -115,6 +116,23 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): "Not found asset documents with names \"{}\"." ).format(joined_asset_names)) + def fill_instance_data_from_asset(self, context): + for instance in context: + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + continue + + asset_data = asset_doc["data"] + for key in ( + "fps", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + ): + if key not in instance.data and key in asset_data: + instance.data[key] = asset_data[key] + def fill_latest_versions(self, context, project_name): """Try to find latest version for each instance's subset. diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/openpype/plugins/publish/collect_anatomy_object.py index b1415098b6..725cae2b14 100644 --- a/openpype/plugins/publish/collect_anatomy_object.py +++ b/openpype/plugins/publish/collect_anatomy_object.py @@ -1,29 +1,32 @@ """Collect Anatomy object. Requires: - os.environ -> AVALON_PROJECT + context -> projectName Provides: context -> anatomy (openpype.pipeline.anatomy.Anatomy) """ -import os + import pyblish.api -from openpype.pipeline import Anatomy +from openpype.pipeline import Anatomy, KnownPublishError class CollectAnatomyObject(pyblish.api.ContextPlugin): - """Collect Anatomy object into Context""" + """Collect Anatomy object into Context. + + Order offset could be changed to '-0.45'. + """ order = pyblish.api.CollectorOrder - 0.4 label = "Collect Anatomy Object" def process(self, context): - project_name = os.environ.get("AVALON_PROJECT") + project_name = context.data.get("projectName") if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` is not set." + raise KnownPublishError(( + "Project name is not set in 'projectName'." "Could not initialize project's Anatomy." - ) + )) context.data["anatomy"] = Anatomy(project_name) diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_avalon_entities.py index 6cd0d136e8..3b05b6ae98 100644 --- a/openpype/plugins/publish/collect_avalon_entities.py +++ b/openpype/plugins/publish/collect_avalon_entities.py @@ -1,35 +1,38 @@ """Collect Anatomy and global anatomy data. 
Requires: - session -> AVALON_PROJECT, AVALON_ASSET + session -> AVALON_ASSET + context -> projectName Provides: - context -> projectEntity - project entity from database - context -> assetEntity - asset entity from database + context -> projectEntity - Project document from database. + context -> assetEntity - Asset document from database only if 'asset' is + set in context. """ import pyblish.api from openpype.client import get_project, get_asset_by_name -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, KnownPublishError class CollectAvalonEntities(pyblish.api.ContextPlugin): - """Collect Anatomy into Context""" + """Collect Anatomy into Context.""" order = pyblish.api.CollectorOrder - 0.1 label = "Collect Avalon Entities" def process(self, context): legacy_io.install() - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = context.data["projectName"] asset_name = legacy_io.Session["AVALON_ASSET"] task_name = legacy_io.Session["AVALON_TASK"] project_entity = get_project(project_name) - assert project_entity, ( - "Project '{0}' was not found." - ).format(project_name) + if not project_entity: + raise KnownPublishError( + "Project '{0}' was not found.".format(project_name) + ) self.log.debug("Collected Project \"{}\"".format(project_entity)) context.data["projectEntity"] = project_entity diff --git a/openpype/plugins/publish/collect_cleanup_keys.py b/openpype/plugins/publish/collect_cleanup_keys.py index 635b038387..b9cd1a9fc9 100644 --- a/openpype/plugins/publish/collect_cleanup_keys.py +++ b/openpype/plugins/publish/collect_cleanup_keys.py @@ -14,7 +14,7 @@ class CollectCleanupKeys(pyblish.api.ContextPlugin): """Prepare keys for 'ExplicitCleanUp' plugin.""" label = "Collect Cleanup Keys" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 def process(self, context): context.data["cleanupFullPaths"] = [] diff --git a/openpype/plugins/publish/collect_current_context.py b/openpype/plugins/publish/collect_current_context.py new file mode 100644 index 0000000000..7e42700d7d --- /dev/null +++ b/openpype/plugins/publish/collect_current_context.py @@ -0,0 +1,47 @@ +""" +Provides: + context -> projectName (str) + context -> asset (str) + context -> task (str) +""" + +import pyblish.api +from openpype.pipeline import legacy_io + + +class CollectCurrentContext(pyblish.api.ContextPlugin): + """Collect project context into publish context data. + + Plugin does not override any value if is already set. + """ + + order = pyblish.api.CollectorOrder - 0.5 + label = "Collect Current context" + + def process(self, context): + # Make sure 'legacy_io' is intalled + legacy_io.install() + + # Check if values are already set + project_name = context.data.get("projectName") + asset_name = context.data.get("asset") + task_name = context.data.get("task") + if not project_name: + project_name = legacy_io.current_project() + context.data["projectName"] = project_name + + if not asset_name: + asset_name = legacy_io.Session.get("AVALON_ASSET") + context.data["asset"] = asset_name + + if not task_name: + task_name = legacy_io.Session.get("AVALON_TASK") + context.data["task"] = task_name + + # QUESTION should we be explicit with keys? 
(the same on instances) + # - 'asset' -> 'assetName' + # - 'task' -> 'taskName' + + self.log.info(( + "Collected project context\nProject: {}\nAsset: {}\nTask: {}" + ).format(project_name, asset_name, task_name)) diff --git a/openpype/plugins/publish/collect_datetime_data.py b/openpype/plugins/publish/collect_datetime_data.py index 1675ae1a98..b3178ca3d2 100644 --- a/openpype/plugins/publish/collect_datetime_data.py +++ b/openpype/plugins/publish/collect_datetime_data.py @@ -5,14 +5,14 @@ Provides: """ import pyblish.api -from openpype.api import config +from openpype.lib.dateutils import get_datetime_data class CollectDateTimeData(pyblish.api.ContextPlugin): - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 label = "Collect DateTime data" def process(self, context): key = "datetimeData" if key not in context.data: - context.data[key] = config.get_datetime_data() + context.data[key] = get_datetime_data() diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index f6ead98809..9236c698ed 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -19,6 +19,9 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): if not create_context: return + project_name = create_context.project_name + if project_name: + context.data["projectName"] = project_name for created_instance in create_context.instances: instance_data = created_instance.data_to_store() if instance_data["active"]: @@ -44,15 +47,14 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): "subset": subset, "asset": in_data["asset"], "task": in_data["task"], - "label": subset, + "label": in_data.get("label") or subset, "name": subset, "family": in_data["family"], - "families": instance_families + "families": instance_families, + "representations": [] }) for key, value in in_data.items(): if key not in instance.data: instance.data[key] = value self.log.info("collected instance: {}".format(instance.data)) self.log.info("parsing data: {}".format(in_data)) - - instance.data["representations"] = list() diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index 91d5162d62..687397be8a 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -1,7 +1,5 @@ import pyblish.api -from openpype.pipeline import legacy_io - class CollectHierarchy(pyblish.api.ContextPlugin): """Collecting hierarchy from `parents`. 
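Several collectors in this patch move to `CollectorOrder - 0.5` so that context keys such as "projectName" exist before dependent plugins run. A minimal sketch of why the offset matters, with two hypothetical plugins (pyblish sorts plugins by their `order` value):

```python
import pyblish.api

class CollectContextFirst(pyblish.api.ContextPlugin):
    # Runs early: - 0.5 sorts before - 0.4
    order = pyblish.api.CollectorOrder - 0.5

    def process(self, context):
        context.data["projectName"] = "demo"

class UseContextLater(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder - 0.4

    def process(self, context):
        # Safe: the earlier collector already filled the key
        self.log.info(context.data["projectName"])
```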
@@ -20,7 +18,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): def process(self, context): temp_context = {} - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = context.data["projectName"] final_context = {} final_context[project_name] = {} final_context[project_name]['entity_type'] = 'Project' diff --git a/openpype/plugins/publish/collect_machine_name.py b/openpype/plugins/publish/collect_machine_name.py index 72ef68f8ed..8c25966031 100644 --- a/openpype/plugins/publish/collect_machine_name.py +++ b/openpype/plugins/publish/collect_machine_name.py @@ -11,7 +11,7 @@ import pyblish.api class CollectMachineName(pyblish.api.ContextPlugin): label = "Local Machine Name" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 hosts = ["*"] def process(self, context): diff --git a/openpype/plugins/publish/collect_modules.py b/openpype/plugins/publish/collect_modules.py index 2f6cb1ef0e..d76096bcd9 100644 --- a/openpype/plugins/publish/collect_modules.py +++ b/openpype/plugins/publish/collect_modules.py @@ -7,7 +7,7 @@ import pyblish.api class CollectModules(pyblish.api.ContextPlugin): """Collect OpenPype modules.""" - order = pyblish.api.CollectorOrder - 0.45 + order = pyblish.api.CollectorOrder - 0.5 label = "OpenPype Modules" def process(self, context): diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index c86e777850..40e89e29bc 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -23,7 +23,7 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin): label = "Collect OTIO Frame Ranges" order = pyblish.api.CollectorOrder - 0.08 families = ["shot", "clip"] - hosts = ["resolve", "hiero", "flame"] + hosts = ["resolve", "hiero", "flame", "traypublisher"] def process(self, instance): # get basic variables diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index fc6a9b50f2..9c19f8a78e 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -116,8 +116,10 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): # check in two way if it is sequence if hasattr(otio.schema, "ImageSequenceReference"): # for OpenTimelineIO 0.13 and newer - if isinstance(media_ref, - otio.schema.ImageSequenceReference): + if isinstance( + media_ref, + otio.schema.ImageSequenceReference + ): is_sequence = True else: # for OpenTimelineIO 0.12 and older @@ -139,11 +141,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): padding=media_ref.frame_zero_padding ) collection.indexes.update( - [i for i in range(a_frame_start_h, (a_frame_end_h + 1))]) + list(range(a_frame_start_h, (a_frame_end_h + 1))) + ) - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) else: # in case it is file sequence but not new OTIO schema # `ImageSequenceReference` @@ -152,9 +152,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): path, trimmed_media_range_h, metadata) self.staging_dir, collection = collection_data - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) + self.log.debug(collection) + repre = self._create_representation( + frame_start, frame_end, collection=collection) else: _trim = False dirname, filename = 
os.path.split(media_ref.target_url) @@ -198,7 +198,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): if kwargs.get("collection"): collection = kwargs.get("collection") - files = [f for f in collection] + files = list(collection) ext = collection.format("{tail}") representation_data.update({ "name": ext[1:], @@ -220,7 +220,5 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): }) if kwargs.get("trim") is True: - representation_data.update({ - "tags": ["trim"] - }) + representation_data["tags"] = ["trim"] return representation_data diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py index 670e57ed10..8f8d0a5eeb 100644 --- a/openpype/plugins/publish/collect_rendered_files.py +++ b/openpype/plugins/publish/collect_rendered_files.py @@ -1,7 +1,7 @@ """Loads publishing context from json and continues in publish process. Requires: - anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11) + anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.4) Provides: context, instances -> All data from previous publishing process. @@ -12,7 +12,7 @@ import json import pyblish.api -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, KnownPublishError class CollectRenderedFiles(pyblish.api.ContextPlugin): @@ -20,7 +20,12 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): This collector will try to find json files in provided `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + Note: + We should split this collector and move the part which handle reading + of file and it's context from session data before collect anatomy + and instance creation dependent on anatomy can be done here. """ + order = pyblish.api.CollectorOrder - 0.2 # Keep "filesequence" for backwards compatibility of older jobs targets = ["filesequence", "farm"] @@ -118,23 +123,20 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): def process(self, context): self._context = context - assert os.environ.get("OPENPYPE_PUBLISH_DATA"), ( - "Missing `OPENPYPE_PUBLISH_DATA`") + if not os.environ.get("OPENPYPE_PUBLISH_DATA"): + raise KnownPublishError("Missing `OPENPYPE_PUBLISH_DATA`") + + # QUESTION + # Do we support (or want support) multiple files in the variable? + # - what if they have different context? paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep) - project_name = os.environ.get("AVALON_PROJECT") - if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` was not found." - "Could not set project `root` which may cause issues." 
- ) - - # TODO root filling should happen after collect Anatomy + # Using already collected Anatomy + anatomy = context.data["anatomy"] self.log.info("Getting root setting for project \"{}\"".format( - project_name + anatomy.project_name )) - anatomy = context.data["anatomy"] self.log.info("anatomy: {}".format(anatomy.roots)) try: session_is_set = False diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index 8bdf70b529..00f65b8b67 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -13,8 +13,6 @@ import copy import pyblish.api -from openpype.pipeline import legacy_io - class CollectResourcesPath(pyblish.api.InstancePlugin): """Generate directory path where the files and resources will be stored""" @@ -58,7 +56,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): "effect", "staticMesh", "skeletalMesh" - ] def process(self, instance): @@ -86,11 +83,10 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." - ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled["publish"]["path"] # Directory diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index 8d447ba595..6b4e5f48c5 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -30,9 +30,15 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) self.project = None - self.import_to_avalon(project_name, hierarchy_context) + self.import_to_avalon(context, project_name, hierarchy_context) - def import_to_avalon(self, project_name, input_data, parent=None): + def import_to_avalon( + self, + context, + project_name, + input_data, + parent=None, + ): for name in input_data: self.log.info("input_data[name]: {}".format(input_data[name])) entity_data = input_data[name] @@ -127,12 +133,19 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if unarchive_entity is None: # Create entity if doesn"t exist entity = self.create_avalon_asset( - project_name, name, data + name, data ) else: # Unarchive if entity was archived entity = self.unarchive_entity(unarchive_entity, data) + # make sure all relative instances have correct avalon data + self._set_avalon_data_to_relative_instances( + context, + project_name, + entity + ) + if update_data: # Update entity data with input data legacy_io.update_many( @@ -142,7 +155,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if "childs" in entity_data: self.import_to_avalon( - project_name, entity_data["childs"], entity + context, project_name, entity_data["childs"], entity ) def unarchive_entity(self, entity, data): @@ -159,20 +172,52 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): {"_id": entity["_id"]}, new_entity ) + return new_entity - def create_avalon_asset(self, project_name, name, data): - item = { + def create_avalon_asset(self, name, data): + asset_doc = { "schema": "openpype:asset-3.0", "name": name, "parent": self.project["_id"], "type": "asset", "data": data } - self.log.debug("Creating asset: 
{}".format(item)) - entity_id = legacy_io.insert_one(item).inserted_id + self.log.debug("Creating asset: {}".format(asset_doc)) + asset_doc["_id"] = legacy_io.insert_one(asset_doc).inserted_id - return get_asset_by_id(project_name, entity_id) + return asset_doc + + def _set_avalon_data_to_relative_instances( + self, + context, + project_name, + asset_doc + ): + for instance in context: + # Skip instance if has filled asset entity + if instance.data.get("assetEntity"): + continue + asset_name = asset_doc["name"] + inst_asset_name = instance.data["asset"] + + if asset_name == inst_asset_name: + instance.data["assetEntity"] = asset_doc + + # get parenting data + parents = asset_doc["data"].get("parents") or list() + + # equire only relative parent + parent_name = project_name + if parents: + parent_name = parents[-1] + + # update avalon data on instance + instance.data["anatomyData"].update({ + "hierarchy": "/".join(parents), + "task": {}, + "parent": parent_name + }) def _get_active_assets(self, context): """ Returns only asset dictionary. diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py index 00c1748cdc..ed30a2f0f5 100644 --- a/openpype/plugins/publish/extract_otio_audio_tracks.py +++ b/openpype/plugins/publish/extract_otio_audio_tracks.py @@ -57,15 +57,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): audio_inputs.insert(0, empty) # create cmd - cmd = path_to_subprocess_arg(self.ffmpeg_path) + " " - cmd += self.create_cmd(audio_inputs) - cmd += path_to_subprocess_arg(audio_temp_fpath) - - # run subprocess - self.log.debug("Executing: {}".format(cmd)) - openpype.api.run_subprocess( - cmd, shell=True, logger=self.log - ) + self.mix_audio(audio_inputs, audio_temp_fpath) # remove empty os.remove(empty["mediaPath"]) @@ -245,46 +237,80 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): "durationSec": max_duration_sec } - def create_cmd(self, inputs): + def mix_audio(self, audio_inputs, audio_temp_fpath): """Creating multiple input cmd string Args: - inputs (list): list of input dicts. Order mater. + audio_inputs (list): list of input dicts. Order mater. 
Returns: str: the command body - """ + + longest_input = 0 + for audio_input in audio_inputs: + audio_len = audio_input["durationSec"] + if audio_len > longest_input: + longest_input = audio_len + # create cmd segments - _inputs = "" - _filters = "-filter_complex \"" - _channels = "" - for index, input in enumerate(inputs): - input_format = input.copy() - input_format.update({"i": index}) - input_format["mediaPath"] = path_to_subprocess_arg( - input_format["mediaPath"] + input_args = [] + filters = [] + tag_names = [] + for index, audio_input in enumerate(audio_inputs): + input_args.extend([ + "-ss", str(audio_input["startSec"]), + "-t", str(audio_input["durationSec"]), + "-i", audio_input["mediaPath"] + ]) + + # Output tag of a filtered audio input + tag_name = "[r{}]".format(index) + tag_names.append(tag_name) + # Delay in audio by delay in item + filters.append("[{}]adelay={}:all=1{}".format( + index, audio_input["delayMilSec"], tag_name + )) + + # Mixing filter + # - dropout transition (when audio will get loader) is set to be + # higher then any input audio item + # - volume is set to number of inputs - each mix adds 1/n volume + # where n is input inder (to get more info read ffmpeg docs and + # send a giftcard to contributor) + filters.append( + ( + "{}amix=inputs={}:duration=first:" + "dropout_transition={},volume={}[a]" + ).format( + "".join(tag_names), + len(audio_inputs), + (longest_input * 1000) + 1000, + len(audio_inputs), ) + ) - _inputs += ( - "-ss {startSec} " - "-t {durationSec} " - "-i {mediaPath} " - ).format(**input_format) + # Store filters to a file (separated by ',') + # - this is to avoid "too long" command issue in ffmpeg + with tempfile.NamedTemporaryFile( + delete=False, mode="w", suffix=".txt" + ) as tmp_file: + filters_tmp_filepath = tmp_file.name + tmp_file.write(",".join(filters)) - _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format( - **input_format) - _channels += "[r{}]".format(index) + args = [self.ffmpeg_path] + args.extend(input_args) + args.extend([ + "-filter_complex_script", filters_tmp_filepath, + "-map", "[a]" + ]) + args.append(audio_temp_fpath) - # merge all cmd segments together - cmd = _inputs + _filters + _channels - cmd += str( - "amix=inputs={inputs}:duration=first:" - "dropout_transition=1000,volume={inputs}[a]\" " - ).format(inputs=len(inputs)) - cmd += "-map \"[a]\" " + # run subprocess + self.log.debug("Executing: {}".format(args)) + openpype.api.run_subprocess(args, logger=self.log) - return cmd + os.remove(filters_tmp_filepath) def create_temp_file(self, name): """Create temp wav file diff --git a/openpype/plugins/publish/extract_otio_file.py b/openpype/plugins/publish/extract_otio_file.py index 3bd217d5d4..4d310ce109 100644 --- a/openpype/plugins/publish/extract_otio_file.py +++ b/openpype/plugins/publish/extract_otio_file.py @@ -12,7 +12,7 @@ class ExtractOTIOFile(openpype.api.Extractor): label = "Extract OTIO file" order = pyblish.api.ExtractorOrder - 0.45 families = ["workfile"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "traypublisher"] def process(self, instance): # create representation data diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 1b6e2a1d61..e16f324e0a 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -360,6 +360,7 @@ class ExtractReview(pyblish.api.InstancePlugin): os.unlink(f) new_repre.update({ + "fps": temp_data["fps"], "name": "{}_{}".format(output_name, output_ext), 
"outputName": output_name, "outputDef": output_def, @@ -1209,7 +1210,6 @@ class ExtractReview(pyblish.api.InstancePlugin): # Get instance data pixel_aspect = temp_data["pixel_aspect"] - if reformat_in_baking: self.log.debug(( "Using resolution from input. It is already " @@ -1229,6 +1229,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # - settings value can't have None but has value of 0 output_width = output_def.get("width") or output_width or None output_height = output_def.get("height") or output_height or None + # Force to use input resolution if output resolution was not defined + # in settings. Resolution from instance is not used when + # 'use_input_res' is set to 'True'. + use_input_res = False # Overscal color overscan_color_value = "black" @@ -1240,6 +1244,17 @@ class ExtractReview(pyblish.api.InstancePlugin): ) self.log.debug("Overscan color: `{}`".format(overscan_color_value)) + # Scale input to have proper pixel aspect ratio + # - scale width by the pixel aspect ratio + scale_pixel_aspect = output_def.get("scale_pixel_aspect", True) + if scale_pixel_aspect and pixel_aspect != 1: + # Change input width after pixel aspect + input_width = int(input_width * pixel_aspect) + use_input_res = True + filters.append(( + "scale={}x{}:flags=lanczos".format(input_width, input_height) + )) + # Convert overscan value video filters overscan_crop = output_def.get("overscan_crop") overscan = OverscanCrop( @@ -1250,13 +1265,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # resolution by it's values if overscan_crop_filters: filters.extend(overscan_crop_filters) + # Change input resolution after overscan crop input_width = overscan.width() input_height = overscan.height() - # Use output resolution as inputs after cropping to skip usage of - # instance data resolution - if output_width is None or output_height is None: - output_width = input_width - output_height = input_height + use_input_res = True # Make sure input width and height is not an odd number input_width_is_odd = bool(input_width % 2 != 0) @@ -1282,8 +1294,10 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("input_width: `{}`".format(input_width)) self.log.debug("input_height: `{}`".format(input_height)) - # Use instance resolution if output definition has not set it. 
- if output_width is None or output_height is None: + # Use instance resolution if output definition has not set it + # - use instance resolution only if there were not scale changes + # that may massivelly affect output 'use_input_res' + if not use_input_res and output_width is None or output_height is None: output_width = temp_data["resolution_width"] output_height = temp_data["resolution_height"] @@ -1325,7 +1339,6 @@ class ExtractReview(pyblish.api.InstancePlugin): output_width == input_width and output_height == input_height and not letter_box_enabled - and pixel_aspect == 1 ): self.log.debug( "Output resolution is same as input's" @@ -1335,66 +1348,16 @@ class ExtractReview(pyblish.api.InstancePlugin): new_repre["resolutionHeight"] = input_height return filters - # defining image ratios - input_res_ratio = ( - (float(input_width) * pixel_aspect) / input_height - ) - output_res_ratio = float(output_width) / float(output_height) - self.log.debug("input_res_ratio: `{}`".format(input_res_ratio)) - self.log.debug("output_res_ratio: `{}`".format(output_res_ratio)) - - # Round ratios to 2 decimal places for comparing - input_res_ratio = round(input_res_ratio, 2) - output_res_ratio = round(output_res_ratio, 2) - - # get scale factor - scale_factor_by_width = ( - float(output_width) / (input_width * pixel_aspect) - ) - scale_factor_by_height = ( - float(output_height) / input_height - ) - - self.log.debug( - "scale_factor_by_with: `{}`".format(scale_factor_by_width) - ) - self.log.debug( - "scale_factor_by_height: `{}`".format(scale_factor_by_height) - ) - # scaling none square pixels and 1920 width - if ( - input_height != output_height - or input_width != output_width - or pixel_aspect != 1 - ): - if input_res_ratio < output_res_ratio: - self.log.debug( - "Input's resolution ratio is lower then output's" - ) - width_scale = int(input_width * scale_factor_by_height) - width_half_pad = int((output_width - width_scale) / 2) - height_scale = output_height - height_half_pad = 0 - else: - self.log.debug("Input is heigher then output") - width_scale = output_width - width_half_pad = 0 - height_scale = int(input_height * scale_factor_by_width) - height_half_pad = int((output_height - height_scale) / 2) - - self.log.debug("width_scale: `{}`".format(width_scale)) - self.log.debug("width_half_pad: `{}`".format(width_half_pad)) - self.log.debug("height_scale: `{}`".format(height_scale)) - self.log.debug("height_half_pad: `{}`".format(height_half_pad)) - + if input_height != output_height or input_width != output_width: filters.extend([ - "scale={}x{}:flags=lanczos".format( - width_scale, height_scale - ), - "pad={}:{}:{}:{}:{}".format( + ( + "scale={}x{}" + ":flags=lanczos" + ":force_original_aspect_ratio=decrease" + ).format(output_width, output_height), + "pad={}:{}:(ow-iw)/2:(oh-ih)/2:{}".format( output_width, output_height, - width_half_pad, height_half_pad, overscan_color_value ), "setsar=1" diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 28685c2e90..69043ee261 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -285,36 +285,34 @@ class ExtractReviewSlate(openpype.api.Extractor): audio_channels, audio_sample_rate, audio_channel_layout, + input_frame_rate ) # replace slate with silent slate for concat slate_v_path = slate_silent_path - # create ffmpeg concat text file path - conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt" - conc_text_path = 
os.path.join( - os.path.normpath(stagingdir), conc_text_file) - _remove_at_end.append(conc_text_path) - self.log.debug("__ conc_text_path: {}".format(conc_text_path)) - - new_line = "\n" - with open(conc_text_path, "w") as conc_text_f: - conc_text_f.writelines([ - "file {}".format( - slate_v_path.replace("\\", "/")), - new_line, - "file {}".format(input_path.replace("\\", "/")) - ]) - - # concat slate and videos together + # concat slate and videos together with concat filter + # this will reencode the output + if input_audio: + fmap = [ + "-filter_complex", + "[0:v] [0:a] [1:v] [1:a] concat=n=2:v=1:a=1 [v] [a]", + "-map", '[v]', + "-map", '[a]' + ] + else: + fmap = [ + "-filter_complex", + "[0:v] [1:v] concat=n=2:v=1:a=0 [v]", + "-map", '[v]' + ] concat_args = [ ffmpeg_path, "-y", - "-f", "concat", - "-safe", "0", - "-i", conc_text_path, - "-c", "copy", + "-i", slate_v_path, + "-i", input_path, ] + concat_args.extend(fmap) if offset_timecode: concat_args.extend(["-timecode", offset_timecode]) # NOTE: Added because of OP Atom demuxers @@ -322,12 +320,18 @@ class ExtractReviewSlate(openpype.api.Extractor): # - keep format of output if format_args: concat_args.extend(format_args) + + if codec_args: + concat_args.extend(codec_args) + # Use arguments from ffmpeg preset source_ffmpeg_cmd = repre.get("ffmpeg_cmd") if source_ffmpeg_cmd: copy_args = ( "-metadata", "-metadata:s:v:0", + "-b:v", + "-b:a", ) args = source_ffmpeg_cmd.split(" ") for indx, arg in enumerate(args): @@ -335,12 +339,14 @@ class ExtractReviewSlate(openpype.api.Extractor): concat_args.append(arg) # assumes arg has one parameter concat_args.append(args[indx + 1]) + # add final output path concat_args.append(output_path) # ffmpeg concat subprocess self.log.debug( - "Executing concat: {}".format(" ".join(concat_args)) + "Executing concat filter: {}".format + (" ".join(concat_args)) ) openpype.api.run_subprocess( concat_args, logger=self.log @@ -488,9 +494,10 @@ class ExtractReviewSlate(openpype.api.Extractor): audio_channels, audio_sample_rate, audio_channel_layout, + input_frame_rate ): # Get duration of one frame in micro seconds - items = audio_sample_rate.split("/") + items = input_frame_rate.split("/") if len(items) == 1: one_frame_duration = 1.0 / float(items[0]) elif len(items) == 2: diff --git a/openpype/plugins/publish/extract_thumbnail.py b/openpype/plugins/publish/extract_thumbnail.py index 7a438ca701..14b43beae8 100644 --- a/openpype/plugins/publish/extract_thumbnail.py +++ b/openpype/plugins/publish/extract_thumbnail.py @@ -1,4 +1,5 @@ import os +import tempfile import pyblish.api from openpype.lib import ( @@ -8,8 +9,6 @@ from openpype.lib import ( run_subprocess, path_to_subprocess_arg, - - execute, ) @@ -20,16 +19,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): order = pyblish.api.ExtractorOrder families = [ "imagesequence", "render", "render2d", "prerender", - "source", "plate", "take" + "source", "clip", "take" ] - hosts = ["shell", "fusion", "resolve"] + hosts = ["shell", "fusion", "resolve", "traypublisher"] enabled = False # presetable attribute ffmpeg_args = None def process(self, instance): - self.log.info("subset {}".format(instance.data['subset'])) + subset_name = instance.data["subset"] + instance_repres = instance.data.get("representations") + if not instance_repres: + self.log.debug(( + "Instance {} does not have representations. 
Skipping" + ).format(subset_name)) + return + + self.log.info( + "Processing instance with subset name {}".format(subset_name) + ) + + # Skip if instance have 'review' key in data set to 'False' + if not self._is_review_instance(instance): + self.log.info("Skipping - no review set on instance.") + return + + # Check if already has thumbnail created + if self._already_has_thumbnail(instance_repres): + self.log.info("Thumbnail representation already present.") + return # skip crypto passes. # TODO: This is just a quick fix and has its own side-effects - it is @@ -37,16 +56,29 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): # This must be solved properly, maybe using tags on # representation that can be determined much earlier and # with better precision. - if 'crypto' in instance.data['subset'].lower(): + if "crypto" in subset_name.lower(): self.log.info("Skipping crypto passes.") return - # Skip if review not set. - if not instance.data.get("review", True): - self.log.info("Skipping - no review set on instance.") + filtered_repres = self._get_filtered_repres(instance) + if not filtered_repres: + self.log.info(( + "Instance don't have representations" + " that can be used as source for thumbnail. Skipping" + )) return - filtered_repres = self._get_filtered_repres(instance) + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + instance.context.data["cleanupFullPaths"].append(dst_staging) + + thumbnail_created = False + oiio_supported = is_oiio_supported() for repre in filtered_repres: repre_files = repre["files"] if not isinstance(repre_files, (list, tuple)): @@ -55,41 +87,43 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): file_index = int(float(len(repre_files)) * 0.5) input_file = repre_files[file_index] - stagingdir = os.path.normpath(repre["stagingDir"]) - - full_input_path = os.path.join(stagingdir, input_file) + src_staging = os.path.normpath(repre["stagingDir"]) + full_input_path = os.path.join(src_staging, input_file) self.log.info("input {}".format(full_input_path)) filename = os.path.splitext(input_file)[0] - if not filename.endswith('.'): - filename += "." 
-        jpeg_file = filename + "jpg"
-        full_output_path = os.path.join(stagingdir, jpeg_file)
+            jpeg_file = filename + ".jpg"
+            full_output_path = os.path.join(dst_staging, jpeg_file)

-            thumbnail_created = False
-        # Try to use FFMPEG if OIIO is not supported (for cases when
-        # oiiotool isn't available)
-        if not is_oiio_supported():
-            thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path)  # noqa
-        else:
+            if oiio_supported:
+                self.log.info("Trying to convert with OIIO")
                 # If the input can be read by OIIO then use OIIO method for
                 # conversion otherwise use ffmpeg
-            self.log.info("Trying to convert with OIIO")  # noqa
-            thumbnail_created = self.create_thumbnail_oiio(full_input_path, full_output_path)  # noqa
+                thumbnail_created = self.create_thumbnail_oiio(
+                    full_input_path, full_output_path
+                )

-            if not thumbnail_created:
-                self.log.info("Converting with FFMPEG because input can't be read by OIIO.")  # noqa
-                thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path)  # noqa
-
-        # Skip the rest of the process if the thumbnail wasn't created
+            # Try to use FFMPEG if OIIO is not supported or for cases when
+            # oiiotool isn't available
             if not thumbnail_created:
-            self.log.warning("Thumbanil has not been created.")
-            return
+                if oiio_supported:
+                    self.log.info((
+                        "Converting with FFMPEG because input"
+                        " can't be read by OIIO."
+                    ))
+
+                thumbnail_created = self.create_thumbnail_ffmpeg(
+                    full_input_path, full_output_path
+                )
+
+            # Skip representation and try next one if it wasn't created
+            if not thumbnail_created:
+                continue

             new_repre = {
                 "name": "thumbnail",
                 "ext": "jpg",
                 "files": jpeg_file,
-                "stagingDir": stagingdir,
+                "stagingDir": dst_staging,
                 "thumbnail": True,
                 "tags": ["thumbnail"]
             }
@@ -102,6 +136,23 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             # There is no need to create more than one thumbnail
             break

+        if not thumbnail_created:
+            self.log.warning("Thumbnail has not been created.")
+
+    def _is_review_instance(self, instance):
+        # TODO: We should probably handle "not creating" of thumbnail
+        # another way than checking for the "review" key on instance data?
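+        # A missing "review" key defaults to True so instances that never
+        # set the key still get a thumbnail.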
+ if instance.data.get("review", True): + return True + return False + + def _already_has_thumbnail(self, repres): + for repre in repres: + self.log.info("repre {}".format(repre)) + if repre["name"] == "thumbnail": + return True + return False + def _get_filtered_repres(self, instance): filtered_repres = [] src_repres = instance.data.get("representations") or [] @@ -124,12 +175,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): def create_thumbnail_oiio(self, src_path, dst_path): self.log.info("outputting {}".format(dst_path)) oiio_tool_path = get_oiio_tools_path() - oiio_cmd = [oiio_tool_path, "-a", - src_path, "-o", - dst_path - ] - subprocess_exr = " ".join(oiio_cmd) - self.log.info(f"running: {subprocess_exr}") + oiio_cmd = [ + oiio_tool_path, + "-a", src_path, + "-o", dst_path + ] + self.log.info("running: {}".format(" ".join(oiio_cmd))) try: run_subprocess(oiio_cmd, logger=self.log) return True diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/plugins/publish/extract_trim_video_audio.py similarity index 74% rename from openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py rename to openpype/plugins/publish/extract_trim_video_audio.py index 51dc84e9a2..06817c4b5a 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py +++ b/openpype/plugins/publish/extract_trim_video_audio.py @@ -14,7 +14,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): # must be before `ExtractThumbnailSP` order = pyblish.api.ExtractorOrder - 0.01 label = "Extract Trim Video/Audio" - hosts = ["standalonepublisher"] + hosts = ["standalonepublisher", "traypublisher"] families = ["clip", "trimming"] # make sure it is enabled only if at least both families are available @@ -40,6 +40,21 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): fps = instance.data["fps"] video_file_path = instance.data["editorialSourcePath"] extensions = instance.data.get("extensions", ["mov"]) + output_file_type = instance.data.get("outputFileType") + reviewable = "review" in instance.data["families"] + + frame_start = int(instance.data["frameStart"]) + frame_end = int(instance.data["frameEnd"]) + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + + clip_start_h = float(instance.data["clipInH"]) + _dur = instance.data["clipDuration"] + handle_dur = (handle_start + handle_end) + clip_dur_h = float(_dur + handle_dur) + + if output_file_type: + extensions = [output_file_type] for ext in extensions: self.log.info("Processing ext: `{}`".format(ext)) @@ -49,16 +64,10 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): clip_trimed_path = os.path.join( staging_dir, instance.data["name"] + ext) - # # check video file metadata - # input_data = plib.get_ffprobe_streams(video_file_path)[0] - # self.log.debug(f"__ input_data: `{input_data}`") - - start = float(instance.data["clipInH"]) - dur = float(instance.data["clipDurationH"]) if ext == ".wav": # offset time as ffmpeg is having bug - start += 0.5 + clip_start_h += 0.5 # remove "review" from families instance.data["families"] = [ fml for fml in instance.data["families"] @@ -67,9 +76,9 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): ffmpeg_args = [ ffmpeg_path, - "-ss", str(start / fps), + "-ss", str(clip_start_h / fps), "-i", video_file_path, - "-t", str(dur / fps) + "-t", str(clip_dur_h / fps) ] if ext in [".mov", ".mp4"]: ffmpeg_args.extend([ @@ -98,14 +107,15 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): "ext": 
ext[1:], "files": os.path.basename(clip_trimed_path), "stagingDir": staging_dir, - "frameStart": int(instance.data["frameStart"]), - "frameEnd": int(instance.data["frameEnd"]), - "frameStartFtrack": int(instance.data["frameStartH"]), - "frameEndFtrack": int(instance.data["frameEndH"]), + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartFtrack": frame_start - handle_start, + "frameEndFtrack": frame_end + handle_end, "fps": fps, + "tags": [] } - if ext in [".mov", ".mp4"]: + if ext in [".mov", ".mp4"] and reviewable: repre.update({ "thumbnail": True, "tags": ["review", "ftrackreview", "delete"]}) diff --git a/openpype/plugins/publish/help/validate_containers.xml b/openpype/plugins/publish/help/validate_containers.xml new file mode 100644 index 0000000000..5d18bb4c19 --- /dev/null +++ b/openpype/plugins/publish/help/validate_containers.xml @@ -0,0 +1,23 @@ + + + +Not up-to-date assets + +## Outdated containers found + +Scene contains one or more outdated loaded containers, eg. versions loaded into scene by Loader are not latest. + +### How to repair? + +Use 'Scene Inventory' and update all highlighted old container to latest OR + refresh Publish and switch 'Validate Containers' toggle on 'Options' tab. + + WARNING: Skipping this validator will result in publishing (and probably rendering) old version of loaded assets. + + +### __Detailed Info__ (optional) + +This validates whether you're working with the latest versions of published content loaded into your scene. This protects you from using outdated versions of an asset. + + + \ No newline at end of file diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py new file mode 100644 index 0000000000..f99c718f8a --- /dev/null +++ b/openpype/plugins/publish/integrate.py @@ -0,0 +1,907 @@ +import os +import logging +import sys +import copy +import clique +import six + +from openpype.client.operations import ( + OperationsSession, + new_subset_document, + new_version_doc, + new_representation_doc, + prepare_subset_update_data, + prepare_version_update_data, + prepare_representation_update_data, +) +from bson.objectid import ObjectId +import pyblish.api + +from openpype.client import ( + get_representations, + get_subset_by_name, + get_version_by_name, +) +from openpype.lib import source_hash +from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib.file_transaction import FileTransaction +from openpype.pipeline import legacy_io +from openpype.pipeline.publish import KnownPublishError + +log = logging.getLogger(__name__) + + +def get_instance_families(instance): + """Get all families of the instance""" + # todo: move this to lib? + family = instance.data.get("family") + families = [] + if family: + families.append(family) + + for _family in (instance.data.get("families") or []): + if _family not in families: + families.append(_family) + + return families + + +def get_frame_padded(frame, padding): + """Return frame number as string with `padding` amount of padded zeros""" + return "{frame:0{padding}d}".format(padding=padding, frame=frame) + + +class IntegrateAsset(pyblish.api.InstancePlugin): + """Register publish in the database and transfer files to destinations. 
+ + Steps: + 1) Register the subset and version + 2) Transfer the representation files to the destination + 3) Register the representation + + Requires: + instance.data['representations'] - must be a list and each member + must be a dictionary with following data: + 'files': list of filenames for sequence, string for single file. + Only the filename is allowed, without the folder path. + 'stagingDir': "path/to/folder/with/files" + 'name': representation name (usually the same as extension) + 'ext': file extension + optional data + "frameStart" + "frameEnd" + 'fps' + "data": additional metadata for each representation. + """ + + label = "Integrate Asset" + order = pyblish.api.IntegratorOrder + families = ["workfile", + "pointcache", + "camera", + "animation", + "model", + "mayaAscii", + "mayaScene", + "setdress", + "layout", + "ass", + "vdbcache", + "scene", + "vrayproxy", + "vrayscene_layer", + "render", + "prerender", + "imagesequence", + "review", + "rendersetup", + "rig", + "plate", + "look", + "audio", + "yetiRig", + "yeticache", + "nukenodes", + "gizmo", + "source", + "matchmove", + "image", + "assembly", + "fbx", + "textures", + "action", + "harmony.template", + "harmony.palette", + "editorial", + "background", + "camerarig", + "redshiftproxy", + "effect", + "xgen", + "hda", + "usd", + "staticMesh", + "skeletalMesh", + "mvLook", + "mvUsd", + "mvUsdComposition", + "mvUsdOverride", + "simpleUnrealTexture" + ] + + default_template_name = "publish" + + # Representation context keys that should always be written to + # the database even if not used by the destination template + db_representation_context_keys = [ + "project", "asset", "task", "subset", "version", "representation", + "family", "hierarchy", "username", "output" + ] + skip_host_families = [] + + def process(self, instance): + if self._temp_skip_instance_by_settings(instance): + return + + # Mark instance as processed for legacy integrator + instance.data["processedWithNewIntegrator"] = True + + # Instance should be integrated on a farm + if instance.data.get("farm"): + self.log.info( + "Instance is marked to be processed on farm. Skipping") + return + + filtered_repres = self.filter_representations(instance) + # Skip instance if there are not representations to integrate + # all representations should not be integrated + if not filtered_repres: + self.log.warning(( + "Skipping, there are no representations" + " to integrate for instance {}" + ).format(instance.data["family"])) + return + + file_transactions = FileTransaction(log=self.log) + try: + self.register(instance, file_transactions, filtered_repres) + except Exception: + # clean destination + # todo: preferably we'd also rollback *any* changes to the database + file_transactions.rollback() + self.log.critical("Error when registering", exc_info=True) + six.reraise(*sys.exc_info()) + + # Finalizing can't rollback safely so no use for moving it to + # the try, except. + file_transactions.finalize() + + def _temp_skip_instance_by_settings(self, instance): + """Decide if instance will be processed with new or legacy integrator. + + This is temporary solution until we test all usecases with new (this) + integrator plugin. 
+ """ + + host_name = instance.context.data["hostName"] + instance_family = instance.data["family"] + instance_families = set(instance.data.get("families") or []) + + skip = False + for item in self.skip_host_families: + if host_name not in item["host"]: + continue + + families = set(item["families"]) + if instance_family in families: + skip = True + break + + for family in instance_families: + if family in families: + skip = True + break + + if skip: + break + + if skip: + self.log.debug("Instance is marked to be skipped by settings.") + return skip + + def filter_representations(self, instance): + # Prepare repsentations that should be integrated + repres = instance.data.get("representations") + # Raise error if instance don't have any representations + if not repres: + raise KnownPublishError( + "Instance {} has no representations to integrate".format( + instance.data["family"] + ) + ) + + # Validate type of stored representations + if not isinstance(repres, (list, tuple)): + raise TypeError( + "Instance 'files' must be a list, got: {0} {1}".format( + str(type(repres)), str(repres) + ) + ) + + # Filter representations + filtered_repres = [] + for repre in repres: + if "delete" in repre.get("tags", []): + continue + filtered_repres.append(repre) + + return filtered_repres + + def register(self, instance, file_transactions, filtered_repres): + project_name = legacy_io.active_project() + + instance_stagingdir = instance.data.get("stagingDir") + if not instance_stagingdir: + self.log.info(( + "{0} is missing reference to staging directory." + " Will try to get it from representation." + ).format(instance)) + + else: + self.log.debug( + "Establishing staging directory " + "@ {0}".format(instance_stagingdir) + ) + + template_name = self.get_template_name(instance) + + op_session = OperationsSession() + subset = self.prepare_subset( + instance, op_session, project_name + ) + version = self.prepare_version( + instance, op_session, subset, project_name + ) + instance.data["versionEntity"] = version + + # Get existing representations (if any) + existing_repres_by_name = { + repre_doc["name"].lower(): repre_doc + for repre_doc in get_representations( + project_name, + version_ids=[version["_id"]], + fields=["_id", "name"] + ) + } + + # Prepare all representations + prepared_representations = [] + for repre in filtered_repres: + # todo: reduce/simplify what is returned from this function + prepared = self.prepare_representation( + repre, + template_name, + existing_repres_by_name, + version, + instance_stagingdir, + instance) + + for src, dst in prepared["transfers"]: + # todo: add support for hardlink transfers + file_transactions.add(src, dst) + + prepared_representations.append(prepared) + + # Each instance can also have pre-defined transfers not explicitly + # part of a representation - like texture resources used by a + # .ma representation. Those destination paths are pre-defined, etc. + # todo: should we move or simplify this logic? 
+ resource_destinations = set() + for src, dst in instance.data.get("transfers", []): + file_transactions.add(src, dst, mode=FileTransaction.MODE_COPY) + resource_destinations.add(os.path.abspath(dst)) + + for src, dst in instance.data.get("hardlinks", []): + file_transactions.add(src, dst, mode=FileTransaction.MODE_HARDLINK) + resource_destinations.add(os.path.abspath(dst)) + + # Bulk write to the database + # We write the subset and version to the database before the File + # Transaction to reduce the chances of another publish trying to + # publish to the same version number since that chance can greatly + # increase if the file transaction takes a long time. + op_session.commit() + + self.log.info("Subset {subset[name]} and Version {version[name]} " + "written to database..".format(subset=subset, + version=version)) + + # Process all file transfers of all integrations now + self.log.debug("Integrating source files to destination ...") + file_transactions.process() + self.log.debug( + "Backed up existing files: {}".format(file_transactions.backups)) + self.log.debug( + "Transferred files: {}".format(file_transactions.transferred)) + self.log.debug("Retrieving Representation Site Sync information ...") + + # Get the accessible sites for Site Sync + modules_by_name = instance.context.data["openPypeModules"] + sync_server_module = modules_by_name["sync_server"] + sites = sync_server_module.compute_resource_sync_sites( + project_name=instance.data["projectEntity"]["name"] + ) + self.log.debug("Sync Server Sites: {}".format(sites)) + + # Compute the resource file infos once (files belonging to the + # version instance instead of an individual representation) so + # we can re-use those file infos per representation + anatomy = instance.context.data["anatomy"] + resource_file_infos = self.get_files_info(resource_destinations, + sites=sites, + anatomy=anatomy) + + # Finalize the representations now the published files are integrated + # Get 'files' info for representations and its attached resources + new_repre_names_low = set() + for prepared in prepared_representations: + repre_doc = prepared["representation"] + repre_update_data = prepared["repre_doc_update_data"] + transfers = prepared["transfers"] + destinations = [dst for src, dst in transfers] + repre_doc["files"] = self.get_files_info( + destinations, sites=sites, anatomy=anatomy + ) + + # Add the version resource file infos to each representation + repre_doc["files"] += resource_file_infos + + # Set up representation for writing to the database. 
Since + # we *might* be overwriting an existing entry if the version + # already existed we'll use ReplaceOnce with `upsert=True` + if repre_update_data is None: + op_session.create_entity( + project_name, repre_doc["type"], repre_doc + ) + else: + op_session.update_entity( + project_name, + repre_doc["type"], + repre_doc["_id"], + repre_update_data + ) + + new_repre_names_low.add(repre_doc["name"].lower()) + + # Delete any existing representations that didn't get any new data + # if the instance is not set to append mode + if not instance.data.get("append", False): + for name, existing_repres in existing_repres_by_name.items(): + if name not in new_repre_names_low: + # We add the exact representation name because `name` is + # lowercase for name matching only and not in the database + op_session.delete_entity( + project_name, "representation", existing_repres["_id"] + ) + + self.log.debug("{}".format(op_session.to_data())) + op_session.commit() + + # Backwards compatibility + # todo: can we avoid the need to store this? + instance.data["published_representations"] = { + p["representation"]["_id"]: p for p in prepared_representations + } + + self.log.info("Registered {} representations" + "".format(len(prepared_representations))) + + def prepare_subset(self, instance, op_session, project_name): + asset_doc = instance.data["assetEntity"] + subset_name = instance.data["subset"] + family = instance.data["family"] + self.log.debug("Subset: {}".format(subset_name)) + + # Get existing subset if it exists + existing_subset_doc = get_subset_by_name( + project_name, subset_name, asset_doc["_id"] + ) + + # Define subset data + data = { + "families": get_instance_families(instance) + } + + subset_group = instance.data.get("subsetGroup") + if subset_group: + data["subsetGroup"] = subset_group + + subset_id = None + if existing_subset_doc: + subset_id = existing_subset_doc["_id"] + subset_doc = new_subset_document( + subset_name, family, asset_doc["_id"], data, subset_id + ) + + if existing_subset_doc is None: + # Create a new subset + self.log.info("Subset '%s' not found, creating ..." % subset_name) + op_session.create_entity( + project_name, subset_doc["type"], subset_doc + ) + + else: + # Update existing subset data with new data and set in database. 
+            # We also change the found subset in-place so we don't need to
+            # re-query the subset afterwards
+            subset_doc["data"].update(data)
+            update_data = prepare_subset_update_data(
+                existing_subset_doc, subset_doc
+            )
+            op_session.update_entity(
+                project_name,
+                subset_doc["type"],
+                subset_doc["_id"],
+                update_data
+            )
+
+        self.log.info("Prepared subset: {}".format(subset_name))
+        return subset_doc
+
+    def prepare_version(self, instance, op_session, subset_doc, project_name):
+        version_number = instance.data["version"]
+
+        existing_version = get_version_by_name(
+            project_name,
+            version_number,
+            subset_doc["_id"],
+            fields=["_id"]
+        )
+        version_id = None
+        if existing_version:
+            version_id = existing_version["_id"]
+
+        version_data = self.create_version_data(instance)
+        version_doc = new_version_doc(
+            version_number,
+            subset_doc["_id"],
+            version_data,
+            version_id
+        )
+
+        if existing_version:
+            self.log.debug("Updating existing version ...")
+            update_data = prepare_version_update_data(
+                existing_version, version_doc
+            )
+            op_session.update_entity(
+                project_name,
+                version_doc["type"],
+                version_doc["_id"],
+                update_data
+            )
+        else:
+            self.log.debug("Creating new version ...")
+            op_session.create_entity(
+                project_name, version_doc["type"], version_doc
+            )
+
+        self.log.info("Prepared version: v{0:03d}".format(version_doc["name"]))
+
+        return version_doc
+
+    def prepare_representation(self, repre,
+                               template_name,
+                               existing_repres_by_name,
+                               version,
+                               instance_stagingdir,
+                               instance):
+
+        # pre-flight validations
+        if repre["ext"].startswith("."):
+            raise KnownPublishError((
+                "Extension must not start with a dot '.': {}"
+            ).format(repre["ext"]))
+
+        if repre.get("transfers"):
+            raise KnownPublishError((
+                "Representation is not allowed to have transfers "
+                "data before integration. They are computed in "
+                "the integrator. 
Got: {}" + ).format(repre["transfers"])) + + # create template data for Anatomy + template_data = copy.deepcopy(instance.data["anatomyData"]) + + # required representation keys + files = repre["files"] + template_data["representation"] = repre["name"] + template_data["ext"] = repre["ext"] + + # optionals + # retrieve additional anatomy data from representation if exists + for key, anatomy_key in { + # Representation Key: Anatomy data key + "resolutionWidth": "resolution_width", + "resolutionHeight": "resolution_height", + "fps": "fps", + "outputName": "output", + "originalBasename": "originalBasename" + }.items(): + # Allow to take value from representation + # if not found also consider instance.data + value = repre.get(key) + if value is None: + value = instance.data.get(key) + + if value is not None: + template_data[anatomy_key] = value + + stagingdir = repre.get("stagingDir") + if not stagingdir: + # Fall back to instance staging dir if not explicitly + # set for representation in the instance + self.log.debug(( + "Representation uses instance staging dir: {}" + ).format(instance_stagingdir)) + stagingdir = instance_stagingdir + + if not stagingdir: + raise KnownPublishError( + "No staging directory set for representation: {}".format(repre) + ) + + self.log.debug("Anatomy template name: {}".format(template_name)) + anatomy = instance.context.data["anatomy"] + publish_template_category = anatomy.templates[template_name] + template = os.path.normpath(publish_template_category["path"]) + + is_udim = bool(repre.get("udim")) + + is_sequence_representation = isinstance(files, (list, tuple)) + if is_sequence_representation: + # Collection of files (sequence) + if any(os.path.isabs(fname) for fname in files): + raise KnownPublishError("Given file names contain full paths") + + src_collections, remainders = clique.assemble(files) + if len(files) < 2 or len(src_collections) != 1 or remainders: + raise KnownPublishError(( + "Files of representation does not contain proper" + " sequence files.\nCollected collections: {}" + "\nCollected remainders: {}" + ).format( + ", ".join([str(col) for col in src_collections]), + ", ".join([str(rem) for rem in remainders]) + )) + + src_collection = src_collections[0] + destination_indexes = list(src_collection.indexes) + # Use last frame for minimum padding + # - that should cover both 'udim' and 'frame' minimum padding + destination_padding = len(str(destination_indexes[-1])) + if not is_udim: + # Change padding for frames if template has defined higher + # padding. + template_padding = int( + publish_template_category["frame_padding"] + ) + if template_padding > destination_padding: + destination_padding = template_padding + + # If the representation has `frameStart` set it renumbers the + # frame indices of the published collection. It will start from + # that `frameStart` index instead. Thus if that frame start + # differs from the collection we want to shift the destination + # frame indices from the source collection. + repre_frame_start = repre.get("frameStart") + if repre_frame_start is not None: + index_frame_start = int(repre["frameStart"]) + # Shift destination sequence to the start frame + destination_indexes = [ + index_frame_start + idx + for idx in range(len(destination_indexes)) + ] + + # To construct the destination template with anatomy we require + # a Frame or UDIM tile set for the template data. We use the first + # index of the destination for that because that could've shifted + # from the source indexes, etc. 
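+            # Illustration (values assumed for this example): a source
+            # sequence 1001-1100 published with repre["frameStart"] = 1
+            # keeps padding 4 (taken from source frame 1100 above), so the
+            # first destination index is formatted as "0001".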
+ first_index_padded = get_frame_padded( + frame=destination_indexes[0], + padding=destination_padding + ) + + # Construct destination collection from template + repre_context = None + dst_filepaths = [] + for index in destination_indexes: + if is_udim: + template_data["udim"] = index + else: + template_data["frame"] = index + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + dst_filepaths.append(template_filled) + if repre_context is None: + self.log.debug( + "Template filled: {}".format(str(template_filled)) + ) + repre_context = template_filled.used_values + + # Make sure context contains frame + # NOTE: Frame would not be available only if template does not + # contain '{frame}' in template -> Do we want support it? + if not is_udim: + repre_context["frame"] = first_index_padded + + # Update the destination indexes and padding + dst_collection = clique.assemble(dst_filepaths)[0][0] + dst_collection.padding = destination_padding + if len(src_collection.indexes) != len(dst_collection.indexes): + raise KnownPublishError(( + "This is a bug. Source sequence frames length" + " does not match integration frames length" + )) + + # Multiple file transfers + transfers = [] + for src_file_name, dst in zip(src_collection, dst_collection): + src = os.path.join(stagingdir, src_file_name) + transfers.append((src, dst)) + + else: + # Single file + fname = files + if os.path.isabs(fname): + self.log.error( + "Filename in representation is filepath {}".format(fname) + ) + raise KnownPublishError( + "This is a bug. Representation file name is full path" + ) + + # Manage anatomy template data + template_data.pop("frame", None) + if is_udim: + template_data["udim"] = repre["udim"][0] + + # Construct destination filepath from template + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + repre_context = template_filled.used_values + dst = os.path.normpath(template_filled) + + # Single file transfer + src = os.path.join(stagingdir, fname) + transfers = [(src, dst)] + + # todo: Are we sure the assumption each representation + # ends up in the same folder is valid? + if not instance.data.get("publishDir"): + instance.data["publishDir"] = ( + anatomy_filled + [template_name] + ["folder"] + ) + + for key in self.db_representation_context_keys: + # Also add these values to the context even if not used by the + # destination template + value = template_data.get(key) + if value is not None: + repre_context[key] = value + + # Explicitly store the full list even though template data might + # have a different value because it uses just a single udim tile + if repre.get("udim"): + repre_context["udim"] = repre.get("udim") # store list + + # Use previous representation's id if there is a name match + existing = existing_repres_by_name.get(repre["name"].lower()) + repre_id = None + if existing: + repre_id = existing["_id"] + + # Store first transferred destination as published path data + # - used primarily for reviews that are integrated to custom modules + # TODO we should probably store all integrated files + # related to the representation? 
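+        # Note: for sequence representations this resolves to the
+        # destination path of the first frame in the published collection.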
+        published_path = transfers[0][1]
+        repre["published_path"] = published_path
+
+        # todo: `repre` is not the actual `representation` entity
+        #       we should simplify/clarify difference between data above
+        #       and the actual representation entity for the database
+        data = repre.get("data", {})
+        data.update({"path": published_path, "template": template})
+        repre_doc = new_representation_doc(
+            repre["name"], version["_id"], repre_context, data, repre_id
+        )
+        update_data = None
+        if repre_id is not None:
+            update_data = prepare_representation_update_data(
+                existing, repre_doc
+            )
+
+        return {
+            "representation": repre_doc,
+            "repre_doc_update_data": update_data,
+            "anatomy_data": template_data,
+            "transfers": transfers,
+            # todo: avoid the need for 'published_files' used by Integrate Hero
+            #       backwards compatibility
+            "published_files": [transfer[1] for transfer in transfers]
+        }
+
+    def create_version_data(self, instance):
+        """Create the data dictionary for the version
+
+        Args:
+            instance: the current instance being published
+
+        Returns:
+            dict: the required information for version["data"]
+        """
+
+        context = instance.context
+
+        # create relative source path for DB
+        if "source" in instance.data:
+            source = instance.data["source"]
+        else:
+            source = context.data["currentFile"]
+            anatomy = instance.context.data["anatomy"]
+            source = self.get_rootless_path(anatomy, source)
+        self.log.debug("Source: {}".format(source))
+
+        version_data = {
+            "families": get_instance_families(instance),
+            "time": context.data["time"],
+            "author": context.data["user"],
+            "source": source,
+            "comment": context.data.get("comment"),
+            "machine": context.data.get("machine"),
+            "fps": instance.data.get("fps", context.data.get("fps"))
+        }
+
+        # todo: preferably we wouldn't need this "if dict" etc. logic and
+        #       instead be able to rely on what the input value is if it's set.
+        intent_value = context.data.get("intent")
+        if intent_value and isinstance(intent_value, dict):
+            intent_value = intent_value.get("value")
+
+        if intent_value:
+            version_data["intent"] = intent_value
+
+        # Include optional data if present in instance data
+        optionals = [
+            "frameStart", "frameEnd", "step", "handles",
+            "handleEnd", "handleStart", "sourceHashes"
+        ]
+        for key in optionals:
+            if key in instance.data:
+                version_data[key] = instance.data[key]
+
+        # Include instance.data[versionData] directly
+        version_data_instance = instance.data.get("versionData")
+        if version_data_instance:
+            version_data.update(version_data_instance)
+
+        return version_data
+
+    def get_template_name(self, instance):
+        """Return anatomy template name to use for integration"""
+        # Define publish template name from profiles
+        filter_criteria = self.get_profile_filter_criteria(instance)
+        template_name_profiles = self._get_template_name_profiles(instance)
+        profile = filter_profiles(
+            template_name_profiles,
+            filter_criteria,
+            logger=self.log
+        )
+
+        if profile:
+            return profile["template_name"]
+        return self.default_template_name
+
+    def _get_template_name_profiles(self, instance):
+        """Receive profiles for publish template keys.
+
+        Reuse template name profiles from the legacy integrator. The goal is
+        to move the profile settings out of plugin settings, but until that
+        happens we want to be able to set them in one place and not break
+        backwards compatibility (more than once).
+ """ + + return ( + instance.context.data["project_settings"] + ["global"] + ["publish"] + ["IntegrateAssetNew"] + ["template_name_profiles"] + ) + + def get_profile_filter_criteria(self, instance): + """Return filter criteria for `filter_profiles`""" + + # Anatomy data is pre-filled by Collectors + anatomy_data = instance.data["anatomyData"] + + # Task can be optional in anatomy data + task = anatomy_data.get("task", {}) + + # Return filter criteria + return { + "families": anatomy_data["family"], + "tasks": task.get("name"), + "task_types": task.get("type"), + "hosts": instance.context.data["hostName"], + } + + def get_rootless_path(self, anatomy, path): + """Returns, if possible, path without absolute portion from root + (eg. 'c:\' or '/opt/..') + + This information is platform dependent and shouldn't be captured. + Example: + 'c:/projects/MyProject1/Assets/publish...' > + '{root}/MyProject1/Assets...' + + Args: + anatomy: anatomy part from instance + path: path (absolute) + Returns: + path: modified path if possible, or unmodified path + + warning logged + """ + + success, rootless_path = anatomy.find_root_template_from_path(path) + if success: + path = rootless_path + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(path)) + return path + + def get_files_info(self, destinations, sites, anatomy): + """Prepare 'files' info portion for representations. + + Arguments: + destinations (list): List of transferred file destinations + sites (list): array of published locations + anatomy: anatomy part from instance + Returns: + output_resources: array of dictionaries to be added to 'files' key + in representation + """ + + file_infos = [] + for file_path in destinations: + file_info = self.prepare_file_info(file_path, anatomy, sites=sites) + file_infos.append(file_info) + return file_infos + + def prepare_file_info(self, path, anatomy, sites): + """ Prepare information for one file (asset or resource) + + Arguments: + path: destination url of published file + anatomy: anatomy part from instance + sites: array of published locations, + [ {'name':'studio', 'created_dt':date} by default + keys expected ['studio', 'site1', 'gdrive1'] + + Returns: + dict: file info dictionary + """ + + return { + "_id": ObjectId(), + "path": self.get_rootless_path(anatomy, path), + "size": os.path.getsize(path), + "hash": source_hash(path), + "sites": sites + } diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 5f97a9bd41..7d698ff98d 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -71,7 +71,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): template_key = self._get_template_key(instance) anatomy = instance.context.data["anatomy"] - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = anatomy.project_name if template_key not in anatomy.templates: self.log.warning(( "!!! 
Anatomy of project \"{}\" does not have set" @@ -313,13 +313,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): } repre_context = template_filled.used_values for key in self.db_representation_context_keys: - if ( - key in repre_context or - key not in anatomy_data - ): - continue - - repre_context[key] = anatomy_data[key] + value = anatomy_data.get(key) + if value is not None: + repre_context[key] = value # Prepare new repre repre = copy.deepcopy(repre_info["representation"]) @@ -454,7 +450,6 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ) if bulk_writes: - project_name = legacy_io.Session["AVALON_PROJECT"] legacy_io.database[project_name].bulk_write( bulk_writes ) @@ -517,11 +512,10 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(template_data) # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." - ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled[template_key]["path"] # Directory diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_legacy.py similarity index 99% rename from openpype/plugins/publish/integrate_new.py rename to openpype/plugins/publish/integrate_legacy.py index f870220421..b90b61f587 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_legacy.py @@ -69,8 +69,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "data": additional metadata for each representation. """ - label = "Integrate Asset New" - order = pyblish.api.IntegratorOrder + label = "Integrate Asset (legacy)" + # Make sure it happens after new integrator + order = pyblish.api.IntegratorOrder + 0.00001 families = ["workfile", "pointcache", "camera", @@ -101,7 +102,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "source", "matchmove", "image", - "source", "assembly", "fbx", "textures", @@ -142,6 +142,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset_grouping_profiles = None def process(self, instance): + if instance.data.get("processedWithNewIntegrator"): + self.log.info("Instance was already processed with new integrator") + return + for ef in self.exclude_families: if ( instance.data["family"] == ef or diff --git a/openpype/plugins/publish/integrate_subset_group.py b/openpype/plugins/publish/integrate_subset_group.py new file mode 100644 index 0000000000..910cb060a6 --- /dev/null +++ b/openpype/plugins/publish/integrate_subset_group.py @@ -0,0 +1,98 @@ +"""Produces instance.data["subsetGroup"] data used during integration. + +Requires: + dict -> context["anatomyData"] *(pyblish.api.CollectorOrder + 0.49) + +Provides: + instance -> subsetGroup (str) + +""" +import pyblish.api + +from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib import ( + prepare_template_data, + StringTemplate, + TemplateUnsolved +) + + +class IntegrateSubsetGroup(pyblish.api.InstancePlugin): + """Integrate Subset Group for publish.""" + + # Run after CollectAnatomyInstanceData + order = pyblish.api.IntegratorOrder - 0.1 + label = "Subset Group" + + # Attributes set by settings + subset_grouping_profiles = None + + def process(self, instance): + """Look into subset group profiles set by settings. + + Attribute 'subset_grouping_profiles' is defined by OpenPype settings. 
+ """ + + # Skip if 'subset_grouping_profiles' is empty + if not self.subset_grouping_profiles: + return + + if instance.data.get("subsetGroup"): + # If subsetGroup is already set then allow that value to remain + self.log.debug(( + "Skipping collect subset group due to existing value: {}" + ).format(instance.data["subsetGroup"])) + return + + # Skip if there is no matching profile + filter_criteria = self.get_profile_filter_criteria(instance) + profile = filter_profiles( + self.subset_grouping_profiles, + filter_criteria, + logger=self.log + ) + + if not profile: + return + + template = profile["template"] + + fill_pairs = prepare_template_data({ + "family": filter_criteria["families"], + "task": filter_criteria["tasks"], + "host": filter_criteria["hosts"], + "subset": instance.data["subset"], + "renderlayer": instance.data.get("renderlayer") + }) + + filled_template = None + try: + filled_template = StringTemplate.format_strict_template( + template, fill_pairs + ) + except (KeyError, TemplateUnsolved): + keys = fill_pairs.keys() + self.log.warning(( + "Subset grouping failed. Only {} are expected in Settings" + ).format(','.join(keys))) + + if filled_template: + instance.data["subsetGroup"] = filled_template + + def get_profile_filter_criteria(self, instance): + """Return filter criteria for `filter_profiles`""" + # TODO: This logic is used in much more plug-ins in one way or another + # Maybe better suited for lib? + # Anatomy data is pre-filled by Collectors + anatomy_data = instance.data["anatomyData"] + + # Task can be optional in anatomy data + task = anatomy_data.get("task", {}) + + # Return filter criteria + return { + "families": anatomy_data["family"], + "tasks": task.get("name"), + "hosts": anatomy_data["app"], + "task_types": task.get("type") + } diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index fd50858a91..8ae0dd2d60 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -39,9 +39,8 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - project_name = legacy_io.Session["AVALON_PROJECT"] - anatomy = instance.context.data["anatomy"] + project_name = anatomy.project_name if "publish" not in anatomy.templates: self.log.warning("Anatomy is missing the \"publish\" key!") return diff --git a/openpype/plugins/publish/start_timer.py b/openpype/plugins/publish/start_timer.py deleted file mode 100644 index 112d92bef0..0000000000 --- a/openpype/plugins/publish/start_timer.py +++ /dev/null @@ -1,14 +0,0 @@ -import pyblish.api - -from openpype.lib import change_timer_to_current_context - - -class StartTimer(pyblish.api.ContextPlugin): - label = "Start Timer" - order = pyblish.api.IntegratorOrder + 1 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - change_timer_to_current_context() diff --git a/openpype/plugins/publish/stop_timer.py b/openpype/plugins/publish/stop_timer.py deleted file mode 100644 index 414e43a3c4..0000000000 --- a/openpype/plugins/publish/stop_timer.py +++ /dev/null @@ -1,17 +0,0 @@ -import os -import requests - -import pyblish.api - - -class StopTimer(pyblish.api.ContextPlugin): - label = "Stop Timer" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if 
modules_settings["timers_manager"]["disregard_publishing"]:
-            webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
-            rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
-            requests.post(rest_api_url)
diff --git a/openpype/plugins/publish/validate_asset_docs.py b/openpype/plugins/publish/validate_asset_docs.py
index bc1f9b9e6c..9a1ca5b8de 100644
--- a/openpype/plugins/publish/validate_asset_docs.py
+++ b/openpype/plugins/publish/validate_asset_docs.py
@@ -24,6 +24,10 @@ class ValidateAssetDocs(pyblish.api.InstancePlugin):
         if instance.data.get("assetEntity"):
             self.log.info("Instance has set asset document in its data.")

+        elif instance.data.get("newAssetPublishing"):
+            # skip if it is editorial
+            self.log.info("Editorial instance does not need this check.")
+
         else:
             raise PublishValidationError((
                 "Instance \"{}\" doesn't have asset document "
diff --git a/openpype/plugins/publish/validate_containers.py b/openpype/plugins/publish/validate_containers.py
index ce91bd3396..79759450e1 100644
--- a/openpype/plugins/publish/validate_containers.py
+++ b/openpype/plugins/publish/validate_containers.py
@@ -1,5 +1,9 @@
 import pyblish.api
-import openpype.lib
+from openpype.pipeline.load import any_outdated_containers
+from openpype.pipeline import (
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin
+)


 class ShowInventory(pyblish.api.Action):
@@ -14,15 +18,21 @@ class ShowInventory(pyblish.api.Action):
         host_tools.show_scene_inventory()


-class ValidateContainers(pyblish.api.ContextPlugin):
+class ValidateContainers(OptionalPyblishPluginMixin,
+                         pyblish.api.ContextPlugin):
+    """Containers must be updated to the latest version on publish."""

     label = "Validate Containers"
     order = pyblish.api.ValidatorOrder
-    hosts = ["maya", "houdini", "nuke", "harmony", "photoshop"]
+    hosts = ["maya", "houdini", "nuke", "harmony", "photoshop", "aftereffects"]
     optional = True
     actions = [ShowInventory]

     def process(self, context):
-        if openpype.lib.any_outdated():
-            raise ValueError("There are outdated containers in the scene.")
+        if not self.is_active(context.data):
+            return
+
+        if any_outdated_containers():
+            msg = "There are outdated containers in the scene."
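+            # The XML validation error presumably pairs this message with
+            # the artist-facing help in help/validate_containers.xml.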
+ raise PublishXmlValidationError(self, msg) diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index 702e87b58d..694788c414 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -19,7 +19,8 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): "hiero", "standalonepublisher", "resolve", - "flame" + "flame", + "traypublisher" ] def process(self, context): diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index 90c582a319..a447aa916b 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -7,7 +7,7 @@ import time from openpype.lib import PypeLogger from openpype.api import get_app_environments_for_context -from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info +from openpype.lib.plugin_tools import get_batch_asset_task_info from openpype.lib.remote_publish import ( get_webpublish_conn, start_webpublish_log, @@ -15,6 +15,7 @@ from openpype.lib.remote_publish import ( fail_batch, find_variant_key, get_task_data, + get_timeout, IN_PROGRESS_STATUS ) @@ -170,7 +171,7 @@ class PypeCommands: log.info("Publish finished.") @staticmethod - def remotepublishfromapp(project, batch_path, host_name, + def remotepublishfromapp(project_name, batch_path, host_name, user_email, targets=None): """Opens installed variant of 'host' and run remote publish there. @@ -189,8 +190,8 @@ class PypeCommands: Runs publish process as user would, in automatic fashion. Args: - project (str): project to publish (only single context is expected - per call of remotepublish + project_name (str): project to publish (only single context is + expected per call of remotepublish batch_path (str): Path batch folder. Contains subfolders with resources (workfile, another subfolder 'renders' etc.) host_name (str): 'photoshop' @@ -222,10 +223,17 @@ class PypeCommands: batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS})) if len(batches_in_progress) > 1: - fail_batch(_id, batches_in_progress, dbcon) + running_batches = [str(batch["_id"]) + for batch in batches_in_progress + if batch["_id"] != _id] + msg = "There are still running batches {}\n". 
\ + format("\n".join(running_batches)) + msg += "Ask admin to check them and reprocess current batch" + fail_batch(_id, dbcon, msg) print("Another batch running, probably stuck, ask admin for help") - asset, task_name, _ = get_batch_asset_task_info(task_data["context"]) + asset_name, task_name, task_type = get_batch_asset_task_info( + task_data["context"]) application_manager = ApplicationManager() found_variant_key = find_variant_key(application_manager, host_name) @@ -233,8 +241,8 @@ class PypeCommands: # must have for proper launch of app env = get_app_environments_for_context( - project, - asset, + project_name, + asset_name, task_name, app_name ) @@ -262,15 +270,22 @@ class PypeCommands: data = { "last_workfile_path": workfile_path, "start_last_workfile": True, - "project_name": project, - "asset_name": asset, + "project_name": project_name, + "asset_name": asset_name, "task_name": task_name } launched_app = application_manager.launch(app_name, **data) + timeout = get_timeout(project_name, host_name, task_type) + + time_start = time.time() while launched_app.poll() is None: time.sleep(0.5) + if time.time() - time_start > timeout: + launched_app.terminate() + msg = "Timeout reached" + fail_batch(_id, dbcon, msg) @staticmethod def remotepublish(project, batch_path, user_email, targets=None): diff --git a/openpype/resources/app_icons/shotgrid.png b/openpype/resources/app_icons/shotgrid.png new file mode 100644 index 0000000000..6d0cc047f9 Binary files /dev/null and b/openpype/resources/app_icons/shotgrid.png differ diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 245fc665f0..fc22f060a2 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -3,6 +3,8 @@ import re import sys import logging +from openpype.client import get_asset_by_name, get_versions + # Pipeline imports from openpype.hosts.fusion import api import openpype.hosts.fusion.api.lib as fusion_lib @@ -15,13 +17,10 @@ from openpype.pipeline import ( legacy_io, ) -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Update Slap Comp") -self = sys.modules[__name__] -self._project = None - def _format_version_folder(folder): """Format a version folder based on the filepath @@ -131,8 +130,8 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) - versions = list(versions) + project_name = legacy_io.active_project() + versions = list(get_versions(project_name, version_ids=version_ids)) start = min(v["data"]["frameStart"] for v in versions) end = max(v["data"]["frameEnd"] for v in versions) @@ -162,15 +161,10 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = legacy_io.find_one({"type": "asset", "name": asset_name}) + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find '%s' in the database" % asset_name - # Get current project - self._project = legacy_io.find_one({ - "type": "project", - "name": legacy_io.Session["AVALON_PROJECT"] - }) - # Go to comp if not filepath: current_comp = api.get_current_comp() diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json 
index 831c34835e..3e86581a03 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -124,6 +124,11 @@ "Project Manager" ], "cycle_enabled": false, + "cycle_hour_start": [ + 0, + 0, + 0 + ], "review_session_template": "{yy}{mm}{dd}" } }, @@ -268,6 +273,51 @@ } ] }, + { + "hosts": [ + "traypublisher" + ], + "families": [], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] + }, + { + "hosts": [ + "traypublisher" + ], + "families": [ + "matchmove", + "shot" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [] + }, + { + "hosts": [ + "traypublisher" + ], + "families": [ + "plate", + "review", + "audio" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [ + { + "families": [ + "clip", + "review" + ], + "add_ftrack_family": true + } + ] + }, { "hosts": [ "maya" @@ -399,6 +449,9 @@ "enabled": false, "ftrack_custom_attributes": {} }, + "IntegrateFtrackComponentOverwrite": { + "enabled": true + }, "IntegrateFtrackInstance": { "family_mapping": { "camera": "cam", diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 6131ea1939..0ff9363ba7 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -85,6 +85,7 @@ ], "width": 0, "height": 0, + "scale_pixel_aspect": true, "bg_color": [ 0, 0, @@ -159,7 +160,27 @@ } ] }, + "IntegrateSubsetGroup": { + "subset_grouping_profiles": [ + { + "families": [], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "" + } + ] + }, "IntegrateAssetNew": { + "subset_grouping_profiles": [ + { + "families": [], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "" + } + ], "template_name_profiles": [ { "families": [], @@ -202,17 +223,11 @@ "tasks": [], "template_name": "maya2unreal" } - ], - "subset_grouping_profiles": [ - { - "families": [], - "hosts": [], - "task_types": [], - "tasks": [], - "template": "" - } ] }, + "IntegrateAsset": { + "skip_host_families": [] + }, "IntegrateHeroVersion": { "enabled": true, "optional": true, diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 9bebd92cb9..ce9cd4d606 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -31,6 +31,37 @@ } ] }, + "RenderSettings": { + "apply_render_settings": true, + "default_render_image_folder": "renders", + "aov_separator": "underscore", + "reset_current_frame": false, + "arnold_renderer": { + "image_prefix": "maya///_", + "image_format": "exr", + "multilayer_exr": true, + "tiled": true, + "aov_list": [], + "additional_options": [] + }, + "vray_renderer": { + "image_prefix": "maya///", + "engine": "1", + "image_format": "png", + "aov_list": [], + "additional_options": [] + }, + "redshift_renderer": { + "image_prefix": "maya///", + "primary_gi_engine": "0", + "secondary_gi_engine": "0", + "image_format": "iff", + "multilayer_exr": true, + "force_combine": true, + "aov_list": [], + "additional_options": [] + } + }, "create": { "CreateLook": { "enabled": true, @@ -43,9 +74,7 @@ "enabled": true, "defaults": [ "Main" - ], - "aov_separator": "underscore", - "default_render_image_folder": "renders" + ] }, "CreateUnrealStaticMesh": { "enabled": true, @@ -70,6 +99,20 @@ "enabled": 
true, "publish_mip_map": true }, + "CreateAnimation": { + "enabled": true, + "write_color_sets": false, + "defaults": [ + "Main" + ] + }, + "CreatePointCache": { + "enabled": true, + "write_color_sets": false, + "defaults": [ + "Main" + ] + }, "CreateMultiverseUsd": { "enabled": true, "defaults": [ @@ -88,12 +131,6 @@ "Main" ] }, - "CreateAnimation": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateAss": { "enabled": true, "defaults": [ @@ -132,12 +169,6 @@ "Sculpt" ] }, - "CreatePointCache": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateRenderSetup": { "enabled": true, "defaults": [ @@ -205,10 +236,15 @@ "enabled": true, "optional": true, "active": true, - "exclude_families": ["model", "rig", "staticMesh"] + "exclude_families": [ + "model", + "rig", + "staticMesh" + ] }, "ValidateShaderName": { "enabled": false, + "optional": true, "regex": "(?P.*)_(.*)_SHD" }, "ValidateShadingEngine": { @@ -222,6 +258,7 @@ }, "ValidateLoadedPlugin": { "enabled": false, + "optional": true, "whitelist_native_plugins": false, "authorized_plugins": [] }, @@ -236,6 +273,7 @@ }, "ValidateUnrealStaticMeshName": { "enabled": true, + "optional": true, "validate_mesh": false, "validate_collision": true }, @@ -252,6 +290,81 @@ "redshift_render_attributes": [], "renderman_render_attributes": [] }, + "ValidateCurrentRenderLayerIsRenderable": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderImageRule": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderSingleCamera": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderLayerAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateStepSize": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayDistributedRendering": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayReferencedAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayTranslatorEnabled": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayProxy": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayProxyMembers": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRenderScriptCallbacks": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigCacheState": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigInputShapesInInstance": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigSettings": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateModelName": { "enabled": false, "database": true, @@ -270,6 +383,7 @@ }, "ValidateTransformNamingSuffix": { "enabled": true, + "optional": true, "SUFFIX_NAMING_TABLE": { "mesh": [ "_GEO", @@ -293,7 +407,7 @@ "ALLOW_IF_NOT_IN_SUFFIX_TABLE": true }, "ValidateColorSets": { - "enabled": false, + "enabled": true, "optional": true, "active": true }, @@ -337,6 +451,16 @@ "optional": true, "active": true }, + "ValidateMeshNoNegativeScale": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateMeshNonZeroEdgeLength": { + "enabled": true, + "optional": true, + "active": true + }, "ValidateMeshNormalsUnlocked": { "enabled": false, "optional": true, @@ -359,22 +483,22 @@ }, "ValidateNoNamespace": { "enabled": true, - "optional": true, + "optional": 
false, "active": true }, "ValidateNoNullTransforms": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNoUnknownNodes": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNodeNoGhosting": { "enabled": false, - "optional": true, + "optional": false, "active": true }, "ValidateShapeDefaultNames": { @@ -402,6 +526,21 @@ "optional": true, "active": true }, + "ValidateNoVRayMesh": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealMeshTriangulated": { + "enabled": false, + "optional": true, + "active": true + }, + "ValidateAlembicVisibleOnly": { + "enabled": true, + "optional": false, + "active": true + }, "ExtractAlembic": { "enabled": true, "families": [ @@ -425,8 +564,34 @@ "optional": true, "active": true }, + "ValidateAnimationContent": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateOutRelatedNodeIds": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRigControllersArnoldAttributes": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkeletalMeshHierarchy": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkinclusterDeformerSet": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateRigOutSetNodeIds": { "enabled": true, + "optional": false, "allow_history_only": false }, "ValidateCameraAttributes": { @@ -439,14 +604,44 @@ "optional": true, "active": true }, + "ValidateAssemblyNamespaces": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateAssemblyModelTransforms": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateAssRelativePaths": { "enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerContent": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerFrameRanges": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealUpAxis": { + "enabled": false, "optional": true, "active": true }, "ValidateCameraContents": { "enabled": true, - "optional": true, + "optional": false, "validate_shapes": true }, "ExtractPlayblast": { @@ -497,11 +692,29 @@ "override_viewport_options": true, "displayLights": "default", "textureMaxResolution": 1024, - "multiSample": 4, + "renderDepthOfField": true, "shadows": true, "textures": true, "twoSidedLighting": true, - "ssaoEnable": true, + "lineAAEnable": true, + "multiSample": 8, + "ssaoEnable": false, + "ssaoAmount": 1, + "ssaoRadius": 16, + "ssaoFilterRadius": 16, + "ssaoSamples": 16, + "fogging": false, + "hwFogFalloff": "0", + "hwFogDensity": 0.0, + "hwFogStart": 0, + "hwFogEnd": 100, + "hwFogAlpha": 0, + "hwFogColorR": 1.0, + "hwFogColorG": 1.0, + "hwFogColorB": 1.0, + "motionBlurEnable": false, + "motionBlurSampleCount": 8, + "motionBlurShutterOpenFraction": 0.2, "cameras": false, "clipGhosts": false, "controlVertices": false, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 6c45e2a9c1..3e29122074 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -287,7 +287,11 @@ "LoadClip": { "enabled": true, "_representations": [], - "node_name_template": "{class_name}_{ext}" + "node_name_template": "{class_name}_{ext}", + "options_defaults": { + "start_at_workfile": true, + 
"add_retime": true + } } }, "workfile_builder": { diff --git a/openpype/settings/defaults/project_settings/shotgrid.json b/openpype/settings/defaults/project_settings/shotgrid.json new file mode 100644 index 0000000000..83b6f69074 --- /dev/null +++ b/openpype/settings/defaults/project_settings/shotgrid.json @@ -0,0 +1,22 @@ +{ + "shotgrid_project_id": 0, + "shotgrid_server": "", + "event": { + "enabled": false + }, + "fields": { + "asset": { + "type": "sg_asset_type" + }, + "sequence": { + "episode_link": "episode" + }, + "shot": { + "episode_link": "sg_episode", + "sequence_link": "sg_sequence" + }, + "task": { + "step": "step" + } + } +} diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json index 0b54cfd39e..5db2a79772 100644 --- a/openpype/settings/defaults/project_settings/traypublisher.json +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -8,9 +8,10 @@ "default_variants": [ "Main" ], - "description": "Publish workfile backup", - "detailed_description": "", - "allow_sequences": true, + "description": "Backup of a working scene", + "detailed_description": "Workfiles are full scenes from any application that are directly edited by artists. They represent a state of work on a task at a given point and are usually not directly referenced into other scenes.", + "allow_sequences": false, + "allow_multiple_items": false, "extensions": [ ".ma", ".mb", @@ -30,6 +31,277 @@ ".psb", ".aep" ] + }, + { + "family": "model", + "identifier": "", + "label": "Model", + "icon": "fa.cubes", + "default_variants": [ + "Main", + "Proxy", + "Sculpt" + ], + "description": "Clean models", + "detailed_description": "Models should only contain geometry data, without any extras like cameras, locators or bones.\n\nKeep in mind that models published from tray publisher are not validated for correctness. ", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".ma", + ".mb", + ".obj", + ".abc", + ".fbx", + ".bgeo", + ".bgeogz", + ".bgeosc", + ".usd", + ".blend" + ] + }, + { + "family": "pointcache", + "identifier": "", + "label": "Pointcache", + "icon": "fa.gears", + "default_variants": [ + "Main" + ], + "description": "Geometry Caches", + "detailed_description": "Alembic or bgeo cache of animated data", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".bgeo", + ".bgeogz", + ".bgeosc" + ] + }, + { + "family": "plate", + "identifier": "", + "label": "Plate", + "icon": "mdi.camera-image", + "default_variants": [ + "Main", + "BG", + "Animatic", + "Reference", + "Offline" + ], + "description": "Footage Plates", + "detailed_description": "Any type of image seqeuence coming from outside of the studio. 
Usually camera footage, but could also be animatics used for reference.", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + }, + { + "family": "render", + "identifier": "", + "label": "Render", + "icon": "mdi.folder-multiple-image", + "default_variants": [], + "description": "Rendered images or video", + "detailed_description": "Sequence or single file renders", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".jpeg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + }, + { + "family": "camera", + "identifier": "", + "label": "Camera", + "icon": "fa.video-camera", + "default_variants": [], + "description": "3d Camera", + "detailed_description": "Ideally this should be only camera itself with baked animation, however, it can technically also include helper geometry.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".ma", + ".hip", + ".blend", + ".fbx", + ".usd" + ] + }, + { + "family": "image", + "identifier": "", + "label": "Image", + "icon": "fa.image", + "default_variants": [ + "Reference", + "Texture", + "Concept", + "Background" + ], + "description": "Single image", + "detailed_description": "Any image data can be published as image family. References, textures, concept art, matte paints. This is a fallback 2d family for everything that doesn't fit more specific family.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".jpg", + ".jpeg", + ".dpx", + ".bmp", + ".tif", + ".tiff", + ".png", + ".psb", + ".psd" + ] + }, + { + "family": "vdb", + "identifier": "", + "label": "VDB Volumes", + "icon": "fa.cloud", + "default_variants": [], + "description": "Sparse volumetric data", + "detailed_description": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".vdb" + ] + }, + { + "family": "matchmove", + "identifier": "", + "label": "Matchmove", + "icon": "fa.empire", + "default_variants": [ + "Camera", + "Object", + "Mocap" + ], + "description": "Matchmoving script", + "detailed_description": "Script exported from matchmoving application to be later processed into a tracked camera with additional data", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [] + }, + { + "family": "rig", + "identifier": "", + "label": "Rig", + "icon": "fa.wheelchair", + "default_variants": [], + "description": "CG rig file", + "detailed_description": "CG rigged character or prop. 
Rig should be clean of any extra data and directly loadable into it's respective application\t", + "allow_sequences": false, + "allow_multiple_items": false, + "extensions": [ + ".ma", + ".blend", + ".hip", + ".hda" + ] + }, + { + "family": "simpleUnrealTexture", + "identifier": "", + "label": "Simple UE texture", + "icon": "fa.image", + "default_variants": [], + "description": "Simple Unreal Engine texture", + "detailed_description": "Texture files with Unreal Engine naming conventions", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [] } - ] + ], + "editorial_creators": { + "editorial_simple": { + "default_variants": [ + "Main" + ], + "clip_name_tokenizer": { + "_sequence_": "(sc\\d{3})", + "_shot_": "(sh\\d{3})" + }, + "shot_rename": { + "enabled": true, + "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}" + }, + "shot_hierarchy": { + "enabled": true, + "parents_path": "{project}/{folder}/{sequence}", + "parents": [ + { + "type": "Project", + "name": "project", + "value": "{project[name]}" + }, + { + "type": "Folder", + "name": "folder", + "value": "shots" + }, + { + "type": "Sequence", + "name": "sequence", + "value": "{_sequence_}" + } + ] + }, + "shot_add_tasks": {}, + "family_presets": [ + { + "family": "review", + "variant": "Reference", + "review": true, + "output_file_type": ".mp4" + }, + { + "family": "plate", + "variant": "", + "review": false, + "output_file_type": ".mov" + }, + { + "family": "audio", + "variant": "", + "review": false, + "output_file_type": ".wav" + } + ] + } + }, + "BatchMovieCreator": { + "default_variants": [ + "Main" + ], + "default_tasks": [ + "Compositing" + ], + "extensions": [ + ".mov" + ] + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/openpype/settings/defaults/project_settings/webpublisher.json index 77168c25e6..cba472514e 100644 --- a/openpype/settings/defaults/project_settings/webpublisher.json +++ b/openpype/settings/defaults/project_settings/webpublisher.json @@ -1,4 +1,13 @@ { + "timeout_profiles": [ + { + "hosts": [ + "photoshop" + ], + "task_types": [], + "timeout": 600 + } + ], "publish": { "CollectPublishedFiles": { "task_type_to_family": { diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json index a06947ba77..909ffc1ee4 100644 --- a/openpype/settings/defaults/system_settings/general.json +++ b/openpype/settings/defaults/system_settings/general.json @@ -2,11 +2,7 @@ "studio_name": "Studio name", "studio_code": "stu", "admin_password": "", - "environment": { - "__environment_keys__": { - "global": [] - } - }, + "environment": {}, "log_to_server": true, "disk_mapping": { "windows": [], diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index 8cd4114cb0..c84d23d3fc 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -26,13 +26,14 @@ "linux": [] }, "intent": { + "allow_empty_intent": true, + "empty_intent_label": "", "items": { - "-": "-", "wip": "WIP", "final": "Final", "test": "Test" }, - "default": "-" + "default": "" }, "custom_attributes": { "show": { @@ -135,6 +136,13 @@ "enabled": false, "server": "" }, + "shotgrid": { + "enabled": false, + "leecher_manager_url": "http://127.0.0.1:3000", + "leecher_backend_url": "http://127.0.0.1:8090", + "filter_projects_by_login": true, + "shotgrid_settings": {} + }, 
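As an aside, a hypothetical filled-in value for the dict-modifiable "shotgrid_settings" above may make the expected shape clearer; the server name, URL and credentials are placeholders, not defaults shipped by this change. The flattening mirrors ShotgridUrlEnumEntity._get_enum_values added later in this diff:

```python
# Hypothetical "shotgrid_settings" content; "studio", the URL and the
# credentials are placeholders, not values from this PR.
shotgrid_settings = {
    "studio": {
        "shotgrid_url": "https://studio.shotgrid.autodesk.com",
        "shotgrid_script_name": "openpype",
        "shotgrid_script_key": "***",
    }
}

# One enum item per configured server, labeled "<name>: <url>", matching
# what ShotgridUrlEnumEntity._get_enum_values builds from these settings.
enum_items = [
    {name: "{}: {}".format(name, cfg["shotgrid_url"])}
    for name, cfg in shotgrid_settings.items()
]
valid_keys = set(shotgrid_settings)
print(enum_items)  # [{'studio': 'studio: https://studio.shotgrid.autodesk.com'}]
```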
"timers_manager": { "enabled": true, "auto_stop": true, diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py index a173e2454f..b2cb2204f4 100644 --- a/openpype/settings/entities/__init__.py +++ b/openpype/settings/entities/__init__.py @@ -107,6 +107,7 @@ from .enum_entity import ( TaskTypeEnumEntity, DeadlineUrlEnumEntity, AnatomyTemplatesEnumEntity, + ShotgridUrlEnumEntity ) from .list_entity import ListEntity @@ -171,6 +172,7 @@ __all__ = ( "ToolsEnumEntity", "TaskTypeEnumEntity", "DeadlineUrlEnumEntity", + "ShotgridUrlEnumEntity", "AnatomyTemplatesEnumEntity", "ListEntity", diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py index 92a397afba..defe4aa1f0 100644 --- a/openpype/settings/entities/enum_entity.py +++ b/openpype/settings/entities/enum_entity.py @@ -1,10 +1,7 @@ import copy from .input_entities import InputEntity from .exceptions import EntitySchemaError -from .lib import ( - NOT_SET, - STRING_TYPE -) +from .lib import NOT_SET, STRING_TYPE class BaseEnumEntity(InputEntity): @@ -26,7 +23,7 @@ class BaseEnumEntity(InputEntity): for item in self.enum_items: key = tuple(item.keys())[0] if key in enum_keys: - reason = "Key \"{}\" is more than once in enum items.".format( + reason = 'Key "{}" is more than once in enum items.'.format( key ) raise EntitySchemaError(self, reason) @@ -34,7 +31,7 @@ class BaseEnumEntity(InputEntity): enum_keys.add(key) if not isinstance(key, STRING_TYPE): - reason = "Key \"{}\" has invalid type {}, expected {}.".format( + reason = 'Key "{}" has invalid type {}, expected {}.'.format( key, type(key), STRING_TYPE ) raise EntitySchemaError(self, reason) @@ -59,7 +56,7 @@ class BaseEnumEntity(InputEntity): for item in check_values: if item not in self.valid_keys: raise ValueError( - "{} Invalid value \"{}\". Expected one of: {}".format( + '{} Invalid value "{}". Expected one of: {}'.format( self.path, item, self.valid_keys ) ) @@ -84,7 +81,7 @@ class EnumEntity(BaseEnumEntity): self.valid_keys = set(all_keys) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) value_on_not_set = [] if enum_default: if not isinstance(enum_default, list): @@ -109,7 +106,7 @@ class EnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -152,6 +149,7 @@ class HostsEnumEntity(BaseEnumEntity): Host name is not the same as application name. Host name defines implementation instead of application name. 
""" + schema_types = ["hosts-enum"] all_host_names = [ "aftereffects", @@ -169,6 +167,7 @@ class HostsEnumEntity(BaseEnumEntity): "tvpaint", "unreal", "standalonepublisher", + "traypublisher", "webpublisher" ] @@ -210,7 +209,7 @@ class HostsEnumEntity(BaseEnumEntity): self.valid_keys = valid_keys if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: for key in valid_keys: @@ -218,7 +217,7 @@ class HostsEnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -226,14 +225,10 @@ class HostsEnumEntity(BaseEnumEntity): def schema_validations(self): if self.hosts_filter: enum_len = len(self.enum_items) - if ( - enum_len == 0 - or (enum_len == 1 and self.use_empty_value) - ): - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) + if enum_len == 0 or (enum_len == 1 and self.use_empty_value): + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) reason = ( "All host names were removed after applying" " host filters. {}" @@ -246,24 +241,25 @@ class HostsEnumEntity(BaseEnumEntity): invalid_filters.add(item) if invalid_filters: - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) - expected_hosts = ", ".join([ - '"{}"'.format(item) - for item in self.all_host_names - ]) - self.log.warning(( - "Host filters containt invalid host names:" - " \"{}\" Expected values are {}" - ).format(joined_filters, expected_hosts)) + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) + expected_hosts = ", ".join( + ['"{}"'.format(item) for item in self.all_host_names] + ) + self.log.warning( + ( + "Host filters containt invalid host names:" + ' "{}" Expected values are {}' + ).format(joined_filters, expected_hosts) + ) super(HostsEnumEntity, self).schema_validations() class AppsEnumEntity(BaseEnumEntity): """Enum of applications for project anatomy attributes.""" + schema_types = ["apps-enum"] def _item_initialization(self): @@ -271,7 +267,7 @@ class AppsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -352,7 +348,7 @@ class ToolsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -409,10 +405,10 @@ class TaskTypeEnumEntity(BaseEnumEntity): def _item_initialization(self): self.multiselection = self.schema_data.get("multiselection", True) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) self.value_on_not_set = "" self.enum_items = [] @@ -507,7 +503,8 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): enum_items_list = [] for server_name, url_entity in deadline_urls_entity.items(): enum_items_list.append( - {server_name: "{}: {}".format(server_name, url_entity.value)}) + {server_name: "{}: {}".format(server_name, url_entity.value)} + ) valid_keys.add(server_name) return enum_items_list, valid_keys @@ -530,6 +527,50 @@ class 
DeadlineUrlEnumEntity(BaseEnumEntity): self._current_value = tuple(self.valid_keys)[0] + +class ShotgridUrlEnumEntity(BaseEnumEntity): + schema_types = ["shotgrid_url-enum"] + + def _item_initialization(self): + self.multiselection = False + + self.enum_items = [] + self.valid_keys = set() + + self.valid_value_types = (STRING_TYPE,) + self.value_on_not_set = "" + + # GUI attribute + self.placeholder = self.schema_data.get("placeholder") + + def _get_enum_values(self): + shotgrid_settings = self.get_entity_from_path( + "system_settings/modules/shotgrid/shotgrid_settings" + ) + + valid_keys = set() + enum_items_list = [] + for server_name, settings in shotgrid_settings.items(): + enum_items_list.append( + { + server_name: "{}: {}".format( + server_name, settings["shotgrid_url"].value + ) + } + ) + valid_keys.add(server_name) + return enum_items_list, valid_keys + + def set_override_state(self, *args, **kwargs): + super(ShotgridUrlEnumEntity, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + if not self.valid_keys: + self._current_value = "" + + elif self._current_value not in self.valid_keys: + self._current_value = tuple(self.valid_keys)[0] + + class AnatomyTemplatesEnumEntity(BaseEnumEntity): schema_types = ["anatomy-templates-enum"] diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index 6c07209de3..80b1baad1b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -62,6 +62,10 @@ "type": "schema", "name": "schema_project_ftrack" }, + { + "type": "schema", + "name": "schema_project_shotgrid" + }, { "type": "schema", "name": "schema_project_kitsu" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index f8f9d5093d..c06bec0f58 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -410,7 +410,41 @@ { "type": "boolean", "key": "cycle_enabled", - "label": "Create daily review session" + "label": "Run automatically every day" + }, + { + "type": "separator" + }, + { + "type": "list-strict", + "key": "cycle_hour_start", + "label": "Create daily review session at", + "tooltip": "This may take effect on the next day", + "object_types": [ + { + "label": "H:", + "type": "number", + "minimum": 0, + "maximum": 23, + "decimal": 0 + }, { + "label": "M:", + "type": "number", + "minimum": 0, + "maximum": 59, + "decimal": 0 + }, { + "label": "S:", + "type": "number", + "minimum": 0, + "maximum": 59, + "decimal": 0 + } + ] + }, + { + "type": "label", + "label": "This can't be overridden per project and any change will take effect on the next day or on a restart of the event server." }, { "type": "separator" @@ -822,7 +856,7 @@ }, { "type": "label", - "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label and published_paths." + "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label, published_paths and source." 
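To illustrate the label above, a minimal sketch of filling such a note template; the template string and the values are invented for the example and are not defaults from this change:

```python
# Illustrative only: a possible note template using the formatting keys
# named above (intent, comment, host_name, app_name, app_label,
# published_paths, source). None of these values ship with the PR.
note_template = "{intent}: {comment} ({app_label} on {host_name}, from {source})"
fill_data = {
    "intent": "WIP",
    "comment": "First compositing pass",
    "host_name": "nuke",
    "app_name": "nuke/13-2",
    "app_label": "Nuke 13.2",
    "published_paths": [],
    "source": "standalone publisher",
}
# str.format ignores unused keyword arguments, so extra keys are harmless.
print(note_template.format(**fill_data))
# WIP: First compositing pass (Nuke 13.2 on nuke, from standalone publisher)
```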
}, { "type": "text", @@ -896,6 +930,21 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackComponentOverwrite", + "label": "IntegrateFtrackComponentOverwrite", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "key": "IntegrateFtrackInstance", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json index 40e98b0333..cb380194a7 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json @@ -57,6 +57,10 @@ "type": "schema", "name": "schema_scriptsmenu" }, + { + "type": "schema", + "name": "schema_maya_render_settings" + }, { "type": "schema", "name": "schema_maya_create" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json new file mode 100644 index 0000000000..4faeca89f3 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json @@ -0,0 +1,98 @@ +{ + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "is_file": true, + "children": [ + { + "type": "number", + "key": "shotgrid_project_id", + "label": "Shotgrid project id" + }, + { + "type": "shotgrid_url-enum", + "key": "shotgrid_server", + "label": "Shotgrid Server" + }, + { + "type": "dict", + "key": "event", + "label": "Event Handler", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, + { + "type": "dict", + "key": "fields", + "label": "Fields Template", + "collapsible": true, + "children": [ + { + "type": "dict", + "key": "asset", + "label": "Asset", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "type", + "label": "Asset Type" + } + ] + }, + { + "type": "dict", + "key": "sequence", + "label": "Sequence", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + } + ] + }, + { + "type": "dict", + "key": "shot", + "label": "Shot", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + }, + { + "type": "text", + "key": "sequence_link", + "label": "Sequence link" + } + ] + }, + { + "type": "dict", + "key": "task", + "label": "Task", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "step", + "label": "Step link" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json index 55c1b7b7d7..7c61aeed50 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -67,6 +67,11 @@ "label": "Allow sequences", "type": "boolean" }, + { + "key": "allow_multiple_items", + "label": "Allow multiple items", + "type": "boolean" + }, { "type": "list", "key": "extensions", @@ -78,6 +83,234 @@ } ] } + }, + { + "type": "dict", + "collapsible": true, + "key": "editorial_creators", + "label": "Editorial creator plugins", + "use_label_wrap": true, + "collapsible_key": true, + "children": 
[ + { + "type": "dict", + "collapsible": true, + "key": "editorial_simple", + "label": "Editorial simple creator", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "splitter" + }, + { + "type": "collapsible-wrap", + "label": "Shot metadata creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "clip_name_tokenizer", + "label": "Clip name tokenizer", + "type": "dict-modifiable", + "highlight_content": true, + "tooltip": "Using regex expressions to create tokens. \nThose can be used later in the \"Shot rename\" creator \nor \"Shot hierarchy\". \n\nTokens should be decorated with \"_\" on each side", + "object_type": { + "type": "text" + } + }, + { + "type": "dict", + "key": "shot_rename", + "label": "Shot rename", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "shot_rename_template", + "label": "Shot rename template", + "tooltip": "Template only supports Anatomy keys and Tokens \nfrom \"Clip name tokenizer\"" + } + ] + }, + { + "type": "dict", + "key": "shot_hierarchy", + "label": "Shot hierarchy", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "parents_path", + "label": "Parents path template", + "tooltip": "Using keys from \"Token to parent converter\" or tokens directly" + }, + { + "key": "parents", + "label": "Token to parent converter", + "type": "list", + "highlight_content": true, + "tooltip": "The left side is the key to be used in the template. \nThe right side is the value built from Tokens coming from \n\"Clip name tokenizer\"", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "type", + "label": "Parent type", + "enum_items": [ + {"Project": "Project"}, + {"Folder": "Folder"}, + {"Episode": "Episode"}, + {"Sequence": "Sequence"} + ] + }, + { + "type": "text", + "key": "name", + "label": "Parent token name", + "tooltip": "Unique name used in \"Parents path template\"" + }, + { + "type": "text", + "key": "value", + "label": "Parent name value", + "tooltip": "Template where any text, Anatomy keys and Tokens can be used" + } + ] + } + } + ] + }, + { + "key": "shot_add_tasks", + "label": "Add tasks to shot", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "task-types-enum", + "key": "type", + "label": "Task type", + "multiselection": false + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Shot's subset creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "family_presets", + "label": "Family presets", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "family", + "label": "Family", + "enum_items": [ + {"review": "review"}, + {"plate": "plate"}, + {"audio": "audio"} + ] + }, + { + "type": "text", + "key": "variant", + "label": "Variant", + "placeholder": "< Inherited >" + }, + { + "type": "boolean", + "key": "review", + "label": "Review", + "default": true + }, + { + "type": "enum", + "key": "output_file_type", + "label": "Integrating file type", + "enum_items": [ + {".mp4": "MP4"}, + {".mov": "MOV"}, + {".wav": "WAV"} + ] + } + ] + } + } + ] + } + ] + } + ] + }, + { + "type": "dict", + "collapsible": true, + 
"key": "BatchMovieCreator", + "label": "Batch Movie Creator", + "collapsible_key": true, + "children": [ + { + "type": "label", + "label": "Allows to publish multiple video files in one go.
The name of the matching asset is parsed from file names ('asset.mov', 'asset_v001.mov', 'my_asset_to_publish.mov')" }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "default_tasks", + "label": "Default tasks", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "extensions", + "label": "Extensions", + "use_label_wrap": true, + "collapsible_key": true, + "collapsed": false, + "object_type": "text" + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json index b76a0fa844..2ef7a05b21 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json @@ -5,6 +5,38 @@ "label": "Web Publisher", "is_file": true, "children": [ + { + "type": "list", + "collapsible": true, + "use_label_wrap": true, + "key": "timeout_profiles", + "label": "Timeout profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum", + "multiselection": true + }, + { + "type": "separator" + }, + { + "type": "number", + "key": "timeout", + "label": "Timeout (sec)" + } + ] + } + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index a3cbf0cfcd..e1aa230b49 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -319,6 +319,15 @@ "minimum": 0, "maximum": 100000 }, + { + "type": "label", + "label": "Rescale input when its pixel aspect ratio is not 1. Useful for anamorphic reviews." + }, + { + "key": "scale_pixel_aspect", + "label": "Scale pixel aspect", + "type": "boolean" + }, { "type": "label", "label": "Background color is used only when input have transparency and Alpha is higher than 0." @@ -528,10 +537,111 @@ { "type": "dict", "collapsible": true, - "key": "IntegrateAssetNew", - "label": "IntegrateAssetNew", + "key": "IntegrateSubsetGroup", + "label": "Integrate Subset Group", "is_group": true, "children": [ + { + "type": "list", + "key": "subset_grouping_profiles", + "label": "Subset grouping profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "label", + "label": "Set all published instances as part of a specific group named according to 'Template'.
Implemented all variants of placeholders [{task},{family},{host},{subset},{renderlayer}]" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template", + "label": "Template" + } + ] + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateAssetNew", + "label": "IntegrateAsset (Legacy)", + "is_group": true, + "children": [ + { + "type": "label", + "label": "NOTE: Subset grouping profiles settings were moved to Integrate Subset Group. Please move values there." + }, + { + "type": "list", + "key": "subset_grouping_profiles", + "label": "Subset grouping profiles (DEPRECATED)", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template", + "label": "Template" + } + ] + } + }, { "type": "list", "key": "template_name_profiles", @@ -577,49 +687,34 @@ } ] } - }, + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateAsset", + "label": "Integrate Asset", + "is_group": true, + "children": [ { "type": "list", - "key": "subset_grouping_profiles", - "label": "Subset grouping profiles", + "key": "skip_host_families", + "label": "Skip hosts and families", "use_label_wrap": true, "object_type": { "type": "dict", "children": [ { - "type": "label", - "label": "Set all published instances as a part of specific group named according to 'Template'.
Implemented all variants of placeholders [{task},{family},{host},{subset},{renderlayer}]" + "type": "hosts-enum", + "key": "host", + "label": "Host" }, { + "type": "list", "key": "families", "label": "Families", - "type": "list", "object_type": "text" - }, - { - "type": "hosts-enum", - "key": "hosts", - "label": "Hosts", - "multiselection": true - }, - { - "key": "task_types", - "label": "Task types", - "type": "task-types-enum" - }, - { - "key": "tasks", - "label": "Task names", - "type": "list", - "object_type": "text" - }, - { - "type": "separator" - }, - { - "type": "text", - "key": "template", - "label": "Template" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json index d6b81c8687..7a40f349cc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json @@ -202,12 +202,15 @@ "decimal": 0 }, { - "type": "number", - "key": "multiSample", - "label": "Anti Aliasing Samples", - "decimal": 0, - "minimum": 0, - "maximum": 32 + "type": "splitter" + }, + { + "type":"boolean", + "key": "renderDepthOfField", + "label": "Depth of Field" + }, + { + "type": "splitter" }, { "type": "boolean", @@ -224,11 +227,145 @@ "key": "twoSidedLighting", "label": "Two Sided Lighting" }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "lineAAEnable", + "label": "Enable Anti-Aliasing" + }, + { + "type": "number", + "key": "multiSample", + "label": "Anti Aliasing Samples", + "decimal": 0, + "minimum": 0, + "maximum": 32 + }, + { + "type": "splitter" + }, { "type": "boolean", "key": "ssaoEnable", "label": "Screen Space Ambient Occlusion" }, + { + "type": "number", + "key": "ssaoAmount", + "label": "SSAO Amount" + }, + { + "type": "number", + "key": "ssaoRadius", + "label": "SSAO Radius" + }, + { + "type": "number", + "key": "ssaoFilterRadius", + "label": "SSAO Filter Radius", + "decimal": 0, + "minimum": 1, + "maximum": 32 + }, + { + "type": "number", + "key": "ssaoSamples", + "label": "SSAO Samples", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "fogging", + "label": "Enable Hardware Fog" + }, + { + "type": "enum", + "key": "hwFogFalloff", + "label": "Hardware Falloff", + "enum_items": [ + { "0": "Linear"}, + { "1": "Exponential"}, + { "2": "Exponential Squared"} + ] + }, + { + "type": "number", + "key": "hwFogDensity", + "label": "Fog Density", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogStart", + "label": "Fog Start" + }, + { + "type": "number", + "key": "hwFogEnd", + "label": "Fog End" + }, + { + "type": "number", + "key": "hwFogAlpha", + "label": "Fog Alpha" + }, + { + "type": "number", + "key": "hwFogColorR", + "label": "Fog Color R", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorG", + "label": "Fog Color G", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorB", + "label": "Fog Color B", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "motionBlurEnable", + "label": "Enable Motion Blur" + }, + { + "type": "number", + "key": "motionBlurSampleCount", + "label": "Motion Blur Sample Count", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, + { + "type": 
"number", + "key": "motionBlurShutterOpenFraction", + "label": "Shutter Open Fraction", + "decimal": 3, + "minimum": 0.01, + "maximum": 32 + }, { "type": "splitter" }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 09287a8b50..431add28df 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -29,42 +29,9 @@ } ] }, - { - "type": "dict", - "collapsible": true, - "key": "CreateRender", - "label": "Create Render", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "list", - "key": "defaults", - "label": "Default Subsets", - "object_type": "text" - }, - { - "key": "aov_separator", - "label": "AOV Separator character", - "type": "enum", - "multiselection": false, - "default": "underscore", - "enum_items": [ - {"dash": "- (dash)"}, - {"underscore": "_ (underscore)"}, - {"dot": ". (dot)"} - ] - }, - { - "type": "text", - "key": "default_render_image_folder", - "label": "Default render image folder" - } - ] + { + "type": "schema", + "name": "schema_maya_create_render" }, { "type": "dict", @@ -143,6 +110,57 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CreateAnimation", + "label": "Create Animation", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreatePointCache", + "label": "Create Point Cache", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { "type": "schema_template", "name": "template_create_plugin", @@ -159,10 +177,6 @@ "key": "CreateMultiverseUsdOver", "label": "Create Multiverse USD Override" }, - { - "key": "CreateAnimation", - "label": "Create Animation" - }, { "key": "CreateAss", "label": "Create Ass" @@ -187,10 +201,6 @@ "key": "CreateModel", "label": "Create Model" }, - { - "key": "CreatePointCache", - "label": "Create Cache" - }, { "key": "CreateRenderSetup", "label": "Create Render Setup" diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json new file mode 100644 index 0000000000..68ad7ad63d --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json @@ -0,0 +1,20 @@ +{ + "type": "dict", + "collapsible": true, + "key": "CreateRender", + "label": "Create Render", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json 
b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 84182973a1..53247f6bd4 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -107,6 +107,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Shader name regex can use named capture group asset to validate against current asset name.

Example:
^.*(?P<asset>.+)_SHD

" @@ -159,6 +164,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "whitelist_native_plugins", @@ -246,6 +256,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "validate_mesh", @@ -332,6 +347,72 @@ } ] }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateCurrentRenderLayerIsRenderable", + "label": "Validate Current Render Layer Has Renderable Camera" + }, + { + "key": "ValidateRenderImageRule", + "label": "Validate Images File Rule (Workspace)" + }, + { + "key": "ValidateRenderNoDefaultCameras", + "label": "Validate No Default Cameras Renderable" + }, + { + "key": "ValidateRenderSingleCamera", + "label": "Validate Render Single Camera" + }, + { + "key": "ValidateRenderLayerAOVs", + "label": "Validate Render Passes / AOVs Are Registered" + }, + { + "key": "ValidateStepSize", + "label": "Validate Step Size" + }, + { + "key": "ValidateVRayDistributedRendering", + "label": "VRay Distributed Rendering" + }, + { + "key": "ValidateVrayReferencedAOVs", + "label": "VRay Referenced AOVs" + }, + { + "key": "ValidateVRayTranslatorEnabled", + "label": "VRay Translator Settings" + }, + { + "key": "ValidateVrayProxy", + "label": "VRay Proxy Settings" + }, + { + "key": "ValidateVrayProxyMembers", + "label": "VRay Proxy Members" + }, + { + "key": "ValidateYetiRenderScriptCallbacks", + "label": "Yeti Render Script Callbacks" + }, + { + "key": "ValidateYetiRigCacheState", + "label": "Yeti Rig Cache State" + }, + { + "key": "ValidateYetiRigInputShapesInInstance", + "label": "Yeti Rig Input Shapes In Instance" + }, + { + "key": "ValidateYetiRigSettings", + "label": "Yeti Rig Settings" + } + ] + }, { "type": "collapsible-wrap", "label": "Model", @@ -416,6 +497,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Validates transform suffix based on the type of its children shapes." 
@@ -472,6 +558,14 @@ "key": "ValidateMeshNonManifold", "label": "ValidateMeshNonManifold" }, + { + "key": "ValidateMeshNoNegativeScale", + "label": "Validate Mesh No Negative Scale" + }, + { + "key": "ValidateMeshNonZeroEdgeLength", + "label": "Validate Mesh Edge Length Non Zero" + }, { "key": "ValidateMeshNormalsUnlocked", "label": "ValidateMeshNormalsUnlocked" @@ -525,6 +619,18 @@ { "key": "ValidateUniqueNames", "label": "ValidateUniqueNames" + }, + { + "key": "ValidateNoVRayMesh", + "label": "Validate No V-Ray Proxies (VRayMesh)" + }, + { + "key": "ValidateUnrealMeshTriangulated", + "label": "Validate if Mesh is Triangulated" + }, + { + "key": "ValidateAlembicVisibleOnly", + "label": "Validate Alembic visible node" } ] }, @@ -573,6 +679,26 @@ { "key": "ValidateRigControllers", "label": "Validate Rig Controllers" + }, + { + "key": "ValidateAnimationContent", + "label": "Validate Animation Content" + }, + { + "key": "ValidateOutRelatedNodeIds", + "label": "Validate Animation Out Set Related Node Ids" + }, + { + "key": "ValidateRigControllersArnoldAttributes", + "label": "Validate Rig Controllers (Arnold Attributes)" + }, + { + "key": "ValidateSkeletalMeshHierarchy", + "label": "Validate Skeletal Mesh Top Node" + }, + { + "key": "ValidateSkinclusterDeformerSet", + "label": "Validate Skincluster Deformer Relationships" } ] }, @@ -589,6 +715,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "allow_history_only", @@ -611,9 +742,33 @@ "key": "ValidateAssemblyName", "label": "Validate Assembly Name" }, + { + "key": "ValidateAssemblyNamespaces", + "label": "Validate Assembly Namespaces" + }, + { + "key": "ValidateAssemblyModelTransforms", + "label": "Validate Assembly Model Transforms" + }, { "key": "ValidateAssRelativePaths", "label": "ValidateAssRelativePaths" + }, + { + "key": "ValidateInstancerContent", + "label": "Validate Instancer Content" + }, + { + "key": "ValidateInstancerFrameRanges", + "label": "Validate Instancer Cache Frame Ranges" + }, + { + "key": "ValidateNoDefaultCameras", + "label": "Validate No Default Cameras" + }, + { + "key": "ValidateUnrealUpAxis", + "label": "Validate Unreal Up-Axis check" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json new file mode 100644 index 0000000000..af197604f8 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json @@ -0,0 +1,418 @@ +{ + "type": "dict", + "collapsible": true, + "key": "RenderSettings", + "label": "Render Settings", + "children": [ + { + "type": "boolean", + "key": "apply_render_settings", + "label": "Apply Render Settings on creation" + }, + { + "type": "text", + "key": "default_render_image_folder", + "label": "Default render image folder" + }, + { + "key": "aov_separator", + "label": "AOV Separator character", + "type": "enum", + "multiselection": false, + "default": "underscore", + "enum_items": [ + {"dash": "- (dash)"}, + {"underscore": "_ (underscore)"}, + {"dot": ". 
(dot)"} + ] + }, + { + "key": "reset_current_frame", + "label": "Reset Current Frame", + "type": "boolean" + }, + { + "type": "dict", + "collapsible": true, + "key": "arnold_renderer", + "label": "Arnold Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"jpeg": "jpeg"}, + {"png": "png"}, + {"deepexr": "deep exr"}, + {"tif": "tif"}, + {"exr": "exr"}, + {"maya": "maya"}, + {"mtoa_shaders": "mtoa_shaders"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "tiled", + "label": "Tiled (tif, exr)", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"ID": "ID"}, + {"N": "N"}, + {"P": "P"}, + {"Pref": "Pref"}, + {"RGBA": "RGBA"}, + {"Z": "Z"}, + {"albedo": "albedo"}, + {"background": "background"}, + {"coat": "coat"}, + {"coat_albedo": "coat_albedo"}, + {"coat_direct": "coat_direct"}, + {"coat_indirect": "coat_indirect"}, + {"cputime": "cputime"}, + {"crypto_asset": "crypto_asset"}, + {"crypto_material": "cypto_material"}, + {"crypto_object": "crypto_object"}, + {"diffuse": "diffuse"}, + {"diffuse_albedo": "diffuse_albedo"}, + {"diffuse_direct": "diffuse_direct"}, + {"diffuse_indirect": "diffuse_indirect"}, + {"direct": "direct"}, + {"emission": "emission"}, + {"highlight": "highlight"}, + {"indirect": "indirect"}, + {"motionvector": "motionvector"}, + {"opacity": "opacity"}, + {"raycount": "raycount"}, + {"rim_light": "rim_light"}, + {"shadow": "shadow"}, + {"shadow_diff": "shadow_diff"}, + {"shadow_mask": "shadow_mask"}, + {"shadow_matte": "shadow_matte"}, + {"sheen": "sheen"}, + {"sheen_albedo": "sheen_albedo"}, + {"sheen_direct": "sheen_direct"}, + {"sheen_indirect": "sheen_indirect"}, + {"specular": "specular"}, + {"specular_albedo": "specular_albedo"}, + {"specular_direct": "specular_direct"}, + {"specular_indirect": "specular_indirect"}, + {"sss": "sss"}, + {"sss_albedo": "sss_albedo"}, + {"sss_direct": "sss_direct"}, + {"sss_indirect": "sss_indirect"}, + {"transmission": "transmission"}, + {"transmission_albedo": "transmission_albedo"}, + {"transmission_direct": "transmission_direct"}, + {"transmission_indirect": "transmission_indirect"}, + {"volume": "volume"}, + {"volume_Z": "volume_Z"}, + {"volume_albedo": "volume_albedo"}, + {"volume_direct": "volume_direct"}, + {"volume_indirect": "volume_indirect"}, + {"volume_opacity": "volume_opacity"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like AASamples" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "vray_renderer", + "label": "V-Ray Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "engine", + "label": "Production Engine", + "type": "enum", + "multiselection": false, + "defaults": "1", + "enum_items": [ + {"1": "V-Ray"}, + {"2": "V-Ray GPU"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": 
false, + "defaults": "exr", + "enum_items": [ + {"png": "png"}, + {"jpg": "jpg"}, + {"vrimg": "vrimg"}, + {"hdr": "hdr"}, + {"exr": "exr"}, + {"exr (multichannel)": "exr (multichannel)"}, + {"exr (deep)": "exr (deep)"}, + {"tga": "tga"}, + {"bmp": "bmp"}, + {"sgi": "sgi"} + ] + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"atmosphereChannel": "atmosphere"}, + {"backgroundChannel": "background"}, + {"bumpNormalsChannel": "bumpnormals"}, + {"causticsChannel": "caustics"}, + {"coatFilterChannel": "coat_filter"}, + {"coatGlossinessChannel": "coatGloss"}, + {"coatReflectionChannel": "coat_reflection"}, + {"vrayCoatChannel": "coat_specular"}, + {"CoverageChannel": "coverage"}, + {"cryptomatteChannel": "cryptomatte"}, + {"customColor": "custom_color"}, + {"drBucketChannel": "DR"}, + {"denoiserChannel": "denoiser"}, + {"diffuseChannel": "diffuse"}, + {"ExtraTexElement": "extraTex"}, + {"giChannel": "GI"}, + {"LightMixElement": "None"}, + {"lightingChannel": "lighting"}, + {"LightingAnalysisChannel": "LightingAnalysis"}, + {"materialIDChannel": "materialID"}, + {"MaterialSelectElement": "materialSelect"}, + {"matteShadowChannel": "matteShadow"}, + {"MultiMatteElement": "multimatte"}, + {"multimatteIDChannel": "multimatteID"}, + {"normalsChannel": "normals"}, + {"nodeIDChannel": "objectId"}, + {"objectSelectChannel": "objectSelect"}, + {"rawCoatFilterChannel": "raw_coat_filter"}, + {"rawCoatReflectionChannel": "raw_coat_reflection"}, + {"rawDiffuseFilterChannel": "rawDiffuseFilter"}, + {"rawGiChannel": "rawGI"}, + {"rawLightChannel": "rawLight"}, + {"rawReflectionChannel": "rawReflection"}, + {"rawReflectionFilterChannel": "rawReflectionFilter"}, + {"rawRefractionChannel": "rawRefraction"}, + {"rawRefractionFilterChannel": "rawRefractionFilter"}, + {"rawShadowChannel": "rawShadow"}, + {"rawSheenFilterChannel": "raw_sheen_filter"}, + {"rawSheenReflectionChannel": "raw_sheen_reflection"}, + {"rawTotalLightChannel": "rawTotalLight"}, + {"reflectIORChannel": "reflIOR"}, + {"reflectChannel": "reflect"}, + {"reflectionFilterChannel": "reflectionFilter"}, + {"reflectGlossinessChannel": "reflGloss"}, + {"refractChannel": "refract"}, + {"refractionFilterChannel": "refractionFilter"}, + {"refractGlossinessChannel": "refrGloss"}, + {"renderIDChannel": "renderId"}, + {"FastSSS2Channel": "SSS"}, + {"sampleRateChannel": "sampleRate"}, + {"samplerInfo": "samplerInfo"}, + {"selfIllumChannel": "selfIllum"}, + {"shadowChannel": "shadow"}, + {"sheenFilterChannel": "sheen_filter"}, + {"sheenGlossinessChannel": "sheenGloss"}, + {"sheenReflectionChannel": "sheen_reflection"}, + {"vraySheenChannel": "sheen_specular"}, + {"specularChannel": "specular"}, + {"Toon": "Toon"}, + {"toonLightingChannel": "toonLighting"}, + {"toonSpecularChannel": "toonSpecular"}, + {"totalLightChannel": "totalLight"}, + {"unclampedColorChannel": "unclampedColor"}, + {"VRScansPaintMaskChannel": "VRScansPaintMask"}, + {"VRScansZoneMaskChannel": "VRScansZoneMask"}, + {"velocityChannel": "velocity"}, + {"zdepthChannel": "zDepth"}, + {"LightSelectElement": "lightselect"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like aaFilterSize" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + 
"collapsible": true, + "key": "redshift_renderer", + "label": "Redshift Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "primary_gi_engine", + "label": "Primary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "secondary_gi_engine", + "label": "Secondary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"iff": "Maya IFF"}, + {"exr": "OpenEXR"}, + {"tif": "TIFF"}, + {"png": "PNG"}, + {"tga": "Targa"}, + {"jpg": "JPEG"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "force_combine", + "label": "Force combine beauty and AOVs", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< none >"}, + {"AO": "Ambient Occlusion"}, + {"Background": "Background"}, + {"Beauty": "Beauty"}, + {"BumpNormals": "Bump Normals"}, + {"Caustics": "Caustics"}, + {"CausticsRaw": "Caustics Raw"}, + {"Cryptomatte": "Cryptomatte"}, + {"Custom": "Custom"}, + {"Z": "Depth"}, + {"DiffuseFilter": "Diffuse Filter"}, + {"DiffuseLighting": "Diffuse Lighting"}, + {"DiffuseLightingRaw": "Diffuse Lighting Raw"}, + {"Emission": "Emission"}, + {"GI": "Global Illumination"}, + {"GIRaw": "Global Illumination Raw"}, + {"Matte": "Matte"}, + {"MotionVectors": "Ambient Occlusion"}, + {"N": "Normals"}, + {"ID": "ObjectID"}, + {"ObjectBumpNormal": "Object-Space Bump Normals"}, + {"ObjectPosition": "Object-Space Positions"}, + {"PuzzleMatte": "Puzzle Matte"}, + {"Reflections": "Reflections"}, + {"ReflectionsFilter": "Reflections Filter"}, + {"ReflectionsRaw": "Reflections Raw"}, + {"Refractions": "Refractions"}, + {"RefractionsFilter": "Refractions Filter"}, + {"RefractionsRaw": "Refractions Filter"}, + {"Shadows": "Shadows"}, + {"SpecularLighting": "Specular Lighting"}, + {"SSS": "Sub Surface Scatter"}, + {"SSSRaw": "Sub Surface Scatter Raw"}, + {"TotalDiffuseLightingRaw": "Total Diffuse Lighting Raw"}, + {"TotalTransLightingRaw": "Total Translucency Filter"}, + {"TransTint": "Translucency Filter"}, + {"TransGIRaw": "Translucency Lighting Raw"}, + {"VolumeFogEmission": "Volume Fog Emission"}, + {"VolumeFogTint": "Volume Fog Tint"}, + {"VolumeLighting": "Volume Lighting"}, + {"P": "World Position"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like reflectionMaxTraceDepth" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json index 5bd8337e4c..805424c632 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json +++ 
b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json
@@ -11,10 +11,52 @@
         {
             "key": "LoadImage",
             "label": "Image Loader"
+        }
+    ]
+},
+{
+    "type": "dict",
+    "collapsible": true,
+    "key": "LoadClip",
+    "label": "Clip Loader",
+    "checkbox_key": "enabled",
+    "children": [
+        {
+            "type": "boolean",
+            "key": "enabled",
+            "label": "Enabled"
         },
         {
-            "key": "LoadClip",
-            "label": "Clip Loader"
+            "type": "list",
+            "key": "_representations",
+            "label": "Representations",
+            "object_type": "text"
+        },
+        {
+            "type": "text",
+            "key": "node_name_template",
+            "label": "Node name template"
+        },
+        {
+            "type": "splitter"
+        },
+        {
+            "type": "dict",
+            "collapsible": false,
+            "key": "options_defaults",
+            "label": "Loader option defaults",
+            "children": [
+                {
+                    "type": "boolean",
+                    "key": "start_at_workfile",
+                    "label": "Start at workfile beginning"
+                },
+                {
+                    "type": "boolean",
+                    "key": "add_retime",
+                    "label": "Add retime"
+                }
+            ]
         }
     ]
 }
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json
index 484fbf9d07..a4b28f47bc 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json
@@ -13,6 +13,9 @@
             {
                 "ftrackreview": "Add review to Ftrack"
             },
+            {
+                "shotgridreview": "Add review to Shotgrid"
+            },
             {
                 "delete": "Delete output"
             },
diff --git a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json
index 654ddf2938..7c5774415c 100644
--- a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json
+++ b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json
@@ -50,8 +50,15 @@
         "is_group": true,
         "children": [
             {
-                "type": "label",
-                "label": "Intent"
+                "type": "boolean",
+                "key": "allow_empty_intent",
+                "label": "Allow empty intent"
+            },
+            {
+                "type": "text",
+                "key": "empty_intent_label",
+                "label": "Empty item label",
+                "placeholder": "< Not set >"
             },
             {
                 "type": "dict-modifiable",
@@ -64,7 +71,8 @@
             {
                 "key": "default",
                 "type": "text",
-                "label": "Default Intent"
+                "label": "Default Intent",
+                "placeholder": "< First available >"
             },
             {
                 "type": "separator"
diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json
index d22b9016a7..952b38040c 100644
--- a/openpype/settings/entities/schemas/system_schema/schema_modules.json
+++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json
@@ -48,6 +48,60 @@
         "type": "schema",
         "name": "schema_kitsu"
     },
+    {
+        "type": "dict",
+        "key": "shotgrid",
+        "label": "Shotgrid",
+        "collapsible": true,
+        "checkbox_key": "enabled",
+        "children": [
+            {
+                "type": "boolean",
+                "key": "enabled",
+                "label": "Enabled"
+            },
+            {
+                "type": "text",
+                "key": "leecher_manager_url",
+                "label": "Shotgrid Leecher Manager URL"
+            },
+            {
+                "type": "text",
+                "key": "leecher_backend_url",
+                "label": "Shotgrid Leecher Backend URL"
+            },
+            {
+                "type": "boolean",
+                "key": "filter_projects_by_login",
+                "label": "Filter projects by SG login"
+            },
+            {
+                "type": "dict-modifiable",
+                "key": "shotgrid_settings",
+                "label": "Shotgrid Servers",
+                "object_type": {
+                    "type": "dict",
+                    "children": [
+                        {
+                            "key": "shotgrid_url",
+                            "label": "Server URL",
+                            
"type": "text" + }, + { + "key": "shotgrid_script_name", + "label": "Script Name", + "type": "text" + }, + { + "key": "shotgrid_script_key", + "label": "Script api key", + "type": "text" + } + ] + } + } + ] + }, { "type": "dict", "key": "timers_manager", diff --git a/openpype/settings/handlers.py b/openpype/settings/handlers.py index c99fc6080b..15ae2351fd 100644 --- a/openpype/settings/handlers.py +++ b/openpype/settings/handlers.py @@ -7,6 +7,8 @@ from abc import ABCMeta, abstractmethod import six import openpype.version +from openpype.client.mongo import OpenPypeMongoConnection +from openpype.client.entities import get_project_connection, get_project from .constants import ( GLOBAL_SETTINGS_KEY, @@ -337,9 +339,6 @@ class MongoSettingsHandler(SettingsHandler): def __init__(self): # Get mongo connection - from openpype.lib import OpenPypeMongoConnection - from openpype.pipeline import AvalonMongoDB - settings_collection = OpenPypeMongoConnection.get_mongo_client() self._anatomy_keys = None @@ -362,7 +361,6 @@ class MongoSettingsHandler(SettingsHandler): self.collection_name = collection_name self.collection = settings_collection[database_name][collection_name] - self.avalon_db = AvalonMongoDB() self.system_settings_cache = CacheValues() self.project_settings_cache = collections.defaultdict(CacheValues) @@ -607,16 +605,14 @@ class MongoSettingsHandler(SettingsHandler): new_data = data_cache.data_copy() # Prepare avalon project document - collection = self.avalon_db.database[project_name] - project_doc = collection.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) if not project_doc: raise ValueError(( "Project document of project \"{}\" does not exists." " Create project first." ).format(project_name)) + collection = get_project_connection(project_name) # Project's data update_dict_data = {} project_doc_data = project_doc.get("data") or {} @@ -1145,8 +1141,7 @@ class MongoSettingsHandler(SettingsHandler): document, version ) else: - collection = self.avalon_db.database[project_name] - project_doc = collection.find_one({"type": "project"}) + project_doc = get_project(project_name) self.project_anatomy_cache[project_name].update_data( self.project_doc_to_anatomy_data(project_doc), self._current_version diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index ccccc76a08..c8952e5a1c 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -22,7 +22,6 @@ def test_backward_compatibility(printer): from openpype.lib import any_outdated from openpype.lib import get_asset from openpype.lib import get_linked_assets - from openpype.lib import get_latest_version from openpype.lib import get_ffprobe_streams from openpype.hosts.fusion.lib import switch_item diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index a5174bd804..3ce44ea6c8 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -272,15 +272,15 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): # update availability on active site when version changes if self.sync_server.enabled and version_doc: - query = self._repre_per_version_pipeline( + repre_info = self.sync_server.get_repre_info_for_versions( + project_name, [version_doc["_id"]], self.active_site, self.remote_site ) - docs = list(self.dbcon.aggregate(query)) - if docs: - repre = docs.pop() - version_doc["data"].update(self._get_repre_dict(repre)) + if repre_info: + 
version_doc["data"].update(
+                    self._get_repre_dict(repre_info[0]))
 
             self.set_version(index, version_doc)
 
@@ -478,16 +478,16 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
         for _subset_id, doc in last_versions_by_subset_id.items():
             version_ids.add(doc["_id"])
 
-            query = self._repre_per_version_pipeline(
+            repres = self.sync_server.get_repre_info_for_versions(
+                project_name,
                 list(version_ids),
                 self.active_site,
                 self.remote_site
             )
-
-            for doc in self.dbcon.aggregate(query):
+            for repre in repres:
                 if self._doc_fetching_stop:
                     return
-                doc["active_provider"] = self.active_provider
-                doc["remote_provider"] = self.remote_provider
-                repre_info[doc["_id"]] = doc
+                repre["active_provider"] = self.active_provider
+                repre["remote_provider"] = self.remote_provider
+                repre_info[repre["_id"]] = repre
 
         self._doc_payload = {
             "asset_docs_by_id": asset_docs_by_id,
@@ -827,83 +827,6 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
 
         return data
 
-    def _repre_per_version_pipeline(self, version_ids,
-                                    active_site, remote_site):
-        query = [
-            {"$match": {"parent": {"$in": version_ids},
-                        "type": "representation",
-                        "files.sites.name": {"$exists": 1}}},
-            {"$unwind": "$files"},
-            {'$addFields': {
-                'order_local': {
-                    '$filter': {
-                        'input': '$files.sites', 'as': 'p',
-                        'cond': {'$eq': ['$$p.name', active_site]}
-                    }
-                }
-            }},
-            {'$addFields': {
-                'order_remote': {
-                    '$filter': {
-                        'input': '$files.sites', 'as': 'p',
-                        'cond': {'$eq': ['$$p.name', remote_site]}
-                    }
-                }
-            }},
-            {'$addFields': {
-                'progress_local': {"$arrayElemAt": [{
-                    '$cond': [
-                        {'$size': "$order_local.progress"},
-                        "$order_local.progress",
-                        # if exists created_dt count is as available
-                        {'$cond': [
-                            {'$size': "$order_local.created_dt"},
-                            [1],
-                            [0]
-                        ]}
-                    ]},
-                    0
-                ]}
-            }},
-            {'$addFields': {
-                'progress_remote': {"$arrayElemAt": [{
-                    '$cond': [
-                        {'$size': "$order_remote.progress"},
-                        "$order_remote.progress",
-                        # if exists created_dt count is as available
-                        {'$cond': [
-                            {'$size': "$order_remote.created_dt"},
-                            [1],
-                            [0]
-                        ]}
-                    ]},
-                    0
-                ]}
-            }},
-            {'$group': {  # first group by repre
-                '_id': '$_id',
-                'parent': {'$first': '$parent'},
-                'avail_ratio_local': {
-                    '$first': {
-                        '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}]
-                    }
-                },
-                'avail_ratio_remote': {
-                    '$first': {
-                        '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}]
-                    }
-                }
-            }},
-            {'$group': {  # second group by parent, eg version_id
-                '_id': '$parent',
-                'repre_count': {'$sum': 1},  # total representations
-                # fully available representation for site
-                'avail_repre_local': {'$sum': "$avail_ratio_local"},
-                'avail_repre_remote': {'$sum': "$avail_ratio_remote"},
-            }},
-        ]
-        return query
-
 
 class GroupMemberFilterProxyModel(QtCore.QSortFilterProxyModel):
     """Provide the feature of filtering group by the acceptance of members
diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py
index 13e18b3757..48c038418a 100644
--- a/openpype/tools/loader/widgets.py
+++ b/openpype/tools/loader/widgets.py
@@ -434,7 +434,8 @@ class SubsetWidget(QtWidgets.QWidget):
 
         # Get all representation->loader combinations available for the
         # index under the cursor, so we can list the user the options.
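
The loader model hunks above swap a hand-written Mongo aggregation for a single call into the sync server module. A minimal sketch of the new call shape, assuming (as both call sites imply) that the helper returns one availability document per version, keyed by the version's `_id`:

```python
def collect_repre_availability(sync_server, project_name, version_ids,
                               active_site, remote_site):
    # Signature taken from the diff above; the return shape is an
    # assumption inferred from how SubsetsModel consumes the result.
    repre_info_by_version_id = {}
    for repre in sync_server.get_repre_info_for_versions(
        project_name, list(version_ids), active_site, remote_site
    ):
        # "_id" is the version id, which is how SubsetsModel indexes it
        repre_info_by_version_id[repre["_id"]] = repre
    return repre_info_by_version_id
```
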
- available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) if self.tool_name: available_loaders = lib.remove_tool_name_from_loaders( available_loaders, self.tool_name @@ -1330,7 +1331,8 @@ class RepresentationWidget(QtWidgets.QWidget): selected_side = self._get_selected_side(point_index, rows) # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. - available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) filtered_loaders = [] for loader in available_loaders: diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 915fb7f32e..b48bb61386 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -154,15 +154,20 @@ class PublishReport: self._all_instances_by_id = {} self._current_context = None - def reset(self, context, publish_discover_result=None): + def reset(self, context, create_context): """Reset report and clear all data.""" - self._publish_discover_result = publish_discover_result + + self._publish_discover_result = create_context.publish_discover_result self._plugin_data = [] self._plugin_data_with_plugin = [] self._current_plugin_data = {} self._all_instances_by_id = {} self._current_context = context + for plugin in create_context.publish_plugins_mismatch_targets: + plugin_data = self._add_plugin_data_item(plugin) + plugin_data["skipped"] = True + def add_plugin_iter(self, plugin, context): """Add report about single iteration of plugin.""" for instance in context: @@ -205,6 +210,7 @@ class PublishReport: "name": plugin.__name__, "label": label, "order": plugin.order, + "targets": list(plugin.targets), "instances_data": [], "actions_data": [], "skipped": False, @@ -569,6 +575,8 @@ class PublisherController: # Stop publishing self.stop_publish() + self.save_changes() + # Reset avalon context self.create_context.reset_avalon_context() @@ -777,10 +785,7 @@ class PublisherController: # - pop the key after first collector using it would be safest option? 
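
For context on the `PublishReport.reset` change above: plugins filtered out by pyblish targets are now recorded up front and flagged as skipped, and every plugin item stores its `targets` list. A small self-contained sketch of the stored item, mirroring `_add_plugin_data_item` from the diff (the `label` handling is simplified here):

```python
import pyblish.api


def make_plugin_data_item(plugin):
    # Mirrors the fields written by _add_plugin_data_item in the diff
    return {
        "name": plugin.__name__,
        "label": getattr(plugin, "label", None),
        "order": plugin.order,
        "targets": list(plugin.targets),  # newly stored field
        "instances_data": [],
        "actions_data": [],
        "skipped": False,
    }


class CollectFarmOnly(pyblish.api.ContextPlugin):
    """Hypothetical plugin that only runs for the 'farm' target."""
    order = pyblish.api.CollectorOrder
    targets = ["farm"]


item = make_plugin_data_item(CollectFarmOnly)
item["skipped"] = True  # mismatched-target plugins are pre-marked skipped
```
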
self._publish_context.data["create_context"] = self.create_context - self._publish_report.reset( - self._publish_context, - self.create_context.publish_discover_result - ) + self._publish_report.reset(self._publish_context, self.create_context) self._publish_validation_errors = [] self._publish_current_plugin_validation_errors = None self._publish_error = None diff --git a/openpype/tools/publisher/publish_report_viewer/model.py b/openpype/tools/publisher/publish_report_viewer/model.py index a88129a358..704feeb4bd 100644 --- a/openpype/tools/publisher/publish_report_viewer/model.py +++ b/openpype/tools/publisher/publish_report_viewer/model.py @@ -3,6 +3,7 @@ from Qt import QtCore, QtGui import pyblish.api +from openpype.tools.utils.lib import html_escape from .constants import ( ITEM_ID_ROLE, ITEM_IS_GROUP_ROLE, @@ -45,7 +46,8 @@ class InstancesModel(QtGui.QStandardItemModel): all_removed = True for instance_item in instance_items: item = QtGui.QStandardItem(instance_item.label) - item.setData(instance_item.label, ITEM_LABEL_ROLE) + instance_label = html_escape(instance_item.label) + item.setData(instance_label, ITEM_LABEL_ROLE) item.setData(instance_item.errored, ITEM_ERRORED_ROLE) item.setData(instance_item.id, ITEM_ID_ROLE) item.setData(instance_item.removed, INSTANCE_REMOVED_ROLE) diff --git a/openpype/tools/publisher/publish_report_viewer/report_items.py b/openpype/tools/publisher/publish_report_viewer/report_items.py index b47d14da25..8a01569723 100644 --- a/openpype/tools/publisher/publish_report_viewer/report_items.py +++ b/openpype/tools/publisher/publish_report_viewer/report_items.py @@ -83,10 +83,8 @@ class PublishReport: logs = [] plugins_items_by_id = {} - plugins_id_order = [] for plugin_data in data["plugins_data"]: item = PluginItem(plugin_data) - plugins_id_order.append(item.id) plugins_items_by_id[item.id] = item for instance_data_item in plugin_data["instances_data"]: instance_id = instance_data_item["id"] @@ -95,6 +93,14 @@ class PublishReport: copy.deepcopy(log_item_data), item.id, instance_id ) logs.append(log_item) + sorted_plugins = sorted( + plugins_items_by_id.values(), + key=lambda item: item.order + ) + plugins_id_order = [ + plugin_item.id + for plugin_item in sorted_plugins + ] logs_by_instance_id = collections.defaultdict(list) for log_item in logs: diff --git a/openpype/tools/publisher/publish_report_viewer/widgets.py b/openpype/tools/publisher/publish_report_viewer/widgets.py index fd226ea0e4..61eb814a56 100644 --- a/openpype/tools/publisher/publish_report_viewer/widgets.py +++ b/openpype/tools/publisher/publish_report_viewer/widgets.py @@ -1,3 +1,4 @@ +from math import ceil from Qt import QtWidgets, QtCore, QtGui from openpype.widgets.nice_checkbox import NiceCheckbox @@ -137,13 +138,75 @@ class PluginLoadReportWidget(QtWidgets.QWidget): self._model.set_report(report) +class ZoomPlainText(QtWidgets.QPlainTextEdit): + def __init__(self, *args, **kwargs): + super(ZoomPlainText, self).__init__(*args, **kwargs) + + anim_timer = QtCore.QTimer() + anim_timer.setInterval(20) + + anim_timer.timeout.connect(self._scaling_callback) + + self._anim_timer = anim_timer + self._zoom_enabled = False + self._scheduled_scalings = 0 + self._point_size = None + + def wheelEvent(self, event): + if not self._zoom_enabled: + super(ZoomPlainText, self).wheelEvent(event) + return + + degrees = float(event.delta()) / 8 + steps = int(ceil(degrees / 5)) + self._scheduled_scalings += steps + if (self._scheduled_scalings * steps < 0): + self._scheduled_scalings = steps + + 
self._anim_timer.start() + + def _scaling_callback(self): + if self._scheduled_scalings == 0: + self._anim_timer.stop() + return + + factor = 1.0 + (self._scheduled_scalings / 300) + font = self.font() + if self._point_size is None: + self._point_size = font.pointSizeF() + + self._point_size *= factor + if self._point_size < 1: + self._point_size = 1.0 + + font.setPointSizeF(self._point_size) + # Using 'self.setFont(font)' would not be propagated when stylesheets + # are applied on this widget + self.setStyleSheet("font-size: {}pt".format(font.pointSize())) + + if self._scheduled_scalings > 0: + self._scheduled_scalings -= 1 + else: + self._scheduled_scalings += 1 + + def keyPressEvent(self, event): + if event.key() == QtCore.Qt.Key_Control: + self._zoom_enabled = True + super(ZoomPlainText, self).keyPressEvent(event) + + def keyReleaseEvent(self, event): + if event.key() == QtCore.Qt.Key_Control: + self._zoom_enabled = False + super(ZoomPlainText, self).keyReleaseEvent(event) + + class DetailsWidget(QtWidgets.QWidget): def __init__(self, parent): super(DetailsWidget, self).__init__(parent) - output_widget = QtWidgets.QPlainTextEdit(self) - output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) + output_widget = ZoomPlainText(self) output_widget.setObjectName("PublishLogConsole") + output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py index b6fcee7edb..fa391f4ba0 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/openpype/tools/publisher/widgets/card_view_widgets.py @@ -28,6 +28,7 @@ from Qt import QtWidgets, QtCore from openpype.widgets.nice_checkbox import NiceCheckbox from openpype.tools.utils import BaseClickableFrame +from openpype.tools.utils.lib import html_escape from .widgets import ( AbstractInstanceView, ContextWarningLabel, @@ -98,6 +99,7 @@ class GroupWidget(QtWidgets.QWidget): instances(list): List of instances in CreateContext. """ + # Store instances by id and by subset name instances_by_id = {} instances_by_subset_name = collections.defaultdict(list) @@ -142,6 +144,7 @@ class GroupWidget(QtWidgets.QWidget): class CardWidget(BaseClickableFrame): """Clickable card used as bigger button.""" + selected = QtCore.Signal(str, str) # Group identifier of card # - this must be set because if send when mouse is released with card id @@ -178,6 +181,7 @@ class ContextCardWidget(CardWidget): Is not visually under group widget and is always at the top of card view. 
""" + def __init__(self, parent): super(ContextCardWidget, self).__init__(parent) @@ -204,13 +208,14 @@ class ContextCardWidget(CardWidget): class InstanceCardWidget(CardWidget): """Card widget representing instance.""" + active_changed = QtCore.Signal() def __init__(self, instance, group_icon, parent): super(InstanceCardWidget, self).__init__(parent) self._id = instance.id - self._group_identifier = instance.creator_label + self._group_identifier = instance.group_label self._group_icon = group_icon self.instance = instance @@ -303,7 +308,7 @@ class InstanceCardWidget(CardWidget): self._last_variant = variant self._last_subset_name = subset_name # Make `variant` bold - label = self.instance.label + label = html_escape(self.instance.label) found_parts = set(re.findall(variant, label, re.IGNORECASE)) if found_parts: for part in found_parts: diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index 3476ee487e..6e31ba635b 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -28,6 +28,7 @@ from Qt import QtWidgets, QtCore, QtGui from openpype.style import get_objected_colors from openpype.widgets.nice_checkbox import NiceCheckbox +from openpype.tools.utils.lib import html_escape from .widgets import AbstractInstanceView from ..constants import ( INSTANCE_ID_ROLE, @@ -113,7 +114,9 @@ class InstanceListItemWidget(QtWidgets.QWidget): self.instance = instance - subset_name_label = QtWidgets.QLabel(instance.label, self) + instance_label = html_escape(instance.label) + + subset_name_label = QtWidgets.QLabel(instance_label, self) subset_name_label.setObjectName("ListViewSubsetName") active_checkbox = NiceCheckbox(parent=self) @@ -178,7 +181,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): # Check subset name label = self.instance.label if label != self._instance_label_widget.text(): - self._instance_label_widget.setText(label) + self._instance_label_widget.setText(html_escape(label)) # Check active state self.set_active(self.instance["active"]) # Check valid states diff --git a/openpype/tools/pyblish_pype/model.py b/openpype/tools/pyblish_pype/model.py index 2931a379b3..31aa63677e 100644 --- a/openpype/tools/pyblish_pype/model.py +++ b/openpype/tools/pyblish_pype/model.py @@ -86,7 +86,7 @@ class IntentModel(QtGui.QStandardItemModel): First and default value is {"< Not Set >": None} """ - default_item = {"< Not Set >": None} + default_empty_label = "< Not set >" def __init__(self, parent=None): super(IntentModel, self).__init__(parent) @@ -102,27 +102,39 @@ class IntentModel(QtGui.QStandardItemModel): self._item_count = 0 self.default_index = 0 - intents_preset = ( + intent_settings = ( get_system_settings() .get("modules", {}) .get("ftrack", {}) .get("intent", {}) ) - default = intents_preset.get("default") - items = intents_preset.get("items", {}) + items = intent_settings.get("items", {}) if not items: return - for idx, item_value in enumerate(items.keys()): + allow_empty_intent = intent_settings.get("allow_empty_intent", True) + empty_intent_label = ( + intent_settings.get("empty_intent_label") + or self.default_empty_label + ) + listed_items = list(items.items()) + if allow_empty_intent: + listed_items.insert(0, ("", empty_intent_label)) + + default = intent_settings.get("default") + + for idx, item in enumerate(listed_items): + item_value = item[0] if item_value == default: self.default_index = idx break - self.add_items(items) + 
self._add_items(listed_items) - def add_items(self, items): - for value, label in items.items(): + def _add_items(self, items): + for item in items: + value, label = item new_item = QtGui.QStandardItem() new_item.setData(label, QtCore.Qt.DisplayRole) new_item.setData(value, Roles.IntentItemValue) diff --git a/openpype/tools/pyblish_pype/window.py b/openpype/tools/pyblish_pype/window.py index 78590259bc..e167405325 100644 --- a/openpype/tools/pyblish_pype/window.py +++ b/openpype/tools/pyblish_pype/window.py @@ -523,6 +523,7 @@ class Window(QtWidgets.QDialog): instance_item.setData(enable_value, Roles.IsEnabledRole) def _add_intent_to_context(self): + context_value = None if ( self.intent_model.has_items and "intent" not in self.controller.context.data @@ -530,11 +531,17 @@ class Window(QtWidgets.QDialog): idx = self.intent_model.index(self.intent_box.currentIndex(), 0) intent_value = self.intent_model.data(idx, Roles.IntentItemValue) intent_label = self.intent_model.data(idx, QtCore.Qt.DisplayRole) + if intent_value: + context_value = { + "value": intent_value, + "label": intent_label + } - self.controller.context.data["intent"] = { - "value": intent_value, - "label": intent_label - } + # Unset intent if is set to empty value + if context_value is None: + self.controller.context.data.pop("intent", None) + else: + self.controller.context.data["intent"] = context_value def on_instance_toggle(self, index, state=None): """An item is requesting to be toggled""" diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py index 63d181b2d6..e0e43aaba7 100644 --- a/openpype/tools/sceneinventory/view.py +++ b/openpype/tools/sceneinventory/view.py @@ -551,16 +551,16 @@ class SceneInventoryView(QtWidgets.QTreeView): "toggle": selection_model.Toggle, }[options.get("mode", "select")] - for item in iter_model_rows(model, 0): - item = item.data(InventoryModel.ItemRole) + for index in iter_model_rows(model, 0): + item = index.data(InventoryModel.ItemRole) if item.get("isGroupNode"): continue name = item.get("objectName") if name in object_names: - self.scrollTo(item) # Ensure item is visible + self.scrollTo(index) # Ensure item is visible flags = select_mode | selection_model.Rows - selection_model.select(item, flags) + selection_model.select(index, flags) object_names.remove(name) diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py index 764f42f1a3..f42027d9e2 100644 --- a/openpype/tools/settings/settings/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -854,6 +854,9 @@ class ProjectWidget(SettingsCategoryWidget): project_list_widget.version_change_requested.connect( self._on_source_version_change ) + project_list_widget.extract_to_file_requested.connect( + self._on_extract_to_file + ) self.project_list_widget = project_list_widget diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index 45c21d5685..88d923c16a 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -1008,6 +1008,7 @@ class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel): class ProjectListWidget(QtWidgets.QWidget): project_changed = QtCore.Signal() version_change_requested = QtCore.Signal(str) + extract_to_file_requested = QtCore.Signal() def __init__(self, parent, only_active=False): self._parent = parent @@ -1099,7 +1100,12 @@ class ProjectListWidget(QtWidgets.QWidget): self.version_change_requested ) 
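
The `extract_to_file_requested` wiring in the two settings hunks above follows the usual Qt signal-relay pattern: the list widget only emits a parameterless request and the owning page decides what to do with it. A generic illustration with hypothetical class names (requires a running `QApplication`, as any widget code does):

```python
from Qt import QtCore, QtWidgets  # Qt.py shim, as used across OpenPype tools


class ProjectList(QtWidgets.QWidget):
    # Parameterless request signal, like 'extract_to_file_requested'
    extract_to_file_requested = QtCore.Signal()


class SettingsPage(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(SettingsPage, self).__init__(parent)
        project_list = ProjectList(self)
        # The page reacts to the request without knowing which menu
        # action or button emitted it
        project_list.extract_to_file_requested.connect(
            self._on_extract_to_file
        )

    def _on_extract_to_file(self):
        print("Extract to file requested")
```
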
submenu.addAction(action) + + extract_action = QtWidgets.QAction("Extract to file", menu) + extract_action.triggered.connect(self.extract_to_file_requested) + menu.addMenu(submenu) + menu.addAction(extract_action) menu.exec_(QtGui.QCursor.pos()) def on_item_clicked(self, new_index): diff --git a/openpype/tools/traypublisher/window.py b/openpype/tools/traypublisher/window.py index 5934c4aa8a..cc33287091 100644 --- a/openpype/tools/traypublisher/window.py +++ b/openpype/tools/traypublisher/window.py @@ -12,9 +12,7 @@ from openpype.pipeline import ( install_host, AvalonMongoDB, ) -from openpype.hosts.traypublisher import ( - api as traypublisher -) +from openpype.hosts.traypublisher.api import TrayPublisherHost from openpype.tools.publisher import PublisherWindow from openpype.tools.utils.constants import PROJECT_NAME_ROLE from openpype.tools.utils.models import ( @@ -111,9 +109,13 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): if project_name: self._set_project(project_name) + @property + def host(self): + return self._publisher_window.controller.host + def _set_project(self, project_name): self._project_name = project_name - traypublisher.set_project_name(project_name) + self.host.set_project_name(project_name) self.setVisible(False) self.project_selected.emit(project_name) @@ -190,7 +192,8 @@ class TrayPublishWindow(PublisherWindow): def main(): - install_host(traypublisher) + host = TrayPublisherHost() + install_host(host) app = QtWidgets.QApplication([]) window = TrayPublishWindow() window.show() diff --git a/openpype/tools/utils/__init__.py b/openpype/tools/utils/__init__.py index 0f367510bd..5ccc1b40b3 100644 --- a/openpype/tools/utils/__init__.py +++ b/openpype/tools/utils/__init__.py @@ -1,4 +1,5 @@ from .widgets import ( + CustomTextComboBox, PlaceholderLineEdit, BaseClickableFrame, ClickableFrame, @@ -28,6 +29,7 @@ from .overlay_messages import ( __all__ = ( + "CustomTextComboBox", "PlaceholderLineEdit", "BaseClickableFrame", "ClickableFrame", diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index ae23e4d089..52d15a59f7 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -60,31 +60,14 @@ class HostToolsHelper: return self._workfiles_tool - def show_workfiles(self, parent=None, use_context=None, save=None): + def show_workfiles( + self, parent=None, use_context=None, save=None, on_top=None + ): """Workfiles tool for changing context and saving workfiles.""" - if use_context is None: - use_context = True - - if save is None: - save = True with qt_app_context(): workfiles_tool = self.get_workfiles_tool(parent) - workfiles_tool.set_save_enabled(save) - - if not workfiles_tool.isVisible(): - workfiles_tool.show() - - if use_context: - context = { - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"] - } - workfiles_tool.set_context(context) - - # Pull window to the front. 
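
Because `show_workfiles` now forwards `*args`/`**kwargs` straight through `show_tool_by_name`, the new `on_top` keyword reaches `HostToolsHelper.show_workfiles` without any wrapper changes. A hypothetical call from host integration code (values illustrative):

```python
from openpype.tools.utils import host_tools

# All keywords are forwarded to HostToolsHelper.show_workfiles and then
# on to Window.ensure_visible, per the hunks above.
host_tools.show_workfiles(parent=None, use_context=True, save=True, on_top=False)
```
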
-            workfiles_tool.raise_()
-            workfiles_tool.activateWindow()
+            workfiles_tool.ensure_visible(use_context, save, on_top)
 
     def get_loader_tool(self, parent):
         """Create, cache and return loader tool window."""
@@ -395,9 +378,9 @@ def show_tool_by_name(tool_name, parent=None, *args, **kwargs):
     _SingletonPoint.show_tool_by_name(tool_name, parent, *args, **kwargs)
 
 
-def show_workfiles(parent=None, use_context=None, save=None):
+def show_workfiles(*args, **kwargs):
     _SingletonPoint.show_tool_by_name(
-        "workfiles", parent, use_context=use_context, save=save
+        "workfiles", *args, **kwargs
     )
 
diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py
index ea1362945f..2169cf8ef1 100644
--- a/openpype/tools/utils/lib.py
+++ b/openpype/tools/utils/lib.py
@@ -37,6 +37,19 @@ def center_window(window):
     window.move(geo.topLeft())
 
 
+def html_escape(text):
+    """Basic escape of html syntax symbols in text."""
+
+    return (
+        text
+        .replace("&", "&amp;")
+        .replace("<", "&lt;")
+        .replace(">", "&gt;")
+        .replace('"', "&quot;")
+        .replace("'", "&#039;")
+    )
+
+
 def set_style_property(widget, property_name, property_value):
     """Set widget's property that may affect style.
 
diff --git a/openpype/tools/utils/widgets.py b/openpype/tools/utils/widgets.py
index d5ae909be8..df0d349822 100644
--- a/openpype/tools/utils/widgets.py
+++ b/openpype/tools/utils/widgets.py
@@ -11,6 +11,28 @@ from openpype.style import (
 
 log = logging.getLogger(__name__)
 
 
+class CustomTextComboBox(QtWidgets.QComboBox):
+    """Combobox which can have different text shown."""
+
+    def __init__(self, *args, **kwargs):
+        self._custom_text = None
+        super(CustomTextComboBox, self).__init__(*args, **kwargs)
+
+    def set_custom_text(self, text=None):
+        if self._custom_text != text:
+            self._custom_text = text
+            self.repaint()
+
+    def paintEvent(self, event):
+        painter = QtWidgets.QStylePainter(self)
+        option = QtWidgets.QStyleOptionComboBox()
+        self.initStyleOption(option)
+        if self._custom_text is not None:
+            option.currentText = self._custom_text
+        painter.drawComplexControl(QtWidgets.QStyle.CC_ComboBox, option)
+        painter.drawControl(QtWidgets.QStyle.CE_ComboBoxLabel, option)
+
+
 class PlaceholderLineEdit(QtWidgets.QLineEdit):
     """Set placeholder color of QLineEdit in Qt 5.12 and higher."""
     def __init__(self, *args, **kwargs):
diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py
index 34692b7102..a4109c511e 100644
--- a/openpype/tools/workfiles/files_widget.py
+++ b/openpype/tools/workfiles/files_widget.py
@@ -12,7 +12,6 @@ from openpype.tools.utils import PlaceholderLineEdit
 from openpype.tools.utils.delegates import PrettyTimeDelegate
 from openpype.lib import (
     emit_event,
-    get_workfile_template_key,
     create_workdir_extra_folders,
 )
 from openpype.lib.avalon_context import (
@@ -24,6 +23,8 @@ from openpype.pipeline import (
     legacy_io,
     Anatomy,
 )
+from openpype.pipeline.workfile import get_workfile_template_key
+
 from .model import (
     WorkAreaFilesModel,
     PublishFilesModel,
diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py
index b62fd2c889..cded4eb1a5 100644
--- a/openpype/tools/workfiles/save_as_dialog.py
+++ b/openpype/tools/workfiles/save_as_dialog.py
@@ -5,18 +5,12 @@ import logging
 
 from Qt import QtWidgets, QtCore
 
-from openpype.client import (
-    get_project,
-    get_asset_by_name,
-)
-from openpype.lib import (
-    get_last_workfile_with_version,
-    get_workdir_data,
-)
 from openpype.pipeline import (
     registered_host,
     legacy_io,
 )
+from openpype.pipeline.workfile import 
get_last_workfile_with_version +from openpype.pipeline.template_data import get_template_data_with_names from openpype.tools.utils import PlaceholderLineEdit log = logging.getLogger(__name__) @@ -30,16 +24,10 @@ def build_workfile_data(session): asset_name = session["AVALON_ASSET"] task_name = session["AVALON_TASK"] host_name = session["AVALON_APP"] - project_doc = get_project( - project_name, fields=["name", "data.code", "config.tasks"] - ) - asset_doc = get_asset_by_name( - project_name, - asset_name, - fields=["name", "data.tasks", "data.parents"] - ) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data = get_template_data_with_names( + project_name, asset_name, task_name, host_name + ) data.update({ "version": 1, "comment": "", diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py index c1efe026f2..de42b80d64 100644 --- a/openpype/tools/workfiles/window.py +++ b/openpype/tools/workfiles/window.py @@ -1,17 +1,20 @@ import os import datetime -from Qt import QtCore, QtWidgets +import copy +from Qt import QtCore, QtWidgets, QtGui from openpype.client import ( - get_asset_by_id, get_asset_by_name, get_workfile_info, ) -from openpype import style -from openpype.lib import ( - create_workfile_doc, - save_workfile_data_to_doc, +from openpype.client.operations import ( + OperationsSession, + new_workfile_info_doc, + prepare_workfile_info_update_data, ) +from openpype import style +from openpype import resources +from openpype.pipeline import Anatomy from openpype.pipeline import legacy_io from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.tasks_widget import TasksWidget @@ -142,21 +145,19 @@ class SidePanelWidget(QtWidgets.QWidget): return self._workfile_doc, data -class Window(QtWidgets.QMainWindow): +class Window(QtWidgets.QWidget): """Work Files Window""" title = "Work Files" def __init__(self, parent=None): super(Window, self).__init__(parent=parent) self.setWindowTitle(self.title) - window_flags = QtCore.Qt.Window | QtCore.Qt.WindowCloseButtonHint - if not parent: - window_flags |= QtCore.Qt.WindowStaysOnTopHint - self.setWindowFlags(window_flags) + icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + self.setWindowIcon(icon) + self.setWindowFlags(self.windowFlags() | QtCore.Qt.Window) # Create pages widget and set it as central widget pages_widget = QtWidgets.QStackedWidget(self) - self.setCentralWidget(pages_widget) home_page_widget = QtWidgets.QWidget(pages_widget) home_body_widget = QtWidgets.QWidget(home_page_widget) @@ -191,6 +192,9 @@ class Window(QtWidgets.QMainWindow): # the files widget has a filter field which tasks does not. 
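
The save-as dialog above now delegates document queries and data assembly to a single helper. A standalone sketch of that call, with the argument order taken from the diff (the returned mapping is assumed to be the usual workdir template data):

```python
from openpype.pipeline.template_data import get_template_data_with_names

# Hypothetical context values; argument order follows the diff:
# project, asset, task, host.
data = get_template_data_with_names(
    "demo_project", "sh010", "compositing", "nuke"
)
# build_workfile_data then extends this with workfile-specific keys,
# as the hunk above shows.
data.update({"version": 1, "comment": ""})
```
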
tasks_widget.setContentsMargins(0, 32, 0, 0) + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.addWidget(pages_widget, 1) + # Set context after asset widget is refreshed # - to do so it is necessary to wait until refresh is done set_context_timer = QtCore.QTimer() @@ -227,6 +231,49 @@ class Window(QtWidgets.QMainWindow): self._first_show = True self._context_to_set = None + def ensure_visible( + self, use_context=None, save=None, on_top=None + ): + if save is None: + save = True + + self.set_save_enabled(save) + + if self.isVisible(): + use_context = False + elif use_context is None: + use_context = True + + if on_top is None and self._first_show: + on_top = self.parent() is None + + window_flags = self.windowFlags() + new_window_flags = window_flags + if on_top is True: + new_window_flags = window_flags | QtCore.Qt.WindowStaysOnTopHint + elif on_top is False: + new_window_flags = window_flags & ~QtCore.Qt.WindowStaysOnTopHint + + if new_window_flags != window_flags: + # Note this is not propagated after initialization of widget in + # some Qt builds + self.setWindowFlags(new_window_flags) + self.show() + + elif not self.isVisible(): + self.show() + + if use_context is None or use_context is True: + context = { + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] + } + self.set_context(context) + + # Pull window to the front. + self.raise_() + self.activateWindow() + @property def project_name(self): return legacy_io.Session["AVALON_PROJECT"] @@ -279,10 +326,23 @@ class Window(QtWidgets.QMainWindow): workfile_doc, data = self.side_panel.get_workfile_data() if not workfile_doc: filepath = self.files_widget._get_selected_filepath() - self._create_workfile_doc(filepath, force=True) - workfile_doc = self._get_current_workfile_doc() + workfile_doc = self._create_workfile_doc(filepath) - save_workfile_data_to_doc(workfile_doc, data, legacy_io) + new_workfile_doc = copy.deepcopy(workfile_doc) + new_workfile_doc["data"] = data + update_data = prepare_workfile_info_update_data( + workfile_doc, new_workfile_doc + ) + if not update_data: + return + + project_name = legacy_io.active_project() + + session = OperationsSession() + session.update_entity( + project_name, "workfile", workfile_doc["_id"], update_data + ) + session.commit() def _get_current_workfile_doc(self, filepath=None): if filepath is None: @@ -298,20 +358,32 @@ class Window(QtWidgets.QMainWindow): project_name, asset_id, task_name, filename ) - def _create_workfile_doc(self, filepath, force=False): - workfile_doc = None - if not force: - workfile_doc = self._get_current_workfile_doc(filepath) + def _create_workfile_doc(self, filepath): + workfile_doc = self._get_current_workfile_doc(filepath) + if workfile_doc: + return workfile_doc - if not workfile_doc: - workdir, filename = os.path.split(filepath) - asset_id = self.assets_widget.get_selected_asset_id() - project_name = legacy_io.active_project() - asset_doc = get_asset_by_id(project_name, asset_id) - task_name = self.tasks_widget.get_selected_task_name() - create_workfile_doc( - asset_doc, task_name, filename, workdir, legacy_io - ) + workdir, filename = os.path.split(filepath) + + project_name = legacy_io.active_project() + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + + anatomy = Anatomy(project_name) + success, rootless_dir = anatomy.find_root_template_from_path(workdir) + filepath = "/".join([ + os.path.normpath(rootless_dir).replace("\\", "/"), + filename + ]) + + 
workfile_doc = new_workfile_info_doc( + filename, asset_id, task_name, [filepath] + ) + + session = OperationsSession() + session.create_entity(project_name, "workfile", workfile_doc) + session.commit() + return workfile_doc def refresh(self): # Refresh asset widget @@ -331,6 +403,7 @@ class Window(QtWidgets.QMainWindow): if self.assets_widget.refreshing: return + self._set_context_timer.stop() self._context_to_set, context = None, self._context_to_set if "asset" in context: asset_doc = get_asset_by_name( diff --git a/openpype/vendor/python/common/capture.py b/openpype/vendor/python/common/capture.py index 6b4c40a6e8..86c1c60e56 100644 --- a/openpype/vendor/python/common/capture.py +++ b/openpype/vendor/python/common/capture.py @@ -380,7 +380,8 @@ Viewport2Options = { "transparencyAlgorithm": 1, "transparencyQuality": 0.33, "useMaximumHardwareLights": True, - "vertexAnimationCache": 0 + "vertexAnimationCache": 0, + "renderDepthOfField": 0 } @@ -402,7 +403,7 @@ def apply_view(panel, **options): camera_options = options.get("camera_options", {}) _iteritems = getattr(camera_options, "iteritems", camera_options.items) for key, value in _iteritems: - cmds.setAttr("{0}.{1}".format(camera, key), value) + _safe_setAttr("{0}.{1}".format(camera, key), value) # Viewport options viewport_options = options.get("viewport_options", {}) @@ -416,7 +417,7 @@ def apply_view(panel, **options): ) for key, value in _iteritems(): attr = "hardwareRenderingGlobals.{0}".format(key) - cmds.setAttr(attr, value) + _safe_setAttr(attr, value) def parse_active_panel(): @@ -550,10 +551,10 @@ def apply_scene(**options): cmds.playbackOptions(maxTime=options["end_frame"]) if "width" in options: - cmds.setAttr("defaultResolution.width", options["width"]) + _safe_setAttr("defaultResolution.width", options["width"]) if "height" in options: - cmds.setAttr("defaultResolution.height", options["height"]) + _safe_setAttr("defaultResolution.height", options["height"]) if "compression" in options: cmds.optionVar( @@ -664,7 +665,10 @@ def _applied_camera_options(options, panel): _iteritems = getattr(options, "iteritems", options.items) for opt, value in _iteritems(): - cmds.setAttr(camera + "." + opt, value) + if cmds.getAttr(camera + "." + opt, lock=True): + continue + else: + _safe_setAttr(camera + "." + opt, value) try: yield @@ -672,7 +676,11 @@ def _applied_camera_options(options, panel): if old_options: _iteritems = getattr(old_options, "iteritems", old_options.items) for opt, value in _iteritems(): - cmds.setAttr(camera + "." + opt, value) + # + if cmds.getAttr(camera + "." + opt, lock=True): + continue + else: + _safe_setAttr(camera + "." + opt, value) @contextlib.contextmanager @@ -759,7 +767,7 @@ def _applied_viewport2_options(options): # Apply settings _iteritems = getattr(options, "iteritems", options.items) for opt, value in _iteritems(): - cmds.setAttr("hardwareRenderingGlobals." + opt, value) + _safe_setAttr("hardwareRenderingGlobals." + opt, value) try: yield @@ -767,7 +775,7 @@ def _applied_viewport2_options(options): # Restore previous settings _iteritems = getattr(original, "iteritems", original.items) for opt, value in _iteritems(): - cmds.setAttr("hardwareRenderingGlobals." + opt, value) + _safe_setAttr("hardwareRenderingGlobals." 
+ opt, value) @contextlib.contextmanager @@ -801,14 +809,14 @@ def _maintain_camera(panel, camera): else: state = dict((camera, cmds.getAttr(camera + ".rnd")) for camera in cmds.ls(type="camera")) - cmds.setAttr(camera + ".rnd", True) + _safe_setAttr(camera + ".rnd", True) try: yield finally: _iteritems = getattr(state, "iteritems", state.items) for camera, renderable in _iteritems(): - cmds.setAttr(camera + ".rnd", renderable) + _safe_setAttr(camera + ".rnd", renderable) @contextlib.contextmanager @@ -845,6 +853,18 @@ def _in_standalone(): return not hasattr(cmds, "about") or cmds.about(batch=True) +def _safe_setAttr(*args, **kwargs): + """Wrapper to handle failures when attribute is locked. + + Temporary hotfix until better approach (store value, unlock, set new, + return old, lock again) is implemented. + """ + try: + cmds.setAttr(*args, **kwargs) + except RuntimeError: + print("Cannot setAttr {}!".format(args)) + + # -------------------------------- # # Apply version specific settings diff --git a/openpype/version.py b/openpype/version.py index 3239b0e2a2..6ff5dfb7b5 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.12.1-nightly.4" +__version__ = "3.13.1-nightly.2" diff --git a/openpype/widgets/attribute_defs/files_widget.py b/openpype/widgets/attribute_defs/files_widget.py index 698a91a1a5..d29aa1b607 100644 --- a/openpype/widgets/attribute_defs/files_widget.py +++ b/openpype/widgets/attribute_defs/files_widget.py @@ -1,6 +1,7 @@ import os import collections import uuid +import json from Qt import QtWidgets, QtCore, QtGui @@ -26,6 +27,27 @@ IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7 EXT_ROLE = QtCore.Qt.UserRole + 8 +def convert_bytes_to_json(bytes_value): + if isinstance(bytes_value, QtCore.QByteArray): + # Raw data are already QByteArray and we don't have to load them + encoded_data = bytes_value + else: + encoded_data = QtCore.QByteArray.fromRawData(bytes_value) + stream = QtCore.QDataStream(encoded_data, QtCore.QIODevice.ReadOnly) + text = stream.readQString() + try: + return json.loads(text) + except Exception: + return None + + +def convert_data_to_bytes(data): + bytes_value = QtCore.QByteArray() + stream = QtCore.QDataStream(bytes_value, QtCore.QIODevice.WriteOnly) + stream.writeQString(json.dumps(data)) + return bytes_value + + class SupportLabel(QtWidgets.QLabel): pass @@ -33,7 +55,7 @@ class SupportLabel(QtWidgets.QLabel): class DropEmpty(QtWidgets.QWidget): _empty_extensions = "Any file" - def __init__(self, single_item, allow_sequences, parent): + def __init__(self, single_item, allow_sequences, extensions_label, parent): super(DropEmpty, self).__init__(parent) drop_label_widget = QtWidgets.QLabel("Drag & Drop files here", self) @@ -61,7 +83,19 @@ class DropEmpty(QtWidgets.QWidget): widget.setAlignment(QtCore.Qt.AlignCenter) widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + update_size_timer = QtCore.QTimer() + update_size_timer.setInterval(10) + update_size_timer.setSingleShot(True) + + update_size_timer.timeout.connect(self._on_update_size_timer) + + self._update_size_timer = update_size_timer + + if extensions_label and not extensions_label.startswith(" "): + extensions_label = " " + extensions_label + self._single_item = single_item + self._extensions_label = extensions_label self._allow_sequences = allow_sequences self._allowed_extensions = set() self._allow_folders = None @@ -114,22 +148,51 @@ class DropEmpty(QtWidgets.QWidget): items_label = "Single " if 
len(allowed_items) == 1: - allowed_items_label = allowed_items[0] + extensions_label = allowed_items[0] elif len(allowed_items) == 2: - allowed_items_label = " or ".join(allowed_items) + extensions_label = " or ".join(allowed_items) else: last_item = allowed_items.pop(-1) new_last_item = " or ".join(last_item, allowed_items.pop(-1)) allowed_items.append(new_last_item) - allowed_items_label = ", ".join(allowed_items) + extensions_label = ", ".join(allowed_items) + + allowed_items_label = extensions_label items_label += allowed_items_label + label_tooltip = None if self._allowed_extensions: items_label += " of\n{}".format( ", ".join(sorted(self._allowed_extensions)) ) + if self._extensions_label: + label_tooltip = items_label + items_label = self._extensions_label + + if self._items_label_widget.text() == items_label: + return + + self._items_label_widget.setToolTip(label_tooltip) self._items_label_widget.setText(items_label) + self._update_size_timer.start() + + def resizeEvent(self, event): + super(DropEmpty, self).resizeEvent(event) + self._update_size_timer.start() + + def _on_update_size_timer(self): + """Recalculate height of label with extensions. + + Dynamic QLabel with word wrap does not handle properly it's sizeHint + calculations on show. This way it is recalculated. It is good practice + to trigger this method with small offset using '_update_size_timer'. + """ + + width = self._items_label_widget.width() + height = self._items_label_widget.heightForWidth(width) + self._items_label_widget.setMinimumHeight(height) + self._items_label_widget.updateGeometry() def paintEvent(self, event): super(DropEmpty, self).paintEvent(event) @@ -162,6 +225,7 @@ class FilesModel(QtGui.QStandardItemModel): def __init__(self, single_item, allow_sequences): super(FilesModel, self).__init__() + self._id = str(uuid.uuid4()) self._single_item = single_item self._multivalue = False self._allow_sequences = allow_sequences @@ -171,6 +235,10 @@ class FilesModel(QtGui.QStandardItemModel): self._filenames_by_dirpath = collections.defaultdict(set) self._items_by_dirpath = collections.defaultdict(list) + @property + def id(self): + return self._id + def set_multivalue(self, multivalue): """Disable filtering.""" @@ -245,6 +313,66 @@ class FilesModel(QtGui.QStandardItemModel): return item_id, item + def mimeData(self, indexes): + item_ids = [ + index.data(ITEM_ID_ROLE) + for index in indexes + ] + + item_ids_data = convert_data_to_bytes(item_ids) + mime_data = super(FilesModel, self).mimeData(indexes) + mime_data.setData("files_widget/internal_move", item_ids_data) + + file_items = [] + for item_id in item_ids: + file_item = self.get_file_item_by_id(item_id) + if file_item: + file_items.append(file_item.to_dict()) + + full_item_data = convert_data_to_bytes({ + "items": file_items, + "id": self._id + }) + mime_data.setData("files_widget/full_data", full_item_data) + return mime_data + + def dropMimeData(self, mime_data, action, row, col, index): + item_ids = convert_bytes_to_json( + mime_data.data("files_widget/internal_move") + ) + if item_ids is None: + return False + + # Find matching item after which will be items moved + # - store item before moved items are removed + root = self.invisibleRootItem() + if row >= 0: + src_item = self.item(row) + else: + src_item_id = index.data(ITEM_ID_ROLE) + src_item = self._items_by_id.get(src_item_id) + + # Take out items that should be moved + items = [] + for item_id in item_ids: + item = self._items_by_id.get(item_id) + if item: + self.takeRow(item.row()) + 
items.append(item) + + # Skip if there are not items that can be moved + if not items: + return False + + # Calculate row where items should be inserted + if src_item: + src_row = src_item.row() + else: + src_row = root.rowCount() + + root.insertRow(src_row, items) + return True + class FilesProxyModel(QtCore.QSortFilterProxyModel): def __init__(self, *args, **kwargs): @@ -428,6 +556,9 @@ class FilesView(QtWidgets.QListView): QtWidgets.QAbstractItemView.ExtendedSelection ) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setAcceptDrops(True) + self.setDragEnabled(True) + self.setDragDropMode(self.InternalMove) remove_btn = InViewButton(self) pix_enabled = paint_image_with_color( @@ -529,11 +660,13 @@ class FilesView(QtWidgets.QListView): class FilesWidget(QtWidgets.QFrame): value_changed = QtCore.Signal() - def __init__(self, single_item, allow_sequences, parent): + def __init__(self, single_item, allow_sequences, extensions_label, parent): super(FilesWidget, self).__init__(parent) self.setAcceptDrops(True) - empty_widget = DropEmpty(single_item, allow_sequences, self) + empty_widget = DropEmpty( + single_item, allow_sequences, extensions_label, self + ) files_model = FilesModel(single_item, allow_sequences) files_proxy_model = FilesProxyModel() @@ -553,6 +686,7 @@ class FilesWidget(QtWidgets.QFrame): files_view.context_menu_requested.connect( self._on_context_menu_requested ) + self._in_set_value = False self._single_item = single_item self._multivalue = False @@ -637,8 +771,6 @@ class FilesWidget(QtWidgets.QFrame): ) self._widgets_by_id[item_id] = widget - self._files_proxy_model.sort(0) - if not self._in_set_value: self.value_changed.emit() @@ -743,12 +875,22 @@ class FilesWidget(QtWidgets.QFrame): event.setDropAction(QtCore.Qt.CopyAction) event.accept() + full_data_value = mime_data.data("files_widget/full_data") + if self._handle_full_data_drag(full_data_value): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + def dragLeaveEvent(self, event): event.accept() def dropEvent(self, event): + if self._multivalue: + return + mime_data = event.mimeData() - if not self._multivalue and mime_data.hasUrls(): + if mime_data.hasUrls(): + event.accept() + # event.setDropAction(QtCore.Qt.CopyAction) filepaths = [] for url in mime_data.urls(): filepath = url.toLocalFile() @@ -759,7 +901,58 @@ class FilesWidget(QtWidgets.QFrame): filepaths = self._files_proxy_model.filter_valid_files(filepaths) if filepaths: self._add_filepaths(filepaths) - event.accept() + + if self._handle_full_data_drop( + mime_data.data("files_widget/full_data") + ): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + super(FilesWidget, self).dropEvent(event) + + def _handle_full_data_drag(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + return True + + def _handle_full_data_drop(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + + for item in full_data["items"]: + filepaths = [ + os.path.join(item["directory"], filename) + for filename in item["filenames"] + ] + filepaths = self._files_proxy_model.filter_valid_files(filepaths) + if filepaths: + self._add_filepaths(filepaths) + + if self._copy_modifiers_enabled(): + return False + return True + + def _copy_modifiers_enabled(self): + if ( + 
QtWidgets.QApplication.keyboardModifiers() + & QtCore.Qt.ControlModifier + ): + return True + return False def _add_filepaths(self, filepaths): self._files_model.add_filepaths(filepaths) diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/widgets/attribute_defs/widgets.py index b6493b80a8..60ae952553 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/widgets/attribute_defs/widgets.py @@ -15,6 +15,7 @@ from openpype.lib.attribute_definitions import ( UISeparatorDef, UILabelDef ) +from openpype.tools.utils import CustomTextComboBox from openpype.widgets.nice_checkbox import NiceCheckbox from .files_widget import FilesWidget @@ -369,8 +370,12 @@ class BoolAttrWidget(_BaseAttrDefWidget): class EnumAttrWidget(_BaseAttrDefWidget): + def __init__(self, *args, **kwargs): + self._multivalue = False + super(EnumAttrWidget, self).__init__(*args, **kwargs) + def _ui_init(self): - input_widget = QtWidgets.QComboBox(self) + input_widget = CustomTextComboBox(self) combo_delegate = QtWidgets.QStyledItemDelegate(input_widget) input_widget.setItemDelegate(combo_delegate) @@ -394,6 +399,9 @@ class EnumAttrWidget(_BaseAttrDefWidget): def _on_value_change(self): new_value = self.current_value() + if self._multivalue: + self._multivalue = False + self._input_widget.set_custom_text(None) self.value_changed.emit(new_value, self.attr_def.id) def current_value(self): @@ -401,14 +409,23 @@ class EnumAttrWidget(_BaseAttrDefWidget): return self._input_widget.itemData(idx) def set_value(self, value, multivalue=False): + if multivalue: + set_value = set(value) + if len(set_value) == 1: + multivalue = False + value = tuple(set_value)[0] + if not multivalue: idx = self._input_widget.findData(value) cur_idx = self._input_widget.currentIndex() if idx != cur_idx and idx >= 0: self._input_widget.setCurrentIndex(idx) - else: - self._input_widget.lineEdit().setText("Multiselection") + custom_text = None + if multivalue: + custom_text = "< Multiselection >" + self._input_widget.set_custom_text(custom_text) + self._multivalue = multivalue class UnknownAttrWidget(_BaseAttrDefWidget): @@ -443,7 +460,10 @@ class UnknownAttrWidget(_BaseAttrDefWidget): class FileAttrWidget(_BaseAttrDefWidget): def _ui_init(self): input_widget = FilesWidget( - self.attr_def.single_item, self.attr_def.allow_sequences, self + self.attr_def.single_item, + self.attr_def.allow_sequences, + self.attr_def.extensions_label, + self ) if self.attr_def.tooltip: diff --git a/poetry.lock b/poetry.lock index 7221e191ff..919a352505 100644 --- a/poetry.lock +++ b/poetry.lock @@ -92,7 +92,14 @@ version = "1.4.4" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "main" optional = false -python-versions = "*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +develop = false + +[package.source] +type = "git" +url = "https://github.com/ActiveState/appdirs.git" +reference = "master" +resolved_reference = "193a2cbba58cce2542882fcedd0e49f6763672ed" [[package]] name = "arrow" @@ -221,7 +228,7 @@ python-versions = "~=3.7" [[package]] name = "certifi" -version = "2022.5.18.1" +version = "2022.6.15" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false @@ -456,19 +463,20 @@ python-versions = ">=3.7" [[package]] name = "ftrack-python-api" -version = "2.0.0" +version = "2.3.3" description = "Python API for ftrack." 
category = "main" optional = false -python-versions = ">=2.7.9, <4.0" +python-versions = ">=2.7.9, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, < 3.10" [package.dependencies] +appdirs = ">=1,<2" arrow = ">=0.4.4,<1" -clique = ">=1.2.0,<2" +clique = "1.6.1" future = ">=0.16.0,<1" pyparsing = ">=2.0,<3" requests = ">=2,<3" -six = ">=1,<2" +six = ">=1.13.0,<2" termcolor = ">=1.1.0,<2" websocket-client = ">=0.40.0,<1" @@ -1375,6 +1383,21 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +[[package]] +name = "shotgun-api3" +version = "3.3.3" +description = "Shotgun Python API" +category = "main" +optional = false +python-versions = "*" +develop = false + +[package.source] +type = "git" +url = "https://github.com/shotgunsoftware/python-api.git" +reference = "v3.3.3" +resolved_reference = "b9f066c0edbea6e0733242e18f32f75489064840" + [[package]] name = "six" version = "1.16.0" @@ -1812,10 +1835,7 @@ ansicon = [ {file = "ansicon-1.89.0-py2.py3-none-any.whl", hash = "sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec"}, {file = "ansicon-1.89.0.tar.gz", hash = "sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1"}, ] -appdirs = [ - {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, - {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, -] +appdirs = [] arrow = [ {file = "arrow-0.17.0-py2.py3-none-any.whl", hash = "sha256:e098abbd9af3665aea81bdd6c869e93af4feb078e98468dd351c383af187aac5"}, {file = "arrow-0.17.0.tar.gz", hash = "sha256:ff08d10cda1d36c68657d6ad20d74fbea493d980f8b2d45344e00d6ed2bf6ed4"}, @@ -1870,8 +1890,8 @@ cachetools = [ {file = "cachetools-5.2.0.tar.gz", hash = "sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757"}, ] certifi = [ - {file = "certifi-2022.5.18.1-py3-none-any.whl", hash = "sha256:f1d53542ee8cbedbe2118b5686372fb33c297fcd6379b050cca0ef13a597382a"}, - {file = "certifi-2022.5.18.1.tar.gz", hash = "sha256:9c5705e395cd70084351dd8ad5c41e65655e08ce46f2ec9cf6c2c08390f71eb7"}, + {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, + {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"}, ] cffi = [ {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, @@ -2137,10 +2157,7 @@ frozenlist = [ {file = "frozenlist-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:772965f773757a6026dea111a15e6e2678fbd6216180f82a48a40b27de1ee2ab"}, {file = "frozenlist-1.3.0.tar.gz", hash = "sha256:ce6f2ba0edb7b0c1d8976565298ad2deba6f8064d2bebb6ffce2ca896eb35b0b"}, ] -ftrack-python-api = [ - {file = "ftrack-python-api-2.0.0.tar.gz", hash = "sha256:dd6f02c31daf5a10078196dc9eac4671e4297c762fbbf4df98de668ac12281d9"}, - {file = "ftrack_python_api-2.0.0-py2.py3-none-any.whl", hash = "sha256:d0df0f2df4b53947272f95e179ec98b477ee425bf4217b37bb59030ad989771e"}, -] +ftrack-python-api = [] future = [ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, ] @@ -2820,6 +2837,7 @@ semver = [ {file = "semver-2.13.0-py2.py3-none-any.whl", hash = "sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4"}, {file = "semver-2.13.0.tar.gz", hash = 
"sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f"}, ] +shotgun-api3 = [] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, diff --git a/pyproject.toml b/pyproject.toml index f5bd7cc946..9cbdc295ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.12.1-nightly.4" # OpenPype +version = "3.13.1-nightly.2" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" @@ -33,13 +33,14 @@ aiohttp = "^3.7" aiohttp_json_rpc = "*" # TVPaint server acre = { git = "https://github.com/pypeclub/acre.git" } opentimelineio = { version = "0.14.0.dev1", source = "openpype" } -appdirs = "^1.4.3" +appdirs = { git = "https://github.com/ActiveState/appdirs.git", branch = "master" } blessed = "^1.17" # openpype terminal formatting coolname = "*" clique = "1.6.*" Click = "^7" dnspython = "^2.1.0" -ftrack-python-api = "2.0.*" +ftrack-python-api = "^2.3.3" +shotgun_api3 = {git = "https://github.com/shotgunsoftware/python-api.git", rev = "v3.3.3"} gazu = "^0.8.28" google-api-python-client = "^1.12.8" # sync server google support (should be separate?) jsonschema = "^2.6.0" @@ -135,7 +136,7 @@ hash = "b9950f5d2fa3720b52b8be55bacf5f56d33f9e029d38ee86534995f3d8d253d2" [openpype.thirdparty.oiio.linux] url = "https://distribute.openpype.io/thirdparty/oiio_tools-2.2.20-linux-centos7.tgz" -hash = "be1abf8a50e9da5913298447421af0a17829d83ed6252ae1d40da7fa36a78787" +hash = "3894dec7e4e521463891a869586850e8605f5fd604858b674c87323bf33e273d" [openpype.thirdparty.oiio.darwin] url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-darwin.tgz" diff --git a/setup.py b/setup.py index 8b5a545c16..eab0187983 100644 --- a/setup.py +++ b/setup.py @@ -152,7 +152,7 @@ build_exe_options = dict( ) bdist_mac_options = dict( - bundle_name="OpenPype", + bundle_name=f"OpenPype {__version__}", iconfile=mac_icon_path ) diff --git a/start.py b/start.py index ace33ab92a..5cdffafb6e 100644 --- a/start.py +++ b/start.py @@ -103,6 +103,9 @@ import site import distutils.spawn from pathlib import Path + +silent_mode = False + # OPENPYPE_ROOT is variable pointing to build (or code) directory # WARNING `OPENPYPE_ROOT` must be defined before igniter import # - igniter changes cwd which cause that filepath of this script won't lead @@ -138,40 +141,44 @@ if sys.__stdout__: term = blessed.Terminal() def _print(message: str): + if silent_mode: + return if message.startswith("!!! "): - print("{}{}".format(term.orangered2("!!! "), message[4:])) + print(f'{term.orangered2("!!! ")}{message[4:]}') return if message.startswith(">>> "): - print("{}{}".format(term.aquamarine3(">>> "), message[4:])) + print(f'{term.aquamarine3(">>> ")}{message[4:]}') return if message.startswith("--- "): - print("{}{}".format(term.darkolivegreen3("--- "), message[4:])) + print(f'{term.darkolivegreen3("--- ")}{message[4:]}') return if message.startswith("*** "): - print("{}{}".format(term.gold("*** "), message[4:])) + print(f'{term.gold("*** ")}{message[4:]}') return if message.startswith(" - "): - print("{}{}".format(term.wheat(" - "), message[4:])) + print(f'{term.wheat(" - ")}{message[4:]}') return if message.startswith(" . "): - print("{}{}".format(term.tan(" . "), message[4:])) + print(f'{term.tan(" . 
")}{message[4:]}') return if message.startswith(" - "): - print("{}{}".format(term.seagreen3(" - "), message[7:])) + print(f'{term.seagreen3(" - ")}{message[7:]}') return if message.startswith(" ! "): - print("{}{}".format(term.goldenrod(" ! "), message[7:])) + print(f'{term.goldenrod(" ! ")}{message[7:]}') return if message.startswith(" * "): - print("{}{}".format(term.aquamarine1(" * "), message[7:])) + print(f'{term.aquamarine1(" * ")}{message[7:]}') return if message.startswith(" "): - print("{}{}".format(term.darkseagreen3(" "), message[4:])) + print(f'{term.darkseagreen3(" ")}{message[4:]}') return print(message) else: def _print(message: str): + if silent_mode: + return print(message) @@ -187,9 +194,8 @@ else: if "--headless" in sys.argv: os.environ["OPENPYPE_HEADLESS_MODE"] = "1" sys.argv.remove("--headless") -else: - if os.getenv("OPENPYPE_HEADLESS_MODE") != "1": - os.environ.pop("OPENPYPE_HEADLESS_MODE", None) +elif os.getenv("OPENPYPE_HEADLESS_MODE") != "1": + os.environ.pop("OPENPYPE_HEADLESS_MODE", None) # Enabled logging debug mode when "--debug" is passed if "--verbose" in sys.argv: @@ -203,8 +209,8 @@ if "--verbose" in sys.argv: value = sys.argv.pop(idx) else: raise RuntimeError(( - "Expect value after \"--verbose\" argument. {}" - ).format(expected_values)) + f"Expect value after \"--verbose\" argument. {expected_values}" + )) log_level = None low_value = value.lower() @@ -225,8 +231,9 @@ if "--verbose" in sys.argv: if log_level is None: raise RuntimeError(( - "Unexpected value after \"--verbose\" argument \"{}\". {}" - ).format(value, expected_values)) + "Unexpected value after \"--verbose\" " + f"argument \"{value}\". {expected_values}" + )) os.environ["OPENPYPE_LOG_LEVEL"] = str(log_level) @@ -242,13 +249,14 @@ from igniter.tools import ( get_openpype_global_settings, get_openpype_path_from_settings, validate_mongo_connection, - OpenPypeVersionNotFound + OpenPypeVersionNotFound, + OpenPypeVersionIncompatible ) # noqa from igniter.bootstrap_repos import OpenPypeVersion # noqa: E402 bootstrap = BootstrapRepos() silent_commands = {"run", "igniter", "standalonepublisher", - "extractenvironments"} + "extractenvironments", "version"} def list_versions(openpype_versions: list, local_version=None) -> None: @@ -270,8 +278,11 @@ def set_openpype_global_environments() -> None: general_env = get_general_environments() + # first resolve general environment because merge doesn't expect + # values to be list. 
+ # TODO: switch to OpenPype environment functions merged_env = acre.merge( - acre.parse(general_env), + acre.compute(acre.parse(general_env), cleanup=False), dict(os.environ) ) env = acre.compute( @@ -333,34 +344,33 @@ def run_disk_mapping_commands(settings): destination = destination.rstrip('/') source = source.rstrip('/') - if low_platform == "windows": - args = ["subst", destination, source] - elif low_platform == "darwin": - scr = "do shell script \"ln -s {} {}\" with administrator privileges".format(source, destination) # noqa: E501 + if low_platform == "darwin": + scr = f'do shell script "ln -s {source} {destination}" with administrator privileges' # noqa + args = ["osascript", "-e", scr] + elif low_platform == "windows": + args = ["subst", destination, source] else: args = ["sudo", "ln", "-s", source, destination] - _print("disk mapping args:: {}".format(args)) + _print(f"*** disk mapping arguments: {args}") try: if not os.path.exists(destination): output = subprocess.Popen(args) if output.returncode and output.returncode != 0: - exc_msg = "Executing was not successful: \"{}\"".format( - args) + exc_msg = f'Executing was not successful: "{args}"' raise RuntimeError(exc_msg) except TypeError as exc: - _print("Error {} in mapping drive {}, {}".format(str(exc), - source, - destination)) + _print( + f"Error {str(exc)} in mapping drive {source}, {destination}") raise def set_avalon_environments(): """Set avalon specific environments. - These are non modifiable environments for avalon workflow that must be set + These are non-modifiable environments for avalon workflow that must be set before avalon module is imported because avalon works with globals set with environment variables. """ @@ -505,7 +515,7 @@ def _process_arguments() -> tuple: ) if m and m.group('version'): use_version = m.group('version') - _print(">>> Requested version [ {} ]".format(use_version)) + _print(f">>> Requested version [ {use_version} ]") if "+staging" in use_version: use_staging = True break @@ -611,8 +621,8 @@ def _determine_mongodb() -> str: try: openpype_mongo = bootstrap.secure_registry.get_item( "openPypeMongo") - except ValueError: - raise RuntimeError("Missing MongoDB url") + except ValueError as e: + raise RuntimeError("Missing MongoDB url") from e return openpype_mongo @@ -684,40 +694,47 @@ def _find_frozen_openpype(use_version: str = None, # Specific version is defined if use_version.lower() == "latest": # Version says to use latest version - _print("Finding latest version defined by use version") + _print(">>> Finding latest version defined by use version") openpype_version = bootstrap.find_latest_openpype_version( - use_staging + use_staging, compatible_with=installed_version ) else: - _print("Finding specified version \"{}\"".format(use_version)) + _print(f">>> Finding specified version \"{use_version}\"") openpype_version = bootstrap.find_openpype_version( use_version, use_staging ) if openpype_version is None: raise OpenPypeVersionNotFound( - "Requested version \"{}\" was not found.".format( - use_version - ) + f"Requested version \"{use_version}\" was not found." 
) + if not openpype_version.is_compatible(installed_version): + raise OpenPypeVersionIncompatible(( + f"Requested version \"{use_version}\" is not compatible " + f"with installed version \"{installed_version}\"" + )) + elif studio_version is not None: # Studio has defined a version to use - _print("Finding studio version \"{}\"".format(studio_version)) + _print(f">>> Finding studio version \"{studio_version}\"") openpype_version = bootstrap.find_openpype_version( - studio_version, use_staging + studio_version, use_staging, compatible_with=installed_version ) if openpype_version is None: raise OpenPypeVersionNotFound(( - "Requested OpenPype version \"{}\" defined by settings" + "Requested OpenPype version " + f"\"{studio_version}\" defined by settings" " was not found." - ).format(studio_version)) + )) else: # Default behavior to use latest version - _print("Finding latest version") + _print(( + ">>> Finding latest version compatible " + f"with [ {installed_version} ]")) openpype_version = bootstrap.find_latest_openpype_version( - use_staging + use_staging, compatible_with=installed_version ) if openpype_version is None: if use_staging: @@ -798,7 +815,7 @@ def _bootstrap_from_code(use_version, use_staging): if getattr(sys, 'frozen', False): local_version = bootstrap.get_version(Path(_openpype_root)) - switch_str = f" - will switch to {use_version}" if use_version else "" + switch_str = f" - will switch to {use_version}" if use_version and use_version != local_version else "" # noqa _print(f" - booting version: {local_version}{switch_str}") assert local_version else: @@ -813,11 +830,8 @@ def _bootstrap_from_code(use_version, use_staging): use_version, use_staging ) if version_to_use is None: - raise OpenPypeVersionNotFound( - "Requested version \"{}\" was not found.".format( - use_version - ) - ) + raise OpenPypeVersionIncompatible( + f"Requested version \"{use_version}\" was not found.") else: # Staging version should be used version_to_use = bootstrap.find_latest_openpype_version( @@ -903,7 +917,7 @@ def _boot_validate_versions(use_version, local_version): use_version, openpype_versions ) valid, message = bootstrap.validate_openpype_version(version_path) - _print("{}{}".format(">>> " if valid else "!!! ", message)) + _print(f'{">>> " if valid else "!!! 
"}{message}') def _boot_print_versions(use_staging, local_version, openpype_root): @@ -914,13 +928,24 @@ def _boot_print_versions(use_staging, local_version, openpype_root): _print("--- This will list only staging versions detected.") _print(" To see other version, omit --use-staging argument.") - openpype_versions = bootstrap.find_openpype(include_zips=True, - staging=use_staging) if getattr(sys, 'frozen', False): local_version = bootstrap.get_version(Path(openpype_root)) else: local_version = OpenPypeVersion.get_installed_version_str() + compatible_with = OpenPypeVersion(version=local_version) + if "--all" in sys.argv: + compatible_with = None + _print("--- Showing all version (even those not compatible).") + else: + _print(("--- Showing only compatible versions " + f"with [ {compatible_with.major}.{compatible_with.minor} ]")) + + openpype_versions = bootstrap.find_openpype( + include_zips=True, + staging=use_staging, + compatible_with=compatible_with) + list_versions(openpype_versions, local_version) @@ -937,6 +962,9 @@ def _boot_handle_missing_version(local_version, use_staging, message): def boot(): """Bootstrap OpenPype.""" + global silent_mode + if any(arg in silent_commands for arg in sys.argv): + silent_mode = True # ------------------------------------------------------------------------ # Set environment to OpenPype root path @@ -1040,7 +1068,7 @@ def boot(): if not result[0]: _print(f"!!! Invalid version: {result[1]}") sys.exit(1) - _print(f"--- version is valid") + _print("--- version is valid") else: try: version_path = _bootstrap_from_code(use_version, use_staging) @@ -1113,8 +1141,12 @@ def boot(): def get_info(use_staging=None) -> list: """Print additional information to console.""" - from openpype.lib.mongo import get_default_components - from openpype.lib.log import PypeLogger + from openpype.client.mongo import get_default_components + try: + from openpype.lib.log import Logger + except ImportError: + # Backwards compatibility for 'PypeLogger' + from openpype.lib.log import PypeLogger as Logger components = get_default_components() @@ -1141,14 +1173,14 @@ def get_info(use_staging=None) -> list: os.environ.get("MUSTER_REST_URL"))) # Reinitialize - PypeLogger.initialize() + Logger.initialize() mongo_components = get_default_components() if mongo_components["host"]: inf.append(("Logging to MongoDB", mongo_components["host"])) inf.append((" - port", mongo_components["port"] or "")) - inf.append((" - database", PypeLogger.log_database_name)) - inf.append((" - collection", PypeLogger.log_collection_name)) + inf.append((" - database", Logger.log_database_name)) + inf.append((" - collection", Logger.log_collection_name)) inf.append((" - user", mongo_components["username"] or "")) if mongo_components["auth_db"]: inf.append((" - auth source", mongo_components["auth_db"])) @@ -1157,8 +1189,7 @@ def get_info(use_staging=None) -> list: formatted = [] for info in inf: padding = (maximum - len(info[0])) + 1 - formatted.append( - "... {}:{}[ {} ]".format(info[0], " " * padding, info[1])) + formatted.append(f'... 
{info[0]}:{" " * padding}[ {info[1]} ]') return formatted diff --git a/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py b/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py new file mode 100644 index 0000000000..c882e0f9b2 --- /dev/null +++ b/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py @@ -0,0 +1,64 @@ +import logging + +from tests.lib.assert_classes import DBAssert +from tests.integration.hosts.aftereffects.lib import AfterEffectsTestClass + +log = logging.getLogger("test_publish_in_aftereffects") + + +class TestPublishInAfterEffects(AfterEffectsTestClass): + """Basic test case for publishing in AfterEffects + + Should publish 5 frames + """ + PERSIST = True + + TEST_FILES = [ + ("12aSDRjthn4X3yw83gz_0FZJcRRiVDEYT", + "test_aftereffects_publish_multiframe.zip", + "") + ] + + APP = "aftereffects" + APP_VARIANT = "" + + APP_NAME = "{}/{}".format(APP, APP_VARIANT) + + TIMEOUT = 120 # publish timeout + + def test_db_asserts(self, dbcon, publish_finished): + """Host and input data dependent expected results in DB.""" + print("test_db_asserts") + failures = [] + + failures.append(DBAssert.count_of_types(dbcon, "version", 2)) + + failures.append( + DBAssert.count_of_types(dbcon, "version", 0, name={"$ne": 1})) + + failures.append( + DBAssert.count_of_types(dbcon, "subset", 1, + name="imageMainBackgroundcopy")) + + failures.append( + DBAssert.count_of_types(dbcon, "subset", 1, + name="workfileTest_task")) + + failures.append( + DBAssert.count_of_types(dbcon, "subset", 1, + name="reviewTesttask")) + + failures.append( + DBAssert.count_of_types(dbcon, "representation", 4)) + + additional_args = {"context.subset": "renderTestTaskDefault", + "context.ext": "png"} + failures.append( + DBAssert.count_of_types(dbcon, "representation", 1, + additional_args=additional_args)) + + assert not any(failures) + + +if __name__ == "__main__": + test_case = TestPublishInAfterEffects() diff --git a/tests/lib/testing_classes.py b/tests/lib/testing_classes.py index f991f02227..2b4d7deb48 100644 --- a/tests/lib/testing_classes.py +++ b/tests/lib/testing_classes.py @@ -314,30 +314,22 @@ class PublishTest(ModuleUnitTest): Compares only presence, not size nor content! 
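        Published and expected trees are globbed recursively and compared
        as sets of relative paths.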
""" - published_dir_base = download_test_data - published_dir = os.path.join(output_folder_url, - self.PROJECT, - self.ASSET, - self.TASK, - "**") - expected_dir_base = os.path.join(published_dir_base, + published_dir_base = output_folder_url + expected_dir_base = os.path.join(download_test_data, "expected") - expected_dir = os.path.join(expected_dir_base, - self.PROJECT, - self.ASSET, - self.TASK, - "**") - print("Comparing published:'{}' : expected:'{}'".format(published_dir, - expected_dir)) - published = set(f.replace(published_dir_base, '') for f in - glob.glob(published_dir, recursive=True) if - f != published_dir_base and os.path.exists(f)) - expected = set(f.replace(expected_dir_base, '') for f in - glob.glob(expected_dir, recursive=True) if - f != expected_dir_base and os.path.exists(f)) - not_matched = expected.difference(published) - assert not not_matched, "Missing {} files".format(not_matched) + print("Comparing published:'{}' : expected:'{}'".format( + published_dir_base, expected_dir_base)) + published = set(f.replace(published_dir_base, '') for f in + glob.glob(published_dir_base + "\\**", recursive=True) + if f != published_dir_base and os.path.exists(f)) + expected = set(f.replace(expected_dir_base, '') for f in + glob.glob(expected_dir_base + "\\**", recursive=True) + if f != expected_dir_base and os.path.exists(f)) + + not_matched = expected.symmetric_difference(published) + assert not not_matched, "Missing {} files".format( + "\n".join(sorted(not_matched))) class HostFixtures(PublishTest): diff --git a/tools/build.ps1 b/tools/build.ps1 index ff28544954..195b2dc75e 100644 --- a/tools/build.ps1 +++ b/tools/build.ps1 @@ -28,6 +28,13 @@ if($arguments -eq "--no-submodule-update") { $disable_submodule_update=$true } +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + function Start-Progress { param([ScriptBlock]$code) $scroll = "/-\|/-\|" @@ -72,14 +79,18 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" - Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray - Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White + Write-Color -Text " Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White Exit-WithCode 1 } } +function Install-Poetry() { + Write-Color -Text ">>> ", "Installing Poetry ... " -Color Green, Gray + $env:POETRY_HOME="$openpype_root\.poetry" + (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/install-poetry.py -UseBasicParsing).Content | python - +} + $art = @" . . .. . .. @@ -103,10 +114,6 @@ Write-Host $art -ForegroundColor DarkGreen # Enable if PS 7.x is needed. 
 # Show-PSWarning

-$current_dir = Get-Location
-$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
-$openpype_root = (Get-Item $script_dir).parent.FullName
-
 $env:_INSIDE_OPENPYPE_TOOL = "1"

 if (-not (Test-Path 'env:POETRY_HOME')) {
@@ -119,8 +126,7 @@
 $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py"
 $result = [regex]::Matches($version_file, '__version__ = "(?<version>\d+\.\d+.\d+.*)"')
 $openpype_version = $result[0].Groups['version'].Value
 if (-not $openpype_version) {
-    Write-Host "!!! " -ForegroundColor yellow -NoNewline
-    Write-Host "Cannot determine OpenPype version."
+    Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray
     Exit-WithCode 1
 }

@@ -129,75 +135,62 @@ if (-not (Test-Path -PathType Container -Path "$($openpype_root)\build")) {
     New-Item -ItemType Directory -Force -Path "$($openpype_root)\build"
 }

-Write-Host "--- " -NoNewline -ForegroundColor yellow
-Write-Host "Cleaning build directory ..."
+Write-Color -Text "--- ", "Cleaning build directory ..." -Color Yellow, Gray
 try {
     Remove-Item -Recurse -Force "$($openpype_root)\build\*"
 }
 catch {
-    Write-Host "!!! " -NoNewline -ForegroundColor Red
-    Write-Host "Cannot clean build directory, possibly because process is using it."
-    Write-Host $_.Exception.Message
+    Write-Color -Text "!!! ", "Cannot clean build directory, possibly because process is using it." -Color Red, Gray
+    Write-Color -Text $_.Exception.Message -Color Red
     Exit-WithCode 1
 }

 if (-not $disable_submodule_update) {
-    Write-Host ">>> " -NoNewLine -ForegroundColor green
-    Write-Host "Making sure submodules are up-to-date ..."
-    git submodule update --init --recursive
+    Write-Color -Text ">>> ", "Making sure submodules are up-to-date ..." -Color Green, Gray
+    & git submodule update --init --recursive
 } else {
-    Write-Host "*** " -NoNewLine -ForegroundColor yellow
-    Write-Host "Not updating submodules ..."
+    Write-Color -Text "*** ", "Not updating submodules ..." -Color Yellow, Gray
 }

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "OpenPype [ " -NoNewline -ForegroundColor white
-Write-host $openpype_version -NoNewline -ForegroundColor green
-Write-Host " ]" -ForegroundColor white
+Write-Color -Text ">>> ", "OpenPype [ ", $openpype_version, " ]" -Color Green, White, Cyan, White

-Write-Host ">>> " -NoNewline -ForegroundColor Green
-Write-Host "Reading Poetry ... " -NoNewline
+Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline
 if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) {
-    Write-Host "NOT FOUND" -ForegroundColor Yellow
-    Write-Host "*** " -NoNewline -ForegroundColor Yellow
-    Write-Host "We need to install Poetry create virtual env first ..."
+    Write-Color -Text "NOT FOUND" -Color Yellow
+    Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." -Color Yellow, Gray
     & "$openpype_root\tools\create_env.ps1"
 } else {
-    Write-Host "OK" -ForegroundColor Green
+    Write-Color -Text "OK" -Color Green
 }

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "Cleaning cache files ... " -NoNewline
+Write-Color -Text ">>> ", "Cleaning cache files ... 
" -Color Green, Gray -NoNewline Get-ChildItem $openpype_root -Filter "*.pyc" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force Get-ChildItem $openpype_root -Filter "*.pyo" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force Get-ChildItem $openpype_root -Filter "__pycache__" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force -Recurse -Write-Host "OK" -ForegroundColor green +Write-Color -Text "OK" -Color green -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Building OpenPype ..." +Write-Color -Text ">>> ", "Building OpenPype ..." -Color Green, White $startTime = [int][double]::Parse((Get-Date -UFormat %s)) $out = & "$($env:POETRY_HOME)\bin\poetry" run python setup.py build 2>&1 Set-Content -Path "$($openpype_root)\build\build.log" -Value $out if ($LASTEXITCODE -ne 0) { - Write-Host "------------------------------------------" -ForegroundColor Red + Write-Color -Text "------------------------------------------" -Color Red Get-Content "$($openpype_root)\build\build.log" - Write-Host "------------------------------------------" -ForegroundColor Red - Write-Host "!!! " -NoNewLine -ForegroundColor Red - Write-Host "Build failed. Check the log: " -NoNewline - Write-Host ".\build\build.log" -ForegroundColor Yellow + Write-Color -Text "------------------------------------------" -Color Yellow + Write-Color -Text "!!! ", "Build failed. Check the log: ", ".\build\build.log" -Color Red, Yellow, White Exit-WithCode $LASTEXITCODE } Set-Content -Path "$($openpype_root)\build\build.log" -Value $out & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\tools\build_dependencies.py" -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "restoring current directory" +Write-Color -Text ">>> ", "Restoring current directory" -Color Green, Gray Set-Location -Path $current_dir $endTime = [int][double]::Parse((Get-Date -UFormat %s)) -Write-Host "*** " -NoNewline -ForegroundColor Cyan -Write-Host "All done in $($endTime - $startTime) secs. You will find OpenPype and build log in " -NoNewLine -Write-Host "'.\build'" -NoNewline -ForegroundColor Green -Write-Host " directory." +try +{ + New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype build complete!", "All done in $( $endTime - $startTime ) secs. You will find OpenPype and build log in build directory." +} catch {} +Write-Color -Text "*** ", "All done in ", $($endTime - $startTime), " secs. You will find OpenPype and build log in ", "'.\build'", " directory." 
-Color Green, Gray, White, Gray, White, Gray diff --git a/tools/build.sh b/tools/build.sh index 79fb748cd5..fa2c580648 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -193,15 +193,15 @@ if [ "$disable_submodule_update" == 1 ]; then if [[ "$OSTYPE" == "darwin"* ]]; then # fix code signing issue - codesign --remove-signature "$openpype_root/build/OpenPype.app/Contents/MacOS/lib/Python" + codesign --remove-signature "$openpype_root/build/OpenPype $openpype_version.app/Contents/MacOS/lib/Python" if command -v create-dmg > /dev/null 2>&1; then create-dmg \ - --volname "OpenPype Installer" \ + --volname "OpenPype $openpype_version Installer" \ --window-pos 200 120 \ --window-size 600 300 \ --app-drop-link 100 50 \ - "$openpype_root/build/OpenPype-Installer.dmg" \ - "$openpype_root/build/OpenPype.app" + "$openpype_root/build/OpenPype-Installer-$openpype_version.dmg" \ + "$openpype_root/build/OpenPype $openpype_version.app" else echo -e "${BIYellow}!!!${RST} ${BIWhite}create-dmg${RST} command is not available." fi diff --git a/tools/build_dependencies.py b/tools/build_dependencies.py index d3566dd289..d186ead881 100644 --- a/tools/build_dependencies.py +++ b/tools/build_dependencies.py @@ -29,6 +29,7 @@ import shutil import blessed import enlighten import time +import re term = blessed.Terminal() @@ -52,7 +53,7 @@ def _print(msg: str, type: int = 0) -> None: else: header = term.darkolivegreen3("--- ") - print("{}{}".format(header, msg)) + print(f"{header}{msg}") def count_folders(path: Path) -> int: @@ -95,16 +96,22 @@ assert site_pkg, "No venv site-packages are found." _print(f"Working with: {site_pkg}", 2) openpype_root = Path(os.path.dirname(__file__)).parent +version = {} +with open(openpype_root / "openpype" / "version.py") as fp: + exec(fp.read(), version) + +version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) +openpype_version = version_match[1] # create full path if platform.system().lower() == "darwin": build_dir = openpype_root.joinpath( "build", - "OpenPype.app", + f"OpenPype {openpype_version}.app", "Contents", "MacOS") else: - build_subdir = "exe.{}-{}".format(get_platform(), sys.version[0:3]) + build_subdir = f"exe.{get_platform()}-{sys.version[:3]}" build_dir = openpype_root / "build" / build_subdir _print(f"Using build at {build_dir}", 2) diff --git a/tools/build_win_installer.ps1 b/tools/build_win_installer.ps1 index 49fa803742..b9d1ca2d3f 100644 --- a/tools/build_win_installer.ps1 +++ b/tools/build_win_installer.ps1 @@ -11,6 +11,12 @@ PS> .\build_win_installer.ps1 #> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" function Start-Progress { param([ScriptBlock]$code) @@ -44,7 +50,6 @@ function Start-Progress { #> } - function Exit-WithCode($exitcode) { # Only exit this host process if it's a child of another PowerShell parent process... $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId @@ -56,10 +61,8 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. 
$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)"
-        Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray
-        Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White
+        Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White
+        Write-Color -Text "    Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White
         Exit-WithCode 1
     }
 }
@@ -87,9 +90,6 @@ Write-Host $art -ForegroundColor DarkGreen

 # Enable if PS 7.x is needed.
 # Show-PSWarning

-$current_dir = Get-Location
-$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
-$openpype_root = (Get-Item $script_dir).parent.FullName

 Set-Location -Path $openpype_root

@@ -97,16 +97,15 @@
 $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py"
 $result = [regex]::Matches($version_file, '__version__ = "(?<version>\d+\.\d+.\d+.*)"')
 $openpype_version = $result[0].Groups['version'].Value
 if (-not $openpype_version) {
-    Write-Host "!!! " -ForegroundColor yellow -NoNewline
-    Write-Host "Cannot determine OpenPype version."
+    Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray
     Exit-WithCode 1
 }
+
 $env:BUILD_VERSION = $openpype_version
 iscc
-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "Detecting host Python ... " -NoNewline
+Write-Color ">>> ", "Detecting host Python ... " -Color Green, White -NoNewline
 $python = "python"
 if (Get-Command "pyenv" -ErrorAction SilentlyContinue) {
     $pyenv_python = & pyenv which python
@@ -115,7 +114,7 @@ if (Get-Command "pyenv" -ErrorAction SilentlyContinue) {
     }
 }
 if (-not (Get-Command $python -ErrorAction SilentlyContinue)) {
-    Write-Host "!!! Python not detected" -ForegroundColor red
+    Write-Color "!!! ", "Python not detected" -Color Red, Yellow
     Set-Location -Path $current_dir
     Exit-WithCode 1
 }
@@ -128,7 +127,7 @@ $p = & $python -c $version_command
 $env:PYTHON_VERSION = $p
 $m = $p -match '(\d+)\.(\d+)'
 if(-not $m) {
-    Write-Host "!!! Cannot determine version" -ForegroundColor red
+    Write-Color "!!! ", "Cannot determine version" -Color Red, Yellow
     Set-Location -Path $current_dir
     Exit-WithCode 1
 }
@@ -145,8 +144,7 @@ if (($matches[1] -lt 3) -or ($matches[2] -lt 7)) {
     Write-Host "OK [ $p ]" -ForegroundColor green
 }

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "Creating OpenPype installer ... " -ForegroundColor white
+Write-Color -Text ">>> ", "Creating OpenPype installer ... " -Color Green, White

 $build_dir_command = @"
 import sys
@@ -155,24 +153,25 @@ print('exe.{}-{}'.format(get_platform(), sys.version[0:3]))
 "@

 $build_dir = & $python -c $build_dir_command
-Write-Host "Build directory ... ${build_dir}" -ForegroundColor white
+Write-Color -Text "--- ", "Build directory ", "${build_dir}" -Color Green, Gray, White
 $env:BUILD_DIR = $build_dir

-if (Get-Command iscc -errorAction SilentlyContinue -ErrorVariable ProcessError)
-{
-    iscc "$openpype_root\inno_setup.iss"
-}else {
-    Write-Host "!!! Cannot find Inno Setup command" -ForegroundColor red
-    Write-Host "!!! You can download it at https://jrsoftware.org/" -ForegroundColor red
+if (-not (Get-Command iscc -errorAction SilentlyContinue -ErrorVariable ProcessError)) {
+    Write-Color -Text "!!! ", "Cannot find Inno Setup command" -Color Red, Yellow
+    Write-Color "!!! 
", "You can download it at https://jrsoftware.org/" -Color Red, White
     Exit-WithCode 1
 }
+& iscc "$openpype_root\inno_setup.iss"

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "restoring current directory"
+if ($LASTEXITCODE -ne 0) {
+    Write-Color -Text "!!! ", "Creating installer failed." -Color Red, Yellow
+    Exit-WithCode 1
+}
+
+Write-Color -Text ">>> ", "Restoring current directory" -Color Green, Gray
 Set-Location -Path $current_dir
-
-Write-Host "*** " -NoNewline -ForegroundColor Cyan
-Write-Host "All done. You will find OpenPype installer in " -NoNewLine
-Write-Host "'.\build'" -NoNewline -ForegroundColor Green
-Write-Host " directory."
+try {
+    New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype build complete!", "All done. You will find OpenPype installer in '.\build' directory."
+} catch {}
+Write-Color -Text "*** ", "All done. You will find OpenPype installer in ", "'.\build'", " directory." -Color Green, Gray, White, Gray
diff --git a/tools/create_env.ps1 b/tools/create_env.ps1
index c307ba2031..3f956e5c6a 100644
--- a/tools/create_env.ps1
+++ b/tools/create_env.ps1
@@ -24,6 +24,15 @@ if($arguments -eq "--verbose") {
     $poetry_verbosity="-vvv"
 }

+$current_dir = Get-Location
+$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
+$openpype_root = (Get-Item $script_dir).parent.FullName
+
+& git submodule update --init --recursive
+# Install PSWriteColor to support colorized output to terminal
+$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell"
+
+
 function Exit-WithCode($exitcode) {
    # Only exit this host process if it's a child of another PowerShell parent process...
    $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId
@@ -36,30 +45,26 @@ function Exit-WithCode($exitcode) {

 function Show-PSWarning() {
     if ($PSVersionTable.PSVersion.Major -lt 7) {
-        Write-Host "!!! " -NoNewline -ForegroundColor Red
-        Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)"
-        Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray
-        Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White
+        Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White
+        Write-Color -Text "    Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White
         Exit-WithCode 1
     }
 }

 function Install-Poetry() {
-    Write-Host ">>> " -NoNewline -ForegroundColor Green
-    Write-Host "Installing Poetry ... "
+    Write-Color -Text ">>> ", "Installing Poetry ... " -Color Green, Gray
     $python = "python"
     if (Get-Command "pyenv" -ErrorAction SilentlyContinue) {
         if (-not (Test-Path -PathType Leaf -Path "$($openpype_root)\.python-version")) {
             $result = & pyenv global
             if ($result -eq "no global version configured") {
-                Write-Host "!!! " -NoNewline -ForegroundColor Red
-                Write-Host "Using pyenv but having no local or global version of Python set."
+                Write-Color -Text "!!! ", "Using pyenv but having no local or global version of Python set." 
-Color Red, Yellow
                 Exit-WithCode 1
             }
         }
         $python = & pyenv which python
-
+    }

     $env:POETRY_HOME="$openpype_root\.poetry"
@@ -68,8 +73,7 @@ function Install-Poetry() {


 function Test-Python() {
-    Write-Host ">>> " -NoNewline -ForegroundColor green
-    Write-Host "Detecting host Python ... " -NoNewline
+    Write-Color -Text ">>> ", "Detecting host Python ... " -Color Green, Gray -NoNewline
     $python = "python"
     if (Get-Command "pyenv" -ErrorAction SilentlyContinue) {
         $pyenv_python = & pyenv which python
@@ -97,22 +101,17 @@ print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))
     }
     # We are supporting python 3.7 only
     if (($matches[1] -lt 3) -or ($matches[2] -lt 7)) {
-      Write-Host "FAILED Version [ $p ] is old and unsupported" -ForegroundColor red
+      Write-Color -Text "FAILED ", "Version ", "[ ", $p, " ]", " is old and unsupported" -Color Red, Yellow, Cyan, White, Cyan, Yellow
      Set-Location -Path $current_dir
      Exit-WithCode 1
    } elseif (($matches[1] -eq 3) -and ($matches[2] -gt 7)) {
-      Write-Host "WARNING Version [ $p ] is unsupported, use at your own risk." -ForegroundColor yellow
-      Write-Host "*** " -NoNewline -ForegroundColor yellow
-      Write-Host "OpenPype supports only Python 3.7" -ForegroundColor white
+      Write-Color -Text "WARNING Version ", "[ ", $p, " ]", " is unsupported, use at your own risk." -Color Yellow, Cyan, White, Cyan, Yellow
+      Write-Color -Text "*** ", "OpenPype supports only Python 3.7" -Color Yellow, White
    } else {
-      Write-Host "OK [ $p ]" -ForegroundColor green
+      Write-Color "OK ", "[ ", $p, " ]" -Color Green, Cyan, White, Cyan
    }
 }

-$current_dir = Get-Location
-$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
-$openpype_root = (Get-Item $script_dir).parent.FullName
-
 if (-not (Test-Path 'env:POETRY_HOME')) {
     $env:POETRY_HOME = "$openpype_root\.poetry"
 }
@@ -150,41 +149,39 @@
 $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py"
 $result = [regex]::Matches($version_file, '__version__ = "(?<version>\d+\.\d+.\d+.*)"')
 $openpype_version = $result[0].Groups['version'].Value
 if (-not $openpype_version) {
-    Write-Host "!!! " -ForegroundColor yellow -NoNewline
-    Write-Host "Cannot determine OpenPype version."
+    Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Red, Yellow
     Set-Location -Path $current_dir
     Exit-WithCode 1
 }

-Write-Host ">>> " -NoNewline -ForegroundColor Green
-Write-Host "Found OpenPype version " -NoNewline
-Write-Host "[ $($openpype_version) ]" -ForegroundColor Green
+Write-Color -Text ">>> ", "Found OpenPype version ", "[ ", $($openpype_version), " ]" -Color Green, Gray, Cyan, White, Cyan

 Test-Python

-Write-Host ">>> " -NoNewline -ForegroundColor Green
-Write-Host "Reading Poetry ... " -NoNewline
+Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline
 if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) {
-    Write-Host "NOT FOUND" -ForegroundColor Yellow
+    Write-Color -Text "NOT FOUND" -Color Yellow
     Install-Poetry
-    Write-Host "INSTALLED" -ForegroundColor Cyan
+    Write-Color -Text "INSTALLED" -Color Cyan
 } else {
-    Write-Host "OK" -ForegroundColor Green
+    Write-Color -Text "OK" -Color Green
 }

 if (-not (Test-Path -PathType Leaf -Path "$($openpype_root)\poetry.lock")) {
-    Write-Host ">>> " -NoNewline -ForegroundColor green
-    Write-Host "Installing virtual environment and creating lock."
+    Write-Color -Text ">>> ", "Installing virtual environment and creating lock." 
-Color Green, Gray
 } else {
-    Write-Host ">>> " -NoNewline -ForegroundColor green
-    Write-Host "Installing virtual environment from lock."
+    Write-Color -Text ">>> ", "Installing virtual environment from lock." -Color Green, Gray
 }
+$startTime = [int][double]::Parse((Get-Date -UFormat %s))
 & "$env:POETRY_HOME\bin\poetry" install --no-root $poetry_verbosity --ansi
 if ($LASTEXITCODE -ne 0) {
-    Write-Host "!!! " -ForegroundColor yellow -NoNewline
-    Write-Host "Poetry command failed."
+    Write-Color -Text "!!! ", "Poetry command failed." -Color Red, Yellow
     Set-Location -Path $current_dir
     Exit-WithCode 1
 }
+$endTime = [int][double]::Parse((Get-Date -UFormat %s))
 Set-Location -Path $current_dir
-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "Virtual environment created."
+try
+{
+    New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype", "Virtual environment created.", "All done in $( $endTime - $startTime ) secs."
+} catch {}
+Write-Color -Text ">>> ", "Virtual environment created." -Color Green, White
diff --git a/tools/create_zip.ps1 b/tools/create_zip.ps1
index e33445d1fa..7b852b7c54 100644
--- a/tools/create_zip.ps1
+++ b/tools/create_zip.ps1
@@ -19,6 +19,13 @@ PS> .\create_zip.ps1 --path C:\OpenPype

 #>
+$current_dir = Get-Location
+$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
+$openpype_root = (Get-Item $script_dir).parent.FullName
+
+# Install PSWriteColor to support colorized output to terminal
+$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell"
+
 function Exit-WithCode($exitcode) {
    # Only exit this host process if it's a child of another PowerShell parent process...
    $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId
@@ -31,18 +38,12 @@ function Exit-WithCode($exitcode) {

 function Show-PSWarning() {
     if ($PSVersionTable.PSVersion.Major -lt 7) {
-        Write-Host "!!! " -NoNewline -ForegroundColor Red
-        Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)"
-        Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray
-        Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White
+        Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White
+        Write-Color -Text "    Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White
         Exit-WithCode 1
     }
 }

-$current_dir = Get-Location
-$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
-$openpype_root = (Get-Item $script_dir).parent.FullName
-
 $env:_INSIDE_OPENPYPE_TOOL = "1"

 if (-not (Test-Path 'env:POETRY_HOME')) {
@@ -78,31 +79,25 @@
 $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py"
 $result = [regex]::Matches($version_file, '__version__ = "(?<version>\d+\.\d+.\d+.*)"')
 $openpype_version = $result[0].Groups['version'].Value
 if (-not $openpype_version) {
-    Write-Host "!!! " -ForegroundColor yellow -NoNewline
-    Write-Host "Cannot determine OpenPype version."
+    Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray
     Exit-WithCode 1
 }

-Write-Host ">>> " -NoNewline -ForegroundColor Green
-Write-Host "Reading Poetry ... " -NoNewline
+Write-Color -Text ">>> ", "Reading Poetry ... 
" -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." + Write-Color -Text "NOT FOUND" -Color Yellow + Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." -Color Yellow, Gray & "$openpype_root\tools\create_env.ps1" } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Cleaning cache files ... " -NoNewline +Write-Color -Text ">>> ", "Cleaning cache files ... " -Color Green, Gray -NoNewline Get-ChildItem $openpype_root -Filter "__pycache__" -Force -Recurse| Where-Object {( $_.FullName -inotmatch '\\build\\' ) -and ( $_.FullName -inotmatch '\\.venv' )} | Remove-Item -Force -Recurse Get-ChildItem $openpype_root -Filter "*.pyc" -Force -Recurse | Where-Object {( $_.FullName -inotmatch '\\build\\' ) -and ( $_.FullName -inotmatch '\\.venv' )} | Remove-Item -Force Get-ChildItem $openpype_root -Filter "*.pyo" -Force -Recurse | Where-Object {( $_.FullName -inotmatch '\\build\\' ) -and ( $_.FullName -inotmatch '\\.venv' )} | Remove-Item -Force -Write-Host "OK" -ForegroundColor green -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Generating zip from current sources ..." +Write-Color -Text ">>> ", "Generating zip from current sources ..." -Color Green, Gray $env:PYTHONPATH="$($openpype_root);$($env:PYTHONPATH)" $env:OPENPYPE_ROOT="$($openpype_root)" & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\tools\create_zip.py" $ARGS diff --git a/tools/create_zip.py b/tools/create_zip.py index 2fc351469a..6392428f58 100644 --- a/tools/create_zip.py +++ b/tools/create_zip.py @@ -61,7 +61,7 @@ def _print(msg: str, message_type: int = 0) -> None: else: header = term.darkolivegreen3("--- ") - print("{}{}".format(header, msg)) + print(f"{header}{msg}") if __name__ == "__main__": diff --git a/tools/fetch_thirdparty_libs.ps1 b/tools/fetch_thirdparty_libs.ps1 index 16f7b70e7a..4df007ad67 100644 --- a/tools/fetch_thirdparty_libs.ps1 +++ b/tools/fetch_thirdparty_libs.ps1 @@ -15,6 +15,9 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $env:_INSIDE_OPENPYPE_TOOL = "1" if (-not (Test-Path 'env:POETRY_HOME')) { @@ -23,16 +26,19 @@ if (-not (Test-Path 'env:POETRY_HOME')) { Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." + Write-Color -Text "NOT FOUND" -Color Yellow + Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." 
-Color Yellow, Gray & "$openpype_root\tools\create_env.ps1" } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } - +$startTime = [int][double]::Parse((Get-Date -UFormat %s)) & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\tools\fetch_thirdparty_libs.py" +$endTime = [int][double]::Parse((Get-Date -UFormat %s)) Set-Location -Path $current_dir +try +{ + New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype", "Dependencies downloaded", "All done in $( $endTime - $startTime ) secs." +} catch {} \ No newline at end of file diff --git a/tools/make_docs.ps1 b/tools/make_docs.ps1 index 45a11171ae..43ecd0c09c 100644 --- a/tools/make_docs.ps1 +++ b/tools/make_docs.ps1 @@ -44,27 +44,30 @@ $art = @" "@ +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + Write-Host $art -ForegroundColor DarkGreen -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." - & "$openpype_root\tools\create_env.ps1" + Write-Color -Text "NOT FOUND" -Color Yellow + Install-Poetry + Write-Color -Text "INSTALLED" -Color Cyan } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } -Write-Host "This will not overwrite existing source rst files, only scan and add new." +Write-Color -Text "... ", "This will not overwrite existing source rst files, only scan and add new." -Color Yellow, Gray Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Running apidoc ..." +Write-Color -Text ">>> ", "Running apidoc ..." -Color Green, Gray & "$env:POETRY_HOME\bin\poetry" run sphinx-apidoc -M -e -d 10 --ext-intersphinx --ext-todo --ext-coverage --ext-viewcode -o "$($openpype_root)\docs\source" igniter & "$env:POETRY_HOME\bin\poetry" run sphinx-apidoc.exe -M -e -d 10 --ext-intersphinx --ext-todo --ext-coverage --ext-viewcode -o "$($openpype_root)\docs\source" openpype vendor, openpype\vendor -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Building html ..." +Write-Color -Text ">>> ", "Building html ..." 
-Color Green, Gray
 & "$env:POETRY_HOME\bin\poetry" run python "$($openpype_root)\setup.py" build_sphinx

 Set-Location -Path $current_dir
diff --git a/tools/modules/powershell/BurntToast b/tools/modules/powershell/BurntToast
new file mode 160000
index 0000000000..f58c9a26d6
--- /dev/null
+++ b/tools/modules/powershell/BurntToast
@@ -0,0 +1 @@
+Subproject commit f58c9a26d6ede30ecc7998e92b26974887e945fe
diff --git a/tools/modules/powershell/PSWriteColor b/tools/modules/powershell/PSWriteColor
new file mode 160000
index 0000000000..12eda384eb
--- /dev/null
+++ b/tools/modules/powershell/PSWriteColor
@@ -0,0 +1 @@
+Subproject commit 12eda384ebd7a7954e15855e312215c009c97114
diff --git a/tools/pack_project.ps1 b/tools/pack_project.ps1
new file mode 100644
index 0000000000..856247f7ca
--- /dev/null
+++ b/tools/pack_project.ps1
@@ -0,0 +1,39 @@
+<#
+.SYNOPSIS
+  Helper script for packing an OpenPype project.
+
+.DESCRIPTION
+  Once you are happy with the project and want to preserve it for future work, just change the project name on line 38 and copy the file into .\OpenPype\tools. Then use the command from .EXAMPLE.
+
+.EXAMPLE
+
+PS> .\tools\pack_project.ps1
+
+#>
+$current_dir = Get-Location
+$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
+$openpype_root = (Get-Item $script_dir).parent.FullName
+
+$env:_INSIDE_OPENPYPE_TOOL = "1"
+
+# make sure Poetry is in PATH
+if (-not (Test-Path 'env:POETRY_HOME')) {
+    $env:POETRY_HOME = "$openpype_root\.poetry"
+}
+$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin"
+
+Set-Location -Path $openpype_root
+
+Write-Host ">>> " -NoNewline -ForegroundColor Green
+Write-Host "Reading Poetry ... " -NoNewline
+if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) {
+    Write-Host "NOT FOUND" -ForegroundColor Yellow
+    Write-Host "*** " -NoNewline -ForegroundColor Yellow
+    Write-Host "We need to install Poetry create virtual env first ..."
+    & "$openpype_root\tools\create_env.ps1"
+} else {
+    Write-Host "OK" -ForegroundColor Green
+}
+
+& "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\start.py" pack-project --project $ARGS
+Set-Location -Path $current_dir
\ No newline at end of file
diff --git a/tools/run_mongo.ps1 b/tools/run_mongo.ps1
index f6fa37207d..c64ff75969 100644
--- a/tools/run_mongo.ps1
+++ b/tools/run_mongo.ps1
@@ -11,6 +11,13 @@ PS> .\run_mongo.ps1

 #>
+$current_dir = Get-Location
+$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
+$openpype_root = (Get-Item $script_dir).parent.FullName
+
+# Install PSWriteColor to support colorized output to terminal
+$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell"
+
 $art = @"
. . .. . ..
@@ -43,8 +50,7 @@ function Exit-WithCode($exitcode) {

 function Find-Mongo ($preferred_version) {
     $defaultPath = "C:\Program Files\MongoDB\Server"
-    Write-Host ">>> " -NoNewLine -ForegroundColor Green
-    Write-Host "Detecting MongoDB ... " -NoNewline
+    Write-Color -Text ">>> ", "Detecting MongoDB ... " -Color Green, Gray -NoNewline
     if (-not (Get-Command "mongod" -ErrorAction SilentlyContinue)) {
         if(Test-Path "$($defaultPath)\*\bin\mongod.exe" -PathType Leaf) {
             # we have mongo server installed on standard Windows location
             # $preferred_version.
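             # (the newest installed version found below is used, unless one
             # of the found versions matches $preferred_version exactly)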
$mongoVersions = Get-ChildItem -Directory 'C:\Program Files\MongoDB\Server' | Sort-Object -Property {$_.Name -as [int]} if(Test-Path "$($mongoVersions[-1])\bin\mongod.exe" -PathType Leaf) { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green $use_version = $mongoVersions[-1] foreach ($v in $mongoVersions) { - Write-Host " - found [ " -NoNewline - Write-Host $v -NoNewLine -ForegroundColor Cyan - Write-Host " ]" -NoNewLine - + Write-Color -Text " - found [ ", $v, " ]" -Color Cyan, White, Cyan -NoNewLine $version = Split-Path $v -Leaf if ($preferred_version -eq $version) { - Write-Host " *" -ForegroundColor Green + Write-Color -Text " *" -Color Green $use_version = $v } else { Write-Host "" @@ -71,27 +74,20 @@ function Find-Mongo ($preferred_version) { $env:PATH = "$($env:PATH);$($use_version)\bin\" - Write-Host " - auto-added from [ " -NoNewline - Write-Host "$($use_version)\bin\mongod.exe" -NoNewLine -ForegroundColor Cyan - Write-Host " ]" + Write-Color -Text " - auto-added from [ ", "$($use_version)\bin\mongod.exe", " ]" -Color Cyan, White, Cyan return "$($use_version)\bin\mongod.exe" } else { - Write-Host "FAILED " -NoNewLine -ForegroundColor Red - Write-Host "MongoDB not detected" -ForegroundColor Yellow - Write-Host "Tried to find it on standard location " -NoNewline -ForegroundColor Gray - Write-Host " [ " -NoNewline -ForegroundColor Cyan - Write-Host "$($mongoVersions[-1])\bin\mongod.exe" -NoNewline -ForegroundColor White - Write-Host " ] " -NoNewLine -ForegroundColor Cyan - Write-Host "but failed." -ForegroundColor Gray + Write-Color -Text "FAILED " -Color Red -NoNewLine + Write-Color -Text "MongoDB not detected" -Color Yellow + Write-Color -Text "Tried to find it on standard location ", "[ ", "$($mongoVersions[-1])\bin\mongod.exe", " ]", " but failed." 
-Color Gray, Cyan, White, Cyan, Gray -NoNewline Exit-WithCode 1 } } else { - Write-Host "FAILED " -NoNewLine -ForegroundColor Red - Write-Host "MongoDB not detected in PATH" -ForegroundColor Yellow + Write-Color -Text "FAILED ", "MongoDB not detected in PATH" -Color Red, Yellow Exit-WithCode 1 } } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green return Get-Command "mongod" -ErrorAction SilentlyContinue } <# @@ -104,9 +100,6 @@ function Find-Mongo ($preferred_version) { #> } -$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent -$openpype_root = (Get-Item $script_dir).parent.FullName - # mongodb port $port = 2707 @@ -116,15 +109,7 @@ $dbpath = (Get-Item $openpype_root).parent.FullName + "\mongo_db_data" $preferred_version = "5.0" $mongoPath = Find-Mongo $preferred_version -Write-Host ">>> " -NoNewLine -ForegroundColor Green -Write-Host "Using DB path: " -NoNewLine -Write-Host " [ " -NoNewline -ForegroundColor Cyan -Write-Host "$($dbpath)" -NoNewline -ForegroundColor White -Write-Host " ] "-ForegroundColor Cyan -Write-Host ">>> " -NoNewLine -ForegroundColor Green -Write-Host "Port: " -NoNewLine -Write-Host " [ " -NoNewline -ForegroundColor Cyan -Write-Host "$($port)" -NoNewline -ForegroundColor White -Write-Host " ] " -ForegroundColor Cyan -Start-Process -FilePath $mongopath "--dbpath $($dbpath) --port $($port)" -PassThru | Out-Null +Write-Color -Text ">>> ", "Using DB path: ", "[ ", "$($dbpath)", " ]" -Color Green, Gray, Cyan, White, Cyan +Write-Color -Text ">>> ", "Port: ", "[ ", "$($port)", " ]" -Color Green, Gray, Cyan, White, Cyan +Start-Process -FilePath $mongopath "--dbpath $($dbpath) --port $($port)" -PassThru | Out-Null diff --git a/tools/run_project_manager.ps1 b/tools/run_project_manager.ps1 index a9cfbb1e7b..c1813e4ed9 100644 --- a/tools/run_project_manager.ps1 +++ b/tools/run_project_manager.ps1 @@ -35,6 +35,9 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $env:_INSIDE_OPENPYPE_TOOL = "1" # make sure Poetry is in PATH @@ -45,15 +48,13 @@ $env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." 
- & "$openpype_root\tools\create_env.ps1" + Write-Color -Text "NOT FOUND" -Color Yellow + Install-Poetry + Write-Color -Text "INSTALLED" -Color Cyan } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } & "$env:POETRY_HOME\bin\poetry" run python "$($openpype_root)\start.py" projectmanager diff --git a/tools/run_settings.ps1 b/tools/run_settings.ps1 index 1c0aa6e8f3..c74ae1ea3a 100644 --- a/tools/run_settings.ps1 +++ b/tools/run_settings.ps1 @@ -15,6 +15,9 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $env:_INSIDE_OPENPYPE_TOOL = "1" # make sure Poetry is in PATH @@ -25,15 +28,13 @@ $env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." - & "$openpype_root\tools\create_env.ps1" + Write-Color -Text "NOT FOUND" -Color Yellow + Install-Poetry + Write-Color -Text "INSTALLED" -Color Cyan } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } & "$env:POETRY_HOME\bin\poetry" run python "$($openpype_root)\start.py" settings --dev diff --git a/tools/run_tests.ps1 b/tools/run_tests.ps1 index e631cb72df..4fa598c413 100644 --- a/tools/run_tests.ps1 +++ b/tools/run_tests.ps1 @@ -11,6 +11,13 @@ PS> .\run_test.ps1 #> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + function Exit-WithCode($exitcode) { # Only exit this host process if it's a child of another PowerShell parent process... $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId @@ -22,10 +29,8 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" - Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray - Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White + Write-Color -Text " Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White Exit-WithCode 1 } } @@ -53,10 +58,6 @@ Write-Host $art -ForegroundColor DarkGreen # Enable if PS 7.x is needed. 
 # Show-PSWarning

-$current_dir = Get-Location
-$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
-$openpype_root = (Get-Item $script_dir).parent.FullName
-
 $env:_INSIDE_OPENPYPE_TOOL = "1"

 if (-not (Test-Path 'env:POETRY_HOME')) {
@@ -69,46 +70,32 @@
 $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py"
 $result = [regex]::Matches($version_file, '__version__ = "(?<version>\d+\.\d+.\d+.*)"')
 $openpype_version = $result[0].Groups['version'].Value
 if (-not $openpype_version) {
-    Write-Host "!!! " -ForegroundColor yellow -NoNewline
-    Write-Host "Cannot determine OpenPype version."
+    Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray
     Exit-WithCode 1
 }

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "OpenPype [ " -NoNewline -ForegroundColor white
-Write-host $openpype_version -NoNewline -ForegroundColor green
-Write-Host " ] ..." -ForegroundColor white
+Write-Color -Text ">>> ", "OpenPype [ ", $openpype_version, " ]" -Color Green, White, Cyan, White

-Write-Host ">>> " -NoNewline -ForegroundColor Green
-Write-Host "Reading Poetry ... " -NoNewline
+Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline
 if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) {
-    Write-Host "NOT FOUND" -ForegroundColor Yellow
-    Write-Host "*** " -NoNewline -ForegroundColor Yellow
-    Write-Host "We need to install Poetry create virtual env first ..."
+    Write-Color -Text "NOT FOUND" -Color Yellow
+    Write-Color -Text "*** ", "We need to install Poetry and create virtual env first ..." -Color Yellow, Gray
     & "$openpype_root\tools\create_env.ps1"
 } else {
-    Write-Host "OK" -ForegroundColor Green
+    Write-Color -Text "OK" -Color Green
 }

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "Cleaning cache files ... " -NoNewline
+Write-Color -Text ">>> ", "Cleaning cache files ... " -Color Green, Gray -NoNewline
 Get-ChildItem $openpype_root -Filter "*.pyc" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force
+Get-ChildItem $openpype_root -Filter "*.pyo" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force
 Get-ChildItem $openpype_root -Filter "__pycache__" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force -Recurse
-Write-Host "OK" -ForegroundColor green
+Write-Color -Text "OK" -Color green

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "Testing OpenPype ..."
+Write-Color -Text ">>> ", "Testing OpenPype ..." -Color Green, White
 $original_pythonpath = $env:PYTHONPATH
 $env:PYTHONPATH="$($openpype_root);$($env:PYTHONPATH)"

 & "$env:POETRY_HOME\bin\poetry" run pytest -x --capture=sys --print -W ignore::DeprecationWarning "$($openpype_root)/tests"
 $env:PYTHONPATH = $original_pythonpath

-Write-Host ">>> " -NoNewline -ForegroundColor green
-Write-Host "restoring current directory"
+Write-Color -Text ">>> ", "Restoring current directory" -Color Green, Gray
 Set-Location -Path $current_dir
-
-
-
-
-
-
diff --git a/tools/run_tray.ps1 b/tools/run_tray.ps1
index 872c1524a6..40157c4e81 100644
--- a/tools/run_tray.ps1
+++ b/tools/run_tray.ps1
@@ -14,6 +14,9 @@
 $current_dir = Get-Location
 $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
 $openpype_root = (Get-Item $script_dir).parent.FullName

+# Add bundled PSWriteColor module to PSModulePath to support colorized output to terminal
+$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell"
+
 $env:_INSIDE_OPENPYPE_TOOL = "1"

 # make sure Poetry is in PATH
@@ -24,15 +27,13 @@
 $env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin"

 Set-Location -Path $openpype_root

-Write-Host ">>> " -NoNewline -ForegroundColor Green
-Write-Host "Reading Poetry ... " -NoNewline
+Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline
 if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) {
-    Write-Host "NOT FOUND" -ForegroundColor Yellow
-    Write-Host "*** " -NoNewline -ForegroundColor Yellow
-    Write-Host "We need to install Poetry create virtual env first ..."
+    Write-Color -Text "NOT FOUND" -Color Yellow
+    Write-Color -Text "*** ", "We need to install Poetry and create virtual env first ..." -Color Yellow, Gray
     & "$openpype_root\tools\create_env.ps1"
 } else {
-    Write-Host "OK" -ForegroundColor Green
+    Write-Color -Text "OK" -Color Green
 }

 & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\start.py" tray --debug
diff --git a/tools/unpack_project.ps1 b/tools/unpack_project.ps1
new file mode 100644
index 0000000000..e7b9e87a7f
--- /dev/null
+++ b/tools/unpack_project.ps1
@@ -0,0 +1,39 @@
+<#
+.SYNOPSIS
+  Helper script for unpacking an OpenPype project.
+
+.DESCRIPTION
+  Make sure you have dropped the project from your db and removed the project data if you had them previously. Then, on line 38, change the path to wherever the zip with the project is - we usually keep it here: https://drive.google.com/drive/u/0/folders/0AKE4mxImOsAGUk9PVA . Copy the file into .\OpenPype\tools, then use the command from .EXAMPLE.
+
+.EXAMPLE
+
+PS> .\tools\unpack_project.ps1
+
+#>
+$current_dir = Get-Location
+$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
+$openpype_root = (Get-Item $script_dir).parent.FullName
+
+$env:_INSIDE_OPENPYPE_TOOL = "1"
+
+# make sure Poetry is in PATH
+if (-not (Test-Path 'env:POETRY_HOME')) {
+    $env:POETRY_HOME = "$openpype_root\.poetry"
+}
+$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin"
+
+Set-Location -Path $openpype_root
+
+Write-Host ">>> " -NoNewline -ForegroundColor Green
+Write-Host "Reading Poetry ... " -NoNewline
+if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) {
+    Write-Host "NOT FOUND" -ForegroundColor Yellow
+    Write-Host "*** " -NoNewline -ForegroundColor Yellow
+    Write-Host "We need to install Poetry and create virtual env first ..."
+ & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green +} + +& "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\start.py" unpack-project --zipfile $ARGS +Set-Location -Path $current_dir \ No newline at end of file diff --git a/vendor/configs/OpenColorIO-Configs b/vendor/configs/OpenColorIO-Configs new file mode 160000 index 0000000000..0bb079c08b --- /dev/null +++ b/vendor/configs/OpenColorIO-Configs @@ -0,0 +1 @@ +Subproject commit 0bb079c08be410030669cbf5f19ff869b88af953 diff --git a/website/docs/admin_openpype_commands.md b/website/docs/admin_openpype_commands.md index 53b4799d6e..53fc12410f 100644 --- a/website/docs/admin_openpype_commands.md +++ b/website/docs/admin_openpype_commands.md @@ -45,6 +45,7 @@ For more information [see here](admin_use.md#run-openpype). | publish | Pype takes JSON from provided path and use it to publish data in it. | [📑](#publish-arguments) | | extractenvironments | Extract environment variables for entered context to a json file. | [📑](#extractenvironments-arguments) | | run | Execute given python script within OpenPype environment. | [📑](#run-arguments) | +| interactive | Start python like interactive console session. | | | projectmanager | Launch Project Manager UI | [📑](#projectmanager-arguments) | | settings | Open Settings UI | [📑](#settings-arguments) | | standalonepublisher | Open Standalone Publisher UI | [📑](#standalonepublisher-arguments) | diff --git a/website/docs/dev_publishing.md b/website/docs/dev_publishing.md index 8ee3b7e85f..f11a2c3047 100644 --- a/website/docs/dev_publishing.md +++ b/website/docs/dev_publishing.md @@ -66,7 +66,7 @@ Another optional function is **get_current_context**. This function is handy in Main responsibility of create plugin is to create, update, collect and remove instance metadata and propagate changes to create context. Has access to **CreateContext** (`self.create_context`) that discovered the plugin so has also access to other creators and instances. Create plugins have a lot of responsibility so it is recommended to implement common code per host. #### *BaseCreator* -Base implementation of creator plugin. It is not recommended to use this class as base for production plugins but rather use one of **AutoCreator** and **Creator** variants. +Base implementation of creator plugin. It is not recommended to use this class as base for production plugins but rather use one of **HiddenCreator**, **AutoCreator** and **Creator** variants. **Abstractions** - **`family`** (class attr) - Tells what kind of instance will be created. @@ -92,7 +92,7 @@ def collect_instances(self): self._add_instance_to_context(instance) ``` -- **`create`** (method) - Create a new object of **CreatedInstance** store its metadata to the workfile and add the instance into the created context. Failed Creating should raise **CreatorError** if an error happens that artists can fix or give them some useful information. Triggers and implementation differs for **Creator** and **AutoCreator**. +- **`create`** (method) - Create a new object of **CreatedInstance** store its metadata to the workfile and add the instance into the created context. Failed Creating should raise **CreatorError** if an error happens that artists can fix or give them some useful information. Triggers and implementation differs for **Creator**, **HiddenCreator** and **AutoCreator**. - **`update_instances`** (method) - Update data of instances. Receives tuple with **instance** and **changes**. 
 ```python
@@ -172,11 +172,11 @@ class RenderLayerCreator(Creator):
     icon = "fa5.building"
 ```

-- **`get_instance_attr_defs`** (method) - Attribute definitions of instance. Creator can define attribute values with default values for each instance. These attributes may affect how instances will be instance processed during publishing. Attribute defiitions can be used from `openpype.pipeline.lib.attribute_definitions` (NOTE: Will be moved to `openpype.lib.attribute_definitions` soon). Attribute definitions define basic types of values for different cases e.g. boolean, number, string, enumerator, etc. Default implementation returns **instance_attr_defs**.
+- **`get_instance_attr_defs`** (method) - Attribute definitions of instance. Creator can define attribute values with default values for each instance. These attributes may affect how instances will be processed during publishing. Attribute definitions can be used from `openpype.lib.attribute_definitions`. Attribute definitions define basic types of values for different cases e.g. boolean, number, string, enumerator, etc. Default implementation returns **instance_attr_defs**.
 - **`instance_attr_defs`** (attr) - Attribute for default implementation of **get_instance_attr_defs**.

 ```python
-from openpype.pipeline import attribute_definitions
+from openpype.lib import attribute_definitions


 class RenderLayerCreator(Creator):
@@ -199,6 +199,20 @@ class RenderLayerCreator(Creator):

 - **`get_dynamic_data`** (method) - Can be used to extend data for subset templates which may be required in some cases.

+#### *HiddenCreator*
+Creator which is not shown in the UI, so artists can't trigger it directly, but it is available to other creators. This creator is primarily meant for cases when creation should produce different types of instances. For example during editorial publishing, where the input is a single edl file but two or more kinds of instances should be created, each with a different family, attributes and abilities. Arguments for creation are limited to `instance_data` and `source_data`. Data in `instance_data` should follow what is sent to other creators, and `source_data` can be used to send custom data defined by the main creator. It is expected that a `HiddenCreator` has a specific main or "parent" creator.
+
+```python
+def create(self, instance_data, source_data):
+    variant = instance_data["variant"]
+    task_name = instance_data["task"]
+    asset_name = instance_data["asset"]
+    asset_doc = get_asset_by_name(self.project_name, asset_name)
+    self.get_subset_name(
+        variant, task_name, asset_doc, self.project_name, self.host_name)
+```
+
+
 #### *AutoCreator*
 Creator that is triggered on reset of create context. Can be used for families that are expected to be created automatically without artist interaction (e.g. **workfile**). Method `create` is triggered after collecting all creators.
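For orientation: the `HiddenCreator` snippet in the hunk above stops after resolving a subset name and never shows the instance actually being created. A minimal sketch of how the remaining step might look — assuming the `CreatedInstance(family, subset_name, data, creator)` signature and the `_add_instance_to_context` helper shown in the `collect_instances` example earlier; the class name `EditorialShotCreator` is purely hypothetical:

```python
from openpype.client import get_asset_by_name
from openpype.pipeline.create import CreatedInstance, HiddenCreator


class EditorialShotCreator(HiddenCreator):
    """Hypothetical hidden creator triggered by a main editorial creator."""

    family = "shot"

    def create(self, instance_data, source_data):
        # Resolve the subset name from the context data the main
        # creator passed in via 'instance_data'.
        asset_doc = get_asset_by_name(
            self.project_name, instance_data["asset"]
        )
        subset_name = self.get_subset_name(
            instance_data["variant"],
            instance_data["task"],
            asset_doc,
            self.project_name,
            self.host_name,
        )

        # Wrap the metadata and register the instance with the
        # create context so other plugins can see it.
        instance = CreatedInstance(
            self.family, subset_name, instance_data, self
        )
        self._add_instance_to_context(instance)
        return instance
```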
@@ -234,14 +248,14 @@ def create(self):
     # - variant can be filled from settings
     variant = self._variant_name
     # Only place where we can look for current context
-    project_name = io.Session["AVALON_PROJECT"]
-    asset_name = io.Session["AVALON_ASSET"]
-    task_name = io.Session["AVALON_TASK"]
-    host_name = io.Session["AVALON_APP"]
+    project_name = self.project_name
+    asset_name = legacy_io.Session["AVALON_ASSET"]
+    task_name = legacy_io.Session["AVALON_TASK"]
+    host_name = legacy_io.Session["AVALON_APP"]

     # Create new instance if does not exist yet
     if existing_instance is None:
-        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        asset_doc = get_asset_by_name(project_name, asset_name)
         subset_name = self.get_subset_name(
             variant, task_name, asset_doc, project_name, host_name
         )
@@ -264,7 +278,7 @@ def create(self):
         existing_instance["asset"] != asset_name
         or existing_instance["task"] != task_name
     ):
-        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        asset_doc = get_asset_by_name(project_name, asset_name)
         subset_name = self.get_subset_name(
             variant, task_name, asset_doc, project_name, host_name
         )
@@ -297,7 +311,8 @@ class BulkRenderCreator(Creator):
 - **`pre_create_attr_defs`** (attr) - Attribute for default implementation of **get_pre_create_attr_defs**.

 ```python
-from openpype.pipeline import Creator, attribute_definitions
+from openpype.lib import attribute_definitions
+from openpype.pipeline.create import Creator


 class CreateRender(Creator):
@@ -470,10 +485,8 @@ Possible attribute definitions can be found in `openpype/pipeline/lib/attribute_
 ```python
 import pyblish.api
-from openpype.pipeline import (
-    OpenPypePyblishPluginMixin,
-    attribute_definitions,
-)
+from openpype.lib import attribute_definitions
+from openpype.pipeline import OpenPypePyblishPluginMixin


 # Example context plugin
diff --git a/website/src/css/custom.css b/website/src/css/custom.css
index e8dd86256b..58c9305bc7 100644
--- a/website/src/css/custom.css
+++ b/website/src/css/custom.css
@@ -196,12 +196,12 @@ html[data-theme='dark'] .header-github-link::before {
     padding: 20px
 }

-.showcase .client {
+.showcase .studio {
     display: flex;
     justify-content: space-between;
 }

-.showcase .client img {
+.showcase .studio img {
     max-height: 110px;
     padding: 20px;
     max-width: 160px;
diff --git a/website/src/pages/index.js b/website/src/pages/index.js
index 0886706015..52302ec285 100644
--- a/website/src/pages/index.js
+++ b/website/src/pages/index.js
@@ -65,13 +65,17 @@ const collab = [
     image: '/img/clothcat.png',
     infoLink: 'https://www.clothcatanimation.com/'
   }, {
-    title: 'Ellipse Studio',
-    image: '/img/ellipse-studio.png',
-    infoLink: 'http://www.dargaudmedia.com'
+    title: 'Ellipse Animation',
+    image: '/img/ellipse_animation.svg',
+    infoLink: 'http://www.ellipseanimation.com'
   }, {
     title: 'J Cube Inc',
     image: '/img/jcube_logo_bw.png',
     infoLink: 'https://j-cube.jp'
+  }, {
+    title: 'Normaal Animation',
+    image: '/img/logo_normaal.png',
+    infoLink: 'https://j-cube.jp'
   }
 ];

@@ -153,7 +157,32 @@ const studios = [
     title: "IGG Canada",
     image: "/img/igg-logo.png",
     infoLink: "https://www.igg.com/",
-  }
+  },
+  {
+    title: "Agora Studio",
+    image: "/img/agora_studio.png",
+    infoLink: "https://agora.studio/",
+  },
+  {
+    title: "Lucan Visuals",
+    image: "/img/lucan_Logo_On_White-HR.png",
+    infoLink: "https://www.lucan.tv/",
+  },
+  {
+    title: "No Ghost",
+    image: "/img/noghost.png",
+    infoLink: "https://www.noghost.co.uk/",
+  },
+  {
+    title: "Static VFX",
+    image: "/img/staticvfx.png",
+    infoLink: "http://www.staticvfx.com/",
+  },
+  {
+    title: "Method n Madness",
+    image: "/img/methodmadness.png",
+    infoLink: "https://www.methodnmadness.com/",
+}
 ];

 function Service({imageUrl, title, description}) {
@@ -166,10 +195,10 @@
   );
 }

-function Client({title, image, infoLink}) {
+function Studio({title, image, infoLink}) {
   const imgUrl = useBaseUrl(image);
   return (
-
+
   );
 }
@@ -465,7 +494,7 @@ function Home() {

 Studios using openPype

 {studios.map((props, idx) => (
-
+
 ))}
diff --git a/website/static/img/NoGhost_Logo_black.svg b/website/static/img/NoGhost_Logo_black.svg
new file mode 100644
index 0000000000..b499b1621f
--- /dev/null
+++ b/website/static/img/NoGhost_Logo_black.svg
@@ -0,0 +1,31 @@
(SVG markup not recoverable)
diff --git a/website/static/img/agora_studio.png b/website/static/img/agora_studio.png
new file mode 100644
index 0000000000..48b07b8775
Binary files /dev/null and b/website/static/img/agora_studio.png differ
diff --git a/website/static/img/ellipse_animation.svg b/website/static/img/ellipse_animation.svg
new file mode 100644
index 0000000000..c1caaa6726
--- /dev/null
+++ b/website/static/img/ellipse_animation.svg
@@ -0,0 +1,9 @@
(SVG markup not recoverable)
diff --git a/website/static/img/igg-logo.png b/website/static/img/igg-logo.png
index 3c7f7718f7..9fc7a7f84f 100644
Binary files a/website/static/img/igg-logo.png and b/website/static/img/igg-logo.png differ
diff --git a/website/static/img/logo_normaal.png b/website/static/img/logo_normaal.png
new file mode 100644
index 0000000000..711847c9f2
Binary files /dev/null and b/website/static/img/logo_normaal.png differ
diff --git a/website/static/img/lucan_Logo_On_White-HR.png b/website/static/img/lucan_Logo_On_White-HR.png
new file mode 100644
index 0000000000..c86030e1e7
Binary files /dev/null and b/website/static/img/lucan_Logo_On_White-HR.png differ
diff --git a/website/static/img/methodmadness.png b/website/static/img/methodmadness.png
new file mode 100644
index 0000000000..9dd0681d4a
Binary files /dev/null and b/website/static/img/methodmadness.png differ
diff --git a/website/static/img/noghost.png b/website/static/img/noghost.png
new file mode 100644
index 0000000000..febaedcae8
Binary files /dev/null and b/website/static/img/noghost.png differ
diff --git a/website/static/img/staticvfx.png b/website/static/img/staticvfx.png
new file mode 100644
index 0000000000..41efd7f120
Binary files /dev/null and b/website/static/img/staticvfx.png differ