diff --git a/.gitignore b/.gitignore index 7eaef69873..ea5b20eb69 100644 --- a/.gitignore +++ b/.gitignore @@ -102,5 +102,8 @@ website/.docusaurus .poetry/ .python-version +.editorconfig +.pre-commit-config.yaml +mypy.ini tools/run_eventserver.* diff --git a/.gitmodules b/.gitmodules index dfd89cdb3c..bac3132b77 100644 --- a/.gitmodules +++ b/.gitmodules @@ -5,3 +5,6 @@ [submodule "tools/modules/powershell/PSWriteColor"] path = tools/modules/powershell/PSWriteColor url = https://github.com/EvotecIT/PSWriteColor.git +[submodule "vendor/configs/OpenColorIO-Configs"] + path = vendor/configs/OpenColorIO-Configs + url = https://github.com/imageworks/OpenColorIO-Configs diff --git a/CHANGELOG.md b/CHANGELOG.md index e4fc1d59ca..80673e9f8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,85 @@ # Changelog +## [3.13.1-nightly.3](https://github.com/pypeclub/OpenPype/tree/HEAD) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...HEAD) + +**πŸ› Bug fixes** + +- General: Extract Review can scale with pixel aspect ratio [\#3644](https://github.com/pypeclub/OpenPype/pull/3644) +- Maya: Refactor moved usage of CreateRender settings [\#3643](https://github.com/pypeclub/OpenPype/pull/3643) +- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) +- Nuke: color settings for render write node is working now [\#3632](https://github.com/pypeclub/OpenPype/pull/3632) +- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) + +**πŸ”€ Refactored code** + +- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) +- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) + +**Merged pull requests:** + +- Deadline: Global job pre load is not Pype 2 compatible [\#3666](https://github.com/pypeclub/OpenPype/pull/3666) +- Maya: Remove unused get current renderer logic [\#3645](https://github.com/pypeclub/OpenPype/pull/3645) +- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) +- fix the bug of failing to extract look when UDIMs format used in AiImage [\#3628](https://github.com/pypeclub/OpenPype/pull/3628) + +## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.13.0-nightly.1...3.13.0) + +**πŸ†• New features** + +- Support for mutliple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) + +**πŸš€ Enhancements** + +- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) +- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) +- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612) +- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) +- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) +- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) +- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) +- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) +- General: New Integrator small 
fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) + +**πŸ› Bug fixes** + +- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) +- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) +- Ftrack: Sync hierarchical attributes can handle new created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) +- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) +- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617) +- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) +- AfterEffects: refactored integrate doesnt work formulti frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) +- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) +- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) +- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. [\#3590](https://github.com/pypeclub/OpenPype/pull/3590) +- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) +- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) +- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) +- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) +- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) +- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) + +**πŸ”€ Refactored code** + +- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) +- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) +- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) +- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) +- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) + +**Merged pull requests:** + +- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) +- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) +- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) + ## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...3.12.2) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.2-nightly.4...3.12.2) ### πŸ“– Documentation @@ -13,13 +90,6 @@ - General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) - Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) -- General: Interactive console in cli [\#3526](https://github.com/pypeclub/OpenPype/pull/3526) -- Ftrack: Automatic daily review session creation can define trigger hour 
[\#3516](https://github.com/pypeclub/OpenPype/pull/3516) -- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) -- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) -- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) -- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) -- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) **πŸ› Bug fixes** @@ -32,15 +102,6 @@ - Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) - Workfiles tool: Show of tool and it's flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) - General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) -- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) -- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) -- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) -- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) -- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) -- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) -- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) -- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) -- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) **πŸ”€ Refactored code** @@ -49,9 +110,6 @@ - Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) - General: Client docstrings cleanup [\#3529](https://github.com/pypeclub/OpenPype/pull/3529) - General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) -- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) -- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496) -- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466) **Merged pull requests:** @@ -61,51 +119,6 @@ [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.1-nightly.6...3.12.1) -### πŸ“– Documentation - -- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) - -**πŸš€ Enhancements** - -- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494) -- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487) -- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484) -- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) -- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) -- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) -- Windows installer: Clean old files and add version subfolder 
[\#3445](https://github.com/pypeclub/OpenPype/pull/3445) - -**πŸ› Bug fixes** - -- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493) -- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491) -- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485) -- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479) -- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478) -- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474) -- Kitsu: bugfix with sync-service ans publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473) -- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470) -- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468) -- Resolve: removed few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464) -- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462) -- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455) -- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450) -- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447) -- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443) - -**πŸ”€ Refactored code** - -- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461) -- Maya: Re-use `maintained\_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460) -- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459) -- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458) -- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457) -- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454) -- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446) -- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442) -- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436) -- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435) - ## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.0-nightly.3...3.12.0) diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index 08333885c0..750b2f1bf7 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -122,7 +122,7 @@ class OpenPypeVersion(semver.VersionInfo): if self.staging: if kwargs.get("build"): if "staging" not in kwargs.get("build"): - kwargs["build"] = "{}-staging".format(kwargs.get("build")) + kwargs["build"] = f"{kwargs.get('build')}-staging" else: kwargs["build"] = "staging" @@ -136,8 +136,7 @@ class OpenPypeVersion(semver.VersionInfo): return bool(result and self.staging == other.staging) def __repr__(self): - return 
"<{}: {} - path={}>".format( - self.__class__.__name__, str(self), self.path) + return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>" def __lt__(self, other: OpenPypeVersion): result = super().__lt__(other) @@ -232,10 +231,7 @@ class OpenPypeVersion(semver.VersionInfo): return openpype_version def __hash__(self): - if self.path: - return hash(self.path) - else: - return hash(str(self)) + return hash(self.path) if self.path else hash(str(self)) @staticmethod def is_version_in_dir( @@ -384,7 +380,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_local_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None, compatible_with: OpenPypeVersion = None ) -> List: """Get all versions available on this machine. @@ -394,6 +391,8 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. + compatible_with (OpenPypeVersion): Return only those compatible + with specified version. """ # Return all local versions if arguments are set to None if production is None and staging is None: @@ -410,10 +409,19 @@ class OpenPypeVersion(semver.VersionInfo): if not production and not staging: return [] + # DEPRECATED: backwards compatible way to look for versions in root dir_to_search = Path(user_data_dir("openpype", "pypeclub")) versions = OpenPypeVersion.get_versions_from_directory( - dir_to_search + dir_to_search, compatible_with=compatible_with ) + if compatible_with: + dir_to_search = Path( + user_data_dir("openpype", "pypeclub")) / f"{compatible_with.major}.{compatible_with.minor}" # noqa + versions += OpenPypeVersion.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with + ) + + filtered_versions = [] for version in versions: if version.is_staging(): @@ -425,7 +433,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_remote_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None, compatible_with: OpenPypeVersion = None ) -> List: """Get all versions available in OpenPype Path. @@ -435,6 +444,8 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. + compatible_with (OpenPypeVersion): Return only those compatible + with specified version. """ # Return all local versions if arguments are set to None if production is None and staging is None: @@ -468,7 +479,14 @@ class OpenPypeVersion(semver.VersionInfo): if not dir_to_search: return [] - versions = cls.get_versions_from_directory(dir_to_search) + # DEPRECATED: look for version in root directory + versions = cls.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with) + if compatible_with: + dir_to_search = dir_to_search / f"{compatible_with.major}.{compatible_with.minor}" # noqa + versions += cls.get_versions_from_directory( + dir_to_search, compatible_with=compatible_with) + filtered_versions = [] for version in versions: if version.is_staging(): @@ -479,11 +497,15 @@ class OpenPypeVersion(semver.VersionInfo): return list(sorted(set(filtered_versions))) @staticmethod - def get_versions_from_directory(openpype_dir: Path) -> List: + def get_versions_from_directory( + openpype_dir: Path, + compatible_with: OpenPypeVersion = None) -> List: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. 
+ compatible_with (OpenPypeVersion): Return only versions compatible + with build version specified as OpenPypeVersion. Returns: list of OpenPypeVersion @@ -492,10 +514,10 @@ class OpenPypeVersion(semver.VersionInfo): ValueError: if invalid path is specified. """ - if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") - _openpype_versions = [] + if not openpype_dir.exists() and not openpype_dir.is_dir(): + return _openpype_versions + # iterate over directory in first level and find all that might # contain OpenPype. for item in openpype_dir.iterdir(): @@ -518,6 +540,10 @@ class OpenPypeVersion(semver.VersionInfo): )[0]: continue + if compatible_with and not detected_version.is_compatible( + compatible_with): + continue + detected_version.path = item _openpype_versions.append(detected_version) @@ -549,8 +575,9 @@ class OpenPypeVersion(semver.VersionInfo): def get_latest_version( staging: bool = False, local: bool = None, - remote: bool = None - ) -> OpenPypeVersion: + remote: bool = None, + compatible_with: OpenPypeVersion = None + ) -> Union[OpenPypeVersion, None]: """Get latest available version. The version does not contain information about path and source. @@ -568,6 +595,9 @@ class OpenPypeVersion(semver.VersionInfo): staging (bool, optional): List staging versions if True. local (bool, optional): List local versions if True. remote (bool, optional): List remote versions if True. + compatible_with (OpenPypeVersion, optional) Return only version + compatible with compatible_with. + """ if local is None and remote is None: local = True @@ -598,7 +628,12 @@ class OpenPypeVersion(semver.VersionInfo): return None all_versions.sort() - return all_versions[-1] + latest_version: OpenPypeVersion + latest_version = all_versions[-1] + if compatible_with and not latest_version.is_compatible( + compatible_with): + return None + return latest_version @classmethod def get_expected_studio_version(cls, staging=False, global_settings=None): @@ -621,6 +656,21 @@ class OpenPypeVersion(semver.VersionInfo): return None return OpenPypeVersion(version=result) + def is_compatible(self, version: OpenPypeVersion): + """Test build compatibility. + + This will simply compare major and minor versions (ignoring patch + and the rest). + + Args: + version (OpenPypeVersion): Version to check compatibility with. + + Returns: + bool: if the version is compatible + + """ + return self.major == version.major and self.minor == version.minor + class BootstrapRepos: """Class for bootstrapping local OpenPype installation. @@ -741,8 +791,9 @@ class BootstrapRepos: return # create destination directory - if not self.data_dir.exists(): - self.data_dir.mkdir(parents=True) + destination = self.data_dir / f"{installed_version.major}.{installed_version.minor}" # noqa + if not destination.exists(): + destination.mkdir(parents=True) # create zip inside temporary directory. with tempfile.TemporaryDirectory() as temp_dir: @@ -770,7 +821,9 @@ class BootstrapRepos: Path to moved zip on success. 
""" - destination = self.data_dir / zip_file.name + version = OpenPypeVersion.version_in_str(zip_file.name) + destination_dir = self.data_dir / f"{version.major}.{version.minor}" + destination = destination_dir / zip_file.name if destination.exists(): self._print( @@ -782,7 +835,7 @@ class BootstrapRepos: self._print(str(e), LOG_ERROR, exc_info=True) return None try: - shutil.move(zip_file.as_posix(), self.data_dir.as_posix()) + shutil.move(zip_file.as_posix(), destination_dir.as_posix()) except shutil.Error as e: self._print(str(e), LOG_ERROR, exc_info=True) return None @@ -995,6 +1048,16 @@ class BootstrapRepos: @staticmethod def _validate_dir(path: Path) -> tuple: + """Validate checksums in a given path. + + Args: + path (Path): path to folder to validate. + + Returns: + tuple(bool, str): returns status and reason as a bool + and str in a tuple. + + """ checksums_file = Path(path / "checksums") if not checksums_file.exists(): # FIXME: This should be set to False sometimes in the future @@ -1076,7 +1139,20 @@ class BootstrapRepos: sys.path.insert(0, directory.as_posix()) @staticmethod - def find_openpype_version(version, staging): + def find_openpype_version( + version: Union[str, OpenPypeVersion], + staging: bool, + compatible_with: OpenPypeVersion = None + ) -> Union[OpenPypeVersion, None]: + """Find location of specified OpenPype version. + + Args: + version (Union[str, OpenPypeVersion): Version to find. + staging (bool): Filter staging versions. + compatible_with (OpenPypeVersion, optional): Find only + versions compatible with specified one. + + """ if isinstance(version, str): version = OpenPypeVersion(version=version) @@ -1085,7 +1161,8 @@ class BootstrapRepos: return installed_version local_versions = OpenPypeVersion.get_local_versions( - staging=staging, production=not staging + staging=staging, production=not staging, + compatible_with=compatible_with ) zip_version = None for local_version in local_versions: @@ -1099,7 +1176,8 @@ class BootstrapRepos: return zip_version remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging, production=not staging + staging=staging, production=not staging, + compatible_with=compatible_with ) for remote_version in remote_versions: if remote_version == version: @@ -1107,13 +1185,14 @@ class BootstrapRepos: return None @staticmethod - def find_latest_openpype_version(staging): + def find_latest_openpype_version( + staging, compatible_with: OpenPypeVersion = None): installed_version = OpenPypeVersion.get_installed_version() local_versions = OpenPypeVersion.get_local_versions( - staging=staging + staging=staging, compatible_with=compatible_with ) remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging + staging=staging, compatible_with=compatible_with ) all_versions = local_versions + remote_versions if not staging: @@ -1138,7 +1217,9 @@ class BootstrapRepos: self, openpype_path: Union[Path, str] = None, staging: bool = False, - include_zips: bool = False) -> Union[List[OpenPypeVersion], None]: + include_zips: bool = False, + compatible_with: OpenPypeVersion = None + ) -> Union[List[OpenPypeVersion], None]: """Get ordered dict of detected OpenPype version. Resolution order for OpenPype is following: @@ -1154,6 +1235,8 @@ class BootstrapRepos: otherwise. include_zips (bool, optional): If set True it will try to find OpenPype in zip files in given directory. + compatible_with (OpenPypeVersion, optional): Find only those + versions compatible with the one specified. 
Returns: dict of Path: Dictionary of detected OpenPype version. @@ -1172,30 +1255,56 @@ class BootstrapRepos: ("Finding OpenPype in non-filesystem locations is" " not implemented yet.")) - dir_to_search = self.data_dir - user_versions = self.get_openpype_versions(self.data_dir, staging) - # if we have openpype_path specified, search only there. + version_dir = "" + if compatible_with: + version_dir = f"{compatible_with.major}.{compatible_with.minor}" + + # if checks bellow for OPENPYPE_PATH and registry fails, use data_dir + # DEPRECATED: lookup in root of this folder is deprecated in favour + # of major.minor sub-folders. + dirs_to_search = [ + self.data_dir + ] + if compatible_with: + dirs_to_search.append(self.data_dir / version_dir) + if openpype_path: - dir_to_search = openpype_path + dirs_to_search = [openpype_path] + + if compatible_with: + dirs_to_search.append(openpype_path / version_dir) else: - if os.getenv("OPENPYPE_PATH"): - if Path(os.getenv("OPENPYPE_PATH")).exists(): - dir_to_search = Path(os.getenv("OPENPYPE_PATH")) + # first try OPENPYPE_PATH and if that is not available, + # try registry. + if os.getenv("OPENPYPE_PATH") \ + and Path(os.getenv("OPENPYPE_PATH")).exists(): + dirs_to_search = [Path(os.getenv("OPENPYPE_PATH"))] + + if compatible_with: + dirs_to_search.append( + Path(os.getenv("OPENPYPE_PATH")) / version_dir) else: try: registry_dir = Path( str(self.registry.get_item("openPypePath"))) if registry_dir.exists(): - dir_to_search = registry_dir + dirs_to_search = [registry_dir] + if compatible_with: + dirs_to_search.append(registry_dir / version_dir) except ValueError: # nothing found in registry, we'll use data dir pass - openpype_versions = self.get_openpype_versions(dir_to_search, staging) - openpype_versions += user_versions + openpype_versions = [] + for dir_to_search in dirs_to_search: + try: + openpype_versions += self.get_openpype_versions( + dir_to_search, staging, compatible_with=compatible_with) + except ValueError: + # location is invalid, skip it + pass - # remove zip file version if needed. if not include_zips: openpype_versions = [ v for v in openpype_versions if v.path.suffix != ".zip" @@ -1308,9 +1417,8 @@ class BootstrapRepos: raise ValueError( f"version {version} is not associated with any file") - destination = self.data_dir / version.path.stem - if destination.exists(): - assert destination.is_dir() + destination = self.data_dir / f"{version.major}.{version.minor}" / version.path.stem # noqa + if destination.exists() and destination.is_dir(): try: shutil.rmtree(destination) except OSError as e: @@ -1379,7 +1487,7 @@ class BootstrapRepos: else: dir_name = openpype_version.path.stem - destination = self.data_dir / dir_name + destination = self.data_dir / f"{openpype_version.major}.{openpype_version.minor}" / dir_name # noqa # test if destination directory already exist, if so lets delete it. if destination.exists() and force: @@ -1557,14 +1665,18 @@ class BootstrapRepos: return False return True - def get_openpype_versions(self, - openpype_dir: Path, - staging: bool = False) -> list: + def get_openpype_versions( + self, + openpype_dir: Path, + staging: bool = False, + compatible_with: OpenPypeVersion = None) -> list: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. staging (bool, optional): Find staging versions if True. + compatible_with (OpenPypeVersion, optional): Get only versions + compatible with the one specified. 
 
        Returns:
            list of OpenPypeVersion
 
@@ -1574,7 +1686,7 @@
 
        """
        if not openpype_dir.exists() and not openpype_dir.is_dir():
-            raise ValueError("specified directory is invalid")
+            raise ValueError(f"specified directory {openpype_dir} is invalid")
 
        _openpype_versions = []
        # iterate over directory in first level and find all that might
@@ -1599,6 +1711,10 @@
            ):
                continue
 
+            if compatible_with and \
+                    not detected_version.is_compatible(compatible_with):
+                continue
+
            detected_version.path = item
            if staging and detected_version.is_staging():
                _openpype_versions.append(detected_version)
diff --git a/igniter/tools.py b/igniter/tools.py
index 57159b5e52..a9d592acf0 100644
--- a/igniter/tools.py
+++ b/igniter/tools.py
@@ -21,6 +21,11 @@ class OpenPypeVersionNotFound(Exception):
    pass
 
 
+class OpenPypeVersionIncompatible(Exception):
+    """OpenPype version is not compatible with the installed one (build)."""
+    pass
+
+
 def should_add_certificate_path_to_mongo_url(mongo_url):
    """Check if should add ca certificate to mongo url.
 
diff --git a/openpype/cli.py b/openpype/cli.py
index 9a2dfaa141..ffe288040e 100644
--- a/openpype/cli.py
+++ b/openpype/cli.py
@@ -443,3 +443,26 @@ def interactive():
        __version__, sys.version, sys.platform
    )
    code.interact(banner)
+
+
+@main.command()
+@click.option("--build", help="Print only build version",
+              is_flag=True, default=False)
+def version(build):
+    """Print OpenPype version."""
+
+    from openpype.version import __version__
+    from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion
+    from pathlib import Path
+    import os
+
+    if getattr(sys, 'frozen', False):
+        local_version = BootstrapRepos.get_version(
+            Path(os.getenv("OPENPYPE_ROOT")))
+    else:
+        local_version = OpenPypeVersion.get_installed_version_str()
+
+    if build:
+        print(local_version)
+        return
+    print(f"{__version__} (booted: {local_version})")
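
# --- Illustrative sketch (not part of the patch): how the new
# ``compatible_with`` filtering added in igniter/bootstrap_repos.py above
# is meant to be driven by a running build. The version number here is
# hypothetical.
from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion

build_version = OpenPypeVersion(version="3.13.0")
# Only versions sharing the 3.13 major.minor pair are considered, so a
# newer 3.14.x release would be skipped rather than picked up.
latest = BootstrapRepos.find_latest_openpype_version(
    False, compatible_with=build_version
)
if latest is None:
    print("No compatible OpenPype version found")
# --- end of sketch ---
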
diff --git a/openpype/client/entities.py b/openpype/client/entities.py
index dd5d831ecf..7362f57d7f 100644
--- a/openpype/client/entities.py
+++ b/openpype/client/entities.py
@@ -6,38 +6,12 @@ that has project name as a context (e.g. on 'ProjectEntity'?).
 + We will need more specific functions doing wery specific queires really fast.
 """
-import os
 import collections
 
 import six
 from bson.objectid import ObjectId
 
-from .mongo import OpenPypeMongoConnection
-
-
-def _get_project_database():
-    db_name = os.environ.get("AVALON_DB") or "avalon"
-    return OpenPypeMongoConnection.get_mongo_client()[db_name]
-
-
-def get_project_connection(project_name):
-    """Direct access to mongo collection.
-
-    We're trying to avoid using direct access to mongo. This should be used
-    only for Create, Update and Remove operations until there are implemented
-    api calls for that.
-
-    Args:
-        project_name(str): Project name for which collection should be
-            returned.
-
-    Returns:
-        pymongo.Collection: Collection realated to passed project.
-    """
-
-    if not project_name:
-        raise ValueError("Invalid project name {}".format(str(project_name)))
-    return _get_project_database()[project_name]
+from .mongo import get_project_database, get_project_connection
 
 
 def _prepare_fields(fields, required_fields=None):
@@ -72,7 +46,7 @@
 
 
 def get_projects(active=True, inactive=False, fields=None):
-    mongodb = _get_project_database()
+    mongodb = get_project_database()
    for project_name in mongodb.collection_names():
        if project_name in ("system.indexes",):
            continue
@@ -819,7 +793,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
    # Does make sense to look for hero versions?
    query_filter = {
        "type": "version",
-        "data.inputLinks.input": version_id
+        "data.inputLinks.id": version_id
    }
 
    return conn.find(query_filter, _prepare_fields(fields))
diff --git a/openpype/client/mongo.py b/openpype/client/mongo.py
index a747250107..72acbc5476 100644
--- a/openpype/client/mongo.py
+++ b/openpype/client/mongo.py
@@ -208,3 +208,28 @@ class OpenPypeMongoConnection:
            mongo_url, time.time() - t1
        ))
        return mongo_client
+
+
+def get_project_database():
+    db_name = os.environ.get("AVALON_DB") or "avalon"
+    return OpenPypeMongoConnection.get_mongo_client()[db_name]
+
+
+def get_project_connection(project_name):
+    """Direct access to mongo collection.
+
+    We're trying to avoid using direct access to mongo. This should be used
+    only for Create, Update and Remove operations until api calls for those
+    are implemented.
+
+    Args:
+        project_name(str): Project name for which collection should be
+            returned.
+
+    Returns:
+        pymongo.Collection: Collection related to passed project.
+    """
+
+    if not project_name:
+        raise ValueError("Invalid project name {}".format(str(project_name)))
+    return get_project_database()[project_name]
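
# --- Illustrative sketch (not part of the patch): the relocated helpers
# above keep direct collection access in one place. The project name is
# hypothetical; ``conn`` is a plain pymongo Collection, so standard
# pymongo queries work on it.
from openpype.client.mongo import get_project_connection

conn = get_project_connection("demo_project")
asset_count = conn.count_documents({"type": "asset"})
# --- end of sketch ---
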
diff --git a/openpype/client/operations.py b/openpype/client/operations.py
new file mode 100644
index 0000000000..c4b95bf696
--- /dev/null
+++ b/openpype/client/operations.py
@@ -0,0 +1,634 @@
+import uuid
+import copy
+import collections
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+import six
+from bson.objectid import ObjectId
+from pymongo import DeleteOne, InsertOne, UpdateOne
+
+from .mongo import get_project_connection
+
+REMOVED_VALUE = object()
+
+CURRENT_PROJECT_SCHEMA = "openpype:project-3.0"
+CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0"
+CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0"
+CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0"
+CURRENT_VERSION_SCHEMA = "openpype:version-3.0"
+CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0"
+CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0"
+
+
+def _create_or_convert_to_mongo_id(mongo_id):
+    if mongo_id is None:
+        return ObjectId()
+    return ObjectId(mongo_id)
+
+
+def new_project_document(
+    project_name, project_code, config, data=None, entity_id=None
+):
+    """Create skeleton data of project document.
+
+    Args:
+        project_name (str): Name of project. Used as identifier of a project.
+        project_code (str): Shorter version of the project name, without
+            spaces and special characters (in most cases). Should also be
+            considered a unique name across projects.
+        config (Dict[str, Any]): Project config consists of roots, templates,
+            applications and other project Anatomy related data.
+        data (Dict[str, Any]): Project data with information about its
+            attributes (e.g. 'fps' etc.) or integration specific keys.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
+            created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of project document.
+    """
+
+    if data is None:
+        data = {}
+
+    data["code"] = project_code
+
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "name": project_name,
+        "type": CURRENT_PROJECT_SCHEMA,
+        "entity_data": data,
+        "config": config
+    }
+
+
+def new_asset_document(
+    name, project_id, parent_id, parents, data=None, entity_id=None
+):
+    """Create skeleton data of asset document.
+
+    Args:
+        name (str): Is considered as unique identifier of asset in project.
+        project_id (Union[str, ObjectId]): Id of project document.
+        parent_id (Union[str, ObjectId]): Id of parent asset.
+        parents (List[str]): List of parent assets names.
+        data (Dict[str, Any]): Asset document data. Empty dictionary is used
+            if not passed. Value of 'parent_id' is used to fill 'visualParent'.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
+            created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of asset document.
+    """
+
+    if data is None:
+        data = {}
+    if parent_id is not None:
+        parent_id = ObjectId(parent_id)
+    data["visualParent"] = parent_id
+    data["parents"] = parents
+
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "type": "asset",
+        "name": name,
+        "parent": ObjectId(project_id),
+        "data": data,
+        "schema": CURRENT_ASSET_DOC_SCHEMA
+    }
+
+
+def new_subset_document(name, family, asset_id, data=None, entity_id=None):
+    """Create skeleton data of subset document.
+
+    Args:
+        name (str): Is considered as unique identifier of subset under asset.
+        family (str): Subset's family.
+        asset_id (Union[str, ObjectId]): Id of parent asset.
+        data (Dict[str, Any]): Subset document data. Empty dictionary is used
+            if not passed. The 'family' value is stored into data under the
+            'family' key.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
+            created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of subset document.
+    """
+
+    if data is None:
+        data = {}
+    data["family"] = family
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "schema": CURRENT_SUBSET_SCHEMA,
+        "type": "subset",
+        "name": name,
+        "data": data,
+        "parent": asset_id
+    }
+
+
+def new_version_doc(version, subset_id, data=None, entity_id=None):
+    """Create skeleton data of version document.
+
+    Args:
+        version (int): Is considered as unique identifier of version
+            under subset.
+        subset_id (Union[str, ObjectId]): Id of parent subset.
+        data (Dict[str, Any]): Version document data.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
+            created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of version document.
+    """
+
+    if data is None:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "schema": CURRENT_VERSION_SCHEMA,
+        "type": "version",
+        "name": int(version),
+        "parent": subset_id,
+        "data": data
+    }
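
# --- Illustrative sketch (not part of the patch): chaining the factory
# helpers above. The ids, names and data here are hypothetical; the
# resulting skeletons would normally be persisted through the operations
# defined further below.
from bson.objectid import ObjectId
from openpype.client.operations import new_subset_document, new_version_doc

asset_id = ObjectId()  # hypothetical id of an existing asset
subset_doc = new_subset_document("modelMain", "model", asset_id)
version_doc = new_version_doc(
    1, subset_doc["_id"], data={"comment": "first publish"}
)
# --- end of sketch ---
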
+
+
+def new_representation_doc(
+    name, version_id, context, data=None, entity_id=None
+):
+    """Create skeleton data of representation document.
+
+    Args:
+        name (str): Is considered as unique identifier of representation
+            under version.
+        version_id (Union[str, ObjectId]): Id of parent version.
+        context (Dict[str, Any]): Representation context used to fill
+            templates or to query.
+        data (Dict[str, Any]): Representation document data.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
+            created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of representation document.
+    """
+
+    if data is None:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "schema": CURRENT_REPRESENTATION_SCHEMA,
+        "type": "representation",
+        "parent": version_id,
+        "name": name,
+        "data": data,
+
+        # Imprint shortcut to context for performance reasons.
+        "context": context
+    }
+
+
+def new_workfile_info_doc(
+    filename, asset_id, task_name, files, data=None, entity_id=None
+):
+    """Create skeleton data of workfile info document.
+
+    Workfile document is at this moment used primarily for artist notes.
+
+    Args:
+        filename (str): Filename of workfile.
+        asset_id (Union[str, ObjectId]): Id of asset under which the workfile
+            lives.
+        task_name (str): Task under which the workfile was created.
+        files (List[str]): List of rootless filepaths related to workfile.
+        data (Dict[str, Any]): Additional metadata.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
+            created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of workfile info document.
+    """
+
+    if not data:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "type": "workfile",
+        "parent": ObjectId(asset_id),
+        "task_name": task_name,
+        "filename": filename,
+        "data": data,
+        "files": files
+    }
+
+
+def _prepare_update_data(old_doc, new_doc, replace):
+    changes = {}
+    for key, value in new_doc.items():
+        if key not in old_doc or value != old_doc[key]:
+            changes[key] = value
+
+    if replace:
+        for key in old_doc.keys():
+            if key not in new_doc:
+                changes[key] = REMOVED_VALUE
+    return changes
+
+
+def prepare_subset_update_data(old_doc, new_doc, replace=True):
+    """Compare two subset documents and prepare update data.
+
+    Based on the compared values, this creates update data for
+    'UpdateOperation'.
+
+    Empty output means that documents are identical.
+
+    Returns:
+        Dict[str, Any]: Changes between old and new document.
+    """
+
+    return _prepare_update_data(old_doc, new_doc, replace)
+
+
+def prepare_version_update_data(old_doc, new_doc, replace=True):
+    """Compare two version documents and prepare update data.
+
+    Based on the compared values, this creates update data for
+    'UpdateOperation'.
+
+    Empty output means that documents are identical.
+
+    Returns:
+        Dict[str, Any]: Changes between old and new document.
+    """
+
+    return _prepare_update_data(old_doc, new_doc, replace)
+
+
+def prepare_representation_update_data(old_doc, new_doc, replace=True):
+    """Compare two representation documents and prepare update data.
+
+    Based on the compared values, this creates update data for
+    'UpdateOperation'.
+
+    Empty output means that documents are identical.
+
+    Returns:
+        Dict[str, Any]: Changes between old and new document.
+    """
+
+    return _prepare_update_data(old_doc, new_doc, replace)
+
+
+def prepare_workfile_info_update_data(old_doc, new_doc, replace=True):
+    """Compare two workfile info documents and prepare update data.
+
+    Based on the compared values, this creates update data for
+    'UpdateOperation'.
+
+    Empty output means that documents are identical.
+
+    Returns:
+        Dict[str, Any]: Changes between old and new document.
+    """
+
+    return _prepare_update_data(old_doc, new_doc, replace)
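
# --- Illustrative sketch (not part of the patch): computing a minimal
# change set with the prepare_* helpers above. The documents are
# hypothetical. Note that only the first level of keys is compared, so
# the whole "data" value is replaced when any nested key differs.
from openpype.client.operations import prepare_subset_update_data

old_doc = {"name": "modelMain", "data": {"frameStart": 1001}}
new_doc = {"name": "modelMain", "data": {"frameStart": 1005}}
changes = prepare_subset_update_data(old_doc, new_doc)
# changes == {"data": {"frameStart": 1005}}
# --- end of sketch ---
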
+
+
+@six.add_metaclass(ABCMeta)
+class AbstractOperation(object):
+    """Base operation class.
+
+    An operation represents a call into the database. The call can create,
+    change or remove data.
+
+    Args:
+        project_name (str): On which project operation will happen.
+        entity_type (str): Type of entity on which change happens.
+            e.g. 'asset', 'representation' etc.
+    """
+
+    def __init__(self, project_name, entity_type):
+        self._project_name = project_name
+        self._entity_type = entity_type
+        self._id = str(uuid.uuid4())
+
+    @property
+    def project_name(self):
+        return self._project_name
+
+    @property
+    def id(self):
+        """Identifier of operation."""
+
+        return self._id
+
+    @property
+    def entity_type(self):
+        return self._entity_type
+
+    @abstractproperty
+    def operation_name(self):
+        """Stringified type of operation."""
+
+        pass
+
+    @abstractmethod
+    def to_mongo_operation(self):
+        """Convert operation to Mongo batch operation."""
+
+        pass
+
+    def to_data(self):
+        """Convert operation to data that can be converted to json or others.
+
+        Warning:
+            Current state returns ObjectId objects which cannot be parsed by
+            json.
+
+        Returns:
+            Dict[str, Any]: Description of operation.
+        """
+
+        return {
+            "id": self._id,
+            "entity_type": self.entity_type,
+            "project_name": self.project_name,
+            "operation": self.operation_name
+        }
+
+
+class CreateOperation(AbstractOperation):
+    """Operation to create an entity.
+
+    Args:
+        project_name (str): On which project operation will happen.
+        entity_type (str): Type of entity on which change happens.
+            e.g. 'asset', 'representation' etc.
+        data (Dict[str, Any]): Data of entity that will be created.
+    """
+
+    operation_name = "create"
+
+    def __init__(self, project_name, entity_type, data):
+        super(CreateOperation, self).__init__(project_name, entity_type)
+
+        if not data:
+            data = {}
+        else:
+            data = copy.deepcopy(dict(data))
+
+        if "_id" not in data:
+            data["_id"] = ObjectId()
+        else:
+            data["_id"] = ObjectId(data["_id"])
+
+        self._entity_id = data["_id"]
+        self._data = data
+
+    def __setitem__(self, key, value):
+        self.set_value(key, value)
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    def set_value(self, key, value):
+        self.data[key] = value
+
+    def get(self, key, *args, **kwargs):
+        return self.data.get(key, *args, **kwargs)
+
+    @property
+    def entity_id(self):
+        return self._entity_id
+
+    @property
+    def data(self):
+        return self._data
+
+    def to_mongo_operation(self):
+        return InsertOne(copy.deepcopy(self._data))
+
+    def to_data(self):
+        output = super(CreateOperation, self).to_data()
+        output["data"] = copy.deepcopy(self.data)
+        return output
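
# --- Illustrative sketch (not part of the patch): a CreateOperation
# wraps a document and turns it into a pymongo InsertOne. The project
# name and payload are hypothetical.
from openpype.client.operations import CreateOperation

op = CreateOperation(
    "demo_project", "asset", {"name": "sh010", "type": "asset"}
)
op["data"] = {"fps": 25}            # item access mutates the payload
mongo_op = op.to_mongo_operation()  # a pymongo.InsertOne
# --- end of sketch ---
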
+
+
+class UpdateOperation(AbstractOperation):
+    """Operation to update an entity.
+
+    Args:
+        project_name (str): On which project operation will happen.
+        entity_type (str): Type of entity on which change happens.
+            e.g. 'asset', 'representation' etc.
+        entity_id (Union[str, ObjectId]): Identifier of an entity.
+        update_data (Dict[str, Any]): Key -> value changes that will be set in
+            database. If value is set to 'REMOVED_VALUE' the key will be
+            removed. Only first level of dictionary is checked (on purpose).
+    """
+
+    operation_name = "update"
+
+    def __init__(self, project_name, entity_type, entity_id, update_data):
+        super(UpdateOperation, self).__init__(project_name, entity_type)
+
+        self._entity_id = ObjectId(entity_id)
+        self._update_data = update_data
+
+    @property
+    def entity_id(self):
+        return self._entity_id
+
+    @property
+    def update_data(self):
+        return self._update_data
+
+    def to_mongo_operation(self):
+        unset_data = {}
+        set_data = {}
+        for key, value in self._update_data.items():
+            if value is REMOVED_VALUE:
+                unset_data[key] = value
+            else:
+                set_data[key] = value
+
+        op_data = {}
+        if unset_data:
+            op_data["$unset"] = unset_data
+        if set_data:
+            op_data["$set"] = set_data
+
+        if not op_data:
+            return None
+
+        return UpdateOne(
+            {"_id": self.entity_id},
+            op_data
+        )
+
+    def to_data(self):
+        changes = {}
+        for key, value in self._update_data.items():
+            if value is REMOVED_VALUE:
+                value = None
+            changes[key] = value
+
+        output = super(UpdateOperation, self).to_data()
+        output.update({
+            "entity_id": self.entity_id,
+            "changes": changes
+        })
+        return output
+
+
+class DeleteOperation(AbstractOperation):
+    """Operation to delete an entity.
+
+    Args:
+        project_name (str): On which project operation will happen.
+        entity_type (str): Type of entity on which change happens.
+            e.g. 'asset', 'representation' etc.
+        entity_id (Union[str, ObjectId]): Entity id that will be removed.
+    """
+
+    operation_name = "delete"
+
+    def __init__(self, project_name, entity_type, entity_id):
+        super(DeleteOperation, self).__init__(project_name, entity_type)
+
+        self._entity_id = ObjectId(entity_id)
+
+    @property
+    def entity_id(self):
+        return self._entity_id
+
+    def to_mongo_operation(self):
+        return DeleteOne({"_id": self.entity_id})
+
+    def to_data(self):
+        output = super(DeleteOperation, self).to_data()
+        output["entity_id"] = self.entity_id
+        return output
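
# --- Illustrative sketch (not part of the patch): queuing operations on
# the OperationsSession defined just below. REMOVED_VALUE unsets a key;
# the project name, entity id and values are hypothetical.
from openpype.client.operations import (
    OperationsSession, UpdateOperation, REMOVED_VALUE
)

session = OperationsSession()
session.add(UpdateOperation(
    "demo_project", "version",
    "633b0f9f3f9f3f9f3f9f3f9f",
    {"data.comment": "approved", "data.tmp": REMOVED_VALUE}
))
session.commit()  # one bulk_write per project
# --- end of sketch ---
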
+
+
+class OperationsSession(object):
+    """Session storing operations that should happen in an order.
+
+    At this moment the session does not handle anything special and can be
+    considered a simple list of operations that will happen one after
+    another. If the same entity is created multiple times, that is not
+    handled in any special way, and document values are not validated.
+
+    Each operation carries its own project name; on commit the operations
+    are grouped and written per project.
+    """
+
+    def __init__(self):
+        self._operations = []
+
+    def add(self, operation):
+        """Add operation to be processed.
+
+        Args:
+            operation (AbstractOperation): Operation that should be processed.
+        """
+        if not isinstance(
+            operation,
+            (CreateOperation, UpdateOperation, DeleteOperation)
+        ):
+            raise TypeError("Expected Operation object, got {}".format(
+                str(type(operation))
+            ))
+
+        self._operations.append(operation)
+
+    def append(self, operation):
+        """Add operation to be processed.
+
+        Args:
+            operation (AbstractOperation): Operation that should be processed.
+        """
+
+        self.add(operation)
+
+    def extend(self, operations):
+        """Add operations to be processed.
+
+        Args:
+            operations (List[AbstractOperation]): Operations that should be
+                processed.
+        """
+
+        for operation in operations:
+            self.add(operation)
+
+    def remove(self, operation):
+        """Remove operation."""
+
+        self._operations.remove(operation)
+
+    def clear(self):
+        """Clear all registered operations."""
+
+        self._operations = []
+
+    def to_data(self):
+        return [
+            operation.to_data()
+            for operation in self._operations
+        ]
+
+    def commit(self):
+        """Commit session operations."""
+
+        operations, self._operations = self._operations, []
+        if not operations:
+            return
+
+        operations_by_project = collections.defaultdict(list)
+        for operation in operations:
+            operations_by_project[operation.project_name].append(operation)
+
+        for project_name, operations in operations_by_project.items():
+            bulk_writes = []
+            for operation in operations:
+                mongo_op = operation.to_mongo_operation()
+                if mongo_op is not None:
+                    bulk_writes.append(mongo_op)
+
+            if bulk_writes:
+                collection = get_project_connection(project_name)
+                collection.bulk_write(bulk_writes)
+
+    def create_entity(self, project_name, entity_type, data):
+        """Fast access to 'CreateOperation'.
+
+        Returns:
+            CreateOperation: Object of create operation.
+        """
+
+        operation = CreateOperation(project_name, entity_type, data)
+        self.add(operation)
+        return operation
+
+    def update_entity(self, project_name, entity_type, entity_id, update_data):
+        """Fast access to 'UpdateOperation'.
+
+        Returns:
+            UpdateOperation: Object of update operation.
+        """
+
+        operation = UpdateOperation(
+            project_name, entity_type, entity_id, update_data
+        )
+        self.add(operation)
+        return operation
+
+    def delete_entity(self, project_name, entity_type, entity_id):
+        """Fast access to 'DeleteOperation'.
+
+        Returns:
+            DeleteOperation: Object of delete operation.
+        """
+
+        operation = DeleteOperation(project_name, entity_type, entity_id)
+        self.add(operation)
+        return operation
diff --git a/openpype/hooks/pre_copy_template_workfile.py b/openpype/hooks/pre_copy_template_workfile.py
index dffac22ee2..70c549919f 100644
--- a/openpype/hooks/pre_copy_template_workfile.py
+++ b/openpype/hooks/pre_copy_template_workfile.py
@@ -1,11 +1,11 @@
 import os
 import shutil
-from openpype.lib import (
-    PreLaunchHook,
-    get_custom_workfile_template_by_context,
+from openpype.lib import PreLaunchHook
+from openpype.settings import get_project_settings
+from openpype.pipeline.workfile import (
+    get_custom_workfile_template,
     get_custom_workfile_template_by_string_context
 )
-from openpype.settings import get_project_settings
 
 
 class CopyTemplateWorkfile(PreLaunchHook):
@@ -54,41 +54,22 @@ class CopyTemplateWorkfile(PreLaunchHook):
        project_name = self.data["project_name"]
        asset_name = self.data["asset_name"]
        task_name = self.data["task_name"]
+        host_name = self.application.host_name
 
        project_settings = get_project_settings(project_name)
-        host_settings = project_settings[self.application.host_name]
-
-        workfile_builder_settings = host_settings.get("workfile_builder")
-        if not workfile_builder_settings:
-            # TODO remove warning when deprecated
-            self.log.warning((
-                "Seems like old version of settings is used."
-                " Can't access custom templates in host \"{}\"."
- ).format(self.application.full_label)) - return - - if not workfile_builder_settings["create_first_version"]: - self.log.info(( - "Project \"{}\" has turned off to create first workfile for" - " application \"{}\"" - ).format(project_name, self.application.full_label)) - return - - # Backwards compatibility - template_profiles = workfile_builder_settings.get("custom_templates") - if not template_profiles: - self.log.info( - "Custom templates are not filled. Skipping template copy." - ) - return project_doc = self.data.get("project_doc") asset_doc = self.data.get("asset_doc") anatomy = self.data.get("anatomy") if project_doc and asset_doc: self.log.debug("Started filtering of custom template paths.") - template_path = get_custom_workfile_template_by_context( - template_profiles, project_doc, asset_doc, task_name, anatomy + template_path = get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + project_settings ) else: @@ -96,10 +77,13 @@ class CopyTemplateWorkfile(PreLaunchHook): "Global data collection probably did not execute." " Using backup solution." )) - dbcon = self.data.get("dbcon") template_path = get_custom_workfile_template_by_string_context( - template_profiles, project_name, asset_name, task_name, - dbcon, anatomy + project_name, + asset_name, + task_name, + host_name, + anatomy, + project_settings ) if not template_path: diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py index 6577e37cbe..8a178915fb 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/openpype/hooks/pre_global_host_data.py @@ -1,3 +1,4 @@ +from openpype.client import get_project, get_asset_by_name from openpype.lib import ( PreLaunchHook, EnvironmentPrepData, @@ -69,7 +70,7 @@ class GlobalHostDataHook(PreLaunchHook): self.data["dbcon"] = dbcon # Project document - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) self.data["project_doc"] = project_doc asset_name = self.data.get("asset_name") @@ -79,8 +80,5 @@ class GlobalHostDataHook(PreLaunchHook): ) return - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) self.data["asset_doc"] = asset_doc diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index bb199a61f7..d444ead6dc 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -102,7 +102,6 @@ class CollectAERender(publish.AbstractCollectRender): attachTo=False, setMembers='', publish=True, - renderer='aerender', name=subset_name, resolutionWidth=render_q.width, resolutionHeight=render_q.height, @@ -113,7 +112,6 @@ class CollectAERender(publish.AbstractCollectRender): frameStart=frame_start, frameEnd=frame_end, frameStep=1, - toBeRenderedOn='deadline', fps=fps, app_version=app_version, publish_attributes=inst.data.get("publish_attributes", {}), @@ -138,6 +136,9 @@ class CollectAERender(publish.AbstractCollectRender): fam = "render.farm" if fam not in instance.families: instance.families.append(fam) + instance.toBeRenderedOn = "deadline" + instance.renderer = "aerender" + instance.farm = True # to skip integrate instances.append(instance) instances_to_remove.append(inst) diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index c1b5add518..4f8410da74 100644 --- a/openpype/hosts/blender/api/ops.py 
+++ b/openpype/hosts/blender/api/ops.py @@ -220,12 +220,9 @@ class LaunchQtApp(bpy.types.Operator): self._app.store_window(self.bl_idname, window) self._window = window - if not isinstance( - self._window, - (QtWidgets.QMainWindow, QtWidgets.QDialog, ModuleType) - ): + if not isinstance(self._window, (QtWidgets.QWidget, ModuleType)): raise AttributeError( - "`window` should be a `QDialog or module`. Got: {}".format( + "`window` should be a `QWidget or module`. Got: {}".format( str(type(window)) ) ) @@ -249,9 +246,9 @@ class LaunchQtApp(bpy.types.Operator): self._window.setWindowFlags(on_top_flags) self._window.show() - if on_top_flags != origin_flags: - self._window.setWindowFlags(origin_flags) - self._window.show() + # if on_top_flags != origin_flags: + # self._window.setWindowFlags(origin_flags) + # self._window.show() return {'FINISHED'} diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py index 75d9cf440d..8502c6fbd4 100644 --- a/openpype/hosts/blender/plugins/publish/extract_layout.py +++ b/openpype/hosts/blender/plugins/publish/extract_layout.py @@ -180,7 +180,7 @@ class ExtractLayout(openpype.api.Extractor): "rotation": { "x": asset.rotation_euler.x, "y": asset.rotation_euler.y, - "z": asset.rotation_euler.z, + "z": asset.rotation_euler.z }, "scale": { "x": asset.scale.x, @@ -189,6 +189,18 @@ class ExtractLayout(openpype.api.Extractor): } } + json_element["transform_matrix"] = [] + + for row in list(asset.matrix_world.transposed()): + json_element["transform_matrix"].append(list(row)) + + json_element["basis"] = [ + [1, 0, 0, 0], + [0, -1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1] + ] + # Extract the animation as well if family == "rig": f, n = self._export_animation( diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py index b59107f155..4d45f67ded 100644 --- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -3,9 +3,9 @@ import copy from collections import OrderedDict from pprint import pformat import pyblish -from openpype.lib import get_workdir import openpype.hosts.flame.api as opfapi import openpype.pipeline as op_pipeline +from openpype.pipeline.workfile import get_workdir class IntegrateBatchGroup(pyblish.api.InstancePlugin): @@ -324,7 +324,13 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin): project_doc = instance.data["projectEntity"] asset_entity = instance.data["assetEntity"] anatomy = instance.context.data["anatomy"] + project_settings = instance.context.data["project_settings"] return get_workdir( - project_doc, asset_entity, task_data["name"], "flame", anatomy + project_doc, + asset_entity, + task_data["name"], + "flame", + anatomy, + project_settings=project_settings ) diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index 52a157c56e..49ef340679 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -3,9 +3,7 @@ import re import sys import logging -# Pipeline imports from openpype.client import ( - get_project, get_asset_by_name, get_versions, ) @@ -17,13 +15,10 @@ from openpype.pipeline import ( from openpype.lib import version_up from openpype.hosts.fusion import api from openpype.hosts.fusion.api import lib -from openpype.lib.avalon_context import get_workdir_from_session +from 
openpype.pipeline.context_tools import get_workdir_from_session

log = logging.getLogger("Update Slap Comp")

-self = sys.modules[__name__]
-self._project = None
-

def _format_version_folder(folder):
    """Format a version folder based on the filepath
@@ -212,9 +207,6 @@ def switch(asset_name, filepath=None, new=True):
     asset = get_asset_by_name(project_name, asset_name)
     assert asset, "Could not find '%s' in the database" % asset_name
 
-    # Get current project
-    self._project = get_project(project_name)
-
     # Go to comp
     if not filepath:
         current_comp = api.get_current_comp()
diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py
index 01d55db647..93f775b24b 100644
--- a/openpype/hosts/fusion/utility_scripts/switch_ui.py
+++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py
@@ -14,7 +14,7 @@ from openpype.pipeline import (
     legacy_io,
 )
 from openpype.hosts.fusion import api
-from openpype.lib.avalon_context import get_workdir_from_session
+from openpype.pipeline.context_tools import get_workdir_from_session
 
 log = logging.getLogger("Fusion Switch Shot")
 
diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py
index 123b934428..1e883ea43f 100644
--- a/openpype/hosts/maya/api/lib_renderproducts.py
+++ b/openpype/hosts/maya/api/lib_renderproducts.py
@@ -82,6 +82,14 @@ IMAGE_PREFIXES = {
 
 RENDERMAN_IMAGE_DIR = "maya//"
 
+
+def has_tokens(string, tokens):
+    """Return whether any of tokens is in input string (case-insensitive)"""
+    pattern = "({})".format("|".join(re.escape(token) for token in tokens))
+    match = re.search(pattern, string, re.IGNORECASE)
+    return bool(match)
+
+
 @attr.s
 class LayerMetadata(object):
     """Data class for Render Layer metadata."""
@@ -99,6 +107,12 @@ class LayerMetadata(object):
     # Render Products
     products = attr.ib(init=False, default=attr.Factory(list))
 
+    # The AOV separator token. Note that not all renderers define an explicit
+    # render separator but allow to put the AOV/RenderPass token anywhere in
+    # the file path prefix. For those renderers we'll fall back to whatever
+    # is between the last occurrences of <RenderLayer> and <RenderPass> tokens.
+    aov_separator = attr.ib(default="_")
+
 
 @attr.s
 class RenderProduct(object):
@@ -183,7 +197,6 @@ class ARenderProducts:
         self.layer = layer
         self.render_instance = render_instance
         self.multipart = False
-        self.aov_separator = render_instance.data.get("aovSeparator", "_")
 
         # Initialize
         self.layer_data = self._get_layer_data()
@@ -296,6 +309,42 @@ class ARenderProducts:
 
         return lib.get_attr_in_layer(plug, layer=self.layer)
 
+    @staticmethod
+    def extract_separator(file_prefix):
+        """Extract AOV separator character from the prefix.
+
+        Default behavior extracts the part between
+        last occurrences of <RenderLayer> and <RenderPass>
+
+        Todo:
+            This code also triggers for V-Ray which overrides it explicitly
+            so this code will invalidly debug log it couldn't extract the
+            AOV separator even though it does set it in RenderProductsVray.
+
+        Args:
+            file_prefix (str): File prefix with tokens.
+
+        Returns:
+            str or None: prefix character if it can be extracted.
+        """
+        layer_tokens = ["<renderlayer>", "<layer>"]
+        aov_tokens = ["<aov>", "<renderpass>"]
+
+        def match_last(tokens, text):
+            """regex match the last occurrence from a list of tokens"""
+            pattern = "(?:.*)({})".format("|".join(tokens))
+            return re.search(pattern, text, re.IGNORECASE)
+
+        layer_match = match_last(layer_tokens, file_prefix)
+        aov_match = match_last(aov_tokens, file_prefix)
+        separator = None
+        if layer_match and aov_match:
+            matches = sorted((layer_match, aov_match),
+                             key=lambda match: match.end(1))
+            separator = file_prefix[matches[0].end(1):matches[1].start(1)]
+        return separator
+
 
     def _get_layer_data(self):
         # type: () -> LayerMetadata
         # ______________________________________________
@@ -304,7 +353,7 @@ class ARenderProducts:
         # ____________________/
         _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
         scene_name, _ = os.path.splitext(scene_basename)
-
+        kwargs = {}
         file_prefix = self.get_renderer_prefix()
 
         # If the Render Layer belongs to a Render Setup layer then the
@@ -319,6 +368,13 @@ class ARenderProducts:
             # defaultRenderLayer renders as masterLayer
             layer_name = "masterLayer"
 
+        separator = self.extract_separator(file_prefix)
+        if separator:
+            kwargs["aov_separator"] = separator
+        else:
+            log.debug("Couldn't extract aov separator from "
+                      "file prefix: {}".format(file_prefix))
+
         # todo: Support Custom Frames sequences 0,5-10,100-120
         #   Deadline allows submitting renders with a custom frame list
         #   to support those cases we might want to allow 'custom frames'
@@ -335,7 +391,8 @@ class ARenderProducts:
             layerName=layer_name,
             renderer=self.renderer,
             defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"),
-            filePrefix=file_prefix
+            filePrefix=file_prefix,
+            **kwargs
         )
 
     def _generate_file_sequence(
@@ -680,9 +737,17 @@ class RenderProductsVray(ARenderProducts):
 
         """
         prefix = super(RenderProductsVray, self).get_renderer_prefix()
-        prefix = "{}{}".format(prefix, self.aov_separator)
+        aov_separator = self._get_aov_separator()
+        prefix = "{}{}".format(prefix, aov_separator)
         return prefix
 
+    def _get_aov_separator(self):
+        # type: () -> str
+        """Return the V-Ray AOV/Render Elements separator"""
+        return self._get_attr(
+            "vraySettings.fileNameRenderElementSeparator"
+        )
+
     def _get_layer_data(self):
         # type: () -> LayerMetadata
         """Override to get vray specific extension."""
@@ -694,6 +759,8 @@ class RenderProductsVray(ARenderProducts):
         layer_data.defaultExt = default_ext
         layer_data.padding = self._get_attr("vraySettings.fileNamePadding")
 
+        layer_data.aov_separator = self._get_aov_separator()
+
         return layer_data
 
     def get_render_products(self):
@@ -913,8 +980,9 @@ class RenderProductsRedshift(ARenderProducts):
             :func:`ARenderProducts.get_renderer_prefix()`
 
         """
-        prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
-        prefix = "{}{}".format(prefix, self.aov_separator)
+        file_prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
+        separator = self.extract_separator(file_prefix)
+        prefix = "{}{}".format(file_prefix, separator or "_")
         return prefix
 
     def get_render_products(self):
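For clarity, the separator extraction added above can be exercised in isolation. The following is a standalone sketch of the same regex logic; the prefix string is a hypothetical example, not taken from this patch:

```python
import re

def match_last(tokens, text):
    # Greedy leading ".*" pushes the capture group to the last occurrence.
    pattern = "(?:.*)({})".format("|".join(tokens))
    return re.search(pattern, text, re.IGNORECASE)

prefix = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
layer_match = match_last(["<renderlayer>", "<layer>"], prefix)
aov_match = match_last(["<aov>", "<renderpass>"], prefix)
if layer_match and aov_match:
    first, second = sorted((layer_match, aov_match), key=lambda m: m.end(1))
    # Whatever sits between the two tokens is the separator, here "_".
    print(prefix[first.end(1):second.start(1)])
```

The `separator or "_"` fallback in the Redshift override covers prefixes where no separator can be recovered this way.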
diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py
new file mode 100644
index 0000000000..7cd2193086
--- /dev/null
+++ b/openpype/hosts/maya/api/lib_rendersettings.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+"""Class for handling Render Settings."""
+from maya import cmds  # noqa
+import maya.mel as mel
+import six
+import sys
+
+from openpype.api import (
+    get_project_settings,
+    get_current_project_settings
+)
+
+from openpype.pipeline import legacy_io
+from openpype.pipeline import CreatorError
+from openpype.pipeline.context_tools import get_current_project_asset
+from openpype.hosts.maya.api.commands import reset_frame_range
+
+
+class RenderSettings(object):
+
+    _image_prefix_nodes = {
+        'vray': 'vraySettings.fileNamePrefix',
+        'arnold': 'defaultRenderGlobals.imageFilePrefix',
+        'renderman': 'defaultRenderGlobals.imageFilePrefix',
+        'redshift': 'defaultRenderGlobals.imageFilePrefix'
+    }
+
+    _image_prefixes = {
+        'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"],  # noqa
+        'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"],  # noqa
+        'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
+        'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"]  # noqa
+    }
+
+    _aov_chars = {
+        "dot": ".",
+        "dash": "-",
+        "underscore": "_"
+    }
+
+    @classmethod
+    def get_image_prefix_attr(cls, renderer):
+        return cls._image_prefix_nodes[renderer]
+
+    def __init__(self, project_settings=None):
+        self._project_settings = project_settings
+        if not self._project_settings:
+            self._project_settings = get_project_settings(
+                legacy_io.Session["AVALON_PROJECT"]
+            )
+
+    def set_default_renderer_settings(self, renderer=None):
+        """Set basic settings based on renderer."""
+        if not renderer:
+            renderer = cmds.getAttr(
+                'defaultRenderGlobals.currentRenderer').lower()
+
+        asset_doc = get_current_project_asset()
+        # project_settings/maya/create/CreateRender/aov_separator
+        try:
+            aov_separator = self._aov_chars[(
+                self._project_settings["maya"]
+                ["RenderSettings"]
+                ["aov_separator"]
+            )]
+        except KeyError:
+            aov_separator = "_"
+        reset_frame = self._project_settings["maya"]["RenderSettings"]["reset_current_frame"]  # noqa
+
+        if reset_frame:
+            start_frame = cmds.getAttr("defaultRenderGlobals.startFrame")
+            cmds.currentTime(start_frame, edit=True)
+
+        if renderer in self._image_prefix_nodes:
+            prefix = self._image_prefixes[renderer]
+            prefix = prefix.replace("{aov_separator}", aov_separator)
+            cmds.setAttr(self._image_prefix_nodes[renderer],
+                         prefix, type="string")  # noqa
+        else:
+            print("{0} isn't a supported renderer to autoset settings.".format(renderer))  # noqa
+
+        # TODO: handle not having res values in the doc
+        width = asset_doc["data"].get("resolutionWidth")
+        height = asset_doc["data"].get("resolutionHeight")
+
+        if renderer == "arnold":
+            # set renderer settings for Arnold from project settings
+            self._set_arnold_settings(width, height)
+
+        if renderer == "vray":
+            self._set_vray_settings(aov_separator, width, height)
+
+        if renderer == "redshift":
+            self._set_redshift_settings(width, height)
+
+    def _set_arnold_settings(self, width, height):
+        """Sets settings for Arnold."""
+        from mtoa.core import createOptions  # noqa
+        from mtoa.aovs import AOVInterface  # noqa
+        createOptions()
+        arnold_render_presets = self._project_settings["maya"]["RenderSettings"]["arnold_renderer"]  # noqa
+        # Force resetting settings and AOV list to avoid having to deal with
+        # AOV checking logic, for now.
+        # This is a work around because the standard
+        # function to revert render settings does not reset AOVs list in MtoA
+        # Fetch current aovs in case there's any.
+        current_aovs = AOVInterface().getAOVs()
+        # Remove fetched AOVs
+        AOVInterface().removeAOVs(current_aovs)
+        mel.eval("unifiedRenderGlobalsRevertToDefault")
+        img_ext = arnold_render_presets["image_format"]
+        img_prefix = arnold_render_presets["image_prefix"]
+        aovs = arnold_render_presets["aov_list"]
+        img_tiled = arnold_render_presets["tiled"]
+        multi_exr = arnold_render_presets["multilayer_exr"]
+        additional_options = arnold_render_presets["additional_options"]
+        for aov in aovs:
+            AOVInterface('defaultArnoldRenderOptions').addAOV(aov)
+
+        cmds.setAttr("defaultResolution.width", width)
+        cmds.setAttr("defaultResolution.height", height)
+
+        self._set_global_output_settings()
+
+        cmds.setAttr(
+            "defaultRenderGlobals.imageFilePrefix", img_prefix, type="string")
+
+        cmds.setAttr(
+            "defaultArnoldDriver.ai_translator", img_ext, type="string")
+
+        cmds.setAttr(
+            "defaultArnoldDriver.exrTiled", img_tiled)
+
+        cmds.setAttr(
+            "defaultArnoldDriver.mergeAOVs", multi_exr)
+        # Passes additional options in from the schema as a list
+        # but converts it to a dictionary because ftrack doesn't
+        # allow fullstops in custom attributes. Then checks for
+        # type of MtoA attribute passed to adjust the `setAttr`
+        # command accordingly (handled by _additional_attribs_setter).
+        self._additional_attribs_setter(additional_options)
+        reset_frame_range()
+
+    def _set_redshift_settings(self, width, height):
+        """Sets settings for Redshift."""
+        redshift_render_presets = (
+            self._project_settings
+            ["maya"]
+            ["RenderSettings"]
+            ["redshift_renderer"]
+        )
+        additional_options = redshift_render_presets["additional_options"]
+        ext = redshift_render_presets["image_format"]
+        img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"]
+        img_ext = img_exts.index(ext)
+
+        self._set_global_output_settings()
+        cmds.setAttr("redshiftOptions.imageFormat", img_ext)
+        cmds.setAttr("defaultResolution.width", width)
+        cmds.setAttr("defaultResolution.height", height)
+        self._additional_attribs_setter(additional_options)
+
+    def _set_vray_settings(self, aov_separator, width, height):
+        # type: (str, int, int) -> None
+        """Sets important settings for Vray."""
+        settings = cmds.ls(type="VRaySettingsNode")
+        node = settings[0] if settings else cmds.createNode("VRaySettingsNode")
+        vray_render_presets = (
+            self._project_settings
+            ["maya"]
+            ["RenderSettings"]
+            ["vray_renderer"]
+        )
+        # Set aov separator
+        # First we need to explicitly set the UI items in Render Settings
+        # because that is also what V-Ray updates to when that Render Settings
+        # UI did initialize before and refreshes again.
+        MENU = "vrayRenderElementSeparator"
+        if cmds.optionMenuGrp(MENU, query=True, exists=True):
+            items = cmds.optionMenuGrp(MENU, query=True, ill=True)
+            separators = [cmds.menuItem(i, query=True, label=True) for i in items]  # noqa: E501
+            try:
+                sep_idx = separators.index(aov_separator)
+            except ValueError:
+                six.reraise(
+                    CreatorError,
+                    CreatorError(
+                        "AOV character {} not in {}".format(
                            aov_separator, separators)),
+                    sys.exc_info()[2])
+
+            cmds.optionMenuGrp(MENU, edit=True, select=sep_idx + 1)
+
+        # Set the render element attribute as string. 
This is also what V-Ray + # sets whenever the `vrayRenderElementSeparator` menu items switch + cmds.setAttr( + "{}.fileNameRenderElementSeparator".format(node), + aov_separator, + type="string" + ) + + # Set render file format to exr + cmds.setAttr("{}.imageFormatStr".format(node), "exr", type="string") + + # animType + cmds.setAttr("{}.animType".format(node), 1) + + # resolution + cmds.setAttr("{}.width".format(node), width) + cmds.setAttr("{}.height".format(node), height) + + additional_options = vray_render_presets["additional_options"] + + self._additional_attribs_setter(additional_options) + + @staticmethod + def _set_global_output_settings(): + # enable animation + cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) + cmds.setAttr("defaultRenderGlobals.animation", 1) + cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) + cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + + def _additional_attribs_setter(self, additional_attribs): + print(additional_attribs) + for item in additional_attribs: + attribute, value = item + if (cmds.getAttr(str(attribute), type=True)) == "long": + cmds.setAttr(str(attribute), int(value)) + elif (cmds.getAttr(str(attribute), type=True)) == "bool": + cmds.setAttr(str(attribute), int(value)) # noqa + elif (cmds.getAttr(str(attribute), type=True)) == "string": + cmds.setAttr(str(attribute), str(value), type = "string") # noqa diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 97f06c43af..b7ab529a55 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -6,11 +6,11 @@ from Qt import QtWidgets, QtGui import maya.utils import maya.cmds as cmds -from openpype.api import BuildWorkfile from openpype.settings import get_project_settings from openpype.pipeline import legacy_io +from openpype.pipeline.workfile import BuildWorkfile from openpype.tools.utils import host_tools -from openpype.hosts.maya.api import lib +from openpype.hosts.maya.api import lib, lib_rendersettings from .lib import get_main_window, IS_HEADLESS from .commands import reset_frame_range @@ -44,6 +44,7 @@ def install(): parent="MayaWindow" ) + renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() # Create context menu context_label = "{}, {}".format( legacy_io.Session["AVALON_ASSET"], @@ -98,6 +99,13 @@ def install(): cmds.menuItem(divider=True) + cmds.menuItem( + "Set Render Settings", + command=lambda *args: lib_rendersettings.RenderSettings().set_default_renderer_settings() # noqa + ) + + cmds.menuItem(divider=True) + cmds.menuItem( "Work Files...", command=lambda *args: host_tools.show_workfiles( diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 9280805945..e50ebfccad 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -208,7 +208,8 @@ class ReferenceLoader(Loader): file_type = { "ma": "mayaAscii", "mb": "mayaBinary", - "abc": "Alembic" + "abc": "Alembic", + "fbx": "FBX" }.get(representation["name"]) assert file_type, "Unsupported representation: %s" % representation @@ -234,7 +235,7 @@ class ReferenceLoader(Loader): path = self.prepare_root_value(path, representation["context"] ["project"] - ["code"]) + ["name"]) content = cmds.file(path, loadReference=reference_node, type=file_type, diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py index 5cd1f7090a..e47d4e5b5a 100644 --- a/openpype/hosts/maya/plugins/create/create_animation.py +++ 
b/openpype/hosts/maya/plugins/create/create_animation.py
@@ -11,6 +11,7 @@ class CreateAnimation(plugin.Creator):
     label = "Animation"
     family = "animation"
     icon = "male"
+    write_color_sets = False
 
     def __init__(self, *args, **kwargs):
         super(CreateAnimation, self).__init__(*args, **kwargs)
@@ -22,7 +23,7 @@ class CreateAnimation(plugin.Creator):
             self.data[key] = value
 
         # Write vertex colors with the geometry.
-        self.data["writeColorSets"] = False
+        self.data["writeColorSets"] = self.write_color_sets
         self.data["writeFaceSets"] = False
 
         # Include only renderable visible shapes.
diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py
index e876015adb..5516445de8 100644
--- a/openpype/hosts/maya/plugins/create/create_pointcache.py
+++ b/openpype/hosts/maya/plugins/create/create_pointcache.py
@@ -11,6 +11,7 @@ class CreatePointCache(plugin.Creator):
     label = "Point Cache"
     family = "pointcache"
     icon = "gears"
+    write_color_sets = False
 
     def __init__(self, *args, **kwargs):
         super(CreatePointCache, self).__init__(*args, **kwargs)
@@ -18,7 +19,8 @@ class CreatePointCache(plugin.Creator):
         # Add animation data
         self.data.update(lib.collect_animation_data())
 
-        self.data["writeColorSets"] = False  # Vertex colors with the geometry.
+        # Vertex colors with the geometry.
+        self.data["writeColorSets"] = self.write_color_sets
         self.data["writeFaceSets"] = False  # Vertex colors with the geometry.
         self.data["renderableOnly"] = False  # Only renderable visible shapes
         self.data["visibleOnly"] = False  # only nodes that are visible
diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py
index de07a0b23d..fbe670b1ea 100644
--- a/openpype/hosts/maya/plugins/create/create_render.py
+++ b/openpype/hosts/maya/plugins/create/create_render.py
@@ -1,15 +1,21 @@
 # -*- coding: utf-8 -*-
 """Create ``Render`` instance in Maya."""
-import os
 import json
+import os
+
 import appdirs
 import requests
 
 from maya import cmds
-import maya.app.renderSetup.model.renderSetup as renderSetup
+from maya.app.renderSetup.model import renderSetup
 
+from openpype.api import (
+    get_system_settings,
+    get_project_settings,
+)
 from openpype.hosts.maya.api import (
     lib,
+    lib_rendersettings,
     plugin
 )
 from openpype.lib import requests_get
@@ -17,6 +23,7 @@ from openpype.api import (
     get_system_settings,
     get_project_settings)
 from openpype.modules import ModulesManager
+from openpype.pipeline import legacy_io
 from openpype.pipeline import (
     CreatorError,
     legacy_io,
@@ -69,35 +76,6 @@ class CreateRender(plugin.Creator):
     _user = None
     _password = None
 
-    # renderSetup instance
-    _rs = None
-
-    _image_prefix_nodes = {
-        'mentalray': 'defaultRenderGlobals.imageFilePrefix',
-        'vray': 'vraySettings.fileNamePrefix',
-        'arnold': 'defaultRenderGlobals.imageFilePrefix',
-        'renderman': 'rmanGlobals.imageFileFormat',
-        'redshift': 'defaultRenderGlobals.imageFilePrefix',
-        'mayahardware2': 'defaultRenderGlobals.imageFilePrefix',
-    }
-
-    _image_prefixes = {
-        'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>',  # noqa
-        'vray': 'maya/<scene>/<Layer>/<Layer>',
-        'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>',  # noqa
-        # this needs `imageOutputDir`
-        # (<ws>/renders/maya/<scene>) set separately
-        'renderman': '<layer>_<aov>.<f4>.<ext>',
-        'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',  # noqa
-        'mayahardware2': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',  # noqa
-    }
-
-    _aov_chars = {
-        "dot": ".",
-        "dash": "-",
-        "underscore": "_"
-    }
-
     _project_settings = None
 
     def __init__(self, *args, **kwargs):
@@ -109,18 +87,8 @@ class CreateRender(plugin.Creator):
             return
self._project_settings = get_project_settings( legacy_io.Session["AVALON_PROJECT"]) - - # project_settings/maya/create/CreateRender/aov_separator - try: - self.aov_separator = self._aov_chars[( - self._project_settings["maya"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - self.aov_separator = "_" - + if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]: # noqa + lib_rendersettings.RenderSettings().set_default_renderer_settings() manager = ModulesManager() self.deadline_module = manager.modules_by_name["deadline"] try: @@ -177,13 +145,13 @@ class CreateRender(plugin.Creator): ]) cmds.setAttr("{}.machineList".format(self.instance), lock=True) - self._rs = renderSetup.instance() - layers = self._rs.getRenderLayers() + rs = renderSetup.instance() + layers = rs.getRenderLayers() if use_selection: - print(">>> processing existing layers") + self.log.info("Processing existing layers") sets = [] for layer in layers: - print(" - creating set for {}:{}".format( + self.log.info(" - creating set for {}:{}".format( namespace, layer.name())) render_set = cmds.sets( n="{}:{}".format(namespace, layer.name())) @@ -193,17 +161,10 @@ class CreateRender(plugin.Creator): # if no render layers are present, create default one with # asterisk selector if not layers: - render_layer = self._rs.createRenderLayer('Main') + render_layer = rs.createRenderLayer('Main') collection = render_layer.createCollection("defaultCollection") collection.getSelector().setPattern('*') - renderer = cmds.getAttr( - 'defaultRenderGlobals.currentRenderer').lower() - # handle various renderman names - if renderer.startswith('renderman'): - renderer = 'renderman' - - self._set_default_renderer_settings(renderer) return self.instance def _deadline_webservice_changed(self): @@ -237,7 +198,7 @@ class CreateRender(plugin.Creator): def _create_render_settings(self): """Create instance settings.""" - # get pools + # get pools (slave machines of the render farm) pool_names = [] default_priority = 50 @@ -281,7 +242,8 @@ class CreateRender(plugin.Creator): # if 'default' server is not between selected, # use first one for initial list of pools. deadline_url = next(iter(self.deadline_servers.values())) - + # Uses function to get pool machines from the assigned deadline + # url in settings pool_names = self.deadline_module.get_deadline_pools(deadline_url, self.log) maya_submit_dl = self._project_settings.get( @@ -400,102 +362,36 @@ class CreateRender(plugin.Creator): self.log.error("Cannot show login form to Muster") raise Exception("Cannot show login form to Muster") - def _set_default_renderer_settings(self, renderer): - """Set basic settings based on renderer. + def _requests_post(self, *args, **kwargs): + """Wrap request post method. - Args: - renderer (str): Renderer name. + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. 
""" - prefix = self._image_prefixes[renderer] - prefix = prefix.replace("{aov_separator}", self.aov_separator) - cmds.setAttr(self._image_prefix_nodes[renderer], - prefix, - type="string") + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.post(*args, **kwargs) - asset = get_current_project_asset() + def _requests_get(self, *args, **kwargs): + """Wrap request get method. - if renderer == "arnold": - # set format to exr + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. - cmds.setAttr( - "defaultArnoldDriver.ai_translator", "exr", type="string") - self._set_global_output_settings() - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. - if renderer == "vray": - self._set_vray_settings(asset) - if renderer == "redshift": - cmds.setAttr("redshiftOptions.imageFormat", 1) - - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) - - self._set_global_output_settings() - - if renderer == "renderman": - cmds.setAttr("rmanGlobals.imageOutputDir", - "maya//", type="string") - - def _set_vray_settings(self, asset): - # type: (dict) -> None - """Sets important settings for Vray.""" - settings = cmds.ls(type="VRaySettingsNode") - node = settings[0] if settings else cmds.createNode("VRaySettingsNode") - - # set separator - # set it in vray menu - if cmds.optionMenuGrp("vrayRenderElementSeparator", exists=True, - q=True): - items = cmds.optionMenuGrp( - "vrayRenderElementSeparator", ill=True, query=True) - - separators = [cmds.menuItem(i, label=True, query=True) for i in items] # noqa: E501 - try: - sep_idx = separators.index(self.aov_separator) - except ValueError: - raise CreatorError( - "AOV character {} not in {}".format( - self.aov_separator, separators)) - - cmds.optionMenuGrp( - "vrayRenderElementSeparator", sl=sep_idx + 1, edit=True) - cmds.setAttr( - "{}.fileNameRenderElementSeparator".format(node), - self.aov_separator, - type="string" - ) - # set format to exr - cmds.setAttr( - "{}.imageFormatStr".format(node), "exr", type="string") - - # animType - cmds.setAttr( - "{}.animType".format(node), 1) - - # resolution - cmds.setAttr( - "{}.width".format(node), - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "{}.height".format(node), - asset["data"].get("resolutionHeight")) - - @staticmethod - def _set_global_output_settings(): - # enable animation - cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) - cmds.setAttr("defaultRenderGlobals.animation", 1) - cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) - cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index ec583bcce7..157be5717b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ 
b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -551,7 +551,9 @@ class CollectLook(pyblish.api.InstancePlugin): if cmds.getAttr(attribute, type=True) == "message": continue node_attributes[attr] = cmds.getAttr(attribute) - + # Only include if there are any properties we care about + if not node_attributes: + continue attributes.append({"name": node, "uuid": lib.get_id(node), "attributes": node_attributes}) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 8b911a867d..c3e6c98020 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -72,7 +72,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): def process(self, context): """Entry point to collector.""" render_instance = None - deadline_url = None for instance in context: if "rendering" in instance.data["families"]: @@ -96,23 +95,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] - deadline_settings = ( - context.data - ["system_settings"] - ["modules"] - ["deadline"] - ) - - if deadline_settings["enabled"]: - deadline_url = render_instance.data.get("deadlineUrl") - self._rs = renderSetup.instance() - current_layer = self._rs.getVisibleRenderLayer() + # Retrieve render setup layers + rs = renderSetup.instance() maya_render_layers = { - layer.name(): layer for layer in self._rs.getRenderLayers() + layer.name(): layer for layer in rs.getRenderLayers() } - self.maya_layers = maya_render_layers - for layer in collected_render_layers: try: if layer.startswith("LAYER_"): @@ -147,49 +135,28 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.warning(msg) continue - # test if there are sets (subsets) to attach render to + # detect if there are sets (subsets) to attach render to sets = cmds.sets(layer, query=True) or [] attach_to = [] - if sets: - for s in sets: - if "family" not in cmds.listAttr(s): - continue + for s in sets: + if not cmds.attributeQuery("family", node=s, exists=True): + continue - attach_to.append( - { - "version": None, # we need integrator for that - "subset": s, - "family": cmds.getAttr("{}.family".format(s)), - } - ) - self.log.info(" -> attach render to: {}".format(s)) + attach_to.append( + { + "version": None, # we need integrator for that + "subset": s, + "family": cmds.getAttr("{}.family".format(s)), + } + ) + self.log.info(" -> attach render to: {}".format(s)) layer_name = "rs_{}".format(expected_layer_name) # collect all frames we are expecting to be rendered - renderer = cmds.getAttr( - "defaultRenderGlobals.currentRenderer" - ).lower() - # handle various renderman names - if renderer.startswith("renderman"): - renderer = "renderman" - - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - - render_instance.data["aovSeparator"] = aov_separator - # return all expected files for all cameras and aovs in given # frame range - layer_render_products = get_layer_render_products( - layer_name, render_instance) + layer_render_products = get_layer_render_products(layer_name) render_products = layer_render_products.layer_data.products assert render_products, "no render products generated" exp_files = [] @@ -226,13 +193,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin): ) # append full path - full_exp_files = [] aov_dict = {} 
default_render_file = context.data.get('project_settings')\ .get('maya')\ - .get('create')\ - .get('CreateRender')\ - .get('default_render_image_folder') + .get('RenderSettings')\ + .get('default_render_image_folder') or "" # replace relative paths with absolute. Render products are # returned as list of dictionaries. publish_meta_path = None @@ -246,6 +211,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): full_paths.append(full_path) publish_meta_path = os.path.dirname(full_path) aov_dict[aov_first_key] = full_paths + full_exp_files = [aov_dict] frame_start_render = int(self.get_render_attribute( "startFrame", layer=layer_name)) @@ -269,8 +235,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): frame_start_handle = frame_start_render frame_end_handle = frame_end_render - full_exp_files.append(aov_dict) - # find common path to store metadata # so if image prefix is branching to many directories # metadata file will be located in top-most common @@ -299,16 +263,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - data = { "subset": expected_layer_name, "attachTo": attach_to, @@ -357,11 +311,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "useReferencedAovs": render_instance.data.get( "useReferencedAovs") or render_instance.data.get( "vrayUseReferencedAovs") or False, - "aovSeparator": aov_separator + "aovSeparator": layer_render_products.layer_data.aov_separator # noqa: E501 } - if deadline_url: - data["deadlineUrl"] = deadline_url + # Collect Deadline url if Deadline module is enabled + deadline_settings = ( + context.data["system_settings"]["modules"]["deadline"] + ) + if deadline_settings["enabled"]: + data["deadlineUrl"] = render_instance.data.get("deadlineUrl") if self.sync_workfile_version: data["version"] = context.data["version"] @@ -370,19 +328,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): if instance.data['family'] == "workfile": instance.data["version"] = context.data["version"] - # Apply each user defined attribute as data - for attr in cmds.listAttr(layer, userDefined=True) or list(): - try: - value = cmds.getAttr("{}.{}".format(layer, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. - value = None - - data[attr] = value - # handle standalone renderers if render_instance.data.get("vrayScene") is True: data["families"].append("vrayscene_render") @@ -490,10 +435,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): return pool_a, pool_b - def _get_overrides(self, layer): - rset = self.maya_layers[layer].renderSettingsCollectionInstance() - return rset.getOverrides() - @staticmethod def get_render_attribute(attr, layer): """Get attribute from render options. 
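
The `CollectMayaRender` change above now builds the expected-files list as a single AOV dictionary (`full_exp_files = [aov_dict]`) instead of appending it after the fact. A sketch of the resulting shape, using purely hypothetical AOV names and render paths:

```python
# Hypothetical illustration of the structure the collector produces:
# one dict mapping each AOV name to its absolute frame paths,
# wrapped in a single-element list.
full_exp_files = [
    {
        "beauty": [
            "/renders/maya/shot010/beauty/shot010_beauty.1001.exr",
            "/renders/maya/shot010/beauty/shot010_beauty.1002.exr",
        ],
        "diffuse": [
            "/renders/maya/shot010/diffuse/shot010_diffuse.1001.exr",
        ],
    }
]
```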
diff --git a/openpype/hosts/maya/plugins/publish/extract_layout.py b/openpype/hosts/maya/plugins/publish/extract_layout.py new file mode 100644 index 0000000000..991217684a --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_layout.py @@ -0,0 +1,146 @@ +import math +import os +import json + +from maya import cmds +from maya.api import OpenMaya as om + +from bson.objectid import ObjectId + +from openpype.pipeline import legacy_io +import openpype.api + + +class ExtractLayout(openpype.api.Extractor): + """Extract a layout.""" + + label = "Extract Layout" + hosts = ["maya"] + families = ["layout"] + optional = True + + def process(self, instance): + # Define extract output file path + stagingdir = self.staging_dir(instance) + + # Perform extraction + self.log.info("Performing extraction..") + + if "representations" not in instance.data: + instance.data["representations"] = [] + + json_data = [] + + for asset in cmds.sets(str(instance), query=True): + # Find the container + grp_name = asset.split(':')[0] + containers = cmds.ls(f"{grp_name}*_CON") + + assert len(containers) == 1, \ + f"More than one container found for {asset}" + + container = containers[0] + + representation_id = cmds.getAttr(f"{container}.representation") + + representation = legacy_io.find_one( + { + "type": "representation", + "_id": ObjectId(representation_id) + }, projection={"parent": True, "context.family": True}) + + self.log.info(representation) + + version_id = representation.get("parent") + family = representation.get("context").get("family") + + json_element = { + "family": family, + "instance_name": cmds.getAttr(f"{container}.name"), + "representation": str(representation_id), + "version": str(version_id) + } + + loc = cmds.xform(asset, query=True, translation=True) + rot = cmds.xform(asset, query=True, rotation=True, euler=True) + scl = cmds.xform(asset, query=True, relative=True, scale=True) + + json_element["transform"] = { + "translation": { + "x": loc[0], + "y": loc[1], + "z": loc[2] + }, + "rotation": { + "x": math.radians(rot[0]), + "y": math.radians(rot[1]), + "z": math.radians(rot[2]) + }, + "scale": { + "x": scl[0], + "y": scl[1], + "z": scl[2] + } + } + + row_length = 4 + t_matrix_list = cmds.xform(asset, query=True, matrix=True) + + transform_mm = om.MMatrix(t_matrix_list) + transform = om.MTransformationMatrix(transform_mm) + + t = transform.translation(om.MSpace.kWorld) + t = om.MVector(t.x, t.z, -t.y) + transform.setTranslation(t, om.MSpace.kWorld) + transform.rotateBy( + om.MEulerRotation(math.radians(-90), 0, 0), om.MSpace.kWorld) + transform.scaleBy([1.0, 1.0, -1.0], om.MSpace.kObject) + + t_matrix_list = list(transform.asMatrix()) + + t_matrix = [] + for i in range(0, len(t_matrix_list), row_length): + t_matrix.append(t_matrix_list[i:i + row_length]) + + json_element["transform_matrix"] = [] + for row in t_matrix: + json_element["transform_matrix"].append(list(row)) + + basis_list = [ + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, -1, 0, + 0, 0, 0, 1 + ] + + basis_mm = om.MMatrix(basis_list) + basis = om.MTransformationMatrix(basis_mm) + + b_matrix_list = list(basis.asMatrix()) + b_matrix = [] + + for i in range(0, len(b_matrix_list), row_length): + b_matrix.append(b_matrix_list[i:i + row_length]) + + json_element["basis"] = [] + for row in b_matrix: + json_element["basis"].append(list(row)) + + json_data.append(json_element) + + json_filename = "{}.json".format(instance.name) + json_path = os.path.join(stagingdir, json_filename) + + with open(json_path, "w+") as file: + json.dump(json_data, 
fp=file, indent=2)
+
+        json_representation = {
+            'name': 'json',
+            'ext': 'json',
+            'files': json_filename,
+            "stagingDir": stagingdir,
+        }
+        instance.data["representations"].append(json_representation)
+
+        self.log.info("Extracted instance '%s' to: %s",
+                      instance.name, json_representation)
diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py
index d35b529c76..8be0c7aae5 100644
--- a/openpype/hosts/maya/plugins/publish/extract_look.py
+++ b/openpype/hosts/maya/plugins/publish/extract_look.py
@@ -27,6 +27,29 @@ def escape_space(path):
     return '"{}"'.format(path) if " " in path else path
 
 
+def get_ocio_config_path(profile_folder):
+    """Path to OpenPype vendorized OCIO.
+
+    Vendorized OCIO config file path is grabbed from the specific path
+    hierarchy specified below.
+
+    "{OPENPYPE_ROOT}/vendor/configs/OpenColorIO-Configs/{profile_folder}/config.ocio"
+
+    Args:
+        profile_folder (str): Name of folder to grab config file from.
+
+    Returns:
+        str: Path to vendorized config file.
+    """
+    return os.path.join(
+        os.environ["OPENPYPE_ROOT"],
+        "vendor",
+        "configs",
+        "OpenColorIO-Configs",
+        profile_folder,
+        "config.ocio"
+    )
+
+
 def find_paths_by_hash(texture_hash):
     """Find the texture hash key in the dictionary.
 
@@ -79,10 +102,11 @@ def maketx(source, destination, *args):
         # use oiio-optimized settings for tile-size, planarconfig, metadata
         "--oiio",
         "--filter lanczos3",
+        escape_space(source)
     ]
 
     cmd.extend(args)
-    cmd.extend(["-o", escape_space(destination), escape_space(source)])
+    cmd.extend(["-o", escape_space(destination)])
 
     cmd = " ".join(cmd)
 
@@ -405,7 +429,19 @@ class ExtractLook(openpype.api.Extractor):
                     # node doesn't have color space attribute
                     color_space = "Raw"
                 else:
-                    if files_metadata[source]["color_space"] == "Raw":
+                    # get the resolved files
+                    metadata = files_metadata.get(source)
+                    # if the files are unresolved from `source`
+                    # assume color space from the first file of
+                    # the resource
+                    if not metadata:
+                        first_file = next(iter(resource.get(
+                            "files", [])), None)
+                        if not first_file:
+                            continue
+                        first_filepath = os.path.normpath(first_file)
+                        metadata = files_metadata[first_filepath]
+                    if metadata["color_space"] == "Raw":
                         # set color space to raw if we linearized it
                         color_space = "Raw"
                 # Remap file node filename to destination
@@ -493,6 +529,8 @@ class ExtractLook(openpype.api.Extractor):
         else:
             colorconvert = ""
 
+        config_path = get_ocio_config_path("nuke-default")
+        color_config = "--colorconfig {0}".format(config_path)
         # Ensure folder exists
         if not os.path.exists(os.path.dirname(converted)):
             os.makedirs(os.path.dirname(converted))
@@ -502,10 +540,11 @@ class ExtractLook(openpype.api.Extractor):
             filepath,
             converted,
             # Include `source-hash` as string metadata
-            "-sattrib",
+            "--sattrib",
             "sourceHash",
             escape_space(texture_hash),
             colorconvert,
+            color_config
         )
 
         return converted, COPY, texture_hash
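With the argument reorder above, the source file now precedes the extra flags and `-o` comes last. A sketch of the resulting `maketx` invocation, under hypothetical paths and hash; the `--sattrib` and `--colorconfig` values mirror the extractor call, while the `--colorconvert` pair is an assumed example of a linearization case:

```python
source = "/textures/diffuse.tif"     # hypothetical input texture
destination = "/publish/diffuse.tx"  # hypothetical output .tx
texture_hash = "c0ffee"              # hypothetical source hash

cmd = [
    "maketx", "-v", "-u", "--oiio", "--filter lanczos3", source,
    # extra args passed in by ExtractLook:
    "--sattrib", "sourceHash", texture_hash,
    "--colorconvert sRGB linear",    # only present when linearizing
    "--colorconfig /vendor/configs/OpenColorIO-Configs/nuke-default/config.ocio",
    "-o", destination,
]
# The extractor joins the list into a single shell command string.
print(" ".join(cmd))
```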
diff --git a/openpype/hosts/maya/plugins/publish/validate_look_contents.py b/openpype/hosts/maya/plugins/publish/validate_look_contents.py
index 443a0ad719..b1e1d5416b 100644
--- a/openpype/hosts/maya/plugins/publish/validate_look_contents.py
+++ b/openpype/hosts/maya/plugins/publish/validate_look_contents.py
@@ -78,14 +78,13 @@ class ValidateLookContents(pyblish.api.InstancePlugin):
 
         # Check if attributes are on a node with an ID, crucial for rebuild!
         for attr_changes in lookdata["attributes"]:
-            if not attr_changes["uuid"]:
+            if not attr_changes["uuid"] and not attr_changes["attributes"]:
                 cls.log.error("Node '%s' has no cbId, please set the "
                               "attributes to its children if it has any"
                               % attr_changes["name"])
                 invalid.add(instance.name)
 
         return list(invalid)
-
     @classmethod
     def validate_looks(cls, instance):
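The next file replaces the old `pymel`-based workaround with plain `maya.cmds` workspace calls. A minimal sketch of that query-and-repair pattern, assuming a running Maya session (the folder value is a hypothetical example, in practice it comes from project settings):

```python
from maya import cmds

required = "renders/maya"  # hypothetical configured render image folder

# Query the current "images" file rule and repair it if it differs.
current = cmds.workspace(fileRuleEntry="images")
if current != required:
    cmds.workspace(fileRule=("images", required))
    cmds.workspace(saveWorkspace=True)
```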
" + "Must be set to: '{}'".format( + current_images_rule, required_images_rule ) ) @classmethod def repair(cls, instance): - default = cls.get_default_render_image_folder(instance) - pm.workspace.fileRules["images"] = default - pm.system.Workspace.save() + + required_images_rule = cls.get_default_render_image_folder(instance) + current_images_rule = cmds.workspace(fileRuleEntry="images") + + if current_images_rule != required_images_rule: + cmds.workspace(fileRule=("images", required_images_rule)) + cmds.workspace(saveWorkspace=True) @staticmethod def get_default_render_image_folder(instance): return instance.context.data.get('project_settings')\ .get('maya') \ - .get('create') \ - .get('CreateRender') \ + .get('RenderSettings') \ .get('default_render_image_folder') diff --git a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py index e6c6ef6c9e..35b87fd0ab 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py @@ -1,20 +1,11 @@ import re import pyblish.api -import openpype.api -import openpype.hosts.maya.api.action - from maya import cmds - -ImagePrefixes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'defaultRenderGlobals.imageFilePrefix', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', -} +import openpype.api +import openpype.hosts.maya.api.action +from openpype.hosts.maya.api.render_settings import RenderSettings class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): @@ -47,7 +38,11 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): # handle various renderman names if renderer.startswith('renderman'): renderer = 'renderman' - file_prefix = cmds.getAttr(ImagePrefixes[renderer]) + + file_prefix = cmds.getAttr( + RenderSettings.get_image_prefix_attr(renderer) + ) + if len(cameras) > 1: if re.search(cls.R_CAMERA_TOKEN, file_prefix): diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 74db164ae5..a53d932db1 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -21,9 +21,7 @@ from openpype.client import ( ) from openpype.api import ( Logger, - BuildWorkfile, get_version_from_path, - get_workdir_data, get_current_project_settings, ) from openpype.tools.utils import host_tools @@ -34,12 +32,17 @@ from openpype.settings import ( get_anatomy_settings, ) from openpype.modules import ModulesManager +from openpype.pipeline.template_data import get_template_data_with_names from openpype.pipeline import ( discover_legacy_creator_plugins, legacy_io, Anatomy, ) -from openpype.pipeline.context_tools import get_current_project_asset +from openpype.pipeline.context_tools import ( + get_current_project_asset, + get_custom_workfile_template_from_session +) +from openpype.pipeline.workfile import BuildWorkfile from . 
import gizmo_menu @@ -910,19 +913,17 @@ def get_render_path(node): ''' Generate Render path from presets regarding avalon knob data ''' avalon_knob_data = read_avalon_data(node) - data = {'avalon': avalon_knob_data} nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) - host_name = os.environ.get("AVALON_APP") - data.update({ - "app": host_name, + data = { + "avalon": avalon_knob_data, "nuke_imageio_writes": nuke_imageio_writes - }) + } anatomy_filled = format_anatomy(data) return anatomy_filled["render"]["path"].replace("\\", "/") @@ -965,12 +966,11 @@ def format_anatomy(data): data["version"] = get_version_from_path(file) project_name = anatomy.project_name - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, data["avalon"]["asset"]) + asset_name = data["avalon"]["asset"] task_name = os.environ["AVALON_TASK"] host_name = os.environ["AVALON_APP"] - context_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name + context_data = get_template_data_with_names( + project_name, asset_name, task_name, host_name ) data.update(context_data) data.update({ @@ -1128,10 +1128,8 @@ def create_write_node( if knob["name"] == "file_type": representation = knob["value"] - host_name = os.environ.get("AVALON_APP") try: data.update({ - "app": host_name, "imageio_writes": imageio_writes, "representation": representation, }) @@ -1925,7 +1923,7 @@ class WorkfileSettings(object): families.append(avalon_knob_data.get("families")) nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) @@ -2224,7 +2222,7 @@ def get_write_node_template_attr(node): avalon_knob_data = read_avalon_data(node) # get template data nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) @@ -2449,15 +2447,12 @@ def _launch_workfile_app(): def process_workfile_builder(): - from openpype.lib import ( - env_value_to_bool, - get_custom_workfile_template - ) # to avoid looping of the callback, remove it! 
nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")
 
     # get state from settings
-    workfile_builder = get_current_project_settings()["nuke"].get(
+    project_settings = get_current_project_settings()
+    workfile_builder = project_settings["nuke"].get(
         "workfile_builder", {})
 
     # get all important settings
@@ -2467,7 +2462,6 @@ def process_workfile_builder():
 
     # get settings
     createfv_on = workfile_builder.get("create_first_version") or None
-    custom_templates = workfile_builder.get("custom_templates") or None
     builder_on = workfile_builder.get("builder_on_start") or None
 
     last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")
@@ -2475,8 +2469,8 @@ def process_workfile_builder():
     # generate first version in file not existing and feature is enabled
     if createfv_on and not os.path.exists(last_workfile_path):
         # get custom template path if any
-        custom_template_path = get_custom_workfile_template(
-            custom_templates
+        custom_template_path = get_custom_workfile_template_from_session(
+            project_settings=project_settings
         )
 
         # if custom template is defined
diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py
index 0afc56d2f7..c1cd8f771a 100644
--- a/openpype/hosts/nuke/api/pipeline.py
+++ b/openpype/hosts/nuke/api/pipeline.py
@@ -9,7 +9,6 @@ import pyblish.api
 import openpype
 from openpype.api import (
     Logger,
-    BuildWorkfile,
     get_current_project_settings
 )
 from openpype.lib import register_event_callback
@@ -22,6 +21,7 @@ from openpype.pipeline import (
     deregister_inventory_action_path,
     AVALON_CONTAINER_ID,
 )
+from openpype.pipeline.workfile import BuildWorkfile
 from openpype.tools.utils import host_tools
 
 from .command import viewer_update_and_undo_stop
diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py
index 925cab0bef..37ce03dc55 100644
--- a/openpype/hosts/nuke/api/plugin.py
+++ b/openpype/hosts/nuke/api/plugin.py
@@ -181,8 +181,6 @@ class ExporterReview(object):
             # get first and last frame
             self.first_frame = min(self.collection.indexes)
             self.last_frame = max(self.collection.indexes)
-            if "slate" in self.instance.data["families"]:
-                self.first_frame += 1
         else:
             self.fname = os.path.basename(self.path_in)
             self.fhead = os.path.splitext(self.fname)[0] + "."
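The slate-frame offsets removed above are replaced by explicit representation data in the plugins that follow (`collect_slate_node.py`, `extract_render_local.py`, `extract_slate_frame.py`): the slate renders one frame before the range, and `frameStart` is zero-padded to the width of the last frame. A quick sketch of that padding expression with hypothetical frame numbers:

```python
first_frame = 1001                   # hypothetical start frame with handles
last_frame = 1100
slate_first_frame = first_frame - 1  # slate sits one frame before the range

# Pad to the digit count of the last frame, as the extractors now do.
frame_start = "{{:0>{}}}".format(len(str(last_frame))).format(slate_first_frame)
print(frame_start)  # -> "1000"
```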
diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py index 4257ed3131..bfe32d8fd1 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py +++ b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py @@ -33,6 +33,7 @@ class CollectSlate(pyblish.api.InstancePlugin): if slate_node: instance.data["slateNode"] = slate_node + instance.data["slate"] = True instance.data["families"].append("slate") instance.data["versionData"]["families"].append("slate") self.log.info( diff --git a/openpype/hosts/nuke/plugins/publish/extract_render_local.py b/openpype/hosts/nuke/plugins/publish/extract_render_local.py index 1595fe03fb..8879f0c999 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_render_local.py +++ b/openpype/hosts/nuke/plugins/publish/extract_render_local.py @@ -31,10 +31,6 @@ class NukeRenderLocal(openpype.api.Extractor): first_frame = instance.data.get("frameStartHandle", None) - # exception for slate workflow - if "slate" in families: - first_frame -= 1 - last_frame = instance.data.get("frameEndHandle", None) node_subset_name = instance.data.get("name", None) @@ -68,10 +64,6 @@ class NukeRenderLocal(openpype.api.Extractor): int(last_frame) ) - # exception for slate workflow - if "slate" in families: - first_frame += 1 - ext = node["file_type"].value() if "representations" not in instance.data: @@ -88,8 +80,11 @@ class NukeRenderLocal(openpype.api.Extractor): repre = { 'name': ext, 'ext': ext, - 'frameStart': "%0{}d".format( - len(str(last_frame))) % first_frame, + 'frameStart': ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(first_frame) + ), 'files': filenames, "stagingDir": out_dir } @@ -105,13 +100,16 @@ class NukeRenderLocal(openpype.api.Extractor): instance.data['family'] = 'render' families.remove('render.local') families.insert(0, "render2d") + instance.data["anatomyData"]["family"] = "render" elif "prerender.local" in families: instance.data['family'] = 'prerender' families.remove('prerender.local') families.insert(0, "prerender") + instance.data["anatomyData"]["family"] = "prerender" elif "still.local" in families: instance.data['family'] = 'image' families.remove('still.local') + instance.data["anatomyData"]["family"] = "image" instance.data["families"] = families collections, remainder = clique.assemble(filenames) @@ -123,4 +121,4 @@ class NukeRenderLocal(openpype.api.Extractor): self.log.info('Finished render') - self.log.debug("instance extracted: {}".format(instance.data)) + self.log.debug("_ instance.data: {}".format(instance.data)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index 99ade4cf9b..b5cad143db 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -13,6 +13,7 @@ from openpype.hosts.nuke.api import ( get_view_process_node ) + class ExtractSlateFrame(openpype.api.Extractor): """Extracts movie and thumbnail with baked in luts @@ -236,6 +237,7 @@ class ExtractSlateFrame(openpype.api.Extractor): def _render_slate_to_sequence(self, instance): # set slate frame first_frame = instance.data["frameStartHandle"] + last_frame = instance.data["frameEndHandle"] slate_first_frame = first_frame - 1 # render slate as sequence frame @@ -284,6 +286,13 @@ class ExtractSlateFrame(openpype.api.Extractor): matching_repre["files"] = [first_filename, slate_filename] elif slate_filename not in 
matching_repre["files"]: matching_repre["files"].insert(0, slate_filename) + matching_repre["frameStart"] = ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(slate_first_frame) + ) + self.log.debug( + "__ matching_repre: {}".format(pformat(matching_repre))) self.log.warning("Added slate frame to representation files") diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index b0da94c4ce..b396056eb9 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -50,7 +50,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # establish families family = avalon_knob_data["family"] families_ak = avalon_knob_data.get("families", []) - families = list() + families = [] # except disabled nodes but exclude backdrops in test if ("nukenodes" not in family) and (node["disable"].value()): @@ -111,10 +111,10 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ families: `{}`".format(families)) # Get format - format = root['format'].value() - resolution_width = format.width() - resolution_height = format.height() - pixel_aspect = format.pixelAspect() + format_ = root['format'].value() + resolution_width = format_.width() + resolution_height = format_.height() + pixel_aspect = format_.pixelAspect() # get publish knob value if "publish" not in node.knobs(): @@ -125,8 +125,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ _families_test: `{}`".format(_families_test)) for family_test in _families_test: if family_test in self.sync_workfile_version_on_families: - self.log.debug("Syncing version with workfile for '{}'" - .format(family_test)) + self.log.debug( + "Syncing version with workfile for '{}'".format( + family_test + ) + ) # get version to instance for integration instance.data['version'] = instance.context.data['version'] diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index a97f34b370..e37cc8a80a 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -144,8 +144,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): self.log.debug("colorspace: `{}`".format(colorspace)) version_data = { - "families": [f.replace(".local", "").replace(".farm", "") - for f in _families_test if "write" not in f], + "families": [ + _f.replace(".local", "").replace(".farm", "") + for _f in _families_test if "write" != _f + ], "colorspace": colorspace } diff --git a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py index af5e8e9d27..5f7b1f3806 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py +++ b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py @@ -98,7 +98,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): self.log.error(msg) raise ValidationException(msg) - collected_frames_len = int(len(collection.indexes)) + collected_frames_len = len(collection.indexes) coll_start = min(collection.indexes) coll_end = max(collection.indexes) diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index f15068b031..2cfbfa8778 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ 
b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,3 +1,5 @@ +import re + from openpype.hosts.photoshop import api from openpype.lib import BoolDef from openpype.pipeline import ( @@ -5,6 +7,8 @@ from openpype.pipeline import ( CreatedInstance, legacy_io ) +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class ImageCreator(Creator): @@ -38,17 +42,24 @@ class ImageCreator(Creator): top_level_selected_items = stub.get_selected_layers() if pre_create_data.get("use_selection"): only_single_item_selected = len(top_level_selected_items) == 1 - for selected_item in top_level_selected_items: - if ( - only_single_item_selected or - pre_create_data.get("create_multiple")): + if ( + only_single_item_selected or + pre_create_data.get("create_multiple")): + for selected_item in top_level_selected_items: if selected_item.group: groups_to_create.append(selected_item) else: top_layers_to_wrap.append(selected_item) - else: - group = stub.group_selected_layers(subset_name_from_ui) - groups_to_create.append(group) + else: + group = stub.group_selected_layers(subset_name_from_ui) + groups_to_create.append(group) + else: + stub.select_layers(stub.get_layers()) + try: + group = stub.group_selected_layers(subset_name_from_ui) + except Exception: + raise ValueError("Cannot group locked Background layer!") + groups_to_create.append(group) if not groups_to_create and not top_layers_to_wrap: group = stub.create_group(subset_name_from_ui) @@ -60,6 +71,7 @@ class ImageCreator(Creator): group = stub.group_selected_layers(layer.name) groups_to_create.append(group) + layer_name = '' creating_multiple_groups = len(groups_to_create) > 1 for group in groups_to_create: subset_name = subset_name_from_ui # reset to name from creator UI @@ -67,8 +79,16 @@ class ImageCreator(Creator): created_group_name = self._clean_highlights(stub, group.name) if creating_multiple_groups: - # concatenate with layer name to differentiate subsets - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill = prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -143,3 +163,6 @@ class ImageCreator(Creator): def _clean_highlights(self, stub, item): return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, '') + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py index 9736471a26..2792a775e0 100644 --- a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py @@ -1,7 +1,12 @@ +import re + from Qt import QtWidgets from openpype.pipeline import create from openpype.hosts.photoshop import api as photoshop +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS + class CreateImage(create.LegacyCreator): """Image folder for publish.""" @@ -75,6 +80,7 @@ class CreateImage(create.LegacyCreator): groups.append(group) creator_subset_name = self.data["subset"] + layer_name = '' for group in groups: long_names = [] group.name = group.name.replace(stub.PUBLISH_ICON, '').
\ @@ -82,7 +88,16 @@ class CreateImage(create.LegacyCreator): subset_name = creator_subset_name if len(groups) > 1: - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill = prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -98,3 +113,7 @@ class CreateImage(create.LegacyCreator): # reusing existing group, need to rename afterwards if not create_group: stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) + + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py index b53f4e8198..8106d6ff16 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py @@ -4,6 +4,7 @@ import pyblish.api import openpype.api from openpype.pipeline import PublishXmlValidationError from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class ValidateNamingRepair(pyblish.api.Action): @@ -50,6 +51,13 @@ class ValidateNamingRepair(pyblish.api.Action): subset_name = re.sub(invalid_chars, replace_char, instance.data["subset"]) + # format from Tool Creator + subset_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + subset_name + ) + layer_meta["subset"] = subset_name stub.imprint(instance_id, layer_meta) diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py index 285fe8f8d6..1e05f336fb 100644 --- a/openpype/hosts/testhost/api/pipeline.py +++ b/openpype/hosts/testhost/api/pipeline.py @@ -1,6 +1,6 @@ import os import json -from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_name class HostContext: @@ -17,10 +17,10 @@ class HostContext: if not asset_name: return project_name - asset_doc = legacy_io.find_one( - {"type": "asset", "name": asset_name}, - {"data.parents": 1} + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.parents"] ) + parents = asset_doc.get("data", {}).get("parents") or [] hierarchy = [project_name] diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py index 06b95375b1..8d59fc3242 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/testhost/plugins/create/auto_creator.py @@ -1,10 +1,11 @@ from openpype.lib import NumberDef -from openpype.hosts.testhost.api import pipeline +from openpype.client import get_asset_by_name from openpype.pipeline import ( legacy_io, AutoCreator, CreatedInstance, ) +from openpype.hosts.testhost.api import pipeline class MyAutoCreator(AutoCreator): @@ -44,10 +45,7 @@ class MyAutoCreator(AutoCreator): host_name = legacy_io.Session["AVALON_APP"] if existing_instance is None: - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) @@ -69,10 +67,7 @@ class MyAutoCreator(AutoCreator): existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = legacy_io.find_one({ 
- "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py new file mode 100644 index 0000000000..7c392ef508 --- /dev/null +++ b/openpype/hosts/traypublisher/api/editorial.py @@ -0,0 +1,331 @@ +import re +from copy import deepcopy + +from openpype.client import get_asset_by_id +from openpype.pipeline.create import CreatorError + + +class ShotMetadataSolver: + """ Solving hierarchical metadata + + Used during editorial publishing. Works with imput + clip name and settings defining python formatable + template. Settings also define searching patterns + and its token keys used for formating in templates. + """ + + NO_DECOR_PATERN = re.compile(r"\{([a-z]*?)\}") + + # presets + clip_name_tokenizer = None + shot_rename = True + shot_hierarchy = None + shot_add_tasks = None + + def __init__( + self, + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + logger + ): + self.clip_name_tokenizer = clip_name_tokenizer + self.shot_rename = shot_rename + self.shot_hierarchy = shot_hierarchy + self.shot_add_tasks = shot_add_tasks + self.log = logger + + def _rename_template(self, data): + """Shot renaming function + + Args: + data (dict): formating data + + Raises: + CreatorError: If missing keys + + Returns: + str: formated new name + """ + shot_rename_template = self.shot_rename[ + "shot_rename_template"] + try: + # format to new shot name + return shot_rename_template.format(**data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct:: \n\n" + f"From template string {shot_rename_template} > " + f"`{_E}` has no equivalent in \n" + f"{list(data.keys())} input formating keys!" + )) + + def _generate_tokens(self, clip_name, source_data): + """Token generator + + Settings defines token pairs key and regex expression. + + Args: + clip_name (str): name of clip in editorial + source_data (dict): data for formating + + Raises: + CreatorError: if missing key + + Returns: + dict: updated source_data + """ + output_data = deepcopy(source_data["anatomy_data"]) + output_data["clip_name"] = clip_name + + if not self.clip_name_tokenizer: + return output_data + + parent_name = source_data["selected_asset_doc"]["name"] + + search_text = parent_name + clip_name + + for token_key, pattern in self.clip_name_tokenizer.items(): + p = re.compile(pattern) + match = p.findall(search_text) + if not match: + raise CreatorError(( + "Make sure regex expression works with your data: \n\n" + f"'{token_key}' with regex '{pattern}' in your settings\n" + "can't find any match in your clip name " + f"'{search_text}'!\n\nLook to: " + "'project_settings/traypublisher/editorial_creators" + "/editorial_simple/clip_name_tokenizer'\n" + "at your project settings..." + )) + + # QUESTION:how to refactory `match[-1]` to some better way? + output_data[token_key] = match[-1] + + return output_data + + def _create_parents_from_settings(self, parents, data): + """Formating parent components. 
+ + Args: + parents (list): list of dict parent components + data (dict): formatting data + + Raises: + CreatorError: missing formatting key + CreatorError: missing token key + KeyError: missing parent token + + Returns: + list: list of dict of parent components + """ + # fill the parents parts from presets + shot_hierarchy = deepcopy(self.shot_hierarchy) + hierarchy_parents = shot_hierarchy["parents"] + + # fill parent keys data template from anatomy data + try: + _parent_tokens_formatting_data = { + parent_token["name"]: parent_token["value"].format(**data) + for parent_token in hierarchy_parents + } + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n" + f"`{_E}` has no equivalent in \n{list(data.keys())}" + )) + + _parent_tokens_type = { + parent_token["name"]: parent_token["type"] + for parent_token in hierarchy_parents + } + for _index, _parent in enumerate( + shot_hierarchy["parents_path"].split("/") + ): + # format parent token with the already formatted values + try: + parent_name = _parent.format( + **_parent_tokens_formatting_data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct: \n\n" + f"`{_E}` from template string " + f"{shot_hierarchy['parents_path']}, " + f" has no equivalent in \n" + f"{list(_parent_tokens_formatting_data.keys())} parents" + )) + + parent_token_name = ( + self.NO_DECOR_PATTERN.findall(_parent).pop()) + + if not parent_token_name: + raise KeyError( + f"Parent token is not found in: `{_parent}`") + + # find parent type + parent_token_type = _parent_tokens_type[parent_token_name] + + # in case selected context is set to the same asset + if ( + _index == 0 + and parents[-1]["entity_name"] == parent_name + ): + self.log.debug(f" skipping : {parent_name}") + continue + + # in case the first parent is the project, rebuild parents from scratch + if ( + _index == 0 + and parent_token_type == "Project" + ): + self.log.debug("rebuilding parents from scratch") + project_parent = parents[0] + parents = [project_parent] + continue + + parents.append({ + "entity_type": parent_token_type, + "entity_name": parent_name + }) + + self.log.debug(f"__ parents: {parents}") + + return parents + + def _create_hierarchy_path(self, parents): + """Convert parents into a hierarchy path + + Args: + parents (list): list of dict parent components + + Returns: + str: hierarchy path + """ + return "/".join( + [ + p["entity_name"] for p in parents + if p["entity_type"] != "Project" + ] + ) if parents else "" + + def _get_parents_from_selected_asset( + self, + asset_doc, + project_doc + ): + """Return parents from the selected asset context. + + The context is defined in the Traypublisher project tree.
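+ + For example (hypothetical hierarchy): selecting asset "sh010" with + visual parent "sq01" returns entries for the project, "sq01" and + "sh010", ordered from the project downwards.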
+ + Args: + asset_doc (db obj): selected asset doc + project_doc (db obj): actual project doc + + Returns: + list: list of dict parent components + """ + project_name = project_doc["name"] + visual_hierarchy = [asset_doc] + current_doc = asset_doc + + # loop through all available visual parents + # and break once no more are available + while True: + visual_parent_id = current_doc["data"]["visualParent"] + visual_parent = None + if visual_parent_id: + visual_parent = get_asset_by_id(project_name, visual_parent_id) + + if not visual_parent: + visual_hierarchy.append(project_doc) + break + visual_hierarchy.append(visual_parent) + current_doc = visual_parent + + # add current selection context hierarchy + return [ + { + "entity_type": entity["data"]["entityType"], + "entity_name": entity["name"] + } + for entity in reversed(visual_hierarchy) + ] + + def _generate_tasks_from_settings(self, project_doc): + """Convert settings inputs to task data. + + Args: + project_doc (db obj): actual project doc + + Raises: + KeyError: Missing task type in project doc + + Returns: + dict: tasks data + """ + tasks_to_add = {} + + project_tasks = project_doc["config"]["tasks"] + for task_name, task_data in self.shot_add_tasks.items(): + _task_data = deepcopy(task_data) + + # check if task type in project task types + if _task_data["type"] in project_tasks.keys(): + tasks_to_add[task_name] = _task_data + else: + raise KeyError( + "Task type `{}` for task `{}` does not" + " exist in `{}`".format( + _task_data["type"], + task_name, + list(project_tasks.keys()) + ) + ) + + return tasks_to_add + + def generate_data(self, clip_name, source_data): + """Metadata generator. + + Converts input data to hierarchy metadata. + + Args: + clip_name (str): clip name + source_data (dict): formatting data + + Returns: + (str, dict): shot name and hierarchy data + """ + self.log.info(f"_ source_data: {source_data}") + + tasks = {} + asset_doc = source_data["selected_asset_doc"] + project_doc = source_data["project_doc"] + + # match clip to shot name at start + shot_name = clip_name + + # parse all tokens and generate formatting data + formatting_data = self._generate_tokens(shot_name, source_data) + + # generate parents from selected asset + parents = self._get_parents_from_selected_asset(asset_doc, project_doc) + + if self.shot_rename["enabled"]: + shot_name = self._rename_template(formatting_data) + self.log.info(f"Renamed shot name: {shot_name}") + + if self.shot_hierarchy["enabled"]: + parents = self._create_parents_from_settings( + parents, formatting_data) + + if self.shot_add_tasks: + tasks = self._generate_tasks_from_settings( + project_doc) + + return shot_name, { + "hierarchy": self._create_hierarchy_path(parents), + "parents": parents, + "tasks": tasks + } diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py index 9b9425855e..a3eead51c8 100644 --- a/openpype/hosts/traypublisher/api/plugin.py +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -1,6 +1,7 @@ from openpype.lib.attribute_definitions import FileDef -from openpype.pipeline import ( +from openpype.pipeline.create import ( Creator, + HiddenCreator, CreatedInstance ) @@ -11,7 +12,6 @@ from .pipeline import ( HostContext, ) - IMAGE_EXTENSIONS = [ ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal", ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits", @@ -35,6 +35,42 @@ VIDEO_EXTENSIONS = [ REVIEW_EXTENSIONS = IMAGE_EXTENSIONS + VIDEO_EXTENSIONS +class
HiddenTrayPublishCreator(HiddenCreator): + host_name = "traypublisher" + + def collect_instances(self): + for instance_data in list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + update_instances(update_list) + + def remove_instances(self, instances): + remove_instances(instances) + for instance in instances: + self._remove_instance_from_context(instance) + + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into "workfile" of traypublisher and also added + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. + """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + # Add instance to current context + self._add_instance_to_context(new_instance) + + class TrayPublishCreator(Creator): create_allow_context_change = True host_name = "traypublisher" @@ -56,10 +92,6 @@ class TrayPublishCreator(Creator): for instance in instances: self._remove_instance_from_context(instance) - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attrobites - return self.get_instance_attr_defs() - def _store_new_instance(self, new_instance): """Tray publisher specific method to store instance. @@ -81,15 +113,6 @@ class SettingsCreator(TrayPublishCreator): extensions = [] - def collect_instances(self): - for instance_data in list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - def create(self, subset_name, data, pre_create_data): # Pass precreate data to creator attributes data["creator_attributes"] = pre_create_data @@ -120,6 +143,10 @@ class SettingsCreator(TrayPublishCreator): ) ] + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return self.get_instance_attr_defs() + @classmethod def from_settings(cls, item_data): identifier = item_data["identifier"] diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py new file mode 100644 index 0000000000..28a115629e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py @@ -0,0 +1,869 @@ +import os +from copy import deepcopy +from pprint import pformat +import opentimelineio as otio +from openpype.client import ( + get_asset_by_name, + get_project +) +from openpype.hosts.traypublisher.api.plugin import ( + TrayPublishCreator, + HiddenTrayPublishCreator +) +from openpype.hosts.traypublisher.api.editorial import ( + ShotMetadataSolver +) + +from openpype.pipeline import CreatedInstance + +from openpype.lib import ( + get_ffprobe_data, + convert_ffprobe_fps_value, + + FileDef, + TextDef, + NumberDef, + EnumDef, + BoolDef, + UISeparatorDef, + UILabelDef +) + + +CLIP_ATTR_DEFS = [ + EnumDef( + "fps", + items={ + "from_selection": "From selection", + 23.976: "23.976", + 24: "24", + 25: "25", + 29.97: "29.97", + 30: "30" + }, + label="FPS" + ), + NumberDef( + "workfile_start_frame", + default=1001, + label="Workfile start frame" + ), + NumberDef( + "handle_start", + default=0, + label="Handle start" + ), + NumberDef( + "handle_end", + default=0,
+ label="Handle end" + ) +] + + +class EditorialClipInstanceCreatorBase(HiddenTrayPublishCreator): + """ Wrapper class for clip family creators + + Args: + HiddenTrayPublishCreator (BaseCreator): hidden supporting class + """ + host_name = "traypublisher" + + def create(self, instance_data, source_data=None): + self.log.info(f"instance_data: {instance_data}") + subset_name = instance_data["subset"] + + # Create new instance + new_instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + self.log.info(f"instance_data: {pformat(new_instance.data)}") + + self._store_new_instance(new_instance) + + return new_instance + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + +class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase): + """ Shot family class + + The shot metadata instance carrier. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_shot" + family = "shot" + label = "Editorial Shot" + + def get_instance_attr_defs(self): + attr_defs = [ + TextDef( + "asset_name", + label="Asset name", + ) + ] + attr_defs.extend(CLIP_ATTR_DEFS) + return attr_defs + + +class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase): + """ Plate family class + + Plate representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_plate" + family = "plate" + label = "Editorial Plate" + + +class EditorialAudioInstanceCreator(EditorialClipInstanceCreatorBase): + """ Audio family class + + Audio representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_audio" + family = "audio" + label = "Editorial Audio" + + +class EditorialReviewInstanceCreator(EditorialClipInstanceCreatorBase): + """ Review family class + + Review representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_review" + family = "review" + label = "Editorial Review" + + +class EditorialSimpleCreator(TrayPublishCreator): + """ Editorial creator class + + Simple workflow creator. This creator only disecting input + video file into clip chunks and then converts each to + defined format defined Settings for each subset preset. + + Args: + TrayPublishCreator (Creator): Tray publisher plugin class + """ + + label = "Editorial Simple" + family = "editorial" + identifier = "editorial_simple" + default_variants = [ + "main" + ] + description = "Editorial files to generate shots." + detailed_description = """ +Supporting publishing new shots to project +or updating already created. Publishing will create OTIO file. 
+""" + icon = "fa.file" + + def __init__( + self, project_settings, *args, **kwargs + ): + super(EditorialSimpleCreator, self).__init__( + project_settings, *args, **kwargs + ) + editorial_creators = deepcopy( + project_settings["traypublisher"]["editorial_creators"] + ) + # get this creator settings by identifier + self._creator_settings = editorial_creators.get(self.identifier) + + clip_name_tokenizer = self._creator_settings["clip_name_tokenizer"] + shot_rename = self._creator_settings["shot_rename"] + shot_hierarchy = self._creator_settings["shot_hierarchy"] + shot_add_tasks = self._creator_settings["shot_add_tasks"] + + self._shot_metadata_solver = ShotMetadataSolver( + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + self.log + ) + + # try to set main attributes from settings + if self._creator_settings.get("default_variants"): + self.default_variants = self._creator_settings["default_variants"] + + def create(self, subset_name, instance_data, pre_create_data): + allowed_family_presets = self._get_allowed_family_presets( + pre_create_data) + + clip_instance_properties = { + k: v for k, v in pre_create_data.items() + if k != "sequence_filepath_data" + if k not in [ + i["family"] for i in self._creator_settings["family_presets"] + ] + } + # Create otio editorial instance + asset_name = instance_data["asset"] + asset_doc = get_asset_by_name(self.project_name, asset_name) + + self.log.info(pre_create_data["fps"]) + + if pre_create_data["fps"] == "from_selection": + # get asset doc data attributes + fps = asset_doc["data"]["fps"] + else: + fps = float(pre_create_data["fps"]) + + instance_data.update({ + "fps": fps + }) + + # get path of sequence + sequence_path_data = pre_create_data["sequence_filepath_data"] + media_path_data = pre_create_data["media_filepaths_data"] + + sequence_path = self._get_path_from_file_data(sequence_path_data) + media_path = self._get_path_from_file_data(media_path_data) + + # get otio timeline + otio_timeline = self._create_otio_timeline( + sequence_path, fps) + + # Create all clip instances + clip_instance_properties.update({ + "fps": fps, + "parent_asset_name": asset_name, + "variant": instance_data["variant"] + }) + + # create clip instances + self._get_clip_instances( + otio_timeline, + media_path, + clip_instance_properties, + family_presets=allowed_family_presets + + ) + + # create otio editorial instance + self._create_otio_instance( + subset_name, instance_data, + sequence_path, media_path, + otio_timeline + ) + + def _create_otio_instance( + self, + subset_name, + data, + sequence_path, + media_path, + otio_timeline + ): + """Otio instance creating function + + Args: + subset_name (str): name of subset + data (dict): instnance data + sequence_path (str): path to sequence file + media_path (str): path to media file + otio_timeline (otio.Timeline): otio timeline object + """ + # Pass precreate data to creator attributes + data.update({ + "sequenceFilePath": sequence_path, + "editorialSourcePath": media_path, + "otioTimeline": otio.adapters.write_to_string(otio_timeline) + }) + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._store_new_instance(new_instance) + + def _create_otio_timeline(self, sequence_path, fps): + """Creating otio timeline from sequence path + + Args: + sequence_path (str): path to sequence file + fps (float): frame per second + + Returns: + otio.Timeline: otio timeline object + """ + # get editorial sequence file into otio timeline object + extension = 
os.path.splitext(sequence_path)[1] + + kwargs = {} + if extension == ".edl": + # EDL has no frame rate embedded so needs explicit + # frame rate else 24 is assumed. + kwargs["rate"] = fps + kwargs["ignore_timecode_mismatch"] = True + + self.log.info(f"kwargs: {kwargs}") + return otio.adapters.read_from_file(sequence_path, **kwargs) + + def _get_path_from_file_data(self, file_path_data): + """Convert creator path data to a single path string + + Args: + file_path_data (FileDefItem): creator path data inputs + + Raises: + FileExistsError: in case nothing had been set + + Returns: + str: path string + """ + # TODO: just temporarily solving only one media file + if isinstance(file_path_data, list): + file_path_data = file_path_data.pop() + + if len(file_path_data["filenames"]) == 0: + raise FileExistsError( + f"File path was not added: {file_path_data}") + + return os.path.join( + file_path_data["directory"], file_path_data["filenames"][0]) + + def _get_clip_instances( + self, + otio_timeline, + media_path, + instance_data, + family_presets + ): + """Helper function for creating clip instances + + Args: + otio_timeline (otio.Timeline): otio timeline object + media_path (str): media file path string + instance_data (dict): clip instance data + family_presets (list): list of dict subset presets from settings + """ + self.asset_name_check = [] + + tracks = otio_timeline.each_child( + descended_from_type=otio.schema.Track + ) + + # media data for audio stream and reference solving + media_data = self._get_media_source_metadata(media_path) + + for track in tracks: + self.log.debug(f"track.name: {track.name}") + try: + track_start_frame = ( + abs(track.source_range.start_time.value) + ) + self.log.debug(f"track_start_frame: {track_start_frame}") + track_start_frame -= self.timeline_frame_start + except AttributeError: + track_start_frame = 0 + + self.log.debug(f"track_start_frame: {track_start_frame}") + + for clip in track.each_child(): + if not self._validate_clip_for_processing(clip): + continue + + # get available frames info to clip data + self._create_otio_reference(clip, media_path, media_data) + + # convert timeline range to source range + self._restore_otio_source_range(clip) + + base_instance_data = self._get_base_instance_data( + clip, + instance_data, + track_start_frame + ) + + parenting_data = { + "instance_label": None, + "instance_id": None + } + self.log.info(( + "Creating subsets from presets: \n" + f"{pformat(family_presets)}" + )) + + for _fpreset in family_presets: + # exclude audio family if no audio stream + if ( + _fpreset["family"] == "audio" + and not media_data.get("audio") + ): + continue + + instance = self._make_subset_instance( + clip, + _fpreset, + deepcopy(base_instance_data), + parenting_data + ) + self.log.debug(f"{pformat(dict(instance.data))}") + + def _restore_otio_source_range(self, otio_clip): + """Restoring source range. + + The otio clip is missing a proper source clip range, so + here we add it from the parent timeline frame range. + + Args: + otio_clip (otio.Clip): otio clip object + """ + otio_clip.source_range = otio_clip.range_in_parent() + + def _create_otio_reference( + self, + otio_clip, + media_path, + media_data + ): + """Creating otio reference on otio clip.
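+ + Example of the produced value (numbers are hypothetical): media + probed as start_frame=0, duration=100 and fps=25.0 yields an + ExternalReference whose available_range is + TimeRange(RationalTime(0, 25.0), RationalTime(100, 25.0)).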
+ + Args: + otio_clip (otio.Clip): otio clip object + media_path (str): media file path string + media_data (dict): media metadata + """ + start_frame = media_data["start_frame"] + frame_duration = media_data["duration"] + fps = media_data["fps"] + + available_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime( + start_frame, fps), + duration=otio.opentime.RationalTime( + frame_duration, fps) + ) + # in case of old OTIO or a video file, create `ExternalReference` + media_reference = otio.schema.ExternalReference( + target_url=media_path, + available_range=available_range + ) + + otio_clip.media_reference = media_reference + + def _get_media_source_metadata(self, path): + """Get all available metadata from file + + Args: + path (str): media file path string + + Raises: + AssertionError: ffprobe couldn't read metadata + + Returns: + dict: media file metadata + """ + return_data = {} + + try: + media_data = get_ffprobe_data( + path, self.log + ) + self.log.debug(f"__ media_data: {pformat(media_data)}") + + # get video stream data + video_stream = media_data["streams"][0] + return_data = { + "video": True, + "start_frame": 0, + "duration": int(video_stream["nb_frames"]), + "fps": float( + convert_ffprobe_fps_value( + video_stream["r_frame_rate"] + ) + ) + } + + # get audio streams data + audio_stream = [ + stream for stream in media_data["streams"] + if stream["codec_type"] == "audio" + ] + + if audio_stream: + return_data["audio"] = True + + except Exception as exc: + raise AssertionError(( + "FFprobe couldn't read information about input file: " + f"\"{path}\". Error message: {exc}" + )) + + return return_data + + def _make_subset_instance( + self, + otio_clip, + preset, + instance_data, + parenting_data + ): + """Making subset instance from input preset + + Args: + otio_clip (otio.Clip): otio clip object + preset (dict): single family preset + instance_data (dict): instance data + parenting_data (dict): shot instance parent data + + Returns: + CreatedInstance: creator instance object + """ + family = preset["family"] + label = self._make_subset_naming( + preset, + instance_data + ) + instance_data["label"] = label + + # the shot family is created through the hidden shot creator + if family == "shot": + instance_data["otioClip"] = ( + otio.adapters.write_to_string(otio_clip)) + c_instance = self.create_context.creators[ + "editorial_shot"].create( + instance_data) + parenting_data.update({ + "instance_label": label, + "instance_id": c_instance.data["instance_id"] + }) + else: + # add review family if defined + instance_data.update({ + "outputFileType": preset["output_file_type"], + "parent_instance_id": parenting_data["instance_id"], + "creator_attributes": { + "parent_instance": parenting_data["instance_label"], + "add_review_family": preset.get("review") + } + }) + + creator_identifier = f"editorial_{family}" + editorial_clip_creator = self.create_context.creators[ + creator_identifier] + c_instance = editorial_clip_creator.create( + instance_data) + + return c_instance + + def _make_subset_naming( + self, + preset, + instance_data + ): + """ Subset name maker + + Args: + preset (dict): single preset item + instance_data (dict): instance data + + Returns: + str: label string + """ + shot_name = instance_data["shotName"] + variant_name = instance_data["variant"] + family = preset["family"] + + # get variant name from preset or inherit it from the instance + _variant_name = preset.get("variant") or variant_name + + self.log.debug(f"__ family: {family}") + self.log.debug(f"__ preset:
{preset}") + + # subset name + subset_name = "{}{}".format( + family, _variant_name.capitalize() + ) + label = "{}_{}".format( + shot_name, + subset_name + ) + + instance_data.update({ + "family": family, + "label": label, + "variant": _variant_name, + "subset": subset_name, + }) + + return label + + def _get_base_instance_data( + self, + otio_clip, + instance_data, + track_start_frame, + ): + """ Factoring basic set of instance data. + + Args: + otio_clip (otio.Clip): otio clip object + instance_data (dict): precreate instance data + track_start_frame (int): track start frame + + Returns: + dict: instance data + """ + # get clip instance properties + parent_asset_name = instance_data["parent_asset_name"] + handle_start = instance_data["handle_start"] + handle_end = instance_data["handle_end"] + timeline_offset = instance_data["timeline_offset"] + workfile_start_frame = instance_data["workfile_start_frame"] + fps = instance_data["fps"] + variant_name = instance_data["variant"] + + # basic unique asset name + clip_name = os.path.splitext(otio_clip.name)[0].lower() + project_doc = get_project(self.project_name) + + shot_name, shot_metadata = self._shot_metadata_solver.generate_data( + clip_name, + { + "anatomy_data": { + "project": { + "name": self.project_name, + "code": project_doc["data"]["code"] + }, + "parent": parent_asset_name, + "app": self.host_name + }, + "selected_asset_doc": get_asset_by_name( + self.project_name, parent_asset_name), + "project_doc": project_doc + } + ) + + self._validate_name_uniqueness(shot_name) + + timing_data = self._get_timing_data( + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ) + + # create creator attributes + creator_attributes = { + "asset_name": shot_name, + "Parent hierarchy path": shot_metadata["hierarchy"], + "workfile_start_frame": workfile_start_frame, + "fps": fps, + "handle_start": int(handle_start), + "handle_end": int(handle_end) + } + creator_attributes.update(timing_data) + + # create shared new instance data + base_instance_data = { + "shotName": shot_name, + "variant": variant_name, + + # HACK: just for temporal bug workaround + # TODO: should loockup shot name for update + "asset": parent_asset_name, + "task": "", + + "newAssetPublishing": True, + + # parent time properties + "trackStartFrame": track_start_frame, + "timelineOffset": timeline_offset, + # creator_attributes + "creator_attributes": creator_attributes + } + # add hierarchy shot metadata + base_instance_data.update(shot_metadata) + + return base_instance_data + + def _get_timing_data( + self, + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ): + """Returning available timing data + + Args: + otio_clip (otio.Clip): otio clip object + timeline_offset (int): offset value + track_start_frame (int): starting frame input + workfile_start_frame (int): start frame for shot's workfiles + + Returns: + dict: timing metadata + """ + # frame ranges data + clip_in = otio_clip.range_in_parent().start_time.value + clip_in += track_start_frame + clip_out = otio_clip.range_in_parent().end_time_inclusive().value + clip_out += track_start_frame + self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}") + + # add offset in case there is any + self.log.debug(f"__ timeline_offset: {timeline_offset}") + if timeline_offset: + clip_in += timeline_offset + clip_out += timeline_offset + + clip_duration = otio_clip.duration().value + self.log.info(f"clip duration: {clip_duration}") + + source_in = otio_clip.trimmed_range().start_time.value + 
source_out = source_in + clip_duration + + # define starting frame for future shot + frame_start = ( + clip_in if workfile_start_frame is None + else workfile_start_frame + ) + frame_end = frame_start + (clip_duration - 1) + + return { + "frameStart": int(frame_start), + "frameEnd": int(frame_end), + "clipIn": int(clip_in), + "clipOut": int(clip_out), + "clipDuration": int(otio_clip.duration().value), + "sourceIn": int(source_in), + "sourceOut": int(source_out) + } + + def _get_allowed_family_presets(self, pre_create_data): + """ Filter out allowed family presets. + + Args: + pre_create_data (dict): precreate attributes inputs + + Returns: + list: list of dicts with preset items + """ + self.log.debug(f"__ pre_create_data: {pre_create_data}") + return [ + {"family": "shot"}, + *[ + preset for preset in self._creator_settings["family_presets"] + if pre_create_data[preset["family"]] + ] + ] + + def _validate_clip_for_processing(self, otio_clip): + """Validate otio clip attributes + + Args: + otio_clip (otio.Clip): otio clip object + + Returns: + bool: True if all conditions pass + """ + if otio_clip.name is None: + return False + + if isinstance(otio_clip, otio.schema.Gap): + return False + + # skip all generators like black empty + if isinstance( + otio_clip.media_reference, + otio.schema.GeneratorReference): + return False + + # Transitions are ignored, because Clips have the full frame + # range. + if isinstance(otio_clip, otio.schema.Transition): + return False + + return True + + def _validate_name_uniqueness(self, name): + """ Validating name uniqueness. + + In the context of other clip names in the sequence file. + + Args: + name (str): shot name string + """ + if name not in self.asset_name_check: + self.asset_name_check.append(name) + else: + self.log.warning( + f"Duplicate shot name: {name}! " + "Please check names in the input sequence files." + ) + + def get_pre_create_attr_defs(self): + """ Creating pre-create attributes at creator plugin.
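+ + Combines the sequence and media file inputs, a timeline offset, + one boolean switch per family preset from settings and the shared + CLIP_ATTR_DEFS clip attributes.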
+ + Returns: + list: list of attribute object instances + """ + # Use same attributes as for instance attributes + attr_defs = [ + FileDef( + "sequence_filepath_data", + folders=False, + extensions=[ + ".edl", + ".xml", + ".aaf", + ".fcpxml" + ], + allow_sequences=False, + single_item=True, + label="Sequence file", + ), + FileDef( + "media_filepaths_data", + folders=False, + extensions=[ + ".mov", + ".mp4", + ".wav" + ], + allow_sequences=False, + single_item=False, + label="Media files", + ), + # TODO: perhaps timecode and fps inputs would be better + NumberDef( + "timeline_offset", + default=0, + label="Timeline offset" + ), + UISeparatorDef(), + UILabelDef("Clip instance attributes"), + UISeparatorDef() + ] + # add family switches + attr_defs.extend( + BoolDef(_var["family"], label=_var["family"]) + for _var in self._creator_settings["family_presets"] + ) + attr_defs.append(UISeparatorDef()) + + attr_defs.extend(CLIP_ATTR_DEFS) + return attr_defs diff --git a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py index baca274ea6..41c1c29bb0 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py +++ b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py @@ -1,6 +1,7 @@ import os +from openpype.api import get_project_settings, Logger -from openpype.api import get_project_settings +log = Logger.get_logger(__name__) def initialize(): @@ -13,6 +14,7 @@ def initialize(): global_variables = globals() for item in simple_creators: + dynamic_plugin = SettingsCreator.from_settings(item) global_variables[dynamic_plugin.__name__] = dynamic_plugin diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py new file mode 100644 index 0000000000..bdf7c05f3d --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py @@ -0,0 +1,36 @@ +from pprint import pformat +import pyblish.api + + +class CollectClipInstance(pyblish.api.InstancePlugin): + """Collect clip instances and resolve their parents""" + + label = "Collect Clip Instances" + order = pyblish.api.CollectorOrder - 0.081 + + hosts = ["traypublisher"] + families = ["plate", "review", "audio"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + instance.data["families"].append("clip") + + parent_instance_id = instance.data["parent_instance_id"] + edit_shared_data = instance.context.data["editorialSharedData"] + instance.data.update( + edit_shared_data[parent_instance_id] + ) + + if "editorialSourcePath" in instance.context.data.keys(): + instance.data["editorialSourcePath"] = ( + instance.context.data["editorialSourcePath"]) + instance.data["families"].append("trimming") + + self.log.debug(pformat(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py new file mode 100644 index 0000000000..e181d0abe5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py @@ -0,0 +1,48 @@ +import os +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectEditorialInstance(pyblish.api.InstancePlugin): + """Collect data for editorial instances."""
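+ + # Note: reads the OTIO timeline string stored by the creator, shares + # it through context data and registers the sequence file as a + # representation.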
+ + label = "Collect Editorial Instances" + order = pyblish.api.CollectorOrder - 0.1 + + hosts = ["traypublisher"] + families = ["editorial"] + + def process(self, instance): + + if "families" not in instance.data: + instance.data["families"] = [] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + fpath = instance.data["sequenceFilePath"] + otio_timeline_string = instance.data.pop("otioTimeline") + otio_timeline = otio.adapters.read_from_string( + otio_timeline_string) + + instance.context.data["otioTimeline"] = otio_timeline + instance.context.data["editorialSourcePath"] = ( + instance.data["editorialSourcePath"]) + + self.log.info(fpath) + + instance.data["stagingDir"] = os.path.dirname(fpath) + + _, ext = os.path.splitext(fpath) + + instance.data["representations"].append({ + "ext": ext[1:], + "name": ext[1:], + "stagingDir": instance.data["stagingDir"], + "files": os.path.basename(fpath) + }) + + self.log.debug("Created Editorial Instance {}".format( + pformat(instance.data) + )) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py new file mode 100644 index 0000000000..4af4fb94e9 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py @@ -0,0 +1,30 @@ +import pyblish.api + + +class CollectEditorialReviewable(pyblish.api.InstancePlugin): + """ Collect review input from user. + + Adds the input to instance data. + """ + + label = "Collect Editorial Reviewable" + order = pyblish.api.CollectorOrder + + families = ["plate", "review", "audio"] + hosts = ["traypublisher"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + creator_attributes = instance.data["creator_attributes"] + + if creator_attributes["add_review_family"]: + instance.data["families"].append("review") + + self.log.debug("instance.data {}".format(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py new file mode 100644 index 0000000000..716f73022e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py @@ -0,0 +1,213 @@ +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectShotInstance(pyblish.api.InstancePlugin): + """ Collect shot instances + + Resolving its user inputs from creator attributes + to instance data. 
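+ + It also distributes shared frame range keys to the related clip + instances and builds the hierarchy context consumed by downstream + integrators.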
+ """ + + label = "Collect Shot Instances" + order = pyblish.api.CollectorOrder - 0.09 + + hosts = ["traypublisher"] + families = ["shot"] + + SHARED_KEYS = [ + "asset", + "fps", + "handleStart", + "handleEnd", + "frameStart", + "frameEnd", + "clipIn", + "clipOut", + "clipDuration", + "sourceIn", + "sourceOut", + "otioClip", + "workfileFrameStart" + ] + + def process(self, instance): + self.log.debug(pformat(instance.data)) + + creator_identifier = instance.data["creator_identifier"] + if "editorial" not in creator_identifier: + return + + # get otio clip object + otio_clip = self._get_otio_clip(instance) + instance.data["otioClip"] = otio_clip + + # first solve the inputs from creator attr + data = self._solve_inputs_to_data(instance) + instance.data.update(data) + + # distribute all shared keys to clips instances + self._distribute_shared_data(instance) + self._solve_hierarchy_context(instance) + + self.log.debug(pformat(instance.data)) + + def _get_otio_clip(self, instance): + """ Converts otio string data. + + Convert them to proper otio object + and finds its equivalent at otio timeline. + This process is a hack to support also + resolving parent range. + + Args: + instance (obj): publishing instance + + Returns: + otio.Clip: otio clip object + """ + context = instance.context + # convert otio clip from string to object + otio_clip_string = instance.data.pop("otioClip") + otio_clip = otio.adapters.read_from_string( + otio_clip_string) + + otio_timeline = context.data["otioTimeline"] + + clips = [ + clip for clip in otio_timeline.each_child( + descended_from_type=otio.schema.Clip) + if clip.name == otio_clip.name + ] + + otio_clip = clips.pop() + self.log.debug(f"__ otioclip.parent: {otio_clip.parent}") + + return otio_clip + + def _distribute_shared_data(self, instance): + """ Distribute all defined keys. + + All data are shared between all related + instances in context. + + Args: + instance (obj): publishing instance + """ + context = instance.context + + instance_id = instance.data["instance_id"] + + if not context.data.get("editorialSharedData"): + context.data["editorialSharedData"] = {} + + context.data["editorialSharedData"][instance_id] = { + _k: _v for _k, _v in instance.data.items() + if _k in self.SHARED_KEYS + } + + def _solve_inputs_to_data(self, instance): + """ Resolve all user inputs into instance data. + + Args: + instance (obj): publishing instance + + Returns: + dict: instance data updating data + """ + _cr_attrs = instance.data["creator_attributes"] + workfile_start_frame = _cr_attrs["workfile_start_frame"] + frame_start = _cr_attrs["frameStart"] + frame_end = _cr_attrs["frameEnd"] + frame_dur = frame_end - frame_start + + return { + "asset": _cr_attrs["asset_name"], + "fps": float(_cr_attrs["fps"]), + "handleStart": _cr_attrs["handle_start"], + "handleEnd": _cr_attrs["handle_end"], + "frameStart": workfile_start_frame, + "frameEnd": workfile_start_frame + frame_dur, + "clipIn": _cr_attrs["clipIn"], + "clipOut": _cr_attrs["clipOut"], + "clipDuration": _cr_attrs["clipDuration"], + "sourceIn": _cr_attrs["sourceIn"], + "sourceOut": _cr_attrs["sourceOut"], + "workfileFrameStart": workfile_start_frame + } + + def _solve_hierarchy_context(self, instance): + """ Adding hierarchy data to context shared data. 
+ + Args: + instance (obj): publishing instance + """ + context = instance.context + + final_context = ( + context.data["hierarchyContext"] + if context.data.get("hierarchyContext") + else {} + ) + + name = instance.data["asset"] + + # get handles + handle_start = int(instance.data["handleStart"]) + handle_end = int(instance.data["handleEnd"]) + + in_info = { + "entity_type": "Shot", + "custom_attributes": { + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], + "clipIn": instance.data["clipIn"], + "clipOut": instance.data["clipOut"], + "fps": instance.data["fps"] + }, + "tasks": instance.data["tasks"] + } + + parents = instance.data.get('parents', []) + self.log.debug(f"parents: {pformat(parents)}") + + actual = {name: in_info} + + for parent in reversed(parents): + parent_name = parent["entity_name"] + next_dict = { + parent_name: { + "entity_type": parent["entity_type"], + "childs": actual + } + } + actual = next_dict + + final_context = self._update_dict(final_context, actual) + + # adding hierarchy context to instance + context.data["hierarchyContext"] = final_context + self.log.debug(pformat(final_context)) + + def _update_dict(self, ex_dict, new_dict): + """ Recursion function + + Updating nested data with another nested data. + + Args: + ex_dict (dict): nested data + new_dict (dict): nested data + + Returns: + dict: updated nested data + """ + for key in ex_dict: + if key in new_dict and isinstance(ex_dict[key], dict): + new_dict[key] = self._update_dict(ex_dict[key], new_dict[key]) + elif not ex_dict.get(key) or not new_dict.get(key): + new_dict[key] = ex_dict[key] + + return new_dict diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index c6dc765a27..a99b300730 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,17 +1,16 @@ import os -from openpype.client import get_project, get_asset_by_name -from openpype.lib import ( - StringTemplate, - get_workfile_template_key_from_context, - get_workdir_data, - get_last_workfile_with_version, -) +from openpype.lib import StringTemplate from openpype.pipeline import ( registered_host, legacy_io, Anatomy, ) +from openpype.pipeline.workfile import ( + get_workfile_template_key_from_context, + get_last_workfile_with_version, +) +from openpype.pipeline.template_data import get_template_data_with_names from openpype.hosts.tvpaint.api import lib, pipeline, plugin @@ -54,19 +53,17 @@ class LoadWorkfile(plugin.Loader): asset_name = legacy_io.Session["AVALON_ASSET"] task_name = legacy_io.Session["AVALON_TASK"] - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, asset_name) - template_key = get_workfile_template_key_from_context( asset_name, task_name, host_name, - project_name=project_name, - dbcon=legacy_io + project_name=project_name ) anatomy = Anatomy(project_name) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data = get_template_data_with_names( + project_name, asset_name, task_name, host_name + ) data["root"] = anatomy.roots file_template = anatomy.templates[template_key]["file"] diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py index 5be04fc841..50b34bd573 100644 --- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py +++ 
b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- """Hook to launch Unreal and prepare projects.""" import os +import copy from pathlib import Path from openpype.lib import ( PreLaunchHook, ApplicationLaunchFailed, ApplicationNotFound, - get_workdir_data, get_workfile_template_key ) import openpype.hosts.unreal.lib as unreal_lib @@ -35,18 +35,13 @@ class UnrealPrelaunchHook(PreLaunchHook): return last_workfile.name # Prepare data for fill data and for getting workfile template key - task_name = self.data["task_name"] anatomy = self.data["anatomy"] - asset_doc = self.data["asset_doc"] project_doc = self.data["project_doc"] - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") + # Use already prepared workdir data + workdir_data = copy.deepcopy(self.data["workdir_data"]) + task_type = workdir_data.get("task", {}).get("type") - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, self.host_name - ) # QUESTION raise exception if version is part of filename template? workdir_data["version"] = 1 workdir_data["ext"] = "uproject" diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py index b2c3889f68..9fe5f3ab4b 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py @@ -20,6 +20,34 @@ class SkeletalMeshAlembicLoader(plugin.Loader): icon = "cube" color = "orange" + def get_task(self, filename, asset_dir, asset_name, replace): + task = unreal.AssetImportTask() + options = unreal.AbcImportSettings() + sm_settings = unreal.AbcStaticMeshSettings() + conversion_settings = unreal.AbcConversionSettings( + preset=unreal.AbcConversionPreset.CUSTOM, + flip_u=False, flip_v=False, + rotation=[0.0, 0.0, 0.0], + scale=[1.0, 1.0, 1.0]) + + task.set_editor_property('filename', filename) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', replace) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. It works with Unreal 4.26 + options.set_editor_property( + 'import_type', unreal.AlembicImportType.SKELETAL) + + options.static_mesh_settings = sm_settings + options.conversion_settings = conversion_settings + task.options = options + + return task + def load(self, context, name, namespace, data): """Load and containerise representation into Content Browser. 
@@ -50,36 +78,24 @@ class SkeletalMeshAlembicLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() + task = self.get_task(self.fname, asset_dir, asset_name, False) - task.set_editor_property('filename', self.fname) - task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # set import options here - # Unreal 4.24 ignores the settings. It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.SKELETAL) - - task.options = options - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", @@ -110,23 +126,8 @@ class SkeletalMeshAlembicLoader(plugin.Loader): source_path = get_representation_path(representation) destination_path = container["namespace"] - task = unreal.AssetImportTask() + task = self.get_task(source_path, destination_path, name, True) - task.set_editor_property('filename', source_path) - task.set_editor_property('destination_path', destination_path) - # strip suffix - task.set_editor_property('destination_name', name) - task.set_editor_property('replace_existing', True) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.SKELETAL) - - task.options = options # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) container_path = "{}/{}".format(container["namespace"], diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py index 5a73c72c64..50e498dbb0 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -24,7 +24,11 @@ class StaticMeshAlembicLoader(plugin.Loader): task = unreal.AssetImportTask() options = unreal.AbcImportSettings() sm_settings = unreal.AbcStaticMeshSettings() - conversion_settings = unreal.AbcConversionSettings() + conversion_settings = unreal.AbcConversionSettings( + preset=unreal.AbcConversionPreset.CUSTOM, + flip_u=False, flip_v=False, + rotation=[0.0, 0.0, 0.0], + scale=[1.0, 1.0, 1.0]) task.set_editor_property('filename', filename) task.set_editor_property('destination_path', asset_dir) @@ -40,13 +44,6 @@ class StaticMeshAlembicLoader(plugin.Loader): sm_settings.set_editor_property('merge_meshes', True) - conversion_settings.set_editor_property('flip_u', False) - conversion_settings.set_editor_property('flip_v', True) - conversion_settings.set_editor_property( - 'scale', unreal.Vector(x=100.0, y=100.0, z=100.0)) - conversion_settings.set_editor_property( - 'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0)) - options.static_mesh_settings = sm_settings options.conversion_settings = conversion_settings task.options = options @@ -83,22 +80,24 @@ class StaticMeshAlembicLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = self.get_task(self.fname, asset_dir, asset_name, False) + task = self.get_task(self.fname, asset_dir, asset_name, False) - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index 01d589c69b..926c932a85 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -9,7 +9,10 @@ from unreal import EditorLevelLibrary from unreal import EditorLevelUtils from unreal import AssetToolsHelpers from unreal import FBXImportType -from unreal import MathLibrary as umath +from unreal import MovieSceneLevelVisibilityTrack +from unreal import MovieSceneSubTrack + +from bson.objectid import ObjectId from openpype.client import get_asset_by_name, get_assets from 
openpype.pipeline import ( @@ -21,6 +24,7 @@ from openpype.pipeline import ( legacy_io, ) from openpype.pipeline.context_tools import get_current_project_asset +from openpype.api import get_current_project_settings from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -159,9 +163,29 @@ class LayoutLoader(plugin.Loader): hid_section.set_row_index(index) hid_section.set_level_names(maps) - @staticmethod + def _transform_from_basis(self, transform, basis): + """Transform a transform from a basis to a new basis.""" + # Get the basis matrix + basis_matrix = unreal.Matrix( + basis[0], + basis[1], + basis[2], + basis[3] + ) + transform_matrix = unreal.Matrix( + transform[0], + transform[1], + transform[2], + transform[3] + ) + + new_transform = ( + basis_matrix.get_inverse() * transform_matrix * basis_matrix) + + return new_transform.transform() + def _process_family( - assets, class_name, transform, sequence, inst_name=None + self, assets, class_name, transform, basis, sequence, inst_name=None ): ar = unreal.AssetRegistryHelpers.get_asset_registry() @@ -171,30 +195,12 @@ class LayoutLoader(plugin.Loader): for asset in assets: obj = ar.get_asset_by_object_path(asset).get_asset() if obj.get_class().get_name() == class_name: + t = self._transform_from_basis(transform, basis) actor = EditorLevelLibrary.spawn_actor_from_object( - obj, - transform.get('translation') + obj, t.translation ) - if inst_name: - try: - # Rename method leads to crash - # actor.rename(name=inst_name) - - # The label works, although it make it slightly more - # complicated to check for the names, as we need to - # loop through all the actors in the level - actor.set_actor_label(inst_name) - except Exception as e: - print(e) - actor.set_actor_rotation(unreal.Rotator( - umath.radians_to_degrees( - transform.get('rotation').get('x')), - -umath.radians_to_degrees( - transform.get('rotation').get('y')), - umath.radians_to_degrees( - transform.get('rotation').get('z')), - ), False) - actor.set_actor_scale3d(transform.get('scale')) + actor.set_actor_rotation(t.rotation.rotator(), False) + actor.set_actor_scale3d(t.scale3d) if class_name == 'SkeletalMesh': skm_comp = actor.get_editor_property( @@ -203,16 +209,17 @@ class LayoutLoader(plugin.Loader): actors.append(actor) - binding = None - for p in sequence.get_possessables(): - if p.get_name() == actor.get_name(): - binding = p - break + if sequence: + binding = None + for p in sequence.get_possessables(): + if p.get_name() == actor.get_name(): + binding = p + break - if not binding: - binding = sequence.add_possessable(actor) + if not binding: + binding = sequence.add_possessable(actor) - bindings.append(binding) + bindings.append(binding) return actors, bindings @@ -301,52 +308,53 @@ class LayoutLoader(plugin.Loader): actor.skeletal_mesh_component.animation_data.set_editor_property( 'anim_to_play', animation) - # Add animation to the sequencer - bindings = bindings_dict.get(instance_name) + if sequence: + # Add animation to the sequencer + bindings = bindings_dict.get(instance_name) - ar = unreal.AssetRegistryHelpers.get_asset_registry() + ar = unreal.AssetRegistryHelpers.get_asset_registry() - for binding in bindings: - tracks = binding.get_tracks() - track = None - track = tracks[0] if tracks else binding.add_track( - unreal.MovieSceneSkeletalAnimationTrack) + for binding in bindings: + tracks = binding.get_tracks() + track = None + track = tracks[0] if tracks else binding.add_track( + 
unreal.MovieSceneSkeletalAnimationTrack)

-                sections = track.get_sections()
-                section = None
-                if not sections:
-                    section = track.add_section()
-                else:
-                    section = sections[0]
+                    sections = track.get_sections()
+                    section = None
+                    if not sections:
+                        section = track.add_section()
+                    else:
+                        section = sections[0]
+                    sec_params = section.get_editor_property('params')
+                    curr_anim = sec_params.get_editor_property('animation')
+
+                    if curr_anim:
+                        # Checks if the animation path has a container.
+                        # If it does, it means that the animation is
+                        # already in the sequencer.
+                        anim_path = str(Path(
+                            curr_anim.get_path_name()).parent
+                        ).replace('\\', '/')
+
+                        _filter = unreal.ARFilter(
+                            class_names=["AssetContainer"],
+                            package_paths=[anim_path],
+                            recursive_paths=False)
+                        containers = ar.get_assets(_filter)
+
+                        if len(containers) > 0:
+                            return
+
+                    section.set_range(
+                        sequence.get_playback_start(),
+                        sequence.get_playback_end())
                     sec_params = section.get_editor_property('params')
-                curr_anim = sec_params.get_editor_property('animation')
-
-                if curr_anim:
-                    # Checks if the animation path has a container.
-                    # If it does, it means that the animation is already
-                    # in the sequencer.
-                    anim_path = str(Path(
-                        curr_anim.get_path_name()).parent
-                    ).replace('\\', '/')
-
-                    _filter = unreal.ARFilter(
-                        class_names=["AssetContainer"],
-                        package_paths=[anim_path],
-                        recursive_paths=False)
-                    containers = ar.get_assets(_filter)
-
-                    if len(containers) > 0:
-                        return
-
-                    section.set_range(
-                        sequence.get_playback_start(),
-                        sequence.get_playback_end())
-                    sec_params = section.get_editor_property('params')
-                    sec_params.set_editor_property('animation', animation)
+                    sec_params.set_editor_property('animation', animation)

     @staticmethod
-    def _generate_sequence(self, h, h_dir):
+    def _generate_sequence(h, h_dir):
         tools = unreal.AssetToolsHelpers().get_asset_tools()

         sequence = tools.create_asset(
@@ -402,7 +410,7 @@
         return sequence, (min_frame, max_frame)

-    def _process(self, lib_path, asset_dir, sequence, loaded=None):
+    def _process(self, lib_path, asset_dir, sequence, repr_loaded=None):
         ar = unreal.AssetRegistryHelpers.get_asset_registry()

         with open(lib_path, "r") as fp:
@@ -410,8 +418,8 @@
         all_loaders = discover_loader_plugins()

-        if not loaded:
-            loaded = []
+        if not repr_loaded:
+            repr_loaded = []

         path = Path(lib_path)
@@ -422,36 +430,65 @@
         loaded_assets = []

         for element in data:
-            reference = None
-            if element.get('reference_fbx'):
-                reference = element.get('reference_fbx')
+            representation = None
+            repr_format = None
+            if element.get('representation'):
+                self.log.info(element.get("version"))
+
+                valid_formats = ['fbx', 'abc']
+
+                repr_data = legacy_io.find_one({
+                    "type": "representation",
+                    "parent": ObjectId(element.get("version")),
+                    "name": {"$in": valid_formats}
+                })
+
+                if not repr_data:
+                    self.log.error(
+                        f"No valid representation found for version "
+                        f"{element.get('version')}")
+                    continue
+
+                repr_format = repr_data.get('name')
+                representation = str(repr_data.get('_id'))
+            # This is to keep compatibility with old versions of the
+            # json format.
+ elif element.get('reference_fbx'): + representation = element.get('reference_fbx') + repr_format = 'fbx' elif element.get('reference_abc'): - reference = element.get('reference_abc') + representation = element.get('reference_abc') + repr_format = 'abc' # If reference is None, this element is skipped, as it cannot be # imported in Unreal - if not reference: + if not representation: continue instance_name = element.get('instance_name') skeleton = None - if reference not in loaded: - loaded.append(reference) + if representation not in repr_loaded: + repr_loaded.append(representation) family = element.get('family') loaders = loaders_from_representation( - all_loaders, reference) + all_loaders, representation) loader = None - if reference == element.get('reference_fbx'): + if repr_format == 'fbx': loader = self._get_fbx_loader(loaders, family) - elif reference == element.get('reference_abc'): + elif repr_format == 'abc': loader = self._get_abc_loader(loaders, family) if not loader: + self.log.error( + f"No valid loader found for {representation}") continue options = { @@ -460,7 +497,7 @@ class LayoutLoader(plugin.Loader): assets = load_container( loader, - reference, + representation, namespace=instance_name, options=options ) @@ -478,28 +515,36 @@ class LayoutLoader(plugin.Loader): instances = [ item for item in data - if (item.get('reference_fbx') == reference or - item.get('reference_abc') == reference)] + if ((item.get('version') and + item.get('version') == element.get('version')) or + item.get('reference_fbx') == representation or + item.get('reference_abc') == representation)] for instance in instances: - transform = instance.get('transform') + # transform = instance.get('transform') + transform = instance.get('transform_matrix') + basis = instance.get('basis') inst = instance.get('instance_name') actors = [] if family == 'model': actors, _ = self._process_family( - assets, 'StaticMesh', transform, sequence, inst) + assets, 'StaticMesh', transform, basis, + sequence, inst + ) elif family == 'rig': actors, bindings = self._process_family( - assets, 'SkeletalMesh', transform, sequence, inst) + assets, 'SkeletalMesh', transform, basis, + sequence, inst + ) actors_dict[inst] = actors bindings_dict[inst] = bindings if skeleton: - skeleton_dict[reference] = skeleton + skeleton_dict[representation] = skeleton else: - skeleton = skeleton_dict.get(reference) + skeleton = skeleton_dict.get(representation) animation_file = element.get('animation') @@ -573,6 +618,9 @@ class LayoutLoader(plugin.Loader): Returns: list(str): list of container content """ + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] + # Create directory for asset and avalon container hierarchy = context.get('asset').get('data').get('parents') root = self.ASSET_ROOT @@ -593,81 +641,88 @@ class LayoutLoader(plugin.Loader): EditorAssetLibrary.make_directory(asset_dir) - # Create map for the shot, and create hierarchy of map. If the maps - # already exist, we will use them. 
- h_dir = hierarchy_dir_list[0] - h_asset = hierarchy[0] - master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" - if not EditorAssetLibrary.does_asset_exist(master_level): - EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") + master_level = None + shot = None + sequences = [] level = f"{asset_dir}/{asset}_map.{asset}_map" EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map") - EditorLevelLibrary.load_level(master_level) - EditorLevelUtils.add_level_to_world( - EditorLevelLibrary.get_editor_world(), - level, - unreal.LevelStreamingDynamic - ) - EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(level) + if create_sequences: + # Create map for the shot, and create hierarchy of map. If the + # maps already exist, we will use them. + if hierarchy: + h_dir = hierarchy_dir_list[0] + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + if not EditorAssetLibrary.does_asset_exist(master_level): + EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") - # Get all the sequences in the hierarchy. It will create them, if - # they don't exist. - sequences = [] - frame_ranges = [] - for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): - root_content = EditorAssetLibrary.list_assets( - h_dir, recursive=False, include_folder=False) + if master_level: + EditorLevelLibrary.load_level(master_level) + EditorLevelUtils.add_level_to_world( + EditorLevelLibrary.get_editor_world(), + level, + unreal.LevelStreamingDynamic + ) + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(level) - existing_sequences = [ - EditorAssetLibrary.find_asset_data(asset) - for asset in root_content - if EditorAssetLibrary.find_asset_data( - asset).get_class().get_name() == 'LevelSequence' - ] + # Get all the sequences in the hierarchy. It will create them, if + # they don't exist. 
+ frame_ranges = [] + for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): + root_content = EditorAssetLibrary.list_assets( + h_dir, recursive=False, include_folder=False) - if not existing_sequences: - sequence, frame_range = self._generate_sequence(h, h_dir) + existing_sequences = [ + EditorAssetLibrary.find_asset_data(asset) + for asset in root_content + if EditorAssetLibrary.find_asset_data( + asset).get_class().get_name() == 'LevelSequence' + ] - sequences.append(sequence) - frame_ranges.append(frame_range) - else: - for e in existing_sequences: - sequences.append(e.get_asset()) - frame_ranges.append(( - e.get_asset().get_playback_start(), - e.get_asset().get_playback_end())) + if not existing_sequences: + sequence, frame_range = self._generate_sequence(h, h_dir) - shot = tools.create_asset( - asset_name=asset, - package_path=asset_dir, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() - ) + sequences.append(sequence) + frame_ranges.append(frame_range) + else: + for e in existing_sequences: + sequences.append(e.get_asset()) + frame_ranges.append(( + e.get_asset().get_playback_start(), + e.get_asset().get_playback_end())) - # sequences and frame_ranges have the same length - for i in range(0, len(sequences) - 1): - self._set_sequence_hierarchy( - sequences[i], sequences[i + 1], - frame_ranges[i][1], - frame_ranges[i + 1][0], frame_ranges[i + 1][1], - [level]) + shot = tools.create_asset( + asset_name=asset, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) - project_name = legacy_io.active_project() - data = get_asset_by_name(project_name, asset)["data"] - shot.set_display_rate( - unreal.FrameRate(data.get("fps"), 1.0)) - shot.set_playback_start(0) - shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) - self._set_sequence_hierarchy( - sequences[-1], shot, - frame_ranges[-1][1], - data.get('clipIn'), data.get('clipOut'), - [level]) + # sequences and frame_ranges have the same length + for i in range(0, len(sequences) - 1): + self._set_sequence_hierarchy( + sequences[i], sequences[i + 1], + frame_ranges[i][1], + frame_ranges[i + 1][0], frame_ranges[i + 1][1], + [level]) - EditorLevelLibrary.load_level(level) + project_name = legacy_io.active_project() + data = get_asset_by_name(project_name, asset)["data"] + shot.set_display_rate( + unreal.FrameRate(data.get("fps"), 1.0)) + shot.set_playback_start(0) + shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) + if sequences: + self._set_sequence_hierarchy( + sequences[-1], shot, + frame_ranges[-1][1], + data.get('clipIn'), data.get('clipOut'), + [level]) + + EditorLevelLibrary.load_level(level) loaded_assets = self._process(self.fname, asset_dir, shot) @@ -702,32 +757,47 @@ class LayoutLoader(plugin.Loader): for a in asset_content: EditorAssetLibrary.save_asset(a) - EditorLevelLibrary.load_level(master_level) + if master_level: + EditorLevelLibrary.load_level(master_level) return asset_content def update(self, container, representation): + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] + ar = unreal.AssetRegistryHelpers.get_asset_registry() root = "/Game/OpenPype" asset_dir = container.get('namespace') - context = representation.get("context") - hierarchy = context.get('hierarchy').split("/") - h_dir = f"{root}/{hierarchy[0]}" - h_asset = hierarchy[0] - master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + sequence = None + master_level = None - # # Create a 
temporary level to delete the layout level.
-        EditorLevelLibrary.save_all_dirty_levels()
-        EditorAssetLibrary.make_directory(f"{root}/tmp")
-        tmp_level = f"{root}/tmp/temp_map"
-        if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"):
-            EditorLevelLibrary.new_level(tmp_level)
-        else:
-            EditorLevelLibrary.load_level(tmp_level)
+        context = representation.get("context")
+
+        if create_sequences:
+            hierarchy = context.get('hierarchy').split("/")
+            h_dir = f"{root}/{hierarchy[0]}"
+            h_asset = hierarchy[0]
+            master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
+
+            filter = unreal.ARFilter(
+                class_names=["LevelSequence"],
+                package_paths=[asset_dir],
+                recursive_paths=False)
+            sequences = ar.get_assets(filter)
+            sequence = sequences[0].get_asset()
+
+        prev_level = None
+
+        if not master_level:
+            curr_level = unreal.LevelEditorSubsystem().get_current_level()
+            curr_level_path = curr_level.get_outer().get_path_name()
+            # If the level path does not start with "/Game/", the current
+            # level is a temporary, unsaved level.
+            if curr_level_path.startswith("/Game/"):
+                prev_level = curr_level_path

         # Get layout level
         filter = unreal.ARFilter(
@@ -735,11 +805,6 @@
             package_paths=[asset_dir],
             recursive_paths=False)
         levels = ar.get_assets(filter)

-        filter = unreal.ARFilter(
-            class_names=["LevelSequence"],
-            package_paths=[asset_dir],
-            recursive_paths=False)
-        sequences = ar.get_assets(filter)

         layout_level = levels[0].get_editor_property('object_path')
@@ -751,14 +816,14 @@
         for actor in actors:
             unreal.EditorLevelLibrary.destroy_actor(actor)

-        EditorLevelLibrary.save_current_level()
+        if create_sequences:
+            EditorLevelLibrary.save_current_level()

         EditorAssetLibrary.delete_directory(f"{asset_dir}/animations/")

         source_path = get_representation_path(representation)

-        loaded_assets = self._process(
-            source_path, asset_dir, sequences[0].get_asset())
+        loaded_assets = self._process(source_path, asset_dir, sequence)

         data = {
             "representation": str(representation["_id"]),
@@ -776,13 +841,20 @@
         for a in asset_content:
             EditorAssetLibrary.save_asset(a)

-        EditorLevelLibrary.load_level(master_level)
+        if master_level:
+            EditorLevelLibrary.load_level(master_level)
+        elif prev_level:
+            EditorLevelLibrary.load_level(prev_level)

     def remove(self, container):
         """
        Delete the layout. First, check if the assets loaded with the layout
        are used by other layouts. If not, delete the assets.
        """
+        data = get_current_project_settings()
+        create_sequences = data["unreal"]["level_sequences_for_layouts"]
+
+        root = "/Game/OpenPype"
         path = Path(container.get("namespace"))

         containers = unreal_pipeline.ls()
@@ -793,7 +865,7 @@
         # Check if the assets have been loaded by other layouts, and deletes
         # them if they haven't.
-        for asset in container.get('loaded_assets'):
+        for asset in eval(container.get('loaded_assets')):
             layouts = [
                 lc for lc in layout_containers
                 if asset in lc.get('loaded_assets')]
@@ -801,71 +873,87 @@
         if not layouts:
             EditorAssetLibrary.delete_directory(str(Path(asset).parent))

-        # Remove the Level Sequence from the parent.
-        # We need to traverse the hierarchy from the master sequence to find
-        # the level sequence.
- root = "/Game/OpenPype" - namespace = container.get('namespace').replace(f"{root}/", "") - ms_asset = namespace.split('/')[0] - ar = unreal.AssetRegistryHelpers.get_asset_registry() - _filter = unreal.ARFilter( - class_names=["LevelSequence"], - package_paths=[f"{root}/{ms_asset}"], - recursive_paths=False) - sequences = ar.get_assets(_filter) - master_sequence = sequences[0].get_asset() - _filter = unreal.ARFilter( - class_names=["World"], - package_paths=[f"{root}/{ms_asset}"], - recursive_paths=False) - levels = ar.get_assets(_filter) - master_level = levels[0].get_editor_property('object_path') + # Delete the parent folder if there aren't any more + # layouts in it. + asset_content = EditorAssetLibrary.list_assets( + str(Path(asset).parent.parent), recursive=False, + include_folder=True + ) - sequences = [master_sequence] + if len(asset_content) == 0: + EditorAssetLibrary.delete_directory( + str(Path(asset).parent.parent)) - parent = None - for s in sequences: - tracks = s.get_master_tracks() - subscene_track = None - visibility_track = None - for t in tracks: - if t.get_class() == unreal.MovieSceneSubTrack.static_class(): - subscene_track = t - if (t.get_class() == - unreal.MovieSceneLevelVisibilityTrack.static_class()): - visibility_track = t - if subscene_track: - sections = subscene_track.get_sections() - for ss in sections: - if ss.get_sequence().get_name() == container.get('asset'): - parent = s - subscene_track.remove_section(ss) - break - sequences.append(ss.get_sequence()) - # Update subscenes indexes. - i = 0 - for ss in sections: - ss.set_row_index(i) - i += 1 + master_sequence = None + master_level = None + sequences = [] - if visibility_track: - sections = visibility_track.get_sections() - for ss in sections: - if (unreal.Name(f"{container.get('asset')}_map") - in ss.get_level_names()): - visibility_track.remove_section(ss) - # Update visibility sections indexes. - i = -1 - prev_name = [] - for ss in sections: - if prev_name != ss.get_level_names(): + if create_sequences: + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to + # find the level sequence. + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + ar = unreal.AssetRegistryHelpers.get_asset_registry() + _filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(_filter) + master_sequence = sequences[0].get_asset() + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_editor_property('object_path') + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + visibility_track = None + for t in tracks: + if t.get_class() == MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if (ss.get_sequence().get_name() == + container.get('asset')): + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. 
+ i = 0 + for ss in sections: + ss.set_row_index(i) i += 1 - ss.set_row_index(i) - prev_name = ss.get_level_names() - if parent: - break - assert parent, "Could not find the parent sequence" + if visibility_track: + sections = visibility_track.get_sections() + for ss in sections: + if (unreal.Name(f"{container.get('asset')}_map") + in ss.get_level_names()): + visibility_track.remove_section(ss) + # Update visibility sections indexes. + i = -1 + prev_name = [] + for ss in sections: + if prev_name != ss.get_level_names(): + i += 1 + ss.set_row_index(i) + prev_name = ss.get_level_names() + if parent: + break + + assert parent, "Could not find the parent sequence" # Create a temporary level to delete the layout level. EditorLevelLibrary.save_all_dirty_levels() @@ -879,10 +967,9 @@ class LayoutLoader(plugin.Loader): # Delete the layout directory. EditorAssetLibrary.delete_directory(str(path)) - EditorLevelLibrary.load_level(master_level) - EditorAssetLibrary.delete_directory(f"{root}/tmp") - - EditorLevelLibrary.save_current_level() + if create_sequences: + EditorLevelLibrary.load_level(master_level) + EditorAssetLibrary.delete_directory(f"{root}/tmp") # Delete the parent folder if there aren't any more layouts in it. asset_content = EditorAssetLibrary.list_assets( diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 31cd5e7510..3d3e425a86 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -115,6 +115,7 @@ from .transcoding import ( get_ffmpeg_codec_args, get_ffmpeg_format_args, convert_ffprobe_fps_value, + convert_ffprobe_fps_to_float, ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, @@ -287,6 +288,7 @@ __all__ = [ "get_ffmpeg_codec_args", "get_ffmpeg_format_args", "convert_ffprobe_fps_value", + "convert_ffprobe_fps_to_float", "CURRENT_DOC_SCHEMAS", "PROJECT_NAME_ALLOWED_SYMBOLS", diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index f46197e15f..8c92665366 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -27,12 +27,6 @@ from openpype.settings.constants import ( from . import PypeLogger from .profiles_filtering import filter_profiles from .local_settings import get_openpype_username -from .avalon_context import ( - get_workdir_data, - get_workdir_with_workdir_data, - get_workfile_template_key, - get_last_workfile -) from .python_module_tools import ( modules_from_path, @@ -1576,6 +1570,9 @@ def prepare_context_environments(data, env_group=None): data (EnvironmentPrepData): Dictionary where result and intermediate result will be stored. 
""" + + from openpype.pipeline.template_data import get_template_data + # Context environments log = data["log"] @@ -1596,7 +1593,9 @@ def prepare_context_environments(data, env_group=None): # Load project specific environments project_name = project_doc["name"] project_settings = get_project_settings(project_name) + system_settings = get_system_settings() data["project_settings"] = project_settings + data["system_settings"] = system_settings # Apply project specific environments on current env value apply_project_environments_value( project_name, data["env"], project_settings, env_group @@ -1619,8 +1618,8 @@ def prepare_context_environments(data, env_group=None): if not app.is_host: return - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, app.host_name + workdir_data = get_template_data( + project_doc, asset_doc, task_name, app.host_name, system_settings ) data["workdir_data"] = workdir_data @@ -1631,7 +1630,14 @@ def prepare_context_environments(data, env_group=None): data["task_type"] = task_type try: - workdir = get_workdir_with_workdir_data(workdir_data, anatomy) + from openpype.pipeline.workfile import get_workdir_with_workdir_data + + workdir = get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + project_settings=project_settings + ) except Exception as exc: raise ApplicationLaunchFailed( @@ -1721,11 +1727,19 @@ def _prepare_last_workfile(data, workdir): if not last_workfile_path: extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) if extensions: + from openpype.pipeline.workfile import ( + get_workfile_template_key, + get_last_workfile + ) + anatomy = data["anatomy"] project_settings = data["project_settings"] task_type = workdir_data["task"]["type"] template_key = get_workfile_template_key( - task_type, app.host_name, project_settings=project_settings + task_type, + app.host_name, + project_name, + project_settings=project_settings ) # Find last workfile file_template = str(anatomy.templates[template_key]["file"]) diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 4076a91c36..3c7e9b6289 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -13,22 +13,16 @@ from openpype.client import ( get_project, get_assets, get_asset_by_name, - get_subset_by_name, get_subsets, get_last_versions, - get_last_version_by_subset_id, get_last_version_by_subset_name, get_representations, get_workfile_info, ) -from openpype.settings import ( - get_project_settings, - get_system_settings -) +from openpype.settings import get_project_settings from .profiles_filtering import filter_profiles from .events import emit_event from .path_templates import StringTemplate -from .local_settings import get_openpype_username legacy_io = None @@ -188,6 +182,9 @@ def is_latest(representation): Returns: bool: Whether the representation is of latest version. + + Deprecated: + Function will be removed after release version 3.14.* """ from openpype.pipeline.context_tools import is_representation_from_latest @@ -197,7 +194,11 @@ def is_latest(representation): @deprecated("openpype.pipeline.load.any_outdated_containers") def any_outdated(): - """Return whether the current scene has any outdated content""" + """Return whether the current scene has any outdated content. 
+ + Deprecated: + Function will be removed after release version 3.14.* + """ from openpype.pipeline.load import any_outdated_containers @@ -215,6 +216,9 @@ def get_asset(asset_name=None): Returns: (MongoDB document) + + Deprecated: + Function will be removed after release version 3.14.* """ from openpype.pipeline.context_tools import get_current_project_asset @@ -222,17 +226,15 @@ def get_asset(asset_name=None): return get_current_project_asset(asset_name=asset_name) +@deprecated("openpype.pipeline.template_data.get_general_template_data") def get_system_general_anatomy_data(system_settings=None): - if not system_settings: - system_settings = get_system_settings() - studio_name = system_settings["general"]["studio_name"] - studio_code = system_settings["general"]["studio_code"] - return { - "studio": { - "name": studio_name, - "code": studio_code - } - } + """ + Deprecated: + Function will be removed after release version 3.14.* + """ + from openpype.pipeline.template_data import get_general_template_data + + return get_general_template_data(system_settings) def get_linked_asset_ids(asset_doc): @@ -297,7 +299,10 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): Returns: None: If asset, subset or version were not found. - dict: Last version document for entered . + dict: Last version document for entered. + + Deprecated: + Function will be removed after release version 3.14.* """ if not project_name: @@ -316,6 +321,8 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): ) +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key_from_context") def get_workfile_template_key_from_context( asset_name, task_name, host_name, project_name=None, dbcon=None, project_settings=None @@ -344,27 +351,26 @@ def get_workfile_template_key_from_context( ValueError: When both 'dbcon' and 'project_name' were not passed. """ + + from openpype.pipeline.workfile import ( + get_workfile_template_key_from_context + ) + if not project_name: if not dbcon: raise ValueError(( "`get_workfile_template_key_from_context` requires to pass" " one of 'dbcon' or 'project_name' arguments." )) - project_name = dbcon.active_project() - asset_doc = get_asset_by_name( - project_name, asset_name, fields=["data.tasks"] - ) - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") - - return get_workfile_template_key( - task_type, host_name, project_name, project_settings + return get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings ) +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key") def get_workfile_template_key( task_type, host_name, project_name=None, project_settings=None ): @@ -388,43 +394,15 @@ def get_workfile_template_key( ValueError: When both 'project_name' and 'project_settings' were not passed. """ - default = "work" - if not task_type or not host_name: - return default - if not project_settings: - if not project_name: - raise ValueError(( - "`get_workfile_template_key` requires to pass" - " one of 'project_name' or 'project_settings' arguments." 
- )) - project_settings = get_project_settings(project_name) + from openpype.pipeline.workfile import get_workfile_template_key - try: - profiles = ( - project_settings - ["global"] - ["tools"] - ["Workfiles"] - ["workfile_template_profiles"] - ) - except Exception: - profiles = [] - - if not profiles: - return default - - profile_filter = { - "task_types": task_type, - "hosts": host_name - } - profile = filter_profiles(profiles, profile_filter) - if profile: - return profile["workfile_template"] or default - return default + return get_workfile_template_key( + task_type, host_name, project_name, project_settings + ) -# TODO rename function as is not just "work" specific +@deprecated("openpype.pipeline.template_data.get_template_data") def get_workdir_data(project_doc, asset_doc, task_name, host_name): """Prepare data for workdir template filling from entered information. @@ -437,42 +415,19 @@ def get_workdir_data(project_doc, asset_doc, task_name, host_name): Returns: dict: Data prepared for filling workdir template. + + Deprecated: + Function will be removed after release version 3.14.* """ - task_type = asset_doc['data']['tasks'].get(task_name, {}).get('type') - project_task_types = project_doc["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") + from openpype.pipeline.template_data import get_template_data - asset_parents = asset_doc["data"]["parents"] - hierarchy = "/".join(asset_parents) - - parent_name = project_doc["name"] - if asset_parents: - parent_name = asset_parents[-1] - - data = { - "project": { - "name": project_doc["name"], - "code": project_doc["data"].get("code") - }, - "task": { - "name": task_name, - "type": task_type, - "short": task_code, - }, - "asset": asset_doc["name"], - "parent": parent_name, - "app": host_name, - "user": get_openpype_username(), - "hierarchy": hierarchy, - } - - system_general_data = get_system_general_anatomy_data() - data.update(system_general_data) - - return data + return get_template_data( + project_doc, asset_doc, task_name, host_name + ) +@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") def get_workdir_with_workdir_data( workdir_data, anatomy=None, project_name=None, template_key=None ): @@ -499,31 +454,24 @@ def get_workdir_with_workdir_data( Raises: ValueError: When both `anatomy` and `project_name` are set to None. """ + if not anatomy and not project_name: raise ValueError(( "Missing required arguments one of `project_name` or `anatomy`" " must be entered." )) - if not anatomy: - from openpype.pipeline import Anatomy - anatomy = Anatomy(project_name) + if not project_name: + project_name = anatomy.project_name - if not template_key: - template_key = get_workfile_template_key( - workdir_data["task"]["type"], - workdir_data["app"], - project_name=workdir_data["project"]["name"] - ) + from openpype.pipeline.workfile import get_workdir_with_workdir_data - anatomy_filled = anatomy.format(workdir_data) - # Output is TemplateResult object which contain useful data - output = anatomy_filled[template_key]["folder"] - if output: - return output.normalized() - return output + return get_workdir_with_workdir_data( + workdir_data, project_name, anatomy, template_key + ) +@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") def get_workdir( project_doc, asset_doc, @@ -552,40 +500,36 @@ def get_workdir( TemplateResult: Workdir path. 
""" - if not anatomy: - from openpype.pipeline import Anatomy - anatomy = Anatomy(project_doc["name"]) - - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name - ) + from openpype.pipeline.workfile import get_workdir # Output is TemplateResult object which contain useful data - return get_workdir_with_workdir_data( - workdir_data, anatomy, template_key=template_key + return get_workdir( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + template_key ) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_template_data_from_session") def template_data_from_session(session=None): """ Return dictionary with template from session keys. Args: session (dict, Optional): The Session to use. If not provided use the currently active global Session. + Returns: dict: All available data from session. + + Deprecated: + Function will be removed after release version 3.14.* """ - if session is None: - session = legacy_io.Session + from openpype.pipeline.context_tools import get_template_data_from_session - project_name = session["AVALON_PROJECT"] - asset_name = session["AVALON_ASSET"] - task_name = session["AVALON_TASK"] - host_name = session["AVALON_APP"] - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, asset_name) - return get_workdir_data(project_doc, asset_doc, task_name, host_name) + return get_template_data_from_session(session) @with_pipeline_io @@ -611,6 +555,8 @@ def compute_session_changes( dict: The required changes in the Session dictionary. """ + from openpype.pipeline.context_tools import get_workdir_from_session + changes = dict() # If no changes, return directly @@ -657,29 +603,11 @@ def compute_session_changes( return changes -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_workdir_from_session") def get_workdir_from_session(session=None, template_key=None): - from openpype.pipeline import Anatomy + from openpype.pipeline.context_tools import get_workdir_from_session - if session is None: - session = legacy_io.Session - project_name = session["AVALON_PROJECT"] - host_name = session["AVALON_APP"] - anatomy = Anatomy(project_name) - template_data = template_data_from_session(session) - anatomy_filled = anatomy.format(template_data) - - if not template_key: - task_type = template_data["task"]["type"] - template_key = get_workfile_template_key( - task_type, - host_name, - project_name=project_name - ) - path = anatomy_filled[template_key]["folder"] - if path: - path = os.path.normpath(path) - return path + return get_workdir_from_session(session, template_key) @with_pipeline_io @@ -695,8 +623,8 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): Returns: dict: The changed key, values in the current Session. - """ + changes = compute_session_changes( legacy_io.Session, task=task, @@ -726,7 +654,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return changes -@with_pipeline_io @deprecated("openpype.client.get_workfile_info") def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. 
@@ -747,13 +674,14 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io project_name = dbcon.active_project() return get_workfile_info(project_name, asset_id, task_name, filename) -@with_pipeline_io +@deprecated def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): """Creates or replace workfile document in mongo. @@ -768,10 +696,13 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and `legacy_io` is used if not entered. """ + from openpype.pipeline import Anatomy + from openpype.pipeline.template_data import get_template_data # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io # Filter of workfile document @@ -787,7 +718,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): # Prepare project for workdir data project_name = dbcon.active_project() project_doc = get_project(project_name) - workdir_data = get_workdir_data( + workdir_data = get_template_data( project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] ) # Prepare anatomy @@ -818,7 +749,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): ) -@with_pipeline_io +@deprecated def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): if not workfile_doc: # TODO add log message @@ -829,6 +760,7 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io # Convert data to mongo modification keys/values @@ -846,661 +778,11 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): ) -class BuildWorkfile: - """Wrapper for build workfile process. +@deprecated("openpype.pipeline.workfile.BuildWorkfile") +def BuildWorkfile(): + from openpype.pipeline.workfile import BuildWorkfile - Load representations for current context by build presets. Build presets - are host related, since each host has it's loaders. - """ - - log = logging.getLogger("BuildWorkfile") - - @staticmethod - def map_subsets_by_family(subsets): - subsets_by_family = collections.defaultdict(list) - for subset in subsets: - family = subset["data"].get("family") - if not family: - families = subset["data"].get("families") - if not families: - continue - family = families[0] - - subsets_by_family[family].append(subset) - return subsets_by_family - - def process(self): - """Main method of this wrapper. - - Building of workfile is triggered and is possible to implement - post processing of loaded containers if necessary. - """ - containers = self.build_workfile() - - return containers - - @with_pipeline_io - def build_workfile(self): - """Prepares and load containers into workfile. - - Loads latest versions of current and linked assets to workfile by logic - stored in Workfile profiles from presets. Profiles are set by host, - filtered by current task name and used by families. - - Each family can specify representation names and loaders for - representations and first available and successful loaded - representation is returned as container. - - At the end you'll get list of loaded containers per each asset. - - loaded_containers [{ - "asset_entity": , - "containers": [, , ...] - }, { - "asset_entity": , - "containers": [, ...] - }, { - ... 
- }] - """ - from openpype.pipeline import discover_loader_plugins - - # Get current asset name and entity - project_name = legacy_io.active_project() - current_asset_name = legacy_io.Session["AVALON_ASSET"] - current_asset_entity = get_asset_by_name( - project_name, current_asset_name - ) - # Skip if asset was not found - if not current_asset_entity: - print("Asset entity with name `{}` was not found".format( - current_asset_name - )) - return - - # Prepare available loaders - loaders_by_name = {} - for loader in discover_loader_plugins(): - loader_name = loader.__name__ - if loader_name in loaders_by_name: - raise KeyError( - "Duplicated loader name {0}!".format(loader_name) - ) - loaders_by_name[loader_name] = loader - - # Skip if there are any loaders - if not loaders_by_name: - self.log.warning("There are no registered loaders.") - return - - # Get current task name - current_task_name = legacy_io.Session["AVALON_TASK"] - - # Load workfile presets for task - self.build_presets = self.get_build_presets( - current_task_name, current_asset_entity - ) - - # Skip if there are any presets for task - if not self.build_presets: - self.log.warning( - "Current task `{}` does not have any loading preset.".format( - current_task_name - ) - ) - return - - # Get presets for loading current asset - current_context_profiles = self.build_presets.get("current_context") - # Get presets for loading linked assets - link_context_profiles = self.build_presets.get("linked_assets") - # Skip if both are missing - if not current_context_profiles and not link_context_profiles: - self.log.warning( - "Current task `{}` has empty loading preset.".format( - current_task_name - ) - ) - return - - elif not current_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any loading" - " preset for it's context." - ).format(current_task_name)) - - elif not link_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any" - "loading preset for it's linked assets." - ).format(current_task_name)) - - # Prepare assets to process by workfile presets - assets = [] - current_asset_id = None - if current_context_profiles: - # Add current asset entity if preset has current context set - assets.append(current_asset_entity) - current_asset_id = current_asset_entity["_id"] - - if link_context_profiles: - # Find and append linked assets if preset has set linked mapping - link_assets = get_linked_assets(current_asset_entity) - if link_assets: - assets.extend(link_assets) - - # Skip if there are no assets. This can happen if only linked mapping - # is set and there are no links for his asset. - if not assets: - self.log.warning( - "Asset does not have linked assets. Nothing to process." 
- ) - return - - # Prepare entities from database for assets - prepared_entities = self._collect_last_version_repres(assets) - - # Load containers by prepared entities and presets - loaded_containers = [] - # - Current asset containers - if current_asset_id and current_asset_id in prepared_entities: - current_context_data = prepared_entities.pop(current_asset_id) - loaded_data = self.load_containers_by_asset_data( - current_context_data, current_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # - Linked assets container - for linked_asset_data in prepared_entities.values(): - loaded_data = self.load_containers_by_asset_data( - linked_asset_data, link_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # Return list of loaded containers - return loaded_containers - - @with_pipeline_io - def get_build_presets(self, task_name, asset_doc): - """ Returns presets to build workfile for task name. - - Presets are loaded for current project set in - io.Session["AVALON_PROJECT"], filtered by registered host - and entered task name. - - Args: - task_name (str): Task name used for filtering build presets. - - Returns: - (dict): preset per entered task name - """ - host_name = os.environ["AVALON_APP"] - project_settings = get_project_settings( - legacy_io.Session["AVALON_PROJECT"] - ) - - host_settings = project_settings.get(host_name) or {} - # Get presets for host - wb_settings = host_settings.get("workfile_builder") - if not wb_settings: - # backward compatibility - wb_settings = host_settings.get("workfile_build") or {} - - builder_profiles = wb_settings.get("profiles") - if not builder_profiles: - return None - - task_type = ( - asset_doc - .get("data", {}) - .get("tasks", {}) - .get(task_name, {}) - .get("type") - ) - filter_data = { - "task_types": task_type, - "tasks": task_name - } - return filter_profiles(builder_profiles, filter_data) - - def _filter_build_profiles(self, build_profiles, loaders_by_name): - """ Filter build profiles by loaders and prepare process data. - - Valid profile must have "loaders", "families" and "repre_names" keys - with valid values. - - "loaders" expects list of strings representing possible loaders. - - "families" expects list of strings for filtering - by main subset family. - - "repre_names" expects list of strings for filtering by - representation name. - - Lowered "families" and "repre_names" are prepared for each profile with - all required keys. - - Args: - build_profiles (dict): Profiles for building workfile. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list): Filtered and prepared profiles. 
- """ - valid_profiles = [] - for profile in build_profiles: - # Check loaders - profile_loaders = profile.get("loaders") - if not profile_loaders: - self.log.warning(( - "Build profile has missing loaders configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check if any loader is available - loaders_match = False - for loader_name in profile_loaders: - if loader_name in loaders_by_name: - loaders_match = True - break - - if not loaders_match: - self.log.warning(( - "All loaders from Build profile are not available: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check families - profile_families = profile.get("families") - if not profile_families: - self.log.warning(( - "Build profile is missing families configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check representation names - profile_repre_names = profile.get("repre_names") - if not profile_repre_names: - self.log.warning(( - "Build profile is missing" - " representation names filtering: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Prepare lowered families and representation names - profile["families_lowered"] = [ - fam.lower() for fam in profile_families - ] - profile["repre_names_lowered"] = [ - name.lower() for name in profile_repre_names - ] - - valid_profiles.append(profile) - - return valid_profiles - - def _prepare_profile_for_subsets(self, subsets, profiles): - """Select profile for each subset by it's data. - - Profiles are filtered for each subset individually. - Profile is filtered by subset's family, optionally by name regex and - representation names set in profile. - It is possible to not find matching profile for subset, in that case - subset is skipped and it is possible that none of subsets have - matching profile. - - Args: - subsets (list): Subset documents. - profiles (dict): Build profiles. - - Returns: - (dict) Profile by subset's id. - """ - # Prepare subsets - subsets_by_family = self.map_subsets_by_family(subsets) - - profiles_per_subset_id = {} - for family, subsets in subsets_by_family.items(): - family_low = family.lower() - for profile in profiles: - # Skip profile if does not contain family - if family_low not in profile["families_lowered"]: - continue - - # Precompile name filters as regexes - profile_regexes = profile.get("subset_name_filters") - if profile_regexes: - _profile_regexes = [] - for regex in profile_regexes: - _profile_regexes.append(re.compile(regex)) - profile_regexes = _profile_regexes - - # TODO prepare regex compilation - for subset in subsets: - # Verify regex filtering (optional) - if profile_regexes: - valid = False - for pattern in profile_regexes: - if re.match(pattern, subset["name"]): - valid = True - break - - if not valid: - continue - - profiles_per_subset_id[subset["_id"]] = profile - - # break profiles loop on finding the first matching profile - break - return profiles_per_subset_id - - def load_containers_by_asset_data( - self, asset_entity_data, build_profiles, loaders_by_name - ): - """Load containers for entered asset entity by Build profiles. - - Args: - asset_entity_data (dict): Prepared data with subsets, last version - and representations for specific asset. - build_profiles (dict): Build profiles. - loaders_by_name (dict): Available loaders per name. - - Returns: - (dict) Output contains asset document and loaded containers. 
- """ - - # Make sure all data are not empty - if not asset_entity_data or not build_profiles or not loaders_by_name: - return - - asset_entity = asset_entity_data["asset_entity"] - - valid_profiles = self._filter_build_profiles( - build_profiles, loaders_by_name - ) - if not valid_profiles: - self.log.warning( - "There are not valid Workfile profiles. Skipping process." - ) - return - - self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) - - subsets_by_id = {} - version_by_subset_id = {} - repres_by_version_id = {} - for subset_id, in_data in asset_entity_data["subsets"].items(): - subset_entity = in_data["subset_entity"] - subsets_by_id[subset_entity["_id"]] = subset_entity - - version_data = in_data["version"] - version_entity = version_data["version_entity"] - version_by_subset_id[subset_id] = version_entity - repres_by_version_id[version_entity["_id"]] = ( - version_data["repres"] - ) - - if not subsets_by_id: - self.log.warning("There are not subsets for asset {0}".format( - asset_entity["name"] - )) - return - - profiles_per_subset_id = self._prepare_profile_for_subsets( - subsets_by_id.values(), valid_profiles - ) - if not profiles_per_subset_id: - self.log.warning("There are not valid subsets.") - return - - valid_repres_by_subset_id = collections.defaultdict(list) - for subset_id, profile in profiles_per_subset_id.items(): - profile_repre_names = profile["repre_names_lowered"] - - version_entity = version_by_subset_id[subset_id] - version_id = version_entity["_id"] - repres = repres_by_version_id[version_id] - for repre in repres: - repre_name_low = repre["name"].lower() - if repre_name_low in profile_repre_names: - valid_repres_by_subset_id[subset_id].append(repre) - - # DEBUG message - msg = "Valid representations for Asset: `{}`".format( - asset_entity["name"] - ) - for subset_id, repres in valid_repres_by_subset_id.items(): - subset = subsets_by_id[subset_id] - msg += "\n# Subset Name/ID: `{}`/{}".format( - subset["name"], subset_id - ) - for repre in repres: - msg += "\n## Repre name: `{}`".format(repre["name"]) - - self.log.debug(msg) - - containers = self._load_containers( - valid_repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ) - - return { - "asset_entity": asset_entity, - "containers": containers - } - - @with_pipeline_io - def _load_containers( - self, repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ): - """Real load by collected data happens here. - - Loading of representations per subset happens here. Each subset can - loads one representation. Loading is tried in specific order. - Representations are tried to load by names defined in configuration. - If subset has representation matching representation name each loader - is tried to load it until any is successful. If none of them was - successful then next representation name is tried. - Subset process loop ends when any representation is loaded or - all matching representations were already tried. - - Args: - repres_by_subset_id (dict): Available representations mapped - by their parent (subset) id. - subsets_by_id (dict): Subset documents mapped by their id. - profiles_per_subset_id (dict): Build profiles mapped by subset id. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list) Objects of loaded containers. - """ - from openpype.pipeline import ( - IncompatibleLoaderError, - load_container, - ) - - loaded_containers = [] - - # Get subset id order from build presets. 
- build_presets = self.build_presets.get("current_context", []) - build_presets += self.build_presets.get("linked_assets", []) - subset_ids_ordered = [] - for preset in build_presets: - for preset_family in preset["families"]: - for id, subset in subsets_by_id.items(): - if preset_family not in subset["data"].get("families", []): - continue - - subset_ids_ordered.append(id) - - # Order representations from subsets. - print("repres_by_subset_id", repres_by_subset_id) - representations_ordered = [] - representations = [] - for id in subset_ids_ordered: - for subset_id, repres in repres_by_subset_id.items(): - if repres in representations: - continue - - if id == subset_id: - representations_ordered.append((subset_id, repres)) - representations.append(repres) - - print("representations", representations) - - # Load ordered representations. - for subset_id, repres in representations_ordered: - subset_name = subsets_by_id[subset_id]["name"] - - profile = profiles_per_subset_id[subset_id] - loaders_last_idx = len(profile["loaders"]) - 1 - repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 - - repre_by_low_name = { - repre["name"].lower(): repre for repre in repres - } - - is_loaded = False - for repre_name_idx, profile_repre_name in enumerate( - profile["repre_names_lowered"] - ): - # Break iteration if representation was already loaded - if is_loaded: - break - - repre = repre_by_low_name.get(profile_repre_name) - if not repre: - continue - - for loader_idx, loader_name in enumerate(profile["loaders"]): - if is_loaded: - break - - loader = loaders_by_name.get(loader_name) - if not loader: - continue - try: - container = load_container( - loader, - repre["_id"], - name=subset_name - ) - loaded_containers.append(container) - is_loaded = True - - except Exception as exc: - if exc == IncompatibleLoaderError: - self.log.info(( - "Loader `{}` is not compatible with" - " representation `{}`" - ).format(loader_name, repre["name"])) - - else: - self.log.error( - "Unexpected error happened during loading", - exc_info=True - ) - - msg = "Loading failed." - if loader_idx < loaders_last_idx: - msg += " Trying next loader." - elif repre_name_idx < repre_names_last_idx: - msg += ( - " Loading of subset `{}` was not successful." - ).format(subset_name) - else: - msg += " Trying next representation." - self.log.info(msg) - - return loaded_containers - - @with_pipeline_io - def _collect_last_version_repres(self, asset_docs): - """Collect subsets, versions and representations for asset_entities. - - Args: - asset_entities (list): Asset entities for which want to find data - - Returns: - (dict): collected entities - - Example output: - ``` - { - {Asset ID}: { - "asset_entity": , - "subsets": { - {Subset ID}: { - "subset_entity": , - "version": { - "version_entity": , - "repres": [ - , , ... - ] - } - }, - ... - } - }, - ... 
- } - output[asset_id]["subsets"][subset_id]["version"]["repres"] - ``` - """ - - output = {} - if not asset_docs: - return output - - asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs} - - project_name = legacy_io.active_project() - subsets = list(get_subsets( - project_name, asset_ids=asset_docs_by_ids.keys() - )) - subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - - last_version_by_subset_id = get_last_versions( - project_name, subset_entity_by_ids.keys() - ) - last_version_docs_by_id = { - version["_id"]: version - for version in last_version_by_subset_id.values() - } - repre_docs = get_representations( - project_name, version_ids=last_version_docs_by_id.keys() - ) - - for repre_doc in repre_docs: - version_id = repre_doc["parent"] - version_doc = last_version_docs_by_id[version_id] - - subset_id = version_doc["parent"] - subset_doc = subset_entity_by_ids[subset_id] - - asset_id = subset_doc["parent"] - asset_doc = asset_docs_by_ids[asset_id] - - if asset_id not in output: - output[asset_id] = { - "asset_entity": asset_doc, - "subsets": {} - } - - if subset_id not in output[asset_id]["subsets"]: - output[asset_id]["subsets"][subset_id] = { - "subset_entity": subset_doc, - "version": { - "version_entity": version_doc, - "repres": [] - } - } - - output[asset_id]["subsets"][subset_id]["version"]["repres"].append( - repre_doc - ) - - return output + return BuildWorkfile() @with_pipeline_io @@ -1533,13 +815,21 @@ def get_creator_by_name(creator_name, case_sensitive=False): return None -@with_pipeline_io +@deprecated def change_timer_to_current_context(): """Called after context change to change timers. + Deprecated: + This method is specific for TimersManager module so please use the + functionality from there. Function will be removed after release + version 3.14.* + TODO: - use TimersManager's static method instead of reimplementing it here """ + + from openpype.pipeline import legacy_io + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: log.warning("Couldn't find webserver url") @@ -1627,6 +917,8 @@ def _get_task_context_data_for_anatomy( return data +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_context") def get_custom_workfile_template_by_context( template_profiles, project_doc, asset_doc, task_name, anatomy=None ): @@ -1680,6 +972,9 @@ def get_custom_workfile_template_by_context( return None +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_string_context" +) def get_custom_workfile_template_by_string_context( template_profiles, project_name, asset_name, task_name, dbcon=None, anatomy=None @@ -1724,7 +1019,7 @@ def get_custom_workfile_template_by_string_context( ) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_custom_workfile_template") def get_custom_workfile_template(template_profiles): """Filter and fill workfile template profiles by current context. @@ -1739,6 +1034,8 @@ def get_custom_workfile_template(template_profiles): context. (Existence of formatted path is not validated.) 
""" + from openpype.pipeline import legacy_io + return get_custom_workfile_template_by_string_context( template_profiles, legacy_io.Session["AVALON_PROJECT"], @@ -1748,6 +1045,7 @@ def get_custom_workfile_template(template_profiles): ) +@deprecated("openpype.pipeline.workfile.get_last_workfile_with_version") def get_last_workfile_with_version( workdir, file_template, fill_data, extensions ): @@ -1763,78 +1061,15 @@ def get_last_workfile_with_version( tuple: Last workfile with version if there is any otherwise returns (None, None). """ - if not os.path.exists(workdir): - return None, None - # Fast match on extension - filenames = [ - filename - for filename in os.listdir(workdir) - if os.path.splitext(filename)[1] in extensions - ] + from openpype.pipeline.workfile import get_last_workfile_with_version - # Build template without optionals, version to digits only regex - # and comment to any definable value. - _ext = [] - for ext in extensions: - if not ext.startswith("."): - ext = "." + ext - # Escape dot for regex - ext = "\\" + ext - _ext.append(ext) - ext_expression = "(?:" + "|".join(_ext) + ")" - - # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end - file_template = re.sub(r"\.?{ext}", ext_expression, file_template) - # Replace optional keys with optional content regex - file_template = re.sub(r"<.*?>", r".*?", file_template) - # Replace `{version}` with group regex - file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) - file_template = re.sub(r"{comment.*?}", r".+?", file_template) - file_template = StringTemplate.format_strict_template( - file_template, fill_data + return get_last_workfile_with_version( + workdir, file_template, fill_data, extensions ) - # Match with ignore case on Windows due to the Windows - # OS not being case-sensitive. This avoids later running - # into the error that the file did exist if it existed - # with a different upper/lower-case. - kwargs = {} - if platform.system().lower() == "windows": - kwargs["flags"] = re.IGNORECASE - - # Get highest version among existing matching files - version = None - output_filenames = [] - for filename in sorted(filenames): - match = re.match(file_template, filename, **kwargs) - if not match: - continue - - file_version = int(match.group(1)) - if version is None or file_version > version: - output_filenames[:] = [] - version = file_version - - if file_version == version: - output_filenames.append(filename) - - output_filename = None - if output_filenames: - if len(output_filenames) == 1: - output_filename = output_filenames[0] - else: - last_time = None - for _output_filename in output_filenames: - full_path = os.path.join(workdir, _output_filename) - mod_time = os.path.getmtime(full_path) - if last_time is None or last_time < mod_time: - output_filename = _output_filename - last_time = mod_time - - return output_filename, version - +@deprecated("openpype.pipeline.workfile.get_last_workfile") def get_last_workfile( workdir, file_template, fill_data, extensions, full_path=False ): @@ -1852,22 +1087,12 @@ def get_last_workfile( Returns: str: Last or first workfile as filename of full path to filename. 
""" - filename, version = get_last_workfile_with_version( - workdir, file_template, fill_data, extensions + + from openpype.pipeline.workfile import get_last_workfile + + return get_last_workfile( + workdir, file_template, fill_data, extensions, full_path ) - if filename is None: - data = copy.deepcopy(fill_data) - data["version"] = 1 - data.pop("comment", None) - if not data.get("ext"): - data["ext"] = extensions[0] - data["ext"] = data["ext"].replace('.', '') - filename = StringTemplate.format_strict_template(file_template, data) - - if full_path: - return os.path.normpath(os.path.join(workdir, filename)) - - return filename @with_pipeline_io diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index c1282016ef..e4b18ec258 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -211,15 +211,28 @@ class StringTemplate(object): if counted_symb > -1: parts = tmp_parts.pop(counted_symb) counted_symb -= 1 + # If part contains only single string keep value + # unchanged if parts: # Remove optional start char parts.pop(0) - if counted_symb < 0: - out_parts = new_parts - else: - out_parts = tmp_parts[counted_symb] - # Store temp parts - out_parts.append(OptionalPart(parts)) + + if not parts: + value = "<>" + elif ( + len(parts) == 1 + and isinstance(parts[0], six.string_types) + ): + value = "<{}>".format(parts[0]) + else: + value = OptionalPart(parts) + + if counted_symb < 0: + out_parts = new_parts + else: + out_parts = tmp_parts[counted_symb] + # Store value + out_parts.append(value) continue if counted_symb < 0: @@ -793,6 +806,7 @@ class OptionalPart: parts(list): Parts of template. Can contain 'str', 'OptionalPart' or 'FormattingPart'. """ + def __init__(self, parts): self._parts = parts diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 1d3c1eec6b..060db94ae0 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- """Avalon/Pyblish plugin tools.""" import os -import inspect import logging import re import json +import warnings +import functools + from openpype.client import get_asset_by_id from openpype.settings import get_project_settings @@ -17,6 +19,51 @@ log = logging.getLogger(__name__) DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" +class PluginToolsDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+ ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", PluginToolsDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=PluginToolsDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + class TaskNotSetError(KeyError): def __init__(self, msg=None): if not msg: @@ -197,6 +244,7 @@ def prepare_template_data(fill_pairs): return fill_data +@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins") def filter_pyblish_plugins(plugins): """Filter pyblish plugins by presets. @@ -206,57 +254,14 @@ def filter_pyblish_plugins(plugins): Args: plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` `discover()` method. - """ - from pyblish import api - host = api.current_host() + from openpype.pipeline.publish.lib import filter_pyblish_plugins - presets = get_project_settings(os.environ['AVALON_PROJECT']) or {} - # skip if there are no presets to process - if not presets: - return - - # iterate over plugins - for plugin in plugins[:]: - - try: - config_data = presets[host]["publish"][plugin.__name__] - except KeyError: - # host determined from path - file = os.path.normpath(inspect.getsourcefile(plugin)) - file = os.path.normpath(file) - - split_path = file.split(os.path.sep) - if len(split_path) < 4: - log.warning( - 'plugin path too short to extract host {}'.format(file) - ) - continue - - host_from_file = split_path[-4] - plugin_kind = split_path[-2] - - # TODO: change after all plugins are moved one level up - if host_from_file == "openpype": - host_from_file = "global" - - try: - config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501 - except KeyError: - continue - - for option, value in config_data.items(): - if option == "enabled" and value is False: - log.info('removing plugin {}'.format(plugin.__name__)) - plugins.remove(plugin) - else: - log.info('setting {}:{} on plugin {}'.format( - option, value, plugin.__name__)) - - setattr(plugin, option, value) + filter_pyblish_plugins(plugins) +@deprecated def set_plugin_attributes_from_settings( plugins, superclass, host_name=None, project_name=None ): @@ -273,6 +278,8 @@ def set_plugin_attributes_from_settings( project_name (str): Name of project for which settings will be loaded. Value from environment `AVALON_PROJECT` is used if not entered. 
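The `deprecated` decorator above supports two call forms: bare (`@deprecated`) when there is no direct replacement, and with a dotted path string pointing at the new location. A short usage sketch, assuming the decorator and warning class are imported from `openpype.lib.plugin_tools` as added above:

```python
import warnings

from openpype.lib.plugin_tools import (
    deprecated,
    PluginToolsDeprecatedWarning,
)


@deprecated  # bare form: no replacement to point at
def old_helper():
    return 1


@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins")
def old_filter(plugins):  # form with an explicit new destination
    return plugins


with warnings.catch_warnings(record=True) as caught:
    old_helper()
    old_filter([])

# Both calls emit PluginToolsDeprecatedWarning with the message
# composed inside the decorator above.
assert all(
    issubclass(w.category, PluginToolsDeprecatedWarning) for w in caught
)
assert len(caught) == 2
```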
""" + + # Function is not used anymore from openpype.pipeline import LegacyCreator, LoaderPlugin # determine host application to use for finding presets diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index 38c6b07c5b..b4b05c053b 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -9,6 +9,8 @@ import pyblish.api from openpype.client.mongo import OpenPypeMongoConnection from openpype.lib.plugin_tools import parse_json +from openpype.lib.profiles_filtering import filter_profiles +from openpype.api import get_project_settings ERROR_STATUS = "error" IN_PROGRESS_STATUS = "in_progress" @@ -175,14 +177,12 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): ) -def fail_batch(_id, batches_in_progress, dbcon): - """Set current batch as failed as there are some stuck batches.""" - running_batches = [str(batch["_id"]) - for batch in batches_in_progress - if batch["_id"] != _id] - msg = "There are still running batches {}\n". \ - format("\n".join(running_batches)) - msg += "Ask admin to check them and reprocess current batch" +def fail_batch(_id, dbcon, msg): + """Set current batch as failed as there is some problem. + + Raises: + ValueError + """ dbcon.update_one( {"_id": _id}, {"$set": @@ -259,3 +259,19 @@ def get_task_data(batch_dir): "Cannot parse batch meta in {} folder".format(task_data)) return task_data + + +def get_timeout(project_name, host_name, task_type): + """Returns timeout(seconds) from Setting profile.""" + filter_data = { + "task_types": task_type, + "hosts": host_name + } + timeout_profiles = (get_project_settings(project_name)["webpublisher"] + ["timeout_profiles"]) + matching_item = filter_profiles(timeout_profiles, filter_data) + timeout = 3600 + if matching_item: + timeout = matching_item["timeout"] + + return timeout diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index ee9a0f08de..60d5d3ed4a 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -938,3 +938,40 @@ def convert_ffprobe_fps_value(str_value): fps = int(fps) return str(fps) + + +def convert_ffprobe_fps_to_float(value): + """Convert string value of frame rate to float. + + Copy of 'convert_ffprobe_fps_value' which raises exceptions on invalid + value, does not convert value to string and does not return "Unknown" + string. + + Args: + value (str): Value to be converted. + + Returns: + Float: Converted frame rate in float. If divisor in value is '0' then + '0.0' is returned. + + Raises: + ValueError: Passed value is invalid for conversion. + """ + + if not value: + raise ValueError("Got empty value.") + + items = value.split("/") + if len(items) == 1: + return float(items[0]) + + if len(items) > 2: + raise ValueError(( + "FPS expression contains multiple dividers \"{}\"." 
+ ).format(value)) + + dividend = float(items.pop(0)) + divisor = float(items.pop(0)) + if divisor == 0.0: + return 0.0 + return dividend / divisor diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index de8df3dd9e..c55f85c8da 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -80,7 +80,8 @@ class AfterEffectsSubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index a1ee5e0957..3f9c09b592 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -274,7 +274,8 @@ class HarmonySubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index fdf67b51bc..95856137e2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -130,6 +130,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. "houdini17.5;pluginx2.3" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index aca88c7440..beda753723 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -101,6 +101,7 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. 
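For reference, the behavior `convert_ffprobe_fps_to_float` commits to above, shown as a small usage sketch:

```python
from openpype.lib.transcoding import convert_ffprobe_fps_to_float

# Plain and rational values, as ffprobe reports frame rates
assert convert_ffprobe_fps_to_float("25") == 25.0
assert abs(convert_ffprobe_fps_to_float("30000/1001") - 29.97) < 0.001

# A zero divisor is defined to return 0.0 instead of raising
assert convert_ffprobe_fps_to_float("24/0") == 0.0

# Empty values and multiple dividers raise ValueError
for invalid in ("", "1/2/3"):
    try:
        convert_ffprobe_fps_to_float(invalid)
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for {!r}".format(invalid))
```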
"maya2018;vray4.x;yeti3.1.9" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 3707c5709f..13dfc0183a 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -413,8 +413,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Gather needed data ------------------------------------------------ default_render_file = instance.context.data.get('project_settings')\ .get('maya')\ - .get('create')\ - .get('CreateRender')\ + .get('RenderSettings')\ .get('default_render_image_folder') filename = os.path.basename(filepath) comment = context.data.get("comment", "") @@ -519,12 +518,14 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", + "OPENPYPE_SG_USER", "AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index 57572fcb24..38ae5d2f7f 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -5,7 +5,6 @@ from maya import cmds from openpype.pipeline import legacy_io, PublishXmlValidationError from openpype.settings import get_project_settings -import openpype.api import pyblish.api @@ -34,7 +33,9 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): targets = ["local"] def process(self, instance): - settings = get_project_settings(os.getenv("AVALON_PROJECT")) + project_name = instance.context.data["projectName"] + # TODO settings can be received from 'context.data["project_settings"]' + settings = get_project_settings(project_name) # use setting for publish job on farm, no reason to have it separately deadline_publish_job_sett = (settings["deadline"] ["publish"] @@ -53,9 +54,6 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): scene = instance.context.data["currentFile"] scenename = os.path.basename(scene) - # Get project code - project_name = legacy_io.Session["AVALON_PROJECT"] - job_name = "{scene} [PUBLISH]".format(scene=scenename) batch_name = "{code} - {scene}".format(code=project_name, scene=scenename) @@ -102,13 +100,14 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", - "FTRACK_SERVER" + "FTRACK_SERVER", + "OPENPYPE_VERSION" ] environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) - # TODO replace legacy_io with context.data ? 
- environment["AVALON_PROJECT"] = legacy_io.Session["AVALON_PROJECT"] + # TODO replace legacy_io with context.data + environment["AVALON_PROJECT"] = project_name environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 93fb511a34..336a56ec45 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -80,10 +80,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Using published scene for render {}".format(script_path) ) - # exception for slate workflow - if "slate" in instance.data["families"]: - submit_frame_start -= 1 - response = self.payload_submit( instance, script_path, @@ -99,10 +95,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["publishJobState"] = "Suspended" if instance.data.get("bakingNukeScripts"): - # exception for slate workflow - if "slate" in instance.data["families"]: - submit_frame_start += 1 - for baking_script in instance.data["bakingNukeScripts"]: render_path = baking_script["bakeRenderPath"] script_path = baking_script["bakeScriptPath"] @@ -261,7 +253,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "PYBLISHPLUGINPATH", "NUKE_PATH", "TOOL_ENV", - "FOUNDRY_LICENSE" + "FOUNDRY_LICENSE", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): @@ -365,7 +358,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): if not instance.data.get("expectedFiles"): instance.data["expectedFiles"] = [] - dir = os.path.dirname(path) + dirname = os.path.dirname(path) file = os.path.basename(path) if "#" in file: @@ -377,9 +370,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["expectedFiles"].append(path) return + if instance.data.get("slate"): + start_frame -= 1 + for i in range(start_frame, (end_frame + 1)): instance.data["expectedFiles"].append( - os.path.join(dir, (file % i)).replace("\\", "/")) + os.path.join(dirname, (file % i)).replace("\\", "/")) def get_limit_groups(self): """Search for limit group nodes and return group name. diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 43ea64e565..379953c9e4 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -141,7 +141,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_USERNAME", "OPENPYPE_RENDER_JOB", "OPENPYPE_PUBLISH_JOB", - "OPENPYPE_MONGO" + "OPENPYPE_MONGO", + "OPENPYPE_VERSION" ] # custom deadline attributes @@ -158,7 +159,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # mapping of instance properties to be transfered to new instance for every # specified family instance_transfer = { - "slate": ["slateFrames"], + "slate": ["slateFrames", "slate"], "review": ["lutPath"], "render2d": ["bakingNukeScripts", "version"], "renderlayer": ["convertToScanline"] @@ -585,11 +586,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues on farm." 
).format(staging)) + frame_start = int(instance.get("frameStartHandle")) + if instance.get("slate"): + frame_start -= 1 + rep = { "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(collection)], - "frameStart": int(instance.get("frameStartHandle")), + "frameStart": frame_start, "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index bcd853f374..61b95cf06d 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -6,13 +6,52 @@ import subprocess import json import platform import uuid -from Deadline.Scripting import RepositoryUtils, FileUtils +import re +from Deadline.Scripting import RepositoryUtils, FileUtils, DirectoryUtils + + +def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. + + """ + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(" ! path is not a build: {}".format(path)) + return None + + version = {} + with open(version_file, "r") as vf: + exec(vf.read(), version) + + version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) + return version_match[1] def get_openpype_executable(): """Return OpenPype Executable from Event Plug-in Settings""" config = RepositoryUtils.GetPluginConfig("OpenPype") - return config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + dir_list = config.GetConfigEntryWithDefault( + "OpenPypeInstallationDirs", "") + return exe_list, dir_list def inject_openpype_environment(deadlinePlugin): @@ -25,16 +64,94 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Injecting OpenPype environments ...") try: print(">>> Getting OpenPype executable ...") - exe_list = get_openpype_executable() - openpype_app = FileUtils.SearchFileList(exe_list) - if openpype_app == "": + exe_list, dir_list = get_openpype_executable() + openpype_versions = [] + # if the job requires specific OpenPype version, + # lets go over all available and find compatible build. 
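`get_openpype_version_from_path` above reads `openpype/version.py` by executing it into a throwaway namespace and trimming the `__version__` string to its major.minor.patch prefix. The parsing step in isolation, with a made-up sample version string:

```python
import re

# version.py defines __version__; exec-ing it into a dict reads the
# value without importing the whole OpenPype package.
version_ns = {}
exec('__version__ = "3.13.1-nightly.3"', version_ns)  # sample content

# Same regex as above: keep only the major.minor.patch prefix.
match = re.search(r"(\d+\.\d+.\d+).*", version_ns["__version__"])
assert match[1] == "3.13.1"
```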
+ requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION")
+ if requested_version:
+ print((
+ ">>> Scanning for compatible requested version {}"
+ ).format(requested_version))
+ install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
+ if install_dir:
+ print("--- Looking for OpenPype at: {}".format(install_dir))
+ sub_dirs = [
+ f.path for f in os.scandir(install_dir)
+ if f.is_dir()
+ ]
+ for subdir in sub_dirs:
+ version = get_openpype_version_from_path(subdir)
+ if not version:
+ continue
+ print(" - found: {} - {}".format(version, subdir))
+ openpype_versions.append((version, subdir))
+
+ exe = FileUtils.SearchFileList(exe_list)
+ if openpype_versions:
+ # if looking for requested compatible version,
+ # add the implicitly specified to the list too.
+ print("Looking for OpenPype at: {}".format(os.path.dirname(exe)))
+ version = get_openpype_version_from_path(
+ os.path.dirname(exe))
+ if version:
+ print(" - found: {} - {}".format(
+ version, os.path.dirname(exe)
+ ))
+ openpype_versions.append((version, os.path.dirname(exe)))
+
+ if requested_version:
+ # sort detected versions
+ if openpype_versions:
+ # use natural sorting
+ openpype_versions.sort(
+ key=lambda ver: [
+ int(t) if t.isdigit() else t.lower()
+ for t in re.split(r"(\d+)", ver[0])
+ ])
+ print((
+ "*** Latest available version found is {}"
+ ).format(openpype_versions[-1][0]))
+ requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501
+ compatible_versions = []
+ for version in openpype_versions:
+ v = version[0].split(".")[:3]
+ if v[0] == requested_major and v[1] == requested_minor:
+ compatible_versions.append(version)
+ if not compatible_versions:
+ raise RuntimeError(
+ ("Cannot find compatible version available "
+ "for version {} requested by the job. "
+ "Please add it through plugin configuration "
+ "in Deadline or install it to configured "
+ "directory.").format(requested_version))
+ # sort compatible versions and pick the last one
+ compatible_versions.sort(
+ key=lambda ver: [
+ int(t) if t.isdigit() else t.lower()
+ for t in re.split(r"(\d+)", ver[0])
+ ])
+ print((
+ "*** Latest compatible version found is {}"
+ ).format(compatible_versions[-1][0]))
+ # create list of executables for different platform and let
+ # Deadline decide.
+ exe_list = [
+ os.path.join(
+ compatible_versions[-1][1], "openpype_console.exe"),
+ os.path.join(
+ compatible_versions[-1][1], "openpype_console")
+ ]
+ exe = FileUtils.SearchFileList(";".join(exe_list))
+ if exe == "":
 raise RuntimeError(
 "OpenPype executable was not found " +
- "in the semicolon separated list \"" + exe_list + "\". " +
+ "in the semicolon separated list " +
+ "\"" + ";".join(exe_list) + "\". 
" + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") - print("--- OpenPype executable: {}".format(openpype_app)) + print("--- OpenPype executable: {}".format(exe)) # tempfile.TemporaryFile cannot be used because of locking temp_file_name = "{}_{}.json".format( @@ -45,7 +162,7 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Temporary path: {}".format(export_url)) args = [ - openpype_app, + exe, "--headless", 'extractenvironments', export_url @@ -75,9 +192,9 @@ def inject_openpype_environment(deadlinePlugin): env["OPENPYPE_HEADLESS_MODE"] = "1" env["AVALON_TIMEOUT"] = "5000" - print(">>> Executing: {}".format(args)) + print(">>> Executing: {}".format(" ".join(args))) std_output = subprocess.check_output(args, - cwd=os.path.dirname(openpype_app), + cwd=os.path.dirname(exe), env=env) print(">>> Process result {}".format(std_output)) @@ -122,78 +239,6 @@ def inject_render_job_id(deadlinePlugin): print(">>> Injection end.") -def pype_command_line(executable, arguments, workingDirectory): - """Remap paths in comand line argument string. - - Using Deadline rempper it will remap all path found in command-line. - - Args: - executable (str): path to executable - arguments (str): arguments passed to executable - workingDirectory (str): working directory path - - Returns: - Tuple(executable, arguments, workingDirectory) - - """ - print("-" * 40) - print("executable: {}".format(executable)) - print("arguments: {}".format(arguments)) - print("workingDirectory: {}".format(workingDirectory)) - print("-" * 40) - print("Remapping arguments ...") - arguments = RepositoryUtils.CheckPathMapping(arguments) - print("* {}".format(arguments)) - print("-" * 40) - return executable, arguments, workingDirectory - - -def pype(deadlinePlugin): - """Remaps `PYPE_METADATA_FILE` and `PYPE_PYTHON_EXE` environment vars. - - `PYPE_METADATA_FILE` is used on farm to point to rendered data. This path - originates on platform from which this job was published. To be able to - publish on different platform, this path needs to be remapped. - - `PYPE_PYTHON_EXE` can be used to specify custom location of python - interpreter to use for Pype. This is remappeda also if present even - though it probably doesn't make much sense. - - Arguments: - deadlinePlugin: Deadline job plugin passed by Deadline - - """ - print(">>> Getting job ...") - job = deadlinePlugin.GetJob() - # PYPE should be here, not OPENPYPE - backward compatibility!! - pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE") - pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE") - print(">>> Having backward compatible env vars {}/{}".format(pype_metadata, - pype_python)) - # test if it is pype publish job. 
- if pype_metadata: - pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata) - if platform.system().lower() == "linux": - pype_metadata = pype_metadata.replace("\\", "/") - - print("- remapping PYPE_METADATA_FILE: {}".format(pype_metadata)) - job.SetJobEnvironmentKeyValue("PYPE_METADATA_FILE", pype_metadata) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_METADATA_FILE", pype_metadata) - - if pype_python: - pype_python = RepositoryUtils.CheckPathMapping(pype_python) - if platform.system().lower() == "linux": - pype_python = pype_python.replace("\\", "/") - - print("- remapping PYPE_PYTHON_EXE: {}".format(pype_python)) - job.SetJobEnvironmentKeyValue("PYPE_PYTHON_EXE", pype_python) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_PYTHON_EXE", pype_python) - - deadlinePlugin.ModifyCommandLineCallback += pype_command_line - - def __main__(deadlinePlugin): print("*** GlobalJobPreload start ...") print(">>> Getting job ...") @@ -217,5 +262,3 @@ def __main__(deadlinePlugin): inject_render_job_id(deadlinePlugin) elif openpype_render_job == '1' or openpype_remote_job == '1': inject_openpype_environment(deadlinePlugin) - else: - pype(deadlinePlugin) # backward compatibility with Pype2 diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param index 8bd6dce12d..b3ac18e20c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param @@ -7,11 +7,20 @@ Index=0 Default=OpenPype Plugin for Deadline Description=Not configurable +[OpenPypeInstallationDirs] +Type=multilinemultifolder +Label=Directories where OpenPype versions are installed +Category=OpenPype Installation Directories +CategoryOrder=0 +Index=0 +Default=C:\Program Files (x86)\OpenPype +Description=Path or paths to directories where multiple versions of OpenPype might be installed. Enter every such path on separate lines. + [OpenPypeExecutable] Type=multilinemultifilename Label=OpenPype Executable Category=OpenPype Executables -CategoryOrder=0 +CategoryOrder=1 Index=0 Default= Description=The path to the OpenPype executable. Enter alternative paths on separate lines. diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index 451d71fb63..6b0f69d98f 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -1,10 +1,19 @@ +#!/usr/bin/env python3 + from System.IO import Path from System.Text.RegularExpressions import Regex from Deadline.Plugins import PluginType, DeadlinePlugin -from Deadline.Scripting import StringUtils, FileUtils, RepositoryUtils +from Deadline.Scripting import ( + StringUtils, + FileUtils, + DirectoryUtils, + RepositoryUtils +) import re +import os +import platform ###################################################################### @@ -52,13 +61,115 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): self.AddStdoutHandlerCallback( ".*Progress: (\d+)%.*").HandleCallback += self.HandleProgress + @staticmethod + def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. 
+
+ """
+ # fix path for application bundle on macos
+ if platform.system().lower() == "darwin":
+ path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
+
+ version_file = os.path.join(path, "openpype", "version.py")
+ if not os.path.isfile(version_file):
+ return None
+
+ # skip if the version is not build
+ exe = os.path.join(path, "openpype_console.exe")
+ if platform.system().lower() in ["linux", "darwin"]:
+ exe = os.path.join(path, "openpype_console")
+
+ # if only builds are requested
+ if build and not os.path.isfile(exe): # noqa: E501
+ print(f" ! path is not a build: {path}")
+ return None
+
+ version = {}
+ with open(version_file, "r") as vf:
+ exec(vf.read(), version)
+
+ version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"])
+ return version_match[1]
+
 def RenderExecutable(self):
- exeList = self.GetConfigEntry("OpenPypeExecutable")
- exe = FileUtils.SearchFileList(exeList)
+ job = self.GetJob()
+ openpype_versions = []
+ # if the job requires specific OpenPype version,
+ # let's go over all available and find compatible build.
+ requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION")
+ if requested_version:
+ self.LogInfo((
+ "Scanning for compatible requested "
+ f"version {requested_version}"))
+ dir_list = self.GetConfigEntry("OpenPypeInstallationDirs")
+ install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
+ if install_dir:
+ sub_dirs = [
+ f.path for f in os.scandir(install_dir)
+ if f.is_dir()
+ ]
+ for subdir in sub_dirs:
+ version = self.get_openpype_version_from_path(subdir)
+ if not version:
+ continue
+ openpype_versions.append((version, subdir))
+
+ exe_list = self.GetConfigEntry("OpenPypeExecutable")
+ exe = FileUtils.SearchFileList(exe_list)
+ if openpype_versions:
+ # if looking for requested compatible version,
+ # add the implicitly specified to the list too.
+ version = self.get_openpype_version_from_path(
+ os.path.dirname(exe))
+ if version:
+ openpype_versions.append((version, os.path.dirname(exe)))
+
+ if requested_version:
+ # sort detected versions
+ if openpype_versions:
+ openpype_versions.sort(
+ key=lambda ver: [
+ int(t) if t.isdigit() else t.lower()
+ for t in re.split(r"(\d+)", ver[0])
+ ])
+ requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501
+ compatible_versions = []
+ for version in openpype_versions:
+ v = version[0].split(".")[:3]
+ if v[0] == requested_major and v[1] == requested_minor:
+ compatible_versions.append(version)
+ if not compatible_versions:
+ self.FailRender(("Cannot find compatible version available "
+ "for version {} requested by the job. "
+ "Please add it through plugin configuration "
+ "in Deadline or install it to configured "
+ "directory.").format(requested_version))
+ # sort compatible versions and pick the last one
+ compatible_versions.sort(
+ key=lambda ver: [
+ int(t) if t.isdigit() else t.lower()
+ for t in re.split(r"(\d+)", ver[0])
+ ])
+ # create list of executables for different platform and let
+ # Deadline decide.
+ exe_list = [
+ os.path.join(
+ compatible_versions[-1][1], "openpype_console.exe"),
+ os.path.join(
+ compatible_versions[-1][1], "openpype_console")
+ ]
+ exe = FileUtils.SearchFileList(";".join(exe_list))
+
 if exe == "":
 self.FailRender(
 "OpenPype executable was not found " +
- "in the semicolon separated list \"" + exeList + "\". " +
+ "in the semicolon separated list " +
+ "\"" + ";".join(exe_list) + "\". 
" + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") return exe diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py index 0914933de4..dc76920a57 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py @@ -1,10 +1,11 @@ import collections import datetime +import copy import ftrack_api from openpype_modules.ftrack.lib import ( BaseEvent, - query_custom_attributes + query_custom_attributes, ) @@ -124,10 +125,15 @@ class PushFrameValuesToTaskEvent(BaseEvent): # Separate value changes and task parent changes _entities_info = [] + added_entities = [] + added_entity_ids = set() task_parent_changes = [] for entity_info in entities_info: if entity_info["entity_type"].lower() == "task": task_parent_changes.append(entity_info) + elif entity_info.get("action") == "add": + added_entities.append(entity_info) + added_entity_ids.add(entity_info["entityId"]) else: _entities_info.append(entity_info) entities_info = _entities_info @@ -136,6 +142,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data, changed_keys_by_object_id = self.filter_changes( session, event, entities_info, interest_attributes ) + self.interesting_data_for_added( + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ) if not interesting_data and not task_parent_changes: return @@ -151,9 +164,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): # - it is a complex way how to find out if interesting_data: self.process_attribute_changes( - session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ) if task_parent_changes: @@ -163,8 +180,12 @@ class PushFrameValuesToTaskEvent(BaseEvent): ) def process_task_parent_change( - self, session, object_types_by_name, task_parent_changes, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + task_parent_changes, + interest_entity_types, + interest_attributes ): """Push custom attribute values if task parent has changed. @@ -176,6 +197,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): real hierarchical value and non hierarchical custom attribute value should be set to hierarchical value. """ + # Store task ids which were created or moved under parent with entity # type defined in settings (interest_entity_types). 
task_ids = set() @@ -380,33 +402,49 @@ class PushFrameValuesToTaskEvent(BaseEvent): uncommited_changes = False for idx, item in enumerate(changes): new_value = item["new_value"] + old_value = item["old_value"] attr_id = item["attr_id"] entity_id = item["entity_id"] attr_key = item["attr_key"] - entity_key = collections.OrderedDict() - entity_key["configuration_id"] = attr_id - entity_key["entity_id"] = entity_id + entity_key = collections.OrderedDict(( + ("configuration_id", attr_id), + ("entity_id", entity_id) + )) self._cached_changes.append({ "attr_key": attr_key, "entity_id": entity_id, "value": new_value, "time": datetime.datetime.now() }) + old_value_is_set = ( + old_value is not ftrack_api.symbol.NOT_SET + and old_value is not None + ) if new_value is None: + if not old_value_is_set: + continue op = ftrack_api.operation.DeleteEntityOperation( "CustomAttributeValue", entity_key ) - else: + + elif old_value_is_set: op = ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + "CustomAttributeValue", entity_key, "value", - ftrack_api.symbol.NOT_SET, + old_value, new_value ) + else: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": new_value} + ) + session.recorded_operations.push(op) self.log.info(( "Changing Custom Attribute \"{}\" to value" @@ -432,9 +470,14 @@ class PushFrameValuesToTaskEvent(BaseEvent): self.log.warning("Changing of values failed.", exc_info=True) def process_attribute_changes( - self, session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ): # Prepare task object id task_object_id = object_types_by_name["task"]["id"] @@ -522,15 +565,26 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id_by_task_id[task_id] = task_entity["parent_id"] self.finalize_attribute_changes( - session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ) def finalize_attribute_changes( - self, session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + self, + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ): attr_id_to_key = {} for attr_confs in attrs_by_obj_id.values(): @@ -550,7 +604,11 @@ class PushFrameValuesToTaskEvent(BaseEvent): attr_ids = set(attr_id_to_key.keys()) current_values_by_id = self.get_current_values( - session, attr_ids, entity_ids, task_entity_ids, hier_attrs + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ) changes = [] @@ -560,14 +618,25 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id = entity_id values = interesting_data[parent_id] + added_entity = entity_id in added_entity_ids for attr_id, old_value in current_values.items(): + if added_entity and attr_id in hier_attrs: + continue + attr_key = attr_id_to_key.get(attr_id) if not attr_key: continue # Convert new value from string new_value = values.get(attr_key) - if new_value is not None and old_value is not None: + new_value_is_valid = ( + old_value is not ftrack_api.symbol.NOT_SET + and new_value is not None + ) + if added_entity and not 
new_value_is_valid: + continue + + if new_value is not None and new_value_is_valid: try: new_value = type(old_value)(new_value) except Exception: @@ -581,6 +650,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): changes.append({ "new_value": new_value, "attr_id": attr_id, + "old_value": old_value, "entity_id": entity_id, "attr_key": attr_key }) @@ -599,6 +669,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data = {} changed_keys_by_object_id = {} + for entity_info in entities_info: # Care only about changes if specific keys entity_changes = {} @@ -644,16 +715,123 @@ class PushFrameValuesToTaskEvent(BaseEvent): return interesting_data, changed_keys_by_object_id + def interesting_data_for_added( + self, + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ): + if not added_entities or not interest_attributes: + return + + object_type_ids = set() + entity_ids = set() + all_entity_ids = set() + object_id_by_entity_id = {} + project_id = None + entity_ids_by_parent_id = collections.defaultdict(set) + for entity_info in added_entities: + object_id = entity_info["objectTypeId"] + entity_id = entity_info["entityId"] + object_type_ids.add(object_id) + entity_ids.add(entity_id) + object_id_by_entity_id[entity_id] = object_id + + for item in entity_info["parents"]: + entity_id = item["entityId"] + all_entity_ids.add(entity_id) + parent_id = item["parentId"] + if not parent_id: + project_id = entity_id + else: + entity_ids_by_parent_id[parent_id].add(entity_id) + + hier_attrs = self.get_hierarchical_configurations( + session, interest_attributes + ) + if not hier_attrs: + return + + hier_attrs_key_by_id = { + attr_conf["id"]: attr_conf["key"] + for attr_conf in hier_attrs + } + default_values_by_key = { + attr_conf["key"]: attr_conf["default"] + for attr_conf in hier_attrs + } + + values = query_custom_attributes( + session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + ) + values_per_entity_id = {} + for entity_id in all_entity_ids: + values_per_entity_id[entity_id] = {} + for attr_name in interest_attributes: + values_per_entity_id[entity_id][attr_name] = None + + for item in values: + entity_id = item["entity_id"] + key = hier_attrs_key_by_id[item["configuration_id"]] + values_per_entity_id[entity_id][key] = item["value"] + + fill_queue = collections.deque() + fill_queue.append((project_id, default_values_by_key)) + while fill_queue: + item = fill_queue.popleft() + entity_id, values_by_key = item + entity_values = values_per_entity_id[entity_id] + new_values_by_key = copy.deepcopy(values_by_key) + for key, value in values_by_key.items(): + current_value = entity_values[key] + if current_value is None: + entity_values[key] = value + else: + new_values_by_key[key] = current_value + + for child_id in entity_ids_by_parent_id[entity_id]: + fill_queue.append((child_id, new_values_by_key)) + + for entity_id in entity_ids: + entity_changes = {} + for key, value in values_per_entity_id[entity_id].items(): + if value is not None: + entity_changes[key] = value + + if not entity_changes: + continue + + interesting_data[entity_id] = entity_changes + object_id = object_id_by_entity_id[entity_id] + if object_id not in changed_keys_by_object_id: + changed_keys_by_object_id[object_id] = set() + changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) + def get_current_values( - self, session, attr_ids, entity_ids, task_entity_ids, hier_attrs + self, + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ): 
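The `fill_queue` walk in `interesting_data_for_added` above is a breadth-first pass from the project root: an entity with no explicit value inherits the value coming from its parent (seeded by the attribute defaults), while an explicit value replaces what is passed further down. A toy reproduction of that propagation:

```python
import collections
import copy

# Toy hierarchy: project -> shot -> task, one hierarchical attribute.
entity_ids_by_parent_id = {"project": {"shot"}, "shot": {"task"}}
values_per_entity_id = {
    "project": {"fps": None},
    "shot": {"fps": 24},    # explicit value overrides the default
    "task": {"fps": None},  # inherits from "shot"
}
default_values_by_key = {"fps": 25}

fill_queue = collections.deque()
fill_queue.append(("project", default_values_by_key))
while fill_queue:
    entity_id, values_by_key = fill_queue.popleft()
    entity_values = values_per_entity_id[entity_id]
    new_values_by_key = copy.deepcopy(values_by_key)
    for key, value in values_by_key.items():
        current_value = entity_values[key]
        if current_value is None:
            entity_values[key] = value              # inherit from parent
        else:
            new_values_by_key[key] = current_value  # pass own value down
    for child_id in entity_ids_by_parent_id.get(entity_id, ()):
        fill_queue.append((child_id, new_values_by_key))

assert values_per_entity_id["project"]["fps"] == 25
assert values_per_entity_id["task"]["fps"] == 24
```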
current_values_by_id = {} if not attr_ids or not entity_ids: return current_values_by_id + for entity_id in entity_ids: + current_values_by_id[entity_id] = {} + for attr_id in attr_ids: + current_values_by_id[entity_id][attr_id] = ( + ftrack_api.symbol.NOT_SET + ) + values = query_custom_attributes( session, attr_ids, entity_ids, True ) + for item in values: entity_id = item["entity_id"] attr_id = item["configuration_id"] @@ -699,6 +877,18 @@ class PushFrameValuesToTaskEvent(BaseEvent): output[obj_id][attr["key"]] = attr["id"] return output, hiearchical + def get_hierarchical_configurations(self, session, interest_attributes): + hier_attr_query = ( + "select id, key, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key in ({}) and is_hierarchical is true" + ) + if not interest_attributes: + return [] + return list(session.query(hier_attr_query.format( + self.join_query_keys(interest_attributes), + )).all()) + def register(session): PushFrameValuesToTaskEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index d91649d7ba..fb1cdf340e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -11,13 +11,11 @@ from openpype.client import ( get_project, get_assets, ) -from openpype.settings import get_project_settings -from openpype.lib import ( - get_workfile_template_key, - get_workdir_data, - StringTemplate, -) +from openpype.settings import get_project_settings, get_system_settings +from openpype.lib import StringTemplate from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data +from openpype.pipeline.workfile import get_workfile_template_key from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks @@ -279,14 +277,19 @@ class FillWorkfileAttributeAction(BaseAction): extension = "{ext}" project_doc = get_project(project_name) project_settings = get_project_settings(project_name) + system_settings = get_system_settings() anatomy = Anatomy(project_name) templates_by_key = {} operations = [] for asset_doc, task_entities in asset_docs_with_task_entities: for task_entity in task_entities: - workfile_data = get_workdir_data( - project_doc, asset_doc, task_entity["name"], host_name + workfile_data = get_template_data( + project_doc, + asset_doc, + task_entity["name"], + host_name, + system_settings ) # Use version 1 for each workfile workfile_data["version"] = 1 @@ -294,7 +297,10 @@ class FillWorkfileAttributeAction(BaseAction): task_type = workfile_data["task"]["type"] template_key = get_workfile_template_key( - task_type, host_name, project_settings=project_settings + task_type, + host_name, + project_name, + project_settings=project_settings ) if template_key in templates_by_key: template = templates_by_key[template_key] diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index 3da1e7c7f0..947dacf917 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -7,6 +7,7 @@ import threading import datetime import time import queue +import collections import appdirs import pymongo @@ -309,7 +310,20 @@ class CustomEventHubSession(ftrack_api.session.Session): # Currently pending operations. 
self.recorded_operations = ftrack_api.operation.Operations() - self.record_operations = True + + # OpenPype change - In new API are operations properties + new_api = hasattr(self.__class__, "record_operations") + + if new_api: + self._record_operations = collections.defaultdict( + lambda: True + ) + self._auto_populate = collections.defaultdict( + lambda: auto_populate + ) + else: + self.record_operations = True + self.auto_populate = auto_populate self.cache_key_maker = cache_key_maker if self.cache_key_maker is None: @@ -328,6 +342,9 @@ class CustomEventHubSession(ftrack_api.session.Session): if cache is not None: self.cache.caches.append(cache) + if new_api: + self.merge_lock = threading.RLock() + self._managed_request = None self._request = requests.Session() self._request.auth = ftrack_api.session.SessionAuthentication( @@ -335,8 +352,6 @@ class CustomEventHubSession(ftrack_api.session.Session): ) self.request_timeout = timeout - self.auto_populate = auto_populate - # Fetch server information and in doing so also check credentials. self._server_information = self._fetch_server_information() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index c4f7b1f05d..20a69e060c 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -26,8 +26,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): families = ["ftrack"] def process(self, instance): - session = instance.context.data["ftrackSession"] - context = instance.context component_list = instance.data.get("ftrackComponentsList") if not component_list: self.log.info( @@ -36,8 +34,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): ) return - session = instance.context.data["ftrackSession"] context = instance.context + session = context.data["ftrackSession"] parent_entity = None default_asset_name = None @@ -89,6 +87,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_versions_data_by_id = {} used_asset_versions = [] + # Iterate over components and publish for data in component_list: self.log.debug("data: {}".format(data)) @@ -118,9 +117,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_version_status_ids_by_name ) - # Component - self.create_component(session, asset_version_entity, data) - # Store asset version and components items that were version_id = asset_version_entity["id"] if version_id not in asset_versions_data_by_id: @@ -137,6 +133,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): if asset_version_entity not in used_asset_versions: used_asset_versions.append(asset_version_entity) + self._create_components(session, asset_versions_data_by_id) + instance.data["ftrackIntegratedAssetVersionsData"] = ( asset_versions_data_by_id ) @@ -625,3 +623,40 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session.rollback() session._configure_locations() six.reraise(tp, value, tb) + + def _create_components(self, session, asset_versions_data_by_id): + for item in asset_versions_data_by_id.values(): + asset_version_entity = item["asset_version"] + component_items = item["component_items"] + + component_entities = session.query( + ( + "select id, name from Component where version_id is \"{}\"" + ).format(asset_version_entity["id"]) + ).all() + + existing_component_names = { + component["name"] + for component in component_entities + } + + contain_review = "ftrackreview-mp4" in existing_component_names + 
thumbnail_component_item = None + for component_item in component_items: + component_data = component_item.get("component_data") or {} + component_name = component_data.get("name") + if component_name == "ftrackreview-mp4": + contain_review = True + elif component_name == "ftrackreview-image": + thumbnail_component_item = component_item + + if contain_review and thumbnail_component_item: + thumbnail_component_item["component_data"]["name"] = ( + "thumbnail" + ) + + # Component + for component_item in component_items: + self.create_component( + session, asset_version_entity, component_item + ) diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py index 047fd8462c..8cb2336391 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py @@ -13,7 +13,10 @@ class IntegrateFtrackComponentOverwrite(pyblish.api.InstancePlugin): active = False def process(self, instance): - component_list = instance.data['ftrackComponentsList'] + component_list = instance.data.get('ftrackComponentsList') + if not component_list: + self.log.info("No component to overwrite...") + return for cl in component_list: cl['component_overwrite'] = True diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py index c6a3d47f66..e7c265988e 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py @@ -6,9 +6,11 @@ Requires: """ import sys +import json import six import pyblish.api +from openpype.lib import StringTemplate class IntegrateFtrackDescription(pyblish.api.InstancePlugin): @@ -25,6 +27,10 @@ class IntegrateFtrackDescription(pyblish.api.InstancePlugin): description_template = "{comment}" def process(self, instance): + if not self.description_template: + self.log.info("Skipping. Description template is not set.") + return + # Check if there are any integrated AssetVersion entities asset_versions_key = "ftrackIntegratedAssetVersionsData" asset_versions_data_by_id = instance.data.get(asset_versions_key) @@ -38,39 +44,62 @@ class IntegrateFtrackDescription(pyblish.api.InstancePlugin): else: self.log.debug("Comment is set to `{}`".format(comment)) - session = instance.context.data["ftrackSession"] - intent = instance.context.data.get("intent") - intent_label = None - if intent and isinstance(intent, dict): - intent_val = intent.get("value") - intent_label = intent.get("label") - else: - intent_val = intent + if intent and "{intent}" in self.description_template: + value = intent.get("value") + if value: + intent = intent.get("label") or value - if not intent_label: - intent_label = intent_val or "" + if not intent and not comment: + self.log.info("Skipping. 
Intent and comment are empty.")
+ return
 
 # if intent label is set then format comment
 # - it is possible that intent_label is equal to "" (empty string)
- if intent_label:
- self.log.debug(
- "Intent label is set to `{}`.".format(intent_label)
- )
-
+ if intent:
+ self.log.debug("Intent is set to `{}`.".format(intent))
 else:
 self.log.debug("Intent is not set.")
 
+ # If we would like to use more "optional" possibilities we would have
+ # to come up with some expressions in templates or specific templates
+ # for all 3 possible combinations when comment and intent are
+ # set or not (when both are not set then description does not
+ # make sense).
+ fill_data = {}
+ if comment:
+ fill_data["comment"] = comment
+ if intent:
+ fill_data["intent"] = intent
+
+ description = StringTemplate.format_template(
+ self.description_template, fill_data
+ )
+ if not description.solved:
+ self.log.warning((
+ "Couldn't solve template \"{}\" with data {}"
+ ).format(
+ self.description_template, json.dumps(fill_data, indent=4)
+ ))
+ return
+
+ if not description:
+ self.log.debug((
+ "Skipping. Result of template is empty string."
+ " Template \"{}\" Fill data: {}"
+ ).format(
+ self.description_template, json.dumps(fill_data, indent=4)
+ ))
+ return
+
+ session = instance.context.data["ftrackSession"]
 for asset_version_data in asset_versions_data_by_id.values():
 asset_version = asset_version_data["asset_version"]
 
 # Backwards compatibility for older settings using
 # attribute 'note_with_intent_template'
- comment = self.description_template.format(**{
- "intent": intent_label,
- "comment": comment
- })
- asset_version["comment"] = comment
+
+ asset_version["comment"] = description
 
 try:
 session.commit()
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
index c8d9e4117d..a1e5922730 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
@@ -3,7 +3,10 @@ import json
 import copy
 
 import pyblish.api
-from openpype.lib import get_ffprobe_streams
+from openpype.lib.transcoding import (
+ get_ffprobe_streams,
+ convert_ffprobe_fps_to_float,
+)
 from openpype.lib.profiles_filtering import filter_profiles
 
 
@@ -58,7 +61,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
 version_number = int(instance_version)
 
 family = instance.data["family"]
- family_low = instance.data["family"].lower()
+ family_low = family.lower()
 
 asset_type = instance.data.get("ftrackFamily")
 if not asset_type and family_low in self.family_mapping:
@@ -79,11 +82,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
 ).format(family))
 return
 
- # Prepare FPS
- instance_fps = instance.data.get("fps")
- if instance_fps is None:
- instance_fps = instance.context.data["fps"]
-
 status_name = self._get_asset_version_status_name(instance)
 
 # Base of component item data
@@ -140,24 +138,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
 first_thumbnail_component = None
 first_thumbnail_component_repre = None
 for repre in thumbnail_representations:
- published_path = repre.get("published_path")
- if not published_path:
- comp_files = repre["files"]
- if isinstance(comp_files, (tuple, list, set)):
- filename = comp_files[0]
- else:
- filename = comp_files
-
- published_path = os.path.join(
- repre["stagingDir"], filename
+ repre_path = self._get_repre_path(instance, repre, False)
+ if not repre_path:
+ self.log.warning(
+
"Published path is not set and source was removed." ) - if not os.path.exists(published_path): - continue - repre["published_path"] = published_path + continue # Create copy of base comp item and append it thumbnail_item = copy.deepcopy(base_component_item) - thumbnail_item["component_path"] = repre["published_path"] + thumbnail_item["component_path"] = repre_path thumbnail_item["component_data"] = { "name": "thumbnail" } @@ -176,10 +166,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add item to component list component_list.append(thumbnail_item) - if ( - not review_representations - and first_thumbnail_component is not None - ): + if first_thumbnail_component is not None: width = first_thumbnail_component_repre.get("width") height = first_thumbnail_component_repre.get("height") if not width or not height: @@ -216,6 +203,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): extended_asset_name = "" multiple_reviewable = len(review_representations) > 1 for repre in review_representations: + repre_path = self._get_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published path is not set and source was removed." + ) + continue + # Create copy of base comp item and append it review_item = copy.deepcopy(base_component_item) @@ -254,33 +248,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component[ "asset_data"]["name"] = extended_asset_name - frame_start = repre.get("frameStartFtrack") - frame_end = repre.get("frameEndFtrack") - if frame_start is None or frame_end is None: - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - - # Frame end of uploaded video file should be duration in frames - # - frame start is always 0 - # - frame end is duration in frames - duration = frame_end - frame_start + 1 - - fps = repre.get("fps") - if fps is None: - fps = instance_fps + component_meta = self._prepare_component_metadata( + instance, repre, repre_path, True + ) # Change location - review_item["component_path"] = repre["published_path"] + review_item["component_path"] = repre_path # Change component data review_item["component_data"] = { # Default component name is "main". 
"name": "ftrackreview-mp4", "metadata": { - "ftr_meta": json.dumps({ - "frameIn": 0, - "frameOut": int(duration), - "frameRate": float(fps) - }) + "ftr_meta": json.dumps(component_meta) } } @@ -323,11 +302,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): component_data = copy_src_item["component_data"] component_name = component_data["name"] component_data["name"] = component_name + "_src" + component_meta = self._prepare_component_metadata( + instance, repre, copy_src_item["component_path"], False + ) + if component_meta: + component_data["metadata"] = { + "ftr_meta": json.dumps(component_meta) + } component_list.append(copy_src_item) # Add others representations as component for repre in other_representations: - published_path = repre.get("published_path") + published_path = self._get_repre_path(instance, repre, True) if not published_path: continue # Create copy of base comp item and append it @@ -340,9 +326,17 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ): other_item["asset_data"]["name"] = extended_asset_name - other_item["component_data"] = { + component_meta = self._prepare_component_metadata( + instance, repre, published_path, False + ) + component_data = { "name": repre["name"] } + if component_meta: + component_data["metadata"] = { + "ftr_meta": json.dumps(component_meta) + } + other_item["component_data"] = component_data other_item["component_location_name"] = unmanaged_location_name other_item["component_path"] = published_path component_list.append(other_item) @@ -360,6 +354,51 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): )) instance.data["ftrackComponentsList"] = component_list + def _get_repre_path(self, instance, repre, only_published): + """Get representation path that can be used for integration. + + When 'only_published' is set to true the validation of path is not + relevant. In that case we just need what is set in 'published_path' + as "reference". The reference is not used to get or upload the file but + for reference where the file was published. + + Args: + instance (pyblish.Instance): Processed instance object. Used + for source of staging dir if representation does not have + filled it. + repre (dict): Representation on instance which could be and + could not be integrated with main integrator. + only_published (bool): Care only about published paths and + ignore if filepath is not existing anymore. + + Returns: + str: Path to representation file. + None: Path is not filled or does not exists. 
     def _get_asset_version_status_name(self, instance):
         if not self.asset_versions_status_profiles:
             return None
@@ -380,3 +419,107 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
             return None
 
         return matching_profile["status"] or None
+
+    def _prepare_component_metadata(
+        self, instance, repre, component_path, is_review
+    ):
+        extension = os.path.splitext(component_path)[-1]
+        streams = []
+        try:
+            streams = get_ffprobe_streams(component_path)
+        except Exception:
+            self.log.debug((
+                "Failed to retrieve information about input {}"
+            ).format(component_path))
+
+        # Find video streams
+        video_streams = [
+            stream
+            for stream in streams
+            if stream["codec_type"] == "video"
+        ]
+        # Skip if there are no video streams
+        # - exr is a special case which can have issues with reading through
+        #   ffmpeg but we want to set fps for it
+        if not video_streams and extension not in [".exr"]:
+            return {}
+
+        stream_width = None
+        stream_height = None
+        stream_fps = None
+        frame_out = None
+        for video_stream in video_streams:
+            tmp_width = video_stream.get("width")
+            tmp_height = video_stream.get("height")
+            if tmp_width and tmp_height:
+                stream_width = tmp_width
+                stream_height = tmp_height
+
+            input_framerate = video_stream.get("r_frame_rate")
+            duration = video_stream.get("duration")
+            if input_framerate is None or duration is None:
+                continue
+            try:
+                stream_fps = convert_ffprobe_fps_to_float(
+                    input_framerate
+                )
+            except ValueError:
+                self.log.warning((
+                    "Could not convert ffprobe fps to float \"{}\""
+                ).format(input_framerate))
+                continue
+
+            stream_width = tmp_width
+            stream_height = tmp_height
+
+            self.log.debug("FPS from stream is {} and duration is {}".format(
+                input_framerate, duration
+            ))
+            frame_out = float(duration) * stream_fps
+            break
+
+        # Prepare FPS
+        instance_fps = instance.data.get("fps")
+        if instance_fps is None:
+            instance_fps = instance.context.data["fps"]
+
+        if not is_review:
+            output = {}
+            fps = stream_fps or instance_fps
+            if fps:
+                output["frameRate"] = fps
+
+            if stream_width and stream_height:
+                output["width"] = int(stream_width)
+                output["height"] = int(stream_height)
+            return output
+
+        frame_start = repre.get("frameStartFtrack")
+        frame_end = repre.get("frameEndFtrack")
+        if frame_start is None or frame_end is None:
+            frame_start = instance.data["frameStart"]
+            frame_end = instance.data["frameEnd"]
+
+        fps = None
+        repre_fps = repre.get("fps")
+        if repre_fps is not None:
+            repre_fps = float(repre_fps)
+
+        fps = stream_fps or repre_fps or instance_fps
+
+        # Frame end of uploaded video file should be duration in frames
+        # - frame start is always 0
+        # - frame end is duration in frames
+        if not frame_out:
+            frame_out = frame_end - frame_start + 1
+
+        # Ftrack documentation says that it is required to have
+        # 'width' and 'height' in review component. But with those values
+        # review video does not play.
+        component_meta = {
+            "frameIn": 0,
+            "frameOut": frame_out,
+            "frameRate": float(fps)
+        }
+
+        return component_meta
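For review components the resulting `ftr_meta` payload stays small. A worked example with hypothetical values; note that ffprobe reports `r_frame_rate` as a ratio string such as `"24000/1001"`, which `convert_ffprobe_fps_to_float` resolves to a float:

```python
import json

# Hypothetical 100-frame review starting at frame 1001, at 25 fps.
frame_start, frame_end = 1001, 1100
fps = 25.0

# Uploaded movies always start at frame 0, so frameOut is the duration.
duration = frame_end - frame_start + 1  # 100

ftr_meta = json.dumps({
    "frameIn": 0,
    "frameOut": duration,
    "frameRate": fps,
})
```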
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
index 77a7ebdfcf..ac3fa874e0 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py
@@ -9,9 +9,11 @@ Requires:
 """
 import sys
+import copy
 
 import six
 import pyblish.api
+from openpype.lib import StringTemplate
 
 
 class IntegrateFtrackNote(pyblish.api.InstancePlugin):
@@ -53,14 +55,10 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
 
         intent = instance.context.data.get("intent")
         intent_label = None
-        if intent and isinstance(intent, dict):
-            intent_val = intent.get("value")
-            intent_label = intent.get("label")
-        else:
-            intent_val = intent
-
-        if not intent_label:
-            intent_label = intent_val or ""
+        if intent:
+            value = intent["value"]
+            if value:
+                intent_label = intent["label"] or value
 
         # if intent label is set then format comment
         # - it is possible that intent_label is equal to "" (empty string)
@@ -96,6 +94,14 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
 
             labels.append(label)
 
+        base_format_data = {
+            "host_name": host_name,
+            "app_name": app_name,
+            "app_label": app_label,
+            "source": instance.data.get("source", '')
+        }
+        if comment:
+            base_format_data["comment"] = comment
         for asset_version_data in asset_versions_data_by_id.values():
             asset_version = asset_version_data["asset_version"]
             component_items = asset_version_data["component_items"]
@@ -109,23 +115,31 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
             template = self.note_template
             if template is None:
                 template = self.note_with_intent_template
-            format_data = {
-                "intent": intent_label,
-                "comment": comment,
-                "host_name": host_name,
-                "app_name": app_name,
-                "app_label": app_label,
-                "published_paths": "\n".join(sorted(published_paths)),
-                "source": instance.data.get("source", '')
-            }
-            comment = template.format(**format_data)
-            if not comment:
+            format_data = copy.deepcopy(base_format_data)
+            format_data["published_paths"] = "\n".join(
+                sorted(published_paths)
+            )
+            if intent:
+                if "{intent}" in template:
+                    format_data["intent"] = intent_label
+                else:
+                    format_data["intent"] = intent
+
+            note_text = StringTemplate.format_template(template, format_data)
+            if not note_text.solved:
+                self.log.warning((
+                    "Note template requires more keys than can be provided."
+                    "\nTemplate: {}\nData: {}"
+                ).format(template, format_data))
+                continue
+
+            if not note_text:
                 self.log.info((
                     "Note for AssetVersion {} would be empty. Skipping."
                     "\nTemplate: {}\nData: {}"
                 ).format(asset_version["id"], template, format_data))
                 continue
-            asset_version.create_note(comment, author=user, labels=labels)
+            asset_version.create_note(note_text, author=user, labels=labels)
 
             try:
                 session.commit()
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
index 1a5d74bf26..8d39baa8d7 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
@@ -1,9 +1,12 @@
 import sys
 import collections
 import six
-import pyblish.api
 from copy import deepcopy
+
+import pyblish.api
+
 from openpype.client import get_asset_by_id
+from openpype.lib import filter_profiles
 
 
 # Copy of constant `openpype_modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC`
@@ -65,8 +68,15 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
     order = pyblish.api.IntegratorOrder - 0.04
     label = 'Integrate Hierarchy To Ftrack'
     families = ["shot"]
-    hosts = ["hiero", "resolve", "standalonepublisher", "flame"]
+    hosts = [
+        "hiero",
+        "resolve",
+        "standalonepublisher",
+        "flame",
+        "traypublisher"
+    ]
     optional = False
+    create_task_status_profiles = []
 
     def process(self, context):
         self.context = context
@@ -76,14 +86,16 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
         hierarchy_context = self._get_active_assets(context)
         self.log.debug("__ hierarchy_context: {}".format(hierarchy_context))
 
-        self.session = self.context.data["ftrackSession"]
+        session = self.context.data["ftrackSession"]
         project_name = self.context.data["projectEntity"]["name"]
         query = 'Project where full_name is "{}"'.format(project_name)
-        project = self.session.query(query).one()
-        auto_sync_state = project[
-            "custom_attributes"][CUST_ATTR_AUTO_SYNC]
+        project = session.query(query).one()
+        auto_sync_state = project["custom_attributes"][CUST_ATTR_AUTO_SYNC]
 
-        self.ft_project = None
+        self.session = session
+        self.ft_project = project
+        self.task_types = self.get_all_task_types(project)
+        self.task_statuses = self.get_task_statuses(project)
 
         # disable termporarily ftrack project's autosyncing
         if auto_sync_state:
@@ -115,10 +127,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
         self.log.debug(entity_type)
 
         if entity_type.lower() == 'project':
-            query = 'Project where full_name is "{}"'.format(entity_name)
-            entity = self.session.query(query).one()
-            self.ft_project = entity
-            self.task_types = self.get_all_task_types(entity)
+            entity = self.ft_project
 
         elif self.ft_project is None or parent is None:
             raise AssertionError(
@@ -211,13 +220,6 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
                     task_type=task_type,
                     parent=entity
                 )
-            try:
-                self.session.commit()
-            except Exception:
-                tp, value, tb = sys.exc_info()
-                self.session.rollback()
-                self.session._configure_locations()
-                six.reraise(tp, value, tb)
 
             # Incoming links.
             self.create_links(project_name, entity_data, entity)
 
@@ -297,7 +299,37 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
 
         return tasks
 
+    def get_task_statuses(self, project_entity):
+        project_schema = project_entity["project_schema"]
+        task_workflow_statuses = project_schema["_task_workflow"]["statuses"]
+        return {
+            status["id"]: status
+            for status in task_workflow_statuses
+        }
+
     def create_task(self, name, task_type, parent):
+        filter_data = {
+            "task_names": name,
+            "task_types": task_type
+        }
+        profile = filter_profiles(
+            self.create_task_status_profiles,
+            filter_data
+        )
+        status_id = None
+        if profile:
+            status_name = profile["status_name"]
+            status_name_low = status_name.lower()
+            for _status_id, status in self.task_statuses.items():
+                if status["name"].lower() == status_name_low:
+                    status_id = _status_id
+                    break
+
+            if status_id is None:
+                self.log.warning(
+                    "Task status \"{}\" was not found".format(status_name)
+                )
+
         task = self.session.create('Task', {
             'name': name,
             'parent': parent
@@ -306,6 +338,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
         self.log.info(task_type)
         self.log.info(self.task_types)
         task['type'] = self.task_types[task_type]
+        if status_id is not None:
+            task["status_id"] = status_id
 
         try:
             self.session.commit()
diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py
index d28ded06c7..c9e78b59eb 100644
--- a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py
+++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py
@@ -39,10 +39,12 @@ class CollectKitsuEntities(pyblish.api.ContextPlugin):
 
             kitsu_entity = gazu.asset.get_asset(zou_asset_data["id"])
         if not kitsu_entity:
-            raise AssertionError(f"{entity_type} not found in kitsu!")
+            raise AssertionError("{} not found in kitsu!".format(entity_type))
         context.data["kitsu_entity"] = kitsu_entity
-        self.log.debug(f"Collect kitsu {entity_type}: {kitsu_entity}")
+        self.log.debug(
+            "Collect kitsu {}: {}".format(entity_type, kitsu_entity)
+        )
 
         if zou_task_data:
             kitsu_task = gazu.task.get_task(zou_task_data["id"])
diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py
index 02c27382eb..e03cf2b30e 100644
--- a/openpype/modules/kitsu/utils/update_op_with_zou.py
+++ b/openpype/modules/kitsu/utils/update_op_with_zou.py
@@ -219,20 +219,25 @@ def update_op_assets(
 
         # Add parents for hierarchy
         item_data["parents"] = []
-        while parent_zou_id is not None:
-            parent_doc = asset_doc_ids[parent_zou_id]
+        ancestor_id = parent_zou_id
+        while ancestor_id is not None:
+            parent_doc = asset_doc_ids[ancestor_id]
             item_data["parents"].insert(0, parent_doc["name"])
 
             # Get parent entity
             parent_entity = parent_doc["data"]["zou"]
-            parent_zou_id = parent_entity.get("parent_id")
+            ancestor_id = parent_entity.get("parent_id")
 
-        if item_type in ["Shot", "Sequence"]:
+        # Build OpenPype compatible name
+        if item_type in ["Shot", "Sequence"] and parent_zou_id is not None:
             # Name with parents hierarchy "({episode}_){sequence}_{shot}"
             # to avoid duplicate name issue
-            item_name = "_".join(item_data["parents"] + [item_doc["name"]])
+            item_name = f"{item_data['parents'][-1]}_{item['name']}"
+
+            # Update doc name
+            asset_doc_ids[item["id"]]["name"] = item_name
         else:
-            item_name = item_doc["name"]
+            item_name = item["name"]
 
         # Set root folders parents
         item_data["parents"] = entity_parent_folders + item_data["parents"]
@@ -276,7 +281,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
     project_doc = create_project(project_name, project_name, dbcon=dbcon)
 
     # Project data and tasks
-    project_data = project["data"] or {}
+    project_data = project_doc["data"] or {}
 
     # Build project code and update Kitsu
     project_code = project.get("code")
@@ -305,6 +310,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
             "config.tasks": {
                 t["name"]: {"short_name": t.get("short_name", t["name"])}
                 for t in gazu.task.all_task_types_for_project(project)
+                or gazu.task.all_task_types()
             },
             "data": project_data,
         }
diff --git a/openpype/modules/shotgrid/README.md b/openpype/modules/shotgrid/README.md
new file mode 100644
index 0000000000..cbee0e9bf4
--- /dev/null
+++ b/openpype/modules/shotgrid/README.md
@@ -0,0 +1,19 @@
+## Shotgrid Module
+
+### Pre-requisites
+
+Install and launch a [shotgrid leecher](https://github.com/Ellipsanime/shotgrid-leecher) server
+
+### Quickstart
+
+The goal of this tutorial is to synchronize an already existing shotgrid project with OpenPype.
+
+- Activate the shotgrid module in the **system settings** and enter the shotgrid leecher server API URL
+
+- Create a new OpenPype project with the **project manager**
+
+- Enter the shotgrid authentication info (URL, script name, API key) and the shotgrid project ID related to this OpenPype project in the **project settings**
+
+- Use the batch interface (Tray > shotgrid > Launch batch), select your project and click "batch"
+
+- You can now access your shotgrid entities within the **avalon launcher** and publish information to shotgrid with **pyblish**
diff --git a/openpype/modules/shotgrid/__init__.py b/openpype/modules/shotgrid/__init__.py
new file mode 100644
index 0000000000..f1337a9492
--- /dev/null
+++ b/openpype/modules/shotgrid/__init__.py
@@ -0,0 +1,5 @@
+from .shotgrid_module import (
+    ShotgridModule,
+)
+
+__all__ = ("ShotgridModule",)
diff --git a/openpype/modules/shotgrid/lib/__init__.py b/openpype/modules/shotgrid/lib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/modules/shotgrid/lib/const.py b/openpype/modules/shotgrid/lib/const.py
new file mode 100644
index 0000000000..2a34800fac
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/const.py
@@ -0,0 +1 @@
+MODULE_NAME = "shotgrid"
diff --git a/openpype/modules/shotgrid/lib/credentials.py b/openpype/modules/shotgrid/lib/credentials.py
new file mode 100644
index 0000000000..337c4f6ecb
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/credentials.py
@@ -0,0 +1,125 @@
+
+from urllib.parse import urlparse
+
+import shotgun_api3
+from shotgun_api3.shotgun import AuthenticationFault
+
+from openpype.lib import OpenPypeSecureRegistry, OpenPypeSettingsRegistry
+from openpype.modules.shotgrid.lib.record import Credentials
+
+
+def _get_shotgrid_secure_key(hostname, key):
+    """Secure item key for entered hostname."""
+    return f"shotgrid/{hostname}/{key}"
+
+
+def _get_secure_value_and_registry(
+    hostname,
+    name,
+):
+    key = _get_shotgrid_secure_key(hostname, name)
+    registry = OpenPypeSecureRegistry(key)
+    return registry.get_item(name, None), registry
+
+
+def get_shotgrid_hostname(shotgrid_url):
+
+    if not shotgrid_url:
+        raise Exception("Shotgrid url cannot be a null")
+    valid_shotgrid_url = (
+        f"//{shotgrid_url}" if "//" not in shotgrid_url else shotgrid_url
+    )
+    return urlparse(valid_shotgrid_url).hostname
+
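A short usage sketch of the two helpers above, with a hypothetical server URL, to make the keyring layout explicit:

```python
# Hostname extraction tolerates URLs without a scheme.
hostname = get_shotgrid_hostname("https://mystudio.shotgunstudio.com")
# -> "mystudio.shotgunstudio.com"

# Keys under which login and password are stored in the secure registry:
login_key = _get_shotgrid_secure_key(hostname, "login")
# -> "shotgrid/mystudio.shotgunstudio.com/login"
password_key = _get_shotgrid_secure_key(hostname, "password")
# -> "shotgrid/mystudio.shotgunstudio.com/password"
```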
+
+# Credentials storing function (using keyring)
+
+
+def get_credentials(shotgrid_url):
+    hostname = get_shotgrid_hostname(shotgrid_url)
+    if not hostname:
+        return None
+    login_value, _ = _get_secure_value_and_registry(
+        hostname,
+        Credentials.login_key_prefix(),
+    )
+    password_value, _ = _get_secure_value_and_registry(
+        hostname,
+        Credentials.password_key_prefix(),
+    )
+    return Credentials(login_value, password_value)
+
+
+def save_credentials(login, password, shotgrid_url):
+    hostname = get_shotgrid_hostname(shotgrid_url)
+    _, login_registry = _get_secure_value_and_registry(
+        hostname,
+        Credentials.login_key_prefix(),
+    )
+    _, password_registry = _get_secure_value_and_registry(
+        hostname,
+        Credentials.password_key_prefix(),
+    )
+    clear_credentials(shotgrid_url)
+    login_registry.set_item(Credentials.login_key_prefix(), login)
+    password_registry.set_item(Credentials.password_key_prefix(), password)
+
+
+def clear_credentials(shotgrid_url):
+    hostname = get_shotgrid_hostname(shotgrid_url)
+    login_value, login_registry = _get_secure_value_and_registry(
+        hostname,
+        Credentials.login_key_prefix(),
+    )
+    password_value, password_registry = _get_secure_value_and_registry(
+        hostname,
+        Credentials.password_key_prefix(),
+    )
+
+    if login_value is not None:
+        login_registry.delete_item(Credentials.login_key_prefix())
+
+    if password_value is not None:
+        password_registry.delete_item(Credentials.password_key_prefix())
+
+
+# Login storing function (using json)
+
+
+def get_local_login():
+    reg = OpenPypeSettingsRegistry()
+    try:
+        return str(reg.get_item("shotgrid_login"))
+    except Exception:
+        return None
+
+
+def save_local_login(login):
+    reg = OpenPypeSettingsRegistry()
+    reg.set_item("shotgrid_login", login)
+
+
+def clear_local_login():
+    reg = OpenPypeSettingsRegistry()
+    reg.delete_item("shotgrid_login")
+
+
+def check_credentials(
+    login,
+    password,
+    shotgrid_url,
+):
+
+    if not shotgrid_url or not login or not password:
+        return False
+    try:
+        session = shotgun_api3.Shotgun(
+            shotgrid_url,
+            login=login,
+            password=password,
+        )
+        session.preferences_read()
+        session.close()
+    except AuthenticationFault:
+        return False
+    return True
diff --git a/openpype/modules/shotgrid/lib/record.py b/openpype/modules/shotgrid/lib/record.py
new file mode 100644
index 0000000000..f62f4855d5
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/record.py
@@ -0,0 +1,20 @@
+
+class Credentials:
+    login = None
+    password = None
+
+    def __init__(self, login, password) -> None:
+        super().__init__()
+        self.login = login
+        self.password = password
+
+    def is_empty(self):
+        return not (self.login and self.password)
+
+    @staticmethod
+    def login_key_prefix():
+        return "login"
+
+    @staticmethod
+    def password_key_prefix():
+        return "password"
diff --git a/openpype/modules/shotgrid/lib/settings.py b/openpype/modules/shotgrid/lib/settings.py
new file mode 100644
index 0000000000..924099f04b
--- /dev/null
+++ b/openpype/modules/shotgrid/lib/settings.py
@@ -0,0 +1,18 @@
+from openpype.api import get_system_settings, get_project_settings
+from openpype.modules.shotgrid.lib.const import MODULE_NAME
+
+
+def get_shotgrid_project_settings(project):
+    return get_project_settings(project).get(MODULE_NAME, {})
+
+
+def get_shotgrid_settings():
+    return get_system_settings().get("modules", {}).get(MODULE_NAME, {})
+
+
+def get_shotgrid_servers():
+    return get_shotgrid_settings().get("shotgrid_settings", {})
+
+
+def get_leecher_backend_url():
+    return get_shotgrid_settings().get("leecher_backend_url")
diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py
new file mode 100644
index 0000000000..0b03ac2e5d
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py
@@ -0,0 +1,100 @@
+import os
+
+import pyblish.api
+from openpype.lib.mongo import OpenPypeMongoConnection
+
+
+class CollectShotgridEntities(pyblish.api.ContextPlugin):
+    """Collect shotgrid entities according to the current context"""
+
+    order = pyblish.api.CollectorOrder + 0.499
+    label = "Shotgrid entities"
+
+    def process(self, context):
+
+        avalon_project = context.data.get("projectEntity")
+        avalon_asset = context.data.get("assetEntity")
+        avalon_task_name = os.getenv("AVALON_TASK")
+
+        self.log.info(avalon_project)
+        self.log.info(avalon_asset)
+
+        sg_project = _get_shotgrid_project(context)
+        sg_task = _get_shotgrid_task(
+            avalon_project,
+            avalon_asset,
+            avalon_task_name
+        )
+        sg_entity = _get_shotgrid_entity(avalon_project, avalon_asset)
+
+        if sg_project:
+            context.data["shotgridProject"] = sg_project
+            self.log.info(
+                "Collected corresponding shotgrid project : {}".format(
+                    sg_project
+                )
+            )
+
+        if sg_task:
+            context.data["shotgridTask"] = sg_task
+            self.log.info(
+                "Collected corresponding shotgrid task : {}".format(sg_task)
+            )
+
+        if sg_entity:
+            context.data["shotgridEntity"] = sg_entity
+            self.log.info(
+                "Collected corresponding shotgrid entity : {}".format(sg_entity)
+            )
+
+    def _find_existing_version(self, code, context):
+
+        filters = [
+            ["project", "is", context.data.get("shotgridProject")],
+            ["sg_task", "is", context.data.get("shotgridTask")],
+            ["entity", "is", context.data.get("shotgridEntity")],
+            ["code", "is", code],
+        ]
+
+        sg = context.data.get("shotgridSession")
+        return sg.find_one("Version", filters, [])
+
+
+def _get_shotgrid_collection(project):
+    client = OpenPypeMongoConnection.get_mongo_client()
+    return client.get_database("shotgrid_openpype").get_collection(project)
+
+
+def _get_shotgrid_project(context):
+    shotgrid_project_id = context.data["project_settings"].get(
+        "shotgrid_project_id")
+    if shotgrid_project_id:
+        return {"type": "Project", "id": shotgrid_project_id}
+    return {}
+
+
+def _get_shotgrid_task(avalon_project, avalon_asset, avalon_task):
+    sg_col = _get_shotgrid_collection(avalon_project["name"])
+    shotgrid_task_hierarchy_row = sg_col.find_one(
+        {
+            "type": "Task",
+            "_id": {"$regex": "^" + avalon_task + "_[0-9]*"},
+            "parent": {"$regex": ".*," + avalon_asset["name"] + ","},
+        }
+    )
+    if shotgrid_task_hierarchy_row:
+        return {"type": "Task", "id": shotgrid_task_hierarchy_row["src_id"]}
+    return {}
+
+
+def _get_shotgrid_entity(avalon_project, avalon_asset):
+    sg_col = _get_shotgrid_collection(avalon_project["name"])
+    shotgrid_entity_hierarchy_row = sg_col.find_one(
+        {"_id": avalon_asset["name"]}
+    )
+    if shotgrid_entity_hierarchy_row:
+        return {
+            "type": shotgrid_entity_hierarchy_row["type"],
+            "id": shotgrid_entity_hierarchy_row["src_id"],
+        }
+    return {}
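The two mongo lookups above query the per-project collection that the shotgrid leecher keeps in the `shotgrid_openpype` database. A hypothetical document shape that would satisfy both queries (the actual leecher schema may differ):

```python
# Hypothetical hierarchy row synced by the leecher for task "modeling"
# under asset "chair":
task_row = {
    "_id": "modeling_5981",           # "<task name>_<digits>"
    "type": "Task",
    "src_id": 5981,                   # id of the entity in Shotgrid
    "parent": ",myproject,assets,chair,",
}

# _get_shotgrid_task() would then match it with:
# sg_col.find_one({
#     "type": "Task",
#     "_id": {"$regex": "^modeling_[0-9]*"},
#     "parent": {"$regex": ".*,chair,"},
# })
# and return {"type": "Task", "id": 5981}.
```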
diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py
new file mode 100644
index 0000000000..9d5d2271bf
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py
@@ -0,0 +1,123 @@
+import os
+
+import pyblish.api
+import shotgun_api3
+from shotgun_api3.shotgun import AuthenticationFault
+
+from openpype.lib import OpenPypeSettingsRegistry
+from openpype.modules.shotgrid.lib.settings import (
+    get_shotgrid_servers,
+    get_shotgrid_project_settings,
+)
+
+
+class CollectShotgridSession(pyblish.api.ContextPlugin):
+    """Collect shotgrid session using user credentials"""
+
+    order = pyblish.api.CollectorOrder
+    label = "Shotgrid user session"
+
+    def process(self, context):
+
+        certificate_path = os.getenv("SHOTGUN_API_CACERTS")
+        if certificate_path is None or not os.path.exists(certificate_path):
+            self.log.info(
+                "SHOTGUN_API_CACERTS does not contain a valid "
+                "path: {}".format(certificate_path)
+            )
+            certificate_path = get_shotgrid_certificate()
+            self.log.info("Using certificate from shotgun_api3")
+
+        if not os.path.exists(certificate_path):
+            self.log.error(
+                "Could not find certificate in shotgun_api3: "
+                "{}".format(certificate_path)
+            )
+            return
+
+        set_shotgrid_certificate(certificate_path)
+        self.log.info("Set Certificate: {}".format(certificate_path))
+
+        avalon_project = os.getenv("AVALON_PROJECT")
+
+        shotgrid_settings = get_shotgrid_project_settings(avalon_project)
+        self.log.info("shotgrid settings: {}".format(shotgrid_settings))
+        shotgrid_servers_settings = get_shotgrid_servers()
+        self.log.info(
+            "shotgrid_servers_settings: {}".format(shotgrid_servers_settings)
+        )
+
+        shotgrid_server = shotgrid_settings.get("shotgrid_server", "")
+        if not shotgrid_server:
+            self.log.error(
+                "No Shotgrid server found, please choose a server and enter "
+                "script name and script key in OpenPype settings"
+            )
+
+        shotgrid_server_setting = shotgrid_servers_settings.get(
+            shotgrid_server, {}
+        )
+        shotgrid_url = shotgrid_server_setting.get("shotgrid_url", "")
+
+        shotgrid_script_name = shotgrid_server_setting.get(
+            "shotgrid_script_name", ""
+        )
+        shotgrid_script_key = shotgrid_server_setting.get(
+            "shotgrid_script_key", ""
+        )
+        if not shotgrid_script_name and not shotgrid_script_key:
+            self.log.error(
+                "No Shotgrid api credential found, please enter "
+                "script name and script key in OpenPype settings"
+            )
+
+        login = get_login() or os.getenv("OPENPYPE_SG_USER")
+
+        if not login:
+            self.log.error(
+                "No Shotgrid login found, please "
+                "log in to Shotgrid within the OpenPype tray"
+            )
+
+        session = shotgun_api3.Shotgun(
+            base_url=shotgrid_url,
+            script_name=shotgrid_script_name,
+            api_key=shotgrid_script_key,
+            sudo_as_login=login,
+        )
+
+        try:
+            session.preferences_read()
+        except AuthenticationFault:
+            raise ValueError(
+                "Could not connect to shotgrid {} with user {}".format(
+                    shotgrid_url, login
+                )
+            )
+
+        self.log.info(
+            "Logged in to shotgrid {} with user {}".format(shotgrid_url, login)
+        )
+        context.data["shotgridSession"] = session
+        context.data["shotgridUser"] = login
+
+
+def get_shotgrid_certificate():
+    shotgun_api_path = os.path.dirname(shotgun_api3.__file__)
+    return os.path.join(shotgun_api_path, "lib", "certifi", "cacert.pem")
+
+
+def set_shotgrid_certificate(certificate):
+    os.environ["SHOTGUN_API_CACERTS"] = certificate
+
+
+def get_login():
+    reg = OpenPypeSettingsRegistry()
+    try:
+        return str(reg.get_item("shotgrid_login"))
+    except Exception:
+        return None
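The collector authenticates with a script key while acting on behalf of the logged-in user via `sudo_as_login`. All keyword arguments below appear in the plugin above; the values are placeholders:

```python
import shotgun_api3

session = shotgun_api3.Shotgun(
    base_url="https://mystudio.shotgunstudio.com",
    script_name="openpype_publish",
    api_key="SCRIPT_KEY_FROM_OPENPYPE_SETTINGS",
    sudo_as_login="jane.doe",
)
# Cheap call used purely as a connectivity/authentication check.
session.preferences_read()
```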
diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py
new file mode 100644
index 0000000000..cfd2d10fd9
--- /dev/null
+++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py
@@ -0,0 +1,77 @@
+import os
+import pyblish.api
+
+
+class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
+    """
+    Create PublishedFile entries from representations and add them to
+    the version. If a representation is tagged for shotgrid review, its
+    path is also set as path to movie for a movie file, or path to frames
+    for an image sequence.
+    """
+
+    order = pyblish.api.IntegratorOrder + 0.499
+    label = "Shotgrid Published Files"
+
+    def process(self, instance):
+
+        context = instance.context
+
+        self.sg = context.data.get("shotgridSession")
+
+        shotgrid_version = instance.data.get("shotgridVersion")
+
+        for representation in instance.data.get("representations", []):
+
+            local_path = representation.get("published_path")
+            code = os.path.basename(local_path)
+
+            if representation.get("tags", []):
+                continue
+
+            published_file = self._find_existing_publish(
+                code, context, shotgrid_version
+            )
+
+            published_file_data = {
+                "project": context.data.get("shotgridProject"),
+                "code": code,
+                "entity": context.data.get("shotgridEntity"),
+                "task": context.data.get("shotgridTask"),
+                "version": shotgrid_version,
+                "path": {"local_path": local_path},
+            }
+            if not published_file:
+                published_file = self._create_published(published_file_data)
+                self.log.info(
+                    "Create Shotgrid PublishedFile: {}".format(published_file)
+                )
+            else:
+                self.sg.update(
+                    published_file["type"],
+                    published_file["id"],
+                    published_file_data,
+                )
+                self.log.info(
+                    "Update Shotgrid PublishedFile: {}".format(published_file)
+                )
+
+            if instance.data["family"] == "image":
+                self.sg.upload_thumbnail(
+                    published_file["type"], published_file["id"], local_path
+                )
+            instance.data["shotgridPublishedFile"] = published_file
+
+    def _find_existing_publish(self, code, context, shotgrid_version):
+
+        filters = [
+            ["project", "is", context.data.get("shotgridProject")],
+            ["task", "is", context.data.get("shotgridTask")],
+            ["entity", "is", context.data.get("shotgridEntity")],
+            ["version", "is", shotgrid_version],
+            ["code", "is", code],
+        ]
+        return self.sg.find_one("PublishedFile", filters, [])
+
+    def _create_published(self, published_file_data):
+
+        return self.sg.create("PublishedFile", published_file_data)
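The `IntegrateShotgridVersion` plugin that follows assembles its version code by simply joining anatomy data (see its TODO about using the path template solver instead). A worked example with hypothetical values:

```python
anatomy = {
    "project": {"code": "myp"},
    "parent": "seq010",
    "asset": "sh010",
    "task": {"name": "comp"},
    "version": 3,
}

code = "_".join([
    anatomy["project"]["code"],
    anatomy["parent"],
    anatomy["asset"],
    anatomy["task"]["name"],
    "v{:03}".format(int(anatomy["version"])),
])
# -> "myp_seq010_sh010_comp_v003"
```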
representation.get("tags", []): + + if representation["ext"] in ["mov", "avi"]: + self.log.info( + "Upload review: {} for version shotgrid {}".format( + local_path, version.get("id") + ) + ) + self.sg.upload( + "Version", + version.get("id"), + local_path, + field_name="sg_uploaded_movie", + ) + + data_to_update["sg_path_to_movie"] = local_path + + elif representation["ext"] in ["jpg", "png", "exr", "tga"]: + path_to_frame = local_path.replace("0000", "#") + data_to_update["sg_path_to_frames"] = path_to_frame + + self.log.info("Update Shotgrid version with {}".format(data_to_update)) + self.sg.update("Version", version["id"], data_to_update) + + instance.data["shotgridVersion"] = version + + def _find_existing_version(self, code, context): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["sg_task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["code", "is", code], + ] + return self.sg.find_one("Version", filters, []) + + def _create_version(self, code, context): + + version_data = { + "project": context.data.get("shotgridProject"), + "sg_task": context.data.get("shotgridTask"), + "entity": context.data.get("shotgridEntity"), + "code": code, + } + + return self.sg.create("Version", version_data) diff --git a/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py new file mode 100644 index 0000000000..c14c980e2a --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py @@ -0,0 +1,38 @@ +import pyblish.api +import openpype.api + + +class ValidateShotgridUser(pyblish.api.ContextPlugin): + """ + Check if user is valid and have access to the project. + """ + + label = "Validate Shotgrid User" + order = openpype.api.ValidateContentsOrder + + def process(self, context): + sg = context.data.get("shotgridSession") + + login = context.data.get("shotgridUser") + self.log.info("Login shotgrid set in OpenPype is {}".format(login)) + project = context.data.get("shotgridProject") + self.log.info("Current shotgun project is {}".format(project)) + + if not (login and sg and project): + raise KeyError() + + user = sg.find_one("HumanUser", [["login", "is", login]], ["projects"]) + + self.log.info(user) + self.log.info(login) + user_projects_id = [p["id"] for p in user.get("projects", [])] + if not project.get("id") in user_projects_id: + raise PermissionError( + "Login {} don't have access to the project {}".format( + login, project + ) + ) + + self.log.info( + "Login {} have access to the project {}".format(login, project) + ) diff --git a/openpype/modules/shotgrid/server/README.md b/openpype/modules/shotgrid/server/README.md new file mode 100644 index 0000000000..15e056ff3e --- /dev/null +++ b/openpype/modules/shotgrid/server/README.md @@ -0,0 +1,5 @@ + +### Shotgrid server + +Please refer to the external project that covers Openpype/Shotgrid communication: + - https://github.com/Ellipsanime/shotgrid-leecher diff --git a/openpype/modules/shotgrid/shotgrid_module.py b/openpype/modules/shotgrid/shotgrid_module.py new file mode 100644 index 0000000000..5644f0c35f --- /dev/null +++ b/openpype/modules/shotgrid/shotgrid_module.py @@ -0,0 +1,58 @@ +import os + +from openpype_interfaces import ( + ITrayModule, + IPluginPaths, + ILaunchHookPaths, +) + +from openpype.modules import OpenPypeModule + +SHOTGRID_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class ShotgridModule( + OpenPypeModule, ITrayModule, 
IPluginPaths, ILaunchHookPaths +): + leecher_manager_url = None + name = "shotgrid" + enabled = False + project_id = None + tray_wrapper = None + + def initialize(self, modules_settings): + shotgrid_settings = modules_settings.get(self.name, dict()) + self.enabled = shotgrid_settings.get("enabled", False) + self.leecher_manager_url = shotgrid_settings.get( + "leecher_manager_url", "" + ) + + def connect_with_modules(self, enabled_modules): + pass + + def get_global_environments(self): + return {"PROJECT_ID": self.project_id} + + def get_plugin_paths(self): + return { + "publish": [ + os.path.join(SHOTGRID_MODULE_DIR, "plugins", "publish") + ] + } + + def get_launch_hook_paths(self): + return os.path.join(SHOTGRID_MODULE_DIR, "hooks") + + def tray_init(self): + from .tray.shotgrid_tray import ShotgridTrayWrapper + + self.tray_wrapper = ShotgridTrayWrapper(self) + + def tray_start(self): + return self.tray_wrapper.validate() + + def tray_exit(self, *args, **kwargs): + return self.tray_wrapper + + def tray_menu(self, tray_menu): + return self.tray_wrapper.tray_menu(tray_menu) diff --git a/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py new file mode 100644 index 0000000000..1f78cf77c9 --- /dev/null +++ b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py @@ -0,0 +1,34 @@ +import pytest +from assertpy import assert_that + +import openpype.modules.shotgrid.lib.credentials as sut + + +def test_missing_shotgrid_url(): + with pytest.raises(Exception) as ex: + # arrange + url = "" + # act + sut.get_shotgrid_hostname(url) + # assert + assert_that(ex).is_equal_to("Shotgrid url cannot be a null") + + +def test_full_shotgrid_url(): + # arrange + url = "https://shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert + assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") + + +def test_incomplete_shotgrid_url(): + # arrange + url = "shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert + assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") diff --git a/openpype/modules/shotgrid/tray/credential_dialog.py b/openpype/modules/shotgrid/tray/credential_dialog.py new file mode 100644 index 0000000000..9d841d98be --- /dev/null +++ b/openpype/modules/shotgrid/tray/credential_dialog.py @@ -0,0 +1,201 @@ +import os +from Qt import QtCore, QtWidgets, QtGui + +from openpype import style +from openpype import resources +from openpype.modules.shotgrid.lib import settings, credentials + + +class CredentialsDialog(QtWidgets.QDialog): + SIZE_W = 450 + SIZE_H = 200 + + _module = None + _is_logged = False + url_label = None + login_label = None + password_label = None + url_input = None + login_input = None + password_input = None + input_layout = None + login_button = None + buttons_layout = None + main_widget = None + + login_changed = QtCore.Signal() + + def __init__(self, module, parent=None): + super(CredentialsDialog, self).__init__(parent) + + self._module = module + self._is_logged = False + + self.setWindowTitle("OpenPype - Shotgrid Login") + + icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowMinimizeButtonHint + ) + self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) + self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100)) + 
self.setStyleSheet(style.load_stylesheet()) + + self.ui_init() + + def ui_init(self): + self.url_label = QtWidgets.QLabel("Shotgrid server:") + self.login_label = QtWidgets.QLabel("Login:") + self.password_label = QtWidgets.QLabel("Password:") + + self.url_input = QtWidgets.QComboBox() + # self.url_input.setReadOnly(True) + + self.login_input = QtWidgets.QLineEdit() + self.login_input.setPlaceholderText("login") + + self.password_input = QtWidgets.QLineEdit() + self.password_input.setPlaceholderText("password") + self.password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + self.error_label = QtWidgets.QLabel("") + self.error_label.setStyleSheet("color: red;") + self.error_label.setWordWrap(True) + self.error_label.hide() + + self.input_layout = QtWidgets.QFormLayout() + self.input_layout.setContentsMargins(10, 15, 10, 5) + + self.input_layout.addRow(self.url_label, self.url_input) + self.input_layout.addRow(self.login_label, self.login_input) + self.input_layout.addRow(self.password_label, self.password_input) + self.input_layout.addRow(self.error_label) + + self.login_button = QtWidgets.QPushButton("Login") + self.login_button.setToolTip("Log in shotgrid instance") + self.login_button.clicked.connect(self._on_shotgrid_login_clicked) + + self.logout_button = QtWidgets.QPushButton("Logout") + self.logout_button.setToolTip("Log out shotgrid instance") + self.logout_button.clicked.connect(self._on_shotgrid_logout_clicked) + + self.buttons_layout = QtWidgets.QHBoxLayout() + self.buttons_layout.addWidget(self.logout_button) + self.buttons_layout.addWidget(self.login_button) + + self.main_widget = QtWidgets.QVBoxLayout(self) + self.main_widget.addLayout(self.input_layout) + self.main_widget.addLayout(self.buttons_layout) + self.setLayout(self.main_widget) + + def show(self, *args, **kwargs): + super(CredentialsDialog, self).show(*args, **kwargs) + self._fill_shotgrid_url() + self._fill_shotgrid_login() + + def _fill_shotgrid_url(self): + servers = settings.get_shotgrid_servers() + + if servers: + for _, v in servers.items(): + self.url_input.addItem("{}".format(v.get('shotgrid_url'))) + self._valid_input(self.url_input) + self.login_button.show() + self.logout_button.show() + enabled = True + else: + self.set_error("Ask your admin to add shotgrid server in settings") + self._invalid_input(self.url_input) + self.login_button.hide() + self.logout_button.hide() + enabled = False + + self.login_input.setEnabled(enabled) + self.password_input.setEnabled(enabled) + + def _fill_shotgrid_login(self): + login = credentials.get_local_login() + + if login: + self.login_input.setText(login) + + def _clear_shotgrid_login(self): + self.login_input.setText("") + self.password_input.setText("") + + def _on_shotgrid_login_clicked(self): + login = self.login_input.text().strip() + password = self.password_input.text().strip() + missing = [] + + if login == "": + missing.append("login") + self._invalid_input(self.login_input) + + if password == "": + missing.append("password") + self._invalid_input(self.password_input) + + url = self.url_input.currentText() + if url == "": + missing.append("url") + self._invalid_input(self.url_input) + + if len(missing) > 0: + self.set_error("You didn't enter {}".format(" and ".join(missing))) + return + + # if credentials.check_credentials( + # login=login, + # password=password, + # shotgrid_url=url, + # ): + credentials.save_local_login( + login=login + ) + os.environ['OPENPYPE_SG_USER'] = login + self._on_login() + + self.set_error("CANT LOGIN") + + def 
_on_shotgrid_logout_clicked(self):
+        credentials.clear_local_login()
+        os.environ.pop('OPENPYPE_SG_USER', None)
+        self._clear_shotgrid_login()
+        self._on_logout()
+
+    def set_error(self, msg):
+        self.error_label.setText(msg)
+        self.error_label.show()
+
+    def _on_login(self):
+        self._is_logged = True
+        self.login_changed.emit()
+        self._close_widget()
+
+    def _on_logout(self):
+        self._is_logged = False
+        self.login_changed.emit()
+
+    def _close_widget(self):
+        self.hide()
+
+    def _valid_input(self, input_widget):
+        input_widget.setStyleSheet("")
+
+    def _invalid_input(self, input_widget):
+        input_widget.setStyleSheet("border: 1px solid red;")
+
+    def login_with_credentials(
+        self, url, login, password
+    ):
+        verification = credentials.check_credentials(
+            login=login, password=password, shotgrid_url=url
+        )
+        if verification:
+            credentials.save_credentials(login, password, url)
+            self._module.set_credentials_to_env(login, password)
+            self.set_credentials(login, password)
+            self.login_changed.emit()
+        return verification
diff --git a/openpype/modules/shotgrid/tray/shotgrid_tray.py b/openpype/modules/shotgrid/tray/shotgrid_tray.py
new file mode 100644
index 0000000000..4038d77b03
--- /dev/null
+++ b/openpype/modules/shotgrid/tray/shotgrid_tray.py
@@ -0,0 +1,75 @@
+import os
+import webbrowser
+
+from Qt import QtWidgets
+
+from openpype.modules.shotgrid.lib import credentials
+from openpype.modules.shotgrid.tray.credential_dialog import (
+    CredentialsDialog,
+)
+
+
+class ShotgridTrayWrapper:
+    module = None
+    credentials_dialog = None
+    logged_user_label = None
+
+    def __init__(self, module):
+        self.module = module
+        self.credentials_dialog = CredentialsDialog(module)
+        self.credentials_dialog.login_changed.connect(self.set_login_label)
+        self.logged_user_label = QtWidgets.QAction("")
+        self.logged_user_label.setDisabled(True)
+        self.set_login_label()
+
+    def show_batch_dialog(self):
+        if self.module.leecher_manager_url:
+            webbrowser.open(self.module.leecher_manager_url)
+
+    def show_connect_dialog(self):
+        self.show_credential_dialog()
+
+    def show_credential_dialog(self):
+        self.credentials_dialog.show()
+        self.credentials_dialog.activateWindow()
+        self.credentials_dialog.raise_()
+
+    def set_login_label(self):
+        login = credentials.get_local_login()
+        if login:
+            self.logged_user_label.setText("{}".format(login))
+        else:
+            self.logged_user_label.setText("No user logged in")
+
+    def tray_menu(self, tray_menu):
+        # Add login to user menu
+        menu = QtWidgets.QMenu("Shotgrid", tray_menu)
+        show_connect_action = QtWidgets.QAction("Connect to Shotgrid", menu)
+        show_connect_action.triggered.connect(self.show_connect_dialog)
+        menu.addAction(self.logged_user_label)
+        menu.addSeparator()
+        menu.addAction(show_connect_action)
+        tray_menu.addMenu(menu)
+
+        # Add manager to Admin menu
+        for m in tray_menu.findChildren(QtWidgets.QMenu):
+            if m.title() == "Admin":
+                shotgrid_manager_action = QtWidgets.QAction(
+                    "Shotgrid manager", menu
+                )
+                shotgrid_manager_action.triggered.connect(
+                    self.show_batch_dialog
+                )
+                m.addAction(shotgrid_manager_action)
+
+    def validate(self):
+        login = credentials.get_local_login()
+
+        if not login:
+            self.show_credential_dialog()
+        else:
+            os.environ["OPENPYPE_SG_USER"] = login
+
+        return True
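The remainder of this diff is a mechanical rename: every sync-server provider now reports progress with a `project_name` argument instead of `collection`. A minimal sketch of the new call shape, using only the arguments visible in the hunks below (any remaining keyword arguments are assumed unchanged):

```python
def report_progress(server, project_name, file, representation):
    # After the rename the provider passes the project name through to
    # SyncServer.update_db under the new keyword.
    server.update_db(
        project_name=project_name,  # was: collection=collection
        new_file_id=None,
        file=file,
        representation=representation,
    )
```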
diff --git a/openpype/modules/sync_server/providers/abstract_provider.py b/openpype/modules/sync_server/providers/abstract_provider.py
index 688a17f14f..8c2fe1cad9 100644
--- a/openpype/modules/sync_server/providers/abstract_provider.py
+++ b/openpype/modules/sync_server/providers/abstract_provider.py
@@ -62,7 +62,7 @@ class AbstractProvider:
     @abc.abstractmethod
     def upload_file(self, source_path, path,
-                    server, collection, file, representation, site,
+                    server, project_name, file, representation, site,
                     overwrite=False):
         """
             Copy file from 'source_path' to 'target_path' on provider.
@@ -75,7 +75,7 @@ class AbstractProvider:
             arguments for saving progress:
                 server (SyncServer): server instance to call update_db on
-                collection (str): name of collection
+                project_name (str): name of project
                 file (dict): info about uploaded file (matches structure from db)
                 representation (dict): complete repre containing 'file'
                 site (str): site name
@@ -87,7 +87,7 @@ class AbstractProvider:
     @abc.abstractmethod
     def download_file(self, source_path, local_path,
-                      server, collection, file, representation, site,
+                      server, project_name, file, representation, site,
                       overwrite=False):
         """
            Download file from provider into local system
@@ -99,7 +99,7 @@ class AbstractProvider:
             arguments for saving progress:
                 server (SyncServer): server instance to call update_db on
-                collection (str): name of collection
+                project_name (str): name of project
                 file (dict): info about uploaded file (matches structure from db)
                 representation (dict): complete repre containing 'file'
                 site (str): site name
diff --git a/openpype/modules/sync_server/providers/dropbox.py b/openpype/modules/sync_server/providers/dropbox.py
index dfc42fed75..89d6990841 100644
--- a/openpype/modules/sync_server/providers/dropbox.py
+++ b/openpype/modules/sync_server/providers/dropbox.py
@@ -224,7 +224,7 @@ class DropboxHandler(AbstractProvider):
         return False
 
     def upload_file(self, source_path, path,
-                    server, collection, file, representation, site,
+                    server, project_name, file, representation, site,
                     overwrite=False):
        """
           Copy file from 'source_path' to 'target_path' on provider.
@@ -237,7 +237,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -290,7 +290,7 @@ class DropboxHandler(AbstractProvider): cursor.offset = f.tell() server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -301,7 +301,7 @@ class DropboxHandler(AbstractProvider): return path def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download file from provider into local system @@ -313,7 +313,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -337,7 +337,7 @@ class DropboxHandler(AbstractProvider): self.dbx.files_download_to_file(local_path, source_path) server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py index aa7329b104..bef707788b 100644 --- a/openpype/modules/sync_server/providers/gdrive.py +++ b/openpype/modules/sync_server/providers/gdrive.py @@ -251,7 +251,7 @@ class GDriveHandler(AbstractProvider): return folder_id def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -264,7 +264,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -324,7 +324,7 @@ class GDriveHandler(AbstractProvider): while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, - project_name=collection): + project_name=project_name): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) @@ -333,7 +333,7 @@ class GDriveHandler(AbstractProvider): last_tick = time.time() log.debug("Uploaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -358,7 +358,7 @@ class GDriveHandler(AbstractProvider): return response['id'] def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'local_path'. 
@@ -372,7 +372,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -410,7 +410,7 @@ class GDriveHandler(AbstractProvider): while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, - project_name=collection): + project_name=project_name): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) @@ -419,7 +419,7 @@ class GDriveHandler(AbstractProvider): last_tick = time.time() log.debug("Downloaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/sync_server/providers/local_drive.py index 172cb338cf..01bc891d08 100644 --- a/openpype/modules/sync_server/providers/local_drive.py +++ b/openpype/modules/sync_server/providers/local_drive.py @@ -82,7 +82,7 @@ class LocalDriveHandler(AbstractProvider): return editable def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False, direction="Upload"): """ Copies file from 'source_path' to 'target_path' @@ -95,7 +95,7 @@ class LocalDriveHandler(AbstractProvider): thread = threading.Thread(target=self._copy, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, direction) else: if os.path.exists(target_path): @@ -105,13 +105,14 @@ class LocalDriveHandler(AbstractProvider): return os.path.basename(target_path) def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download a file form 'source_path' to 'local_path' """ return self.upload_file(source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, + representation, site, overwrite, direction="Download") def delete_file(self, path): @@ -188,7 +189,7 @@ class LocalDriveHandler(AbstractProvider): except shutil.SameFileError: print("same files, skipping") - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -204,7 +205,7 @@ class LocalDriveHandler(AbstractProvider): status_val = target_file_size / source_file_size last_tick = time.time() log.debug(direction + "ed %d%%." 
% int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/providers/sftp.py b/openpype/modules/sync_server/providers/sftp.py index 49b87b14ec..302ffae3e6 100644 --- a/openpype/modules/sync_server/providers/sftp.py +++ b/openpype/modules/sync_server/providers/sftp.py @@ -222,7 +222,7 @@ class SFTPHandler(AbstractProvider): return os.path.basename(path) def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -235,7 +235,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -256,7 +256,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._upload, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "upload") return os.path.basename(target_path) @@ -267,7 +267,7 @@ class SFTPHandler(AbstractProvider): conn.put(source_path, target_path) def download_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'target_path'. @@ -281,7 +281,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -302,7 +302,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._download, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "download") return os.path.basename(target_path) @@ -425,7 +425,7 @@ class SFTPHandler(AbstractProvider): pysftp.exceptions.ConnectionException): log.warning("Couldn't connect", exc_info=True) - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -446,7 +446,7 @@ class SFTPHandler(AbstractProvider): status_val = target_file_size / source_file_size last_tick = time.time() log.debug(direction + "ed %d%%." 
% int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py index 356a75f99d..97538fcd4e 100644 --- a/openpype/modules/sync_server/sync_server.py +++ b/openpype/modules/sync_server/sync_server.py @@ -14,7 +14,7 @@ from .utils import SyncStatus, ResumableError log = PypeLogger().get_logger("SyncServer") -async def upload(module, collection, file, representation, provider_name, +async def upload(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Upload single 'file' of a 'representation' to 'provider'. @@ -31,7 +31,7 @@ async def upload(module, collection, file, representation, provider_name, Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source db file (dictionary): of file from representation in Mongo representation (dictionary): of representation provider_name (string): gdrive, gdc etc. @@ -47,15 +47,16 @@ async def upload(module, collection, file, representation, provider_name, # thread can do that at a time, upload/download to prepared # structure should be run in parallel remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") try: - local_file_path, remote_file_path = resolve_paths(module, - file_path, collection, remote_site_name, remote_handler + local_file_path, remote_file_path = resolve_paths( + module, file_path, project_name, + remote_site_name, remote_handler ) except Exception as exp: print(exp) @@ -74,27 +75,28 @@ async def upload(module, collection, file, representation, provider_name, local_file_path, remote_file_path, module, - collection, + project_name, file, representation, remote_site_name, True ) - module.handle_alternate_site(collection, representation, remote_site_name, + module.handle_alternate_site(project_name, representation, + remote_site_name, file["_id"], file_id) return file_id -async def download(module, collection, file, representation, provider_name, +async def download(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Downloads file to local folder denoted in representation.Context. 
Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source file (dictionary) : info about processed file representation (dictionary): repr that 'file' belongs to provider_name (string): 'gdrive' etc @@ -108,20 +110,20 @@ async def download(module, collection, file, representation, provider_name, """ with module.lock: remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") local_file_path, remote_file_path = resolve_paths( - module, file_path, collection, remote_site_name, remote_handler + module, file_path, project_name, remote_site_name, remote_handler ) local_folder = os.path.dirname(local_file_path) os.makedirs(local_folder, exist_ok=True) - local_site = module.get_active_site(collection) + local_site = module.get_active_site(project_name) loop = asyncio.get_running_loop() file_id = await loop.run_in_executor(None, @@ -129,20 +131,20 @@ async def download(module, collection, file, representation, provider_name, remote_file_path, local_file_path, module, - collection, + project_name, file, representation, local_site, True ) - module.handle_alternate_site(collection, representation, local_site, + module.handle_alternate_site(project_name, representation, local_site, file["_id"], file_id) return file_id -def resolve_paths(module, file_path, collection, +def resolve_paths(module, file_path, project_name, remote_site_name=None, remote_handler=None): """ Returns tuple of local and remote file paths with {root} @@ -153,7 +155,7 @@ def resolve_paths(module, file_path, collection, Args: module(SyncServerModule): object to run SyncServerModule API file_path(string): path with {root} - collection(string): project name + project_name(string): project name remote_site_name(string): remote site remote_handler(AbstractProvider): implementation Returns: @@ -164,7 +166,7 @@ def resolve_paths(module, file_path, collection, remote_file_path = remote_handler.resolve_path(file_path) local_handler = lib.factory.get_provider( - 'local_drive', collection, module.get_active_site(collection)) + 'local_drive', project_name, module.get_active_site(project_name)) local_file_path = local_handler.resolve_path(file_path) return local_file_path, remote_file_path @@ -269,8 +271,8 @@ class SyncServerThread(threading.Thread): - gets list of collections in DB - gets list of active remote providers (has configuration, credentials) - - for each collection it looks for representations that should - be synced + - for each project_name it looks for representations that + should be synced - synchronize found collections - update representations - fills error messages for exceptions - waits X seconds and repeat @@ -282,17 +284,17 @@ class SyncServerThread(threading.Thread): import time start_time = time.time() self.module.set_sync_project_settings() # clean cache - collection = None + project_name = None enabled_projects = self.module.get_enabled_projects() - for collection in enabled_projects: - preset = self.module.sync_project_settings[collection] + for project_name in enabled_projects: + preset = self.module.sync_project_settings[project_name] - local_site, remote_site = self._working_sites(collection) + local_site, remote_site = self._working_sites(project_name) if not all([local_site, remote_site]): continue sync_repres = self.module.get_sync_representations( - collection, + project_name, local_site, remote_site ) @@ -310,7 +312,7 @@ class 
SyncServerThread(threading.Thread): remote_provider = \ self.module.get_provider_for_site(site=remote_site) handler = lib.factory.get_provider(remote_provider, - collection, + project_name, remote_site, presets=site_preset) limit = lib.factory.get_provider_batch_limit( @@ -341,7 +343,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( upload(self.module, - collection, + project_name, file, sync, remote_provider, @@ -353,7 +355,7 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, remote_site, - collection + project_name )) processed_file_path.add(file_path) if status == SyncStatus.DO_DOWNLOAD: @@ -361,7 +363,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( download(self.module, - collection, + project_name, file, sync, remote_provider, @@ -373,7 +375,7 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, local_site, - collection + project_name )) processed_file_path.add(file_path) @@ -384,12 +386,12 @@ class SyncServerThread(threading.Thread): return_exceptions=True) for file_id, info in zip(files_created, files_processed_info): - file, representation, site, collection = info + file, representation, site, project_name = info error = None if isinstance(file_id, BaseException): error = str(file_id) file_id = None - self.module.update_db(collection, + self.module.update_db(project_name, file_id, file, representation, @@ -399,7 +401,7 @@ class SyncServerThread(threading.Thread): duration = time.time() - start_time log.debug("One loop took {:.2f}s".format(duration)) - delay = self.module.get_loop_delay(collection) + delay = self.module.get_loop_delay(project_name) log.debug("Waiting for {} seconds to new loop".format(delay)) self.timer = asyncio.create_task(self.run_timer(delay)) await asyncio.gather(self.timer) @@ -458,19 +460,19 @@ class SyncServerThread(threading.Thread): self.timer.cancel() self.timer = None - def _working_sites(self, collection): - if self.module.is_project_paused(collection): + def _working_sites(self, project_name): + if self.module.is_project_paused(project_name): log.debug("Both sites same, skipping") return None, None - local_site = self.module.get_active_site(collection) - remote_site = self.module.get_remote_site(collection) + local_site = self.module.get_active_site(project_name) + remote_site = self.module.get_remote_site(project_name) if local_site == remote_site: log.debug("{}-{} sites same, skipping".format(local_site, remote_site)) return None, None - configured_sites = _get_configured_sites(self.module, collection) + configured_sites = _get_configured_sites(self.module, project_name) if not all([local_site in configured_sites, remote_site in configured_sites]): log.debug("Some of the sites {} - {} is not ".format(local_site, diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index 4027561d22..8fdfab9c2e 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -25,6 +25,8 @@ from .providers import lib from .utils import time_function, SyncStatus, SiteAlreadyPresentError +from openpype.client import get_representations, get_representation_by_id + log = PypeLogger.get_logger("SyncServer") @@ -128,12 +130,12 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.projects_processed = set() """ Start of Public API """ - def add_site(self, collection, representation_id, site_name=None, + def 
add_site(self, project_name, representation_id, site_name=None, force=False): """ Adds new site to representation to be synced. - 'collection' must have synchronization enabled (globally or + 'project_name' must have synchronization enabled (globally or project only) Used as a API endpoint from outside applications (Loader etc). @@ -141,7 +143,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Use 'force' to reset existing site. Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site force (bool): reset site if exists @@ -151,25 +153,25 @@ class SyncServerModule(OpenPypeModule, ITrayModule): not 'force' ValueError - other errors (repre not found, misconfiguration) """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") if not site_name: site_name = self.DEFAULT_SITE - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, force=force) - def remove_site(self, collection, representation_id, site_name, + def remove_site(self, project_name, representation_id, site_name, remove_local_files=False): """ Removes 'site_name' for particular 'representation_id' on - 'collection' + 'project_name' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site remove_local_files (bool): remove only files for 'local_id' @@ -178,15 +180,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: throws ValueError if any issue """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, remove=True) if remove_local_files: - self._remove_local_file(collection, representation_id, site_name) + self._remove_local_file(project_name, representation_id, site_name) def compute_resource_sync_sites(self, project_name): """Get available resource sync sites state for publish process. @@ -333,9 +335,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return alt_site_pairs - def clear_project(self, collection, site_name): + def clear_project(self, project_name, site_name): """ - Clear 'collection' of 'site_name' and its local files + Clear 'project_name' of 'site_name' and its local files Works only on real local sites, not on 'studio' """ @@ -344,16 +346,17 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "files.sites.name": site_name } + # TODO currently not possible to replace with get_representations representations = list( - self.connection.database[collection].find(query)) + self.connection.database[project_name].find(query)) if not representations: self.log.debug("No repre found") return for repre in representations: - self.remove_site(collection, repre.get("_id"), site_name, True) + self.remove_site(project_name, repre.get("_id"), site_name, True) - def create_validate_project_task(self, collection, site_name): + def create_validate_project_task(self, project_name, site_name): """Adds metadata about project files validation on a queue. 
This process will loop through all representation and check if @@ -370,33 +373,28 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ task = { "type": "validate", - "project_name": collection, - "func": lambda: self.validate_project(collection, site_name, + "project_name": project_name, + "func": lambda: self.validate_project(project_name, site_name, reset_missing=True) } - self.projects_processed.add(collection) + self.projects_processed.add(project_name) self.long_running_tasks.append(task) - def validate_project(self, collection, site_name, reset_missing=False): - """Validate 'collection' of 'site_name' and its local files + def validate_project(self, project_name, site_name, reset_missing=False): + """Validate 'project_name' of 'site_name' and its local files If file present and not marked with a 'site_name' in DB, DB is updated with site name and file modified date. Args: - collection (string): project name + project_name (string): project name site_name (string): active site name reset_missing (bool): if True reset site in DB if missing physically """ - self.log.debug("Validation of {} for {} started".format(collection, + self.log.debug("Validation of {} for {} started".format(project_name, site_name)) - query = { - "type": "representation" - } - - representations = list( - self.connection.database[collection].find(query)) + representations = list(get_representations(project_name)) if not representations: self.log.debug("No repre found") return @@ -416,7 +414,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): continue file_path = repre_file.get("path", "") - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file_path) @@ -428,14 +426,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "Adding site {} for {}".format(site_name, repre_id)) - query = { - "_id": repre_id - } created_dt = datetime.fromtimestamp( os.path.getmtime(local_file_path)) elem = {"name": site_name, "created_dt": created_dt} - self._add_site(collection, query, repre, elem, + self._add_site(project_name, repre, elem, site_name=site_name, file_id=repre_file["_id"], force=True) @@ -445,41 +440,42 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.log.debug("Resetting site {} for {}". format(site_name, repre_id)) self.reset_site_on_representation( - collection, repre_id, site_name=site_name, + project_name, repre_id, site_name=site_name, file_id=repre_file["_id"]) sites_reset += 1 if sites_added % 100 == 0: self.log.debug("Sites added {}".format(sites_added)) - self.log.debug("Validation of {} for {} ended".format(collection, + self.log.debug("Validation of {} for {} ended".format(project_name, site_name)) self.log.info("Sites added {}, sites reset {}".format(sites_added, reset_missing)) - def pause_representation(self, collection, representation_id, site_name): + def pause_representation(self, project_name, representation_id, site_name): """ Sets 'representation_id' as paused, eg. no syncing should be happening on it. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. 
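Since pausing is part of the public API used by the tray tool, a short sketch of the intended round trip (the `sync_server` module instance and the representation id are placeholders):

```python
# Hypothetical pause/unpause round trip; 'sync_server' is an enabled
# SyncServerModule instance and the representation id is made up.
project_name = "my_project"
repre_id = "607f1f77bcf86cd799439011"

sync_server.pause_representation(project_name, repre_id, site_name="gdrive")
assert sync_server.is_representation_paused(repre_id)

sync_server.unpause_representation(project_name, repre_id, site_name="gdrive")
assert not sync_server.is_representation_paused(repre_id)
```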
""" log.info("Pausing SyncServer for {}".format(representation_id)) self._paused_representations.add(representation_id) - self.reset_site_on_representation(collection, representation_id, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=True) - def unpause_representation(self, collection, representation_id, site_name): + def unpause_representation(self, project_name, + representation_id, site_name): """ Sets 'representation_id' as unpaused. Does not fail or warn if repre wasn't paused. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. """ @@ -489,7 +485,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): except KeyError: pass # self.paused_representations is not persistent - self.reset_site_on_representation(collection, representation_id, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=False) def is_representation_paused(self, representation_id, @@ -520,7 +516,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): happening on all representation inside. Args: - project_name (string): collection name + project_name (string): project_name name """ log.info("Pausing SyncServer for {}".format(project_name)) self._paused_projects.add(project_name) @@ -532,7 +528,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Does not fail or warn if project wasn't paused. Args: - project_name (string): collection name + project_name (string): """ log.info("Unpausing SyncServer for {}".format(project_name)) try: @@ -545,7 +541,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns if 'project_name' is paused or not. Args: - project_name (string): collection name + project_name (string): check_parents (bool): check if server itself is not paused Returns: @@ -944,8 +940,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return True return False - def handle_alternate_site(self, collection, representation, processed_site, - file_id, synced_file_id): + def handle_alternate_site(self, project_name, representation, + processed_site, file_id, synced_file_id): """ For special use cases where one site vendors another. @@ -958,7 +954,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): same location >> file is accesible on 'sftp' site right away. 
Args: - collection (str): name of project + project_name (str): name of project representation (dict) processed_site (str): real site_name of published/uploaded file file_id (ObjectId): DB id of file handled @@ -982,26 +978,112 @@ class SyncServerModule(OpenPypeModule, ITrayModule): alternate_sites = set(alternate_sites) for alt_site in alternate_sites: - query = { - "_id": representation["_id"] - } elem = {"name": alt_site, "created_dt": datetime.now(), "id": synced_file_id} self.log.debug("Adding alternate {} to {}".format( alt_site, representation["_id"])) - self._add_site(collection, query, + self._add_site(project_name, representation, elem, alt_site, file_id=file_id, force=True) + def get_repre_info_for_versions(self, project_name, version_ids, + active_site, remote_site): + """Returns representation documents for versions and sites combination + + Args: + project_name (str) + version_ids (list): of version[_id] + active_site (string): 'local', 'studio' etc + remote_site (string): ditto + Returns: + (cursor): one aggregated document per version id with 'repre_count' + and 'avail_repre_local'/'avail_repre_remote' availability sums + """ + self.connection.Session["AVALON_PROJECT"] = project_name + query = [ + {"$match": {"parent": {"$in": version_ids}, + "type": "representation", + "files.sites.name": {"$exists": 1}}}, + {"$unwind": "$files"}, + {'$addFields': { + 'order_local': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', active_site]} + } + } + }}, + {'$addFields': { + 'order_remote': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', remote_site]} + } + } + }}, + {'$addFields': { + 'progress_local': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_local.progress"}, + "$order_local.progress", + # if created_dt exists, count it as available + {'$cond': [ + {'$size': "$order_local.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$addFields': { + 'progress_remote': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_remote.progress"}, + "$order_remote.progress", + # if created_dt exists, count it as available + {'$cond': [ + {'$size': "$order_remote.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$group': { # first group by repre + '_id': '$_id', + 'parent': {'$first': '$parent'}, + 'avail_ratio_local': { + '$first': { + '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] + } + }, + 'avail_ratio_remote': { + '$first': { + '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] + } + } + }}, + {'$group': { # second group by parent, eg version_id + '_id': '$parent', + 'repre_count': {'$sum': 1}, # total representations + # fully available representations for site + 'avail_repre_local': {'$sum': "$avail_ratio_local"}, + 'avail_repre_remote': {'$sum': "$avail_ratio_remote"}, + }}, + ] + # docs = list(self.connection.aggregate(query)) + return self.connection.aggregate(query) + """ End of Public API """ - def get_local_file_path(self, collection, site_name, file_path): + def get_local_file_path(self, project_name, site_name, file_path): """ Externalized for app """ - handler = LocalDriveHandler(collection, site_name) + handler = LocalDriveHandler(project_name, site_name) local_file_path = handler.resolve_path(file_path) return local_file_path @@ -1288,7 +1370,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return sites.get(site, 'N/A') @time_function - def get_sync_representations(self, collection, active_site, remote_site): + def get_sync_representations(self, project_name, active_site, remote_site): """ Get representations that should be synced, these could be recognised by presence of document in 'files.sites', where key
is @@ -1299,8 +1381,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): better performance. Goal is to get as few representations as possible. Args: - collection (string): name of collection (in most cases matches - project name + project_name (string): project name active_site (string): identifier of current active site (could be 'local_0' when working from home, 'studio' when working in the studio (default) @@ -1309,10 +1390,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: (list) of dictionaries """ - log.debug("Check representations for : {}".format(collection)) - self.connection.Session["AVALON_PROJECT"] = collection + log.debug("Check representations for : {}".format(project_name)) + self.connection.Session["AVALON_PROJECT"] = project_name # retry_cnt - number of attempts to sync specific file before giving up - retries_arr = self._get_retries_arr(collection) + retries_arr = self._get_retries_arr(project_name) match = { "type": "representation", "$or": [ @@ -1449,14 +1530,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return SyncStatus.DO_NOTHING - def update_db(self, collection, new_file_id, file, representation, + def update_db(self, project_name, new_file_id, file, representation, site, error=None, progress=None, priority=None): """ Update 'provider' portion of records in DB with success (file_id) or error (exception) Args: - collection (string): name of project - force to db connection as + project_name (string): name of project - force to db connection as each file might come from a different project new_file_id (string): file (dictionary): info about processed file (pulled from DB) @@ -1499,7 +1580,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if file_id: arr_filter.append({'f._id': ObjectId(file_id)}) - self.connection.database[collection].update_one( + self.connection.database[project_name].update_one( query, update, upsert=True, @@ -1562,7 +1643,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return -1, None - def reset_site_on_representation(self, collection, representation_id, + def reset_site_on_representation(self, project_name, representation_id, side=None, file_id=None, site_name=None, remove=False, pause=None, force=False): """ @@ -1579,7 +1660,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Should be used when repre should be synced to new site. Args: - collection (string): name of project (eg. collection) in DB + project_name (string): name of project (eg. collection) in DB representation_id(string): _id of representation file_id (string): file _id in representation side (string): local or remote side @@ -1593,20 +1674,18 @@ class SyncServerModule(OpenPypeModule, ITrayModule): not 'force' ValueError - other errors (repre not found, misconfiguration) """ - query = { - "_id": ObjectId(representation_id) - } - - representation = self.connection.database[collection].find_one(query) + representation = get_representation_by_id(project_name, + representation_id) if not representation: raise ValueError("Representation {} not found in {}".
- format(representation_id, collection)) + format(representation_id, project_name)) + if side and site_name: raise ValueError("Misconfiguration, only one of side and " + "site_name arguments should be passed.") - local_site = self.get_active_site(collection) - remote_site = self.get_remote_site(collection) + local_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) if side: if side == 'local': @@ -1617,37 +1696,43 @@ class SyncServerModule(OpenPypeModule, ITrayModule): elem = {"name": site_name} if file_id: # reset site for particular file - self._reset_site_for_file(collection, query, + self._reset_site_for_file(project_name, representation_id, elem, file_id, site_name) elif side: # reset site for whole representation - self._reset_site(collection, query, elem, site_name) + self._reset_site(project_name, representation_id, elem, site_name) elif remove: # remove site for whole representation - self._remove_site(collection, query, representation, site_name) + self._remove_site(project_name, + representation, site_name) elif pause is not None: - self._pause_unpause_site(collection, query, + self._pause_unpause_site(project_name, representation, site_name, pause) else: # add new site to all files for representation - self._add_site(collection, query, representation, elem, site_name, + self._add_site(project_name, representation, elem, site_name, force=force) - def _update_site(self, collection, query, update, arr_filter): + def _update_site(self, project_name, representation_id, + update, arr_filter): """ Auxiliary method to call update_one function on DB Used for refactoring ugly reset_provider_for_file """ - self.connection.database[collection].update_one( + query = { + "_id": ObjectId(representation_id) + } + + self.connection.database[project_name].update_one( query, update, upsert=True, array_filters=arr_filter ) - def _reset_site_for_file(self, collection, query, + def _reset_site_for_file(self, project_name, representation_id, elem, file_id, site_name): """ Resets 'site_name' for 'file_id' on representation in 'query' on - 'collection' + 'project_name' """ update = { "$set": {"files.$[f].sites.$[s]": elem} @@ -1660,9 +1745,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _reset_site(self, collection, query, elem, site_name): + def _reset_site(self, project_name, representation_id, elem, site_name): """ Resets 'site_name' for all files of representation in 'query' """ @@ -1674,9 +1759,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _remove_site(self, collection, query, representation, site_name): + def _remove_site(self, project_name, representation, site_name): """ Removes 'site_name' for 'representation' in 'query' @@ -1698,10 +1783,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): } arr_filter = [] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) - def _pause_unpause_site(self, collection, query, - representation, site_name, pause): + def _pause_unpause_site(self, project_name, representation, + site_name, pause): """ Pauses/unpauses all files for 'representation' based on 'pause' @@ -1733,12 +1819,13 @@ class 
SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) - def _add_site(self, collection, query, representation, elem, site_name, + def _add_site(self, project_name, representation, elem, site_name, force=False, file_id=None): """ - Adds 'site_name' to 'representation' on 'collection' + Adds 'site_name' to 'representation' on 'project_name' Args: representation (dict) @@ -1746,10 +1833,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Use 'force' to remove existing or raises ValueError """ + representation_id = representation["_id"] reset_existing = False files = representation.get("files", []) if not files: - log.debug("No files for {}".format(representation["_id"])) + log.debug("No files for {}".format(representation_id)) return for repre_file in files: @@ -1759,7 +1847,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): for site in repre_file.get("sites"): if site["name"] == site_name: if force or site.get("error"): - self._reset_site_for_file(collection, query, + self._reset_site_for_file(project_name, + representation_id, elem, repre_file["_id"], site_name) reset_existing = True @@ -1785,14 +1874,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, + update, arr_filter) - def _remove_local_file(self, collection, representation_id, site_name): + def _remove_local_file(self, project_name, representation_id, site_name): """ Removes all local files for 'site_name' of 'representation_id' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site @@ -1808,21 +1898,17 @@ class SyncServerModule(OpenPypeModule, ITrayModule): provider_name = self.get_provider_for_site(site=site_name) if provider_name == 'local_drive': - query = { - "_id": ObjectId(representation_id) - } - - representation = list( - self.connection.database[collection].find(query)) + representation = get_representation_by_id(project_name, + representation_id, + fields=["files"]) if not representation: self.log.debug("No repre {} found".format( representation_id)) return - representation = representation.pop() local_file_path = '' for file in representation.get("files"): - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file.get("path", "") ) diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py index 6d1e85c17a..629c4cbbf1 100644 --- a/openpype/modules/sync_server/tray/models.py +++ b/openpype/modules/sync_server/tray/models.py @@ -11,6 +11,7 @@ from openpype.tools.utils.delegates import pretty_timestamp from openpype.lib import PypeLogger from openpype.api import get_local_site_id +from openpype.client import get_representation_by_id from . import lib @@ -440,7 +441,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): full text filtering. Allows pagination, most of heavy lifting is being done on DB side. - Single model matches to single collection. When project is changed, + Single model matches to single project. When project is changed, model is reset and refreshed. 
Args: @@ -919,11 +920,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): repre_id = self.data(index, Qt.UserRole) - representation = list(self.dbcon.find({"type": "representation", - "_id": repre_id})) + representation = get_representation_by_id(self.project, repre_id) if representation: self.sync_server.update_db(self.project, None, None, - representation.pop(), + representation, get_local_site_id(), priority=value) self.is_editing = False @@ -1357,11 +1357,10 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): file_id = self.data(index, Qt.UserRole) updated_file = None - # conversion from cursor to list - representations = list(self.dbcon.find({"type": "representation", - "_id": self._id})) + representation = get_representation_by_id(self.project, self._id) + if not representation: + return - representation = representations.pop() for repre_file in representation["files"]: if repre_file["_id"] == file_id: updated_file = repre_file diff --git a/openpype/modules/timers_manager/plugins/publish/start_timer.py b/openpype/modules/timers_manager/plugins/publish/start_timer.py new file mode 100644 index 0000000000..6408327ca1 --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/start_timer.py @@ -0,0 +1,39 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + +import pyblish.api + +from openpype.pipeline import legacy_io + + +class StartTimer(pyblish.api.ContextPlugin): + label = "Start Timer" + order = pyblish.api.IntegratorOrder + 1 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + project_name = legacy_io.active_project() + asset_name = legacy_io.Session.get("AVALON_ASSET") + task_name = legacy_io.Session.get("AVALON_TASK") + if not project_name or not asset_name or not task_name: + self.log.info(( + "Current context does not contain all" + " required information to start a timer." 
+ )) + return + timers_manager.start_timer_with_webserver( + project_name, asset_name, task_name, self.log + ) diff --git a/openpype/modules/timers_manager/plugins/publish/stop_timer.py b/openpype/modules/timers_manager/plugins/publish/stop_timer.py new file mode 100644 index 0000000000..a8674ff2ca --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/stop_timer.py @@ -0,0 +1,27 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + + +import pyblish.api + + +class StopTimer(pyblish.api.ContextPlugin): + label = "Stop Timer" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + timers_manager.stop_timer_with_webserver(self.log) diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 3453e4bc4c..93332ace4f 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -6,12 +6,15 @@ from openpype.client import get_asset_by_name from openpype.modules import OpenPypeModule from openpype_interfaces import ( ITrayService, - ILaunchHookPaths + ILaunchHookPaths, + IPluginPaths ) from openpype.lib.events import register_event_callback from .exceptions import InvalidContextError +TIMER_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + class ExampleTimersManagerConnector: """Timers manager can handle timers of multiple modules/addons. @@ -33,6 +36,7 @@ class ExampleTimersManagerConnector: } ``` """ + # Not needed at all def __init__(self, module): # Store timer manager module to be able call it's methods when needed @@ -72,7 +76,12 @@ class ExampleTimersManagerConnector: self._timers_manager_module.timer_stopped(self._module.id) -class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): +class TimersManager( + OpenPypeModule, + ITrayService, + ILaunchHookPaths, + IPluginPaths +): """ Handles about Timers. Should be able to start/stop all timers at once. @@ -177,11 +186,19 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): def get_launch_hook_paths(self): """Implementation of `ILaunchHookPaths`.""" + return os.path.join( - os.path.dirname(os.path.abspath(__file__)), + TIMER_MODULE_DIR, "launch_hooks" ) + def get_plugin_paths(self): + """Implementation of `IPluginPaths`.""" + + return { + "publish": [os.path.join(TIMER_MODULE_DIR, "plugins", "publish")] + } + @staticmethod def get_timer_data_for_context( project_name, asset_name, task_name, logger=None @@ -388,6 +405,7 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): logger (logging.Logger): Logger object. Using 'print' if not passed. """ + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: msg = "Couldn't find webserver url" @@ -415,6 +433,36 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): return requests.post(rest_api_url, json=data) + @staticmethod + def stop_timer_with_webserver(logger=None): + """Prepared method for calling stop timers on REST api. + + Args: + logger (logging.Logger): Logger used for logging messages. 
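The helper below wraps a plain REST POST to the tray webserver; an equivalent manual call, assuming the webserver is running (the fallback URL here is illustrative only):

```python
# Raw equivalent of 'stop_timer_with_webserver'; the route comes from the
# implementation below, the fallback URL is an illustrative assumption.
import os
import requests

webserver_url = os.environ.get(
    "OPENPYPE_WEBSERVER_URL", "http://localhost:8079"
)
response = requests.post(
    "{}/timers_manager/stop_timer".format(webserver_url)
)
print(response.status_code)
```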
+ """ + + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + if not webserver_url: + msg = "Couldn't find webserver url" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) + try: + import requests + except Exception: + msg = "Couldn't start timer ('requests' is not available)" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + return requests.post(rest_api_url) + def on_host_install(self, host, host_name, project_name): self.log.debug("Installing task changed callback") register_event_callback("taskChanged", self._on_host_task_change) diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index a8e55479b6..5f763cd249 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -18,8 +18,14 @@ from openpype.client import ( ) from openpype.modules import load_modules, ModulesManager from openpype.settings import get_project_settings -from openpype.lib import filter_pyblish_plugins + +from .publish.lib import filter_pyblish_plugins from .anatomy import Anatomy +from .template_data import get_template_data_with_names +from .workfile import ( + get_workfile_template_key, + get_custom_workfile_template_by_string_context, +) from . import ( legacy_io, register_loader_plugin_path, @@ -336,6 +342,7 @@ def get_current_project_asset(asset_name=None, asset_id=None, fields=None): return None return get_asset_by_name(project_name, asset_name, fields=fields) + def is_representation_from_latest(representation): """Return whether the representation is from latest version @@ -348,3 +355,93 @@ def is_representation_from_latest(representation): project_name = legacy_io.active_project() return version_is_latest(project_name, representation["parent"]) + + +def get_template_data_from_session(session=None, system_settings=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + system_settings (Union[Dict[str, Any], Any]): Prepared system settings. + Optional are auto received if not passed. + + Returns: + Dict[str, Any]: All available data from session. + """ + + if session is None: + session = legacy_io.Session + + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] + + return get_template_data_with_names( + project_name, asset_name, task_name, host_name, system_settings + ) + + +def get_workdir_from_session(session=None, template_key=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + template_key (str): Prepared template key from which workdir is + calculated. + + Returns: + str: Workdir path. 
+ """ + + if session is None: + session = legacy_io.Session + project_name = session["AVALON_PROJECT"] + host_name = session["AVALON_APP"] + anatomy = Anatomy(project_name) + template_data = get_template_data_from_session(session) + anatomy_filled = anatomy.format(template_data) + + if not template_key: + task_type = template_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, + host_name, + project_name=project_name + ) + path = anatomy_filled[template_key]["folder"] + if path: + path = os.path.normpath(path) + return path + + +def get_custom_workfile_template_from_session( + session=None, project_settings=None +): + """Filter and fill workfile template profiles by current context. + + Current context is defined by `legacy_io.Session`. That's why this + function should be used only inside host where context is set and stable. + + Args: + session (Union[None, Dict[str, str]]): Session from which are taken + data. + project_settings(Dict[str, Any]): Template profiles from settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.) + """ + + if session is None: + session = legacy_io.Session + + return get_custom_workfile_template_by_string_context( + session["AVALON_PROJECT"], + session["AVALON_ASSET"], + session["AVALON_TASK"], + session["AVALON_APP"], + project_settings=project_settings + ) diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 1beeb4267b..bd196ccfd1 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -7,6 +7,7 @@ from .creator_plugins import ( BaseCreator, Creator, AutoCreator, + HiddenCreator, discover_creator_plugins, discover_legacy_creator_plugins, @@ -35,6 +36,7 @@ __all__ = ( "BaseCreator", "Creator", "AutoCreator", + "HiddenCreator", "discover_creator_plugins", "discover_legacy_creator_plugins", diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 9b55c3b21e..eaaed39357 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -6,6 +6,7 @@ import inspect from uuid import uuid4 from contextlib import contextmanager +from openpype.client import get_assets from openpype.host import INewPublisher from openpype.pipeline import legacy_io from openpype.pipeline.mongodb import ( @@ -1082,15 +1083,10 @@ class CreateContext: for asset_name in task_names_by_asset_name.keys() if asset_name is not None ] - asset_docs = list(self.dbcon.find( - { - "type": "asset", - "name": {"$in": asset_names} - }, - { - "name": True, - "data.tasks": True - } + asset_docs = list(get_assets( + self.project_name, + asset_names=asset_names, + fields=["name", "data.tasks"] )) task_names_by_asset_name = {} diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 52c76db5ef..9a5d559774 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -1,3 +1,4 @@ +import os import copy from abc import ( @@ -7,10 +8,8 @@ from abc import ( ) import six -from openpype.lib import ( - get_subset_name_with_asset_doc, - set_plugin_attributes_from_settings, -) +from openpype.settings import get_system_settings, get_project_settings +from openpype.lib import get_subset_name_with_asset_doc from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -416,6 +415,12 @@ class Creator(BaseCreator): return self.pre_create_attr_defs 
+class HiddenCreator(BaseCreator): + @abstractmethod + def create(self, instance_data, source_data): + pass + + class AutoCreator(BaseCreator): """Creator which is automatically triggered without user interaction. @@ -432,8 +437,24 @@ def discover_creator_plugins(): def discover_legacy_creator_plugins(): + from openpype.lib import Logger + + log = Logger.get_logger("CreatorDiscover") + plugins = discover(LegacyCreator) - set_plugin_attributes_from_settings(plugins, LegacyCreator) + project_name = os.environ.get("AVALON_PROJECT") + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to creator {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index 46e0e3d663..2764b3cb95 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -5,6 +5,7 @@ Renamed classes and functions - 'create' -> 'legacy_create' """ +import os import logging import collections @@ -37,6 +38,48 @@ class LegacyCreator(object): self.data.update(data or {}) + @classmethod + def apply_settings(cls, project_settings, system_settings): + """Apply OpenPype settings to a plugin class.""" + + host_name = os.environ.get("AVALON_APP") + plugin_type = "create" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + def process(self): pass diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index a30a2188a4..8cba8d8217 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -1,6 +1,8 @@ +import os import logging -from openpype.lib import set_plugin_attributes_from_settings +from openpype.settings import get_system_settings, get_project_settings +from openpype.pipeline import legacy_io from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -37,6 +39,46 @@ class LoaderPlugin(list): def __init__(self, context): self.fname = self.filepath_from_context(context) + @classmethod + def apply_settings(cls, project_settings, system_settings): + host_name = os.environ.get("AVALON_APP") + plugin_type = "load" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + +
plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + @classmethod def get_representations(cls): return cls.representations @@ -110,9 +152,25 @@ class SubsetLoaderPlugin(LoaderPlugin): pass -def discover_loader_plugins(): +def discover_loader_plugins(project_name=None): + from openpype.lib import Logger + + log = Logger.get_logger("LoaderDiscover") plugins = discover(LoaderPlugin) - set_plugin_attributes_from_settings(plugins, LoaderPlugin) + if not project_name: + project_name = legacy_io.active_project() + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to loader {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins diff --git a/openpype/pipeline/publish/abstract_collect_render.py b/openpype/pipeline/publish/abstract_collect_render.py index 2e537227c3..ccb2415346 100644 --- a/openpype/pipeline/publish/abstract_collect_render.py +++ b/openpype/pipeline/publish/abstract_collect_render.py @@ -63,6 +63,8 @@ class RenderInstance(object): family = attr.ib(default="renderlayer") families = attr.ib(default=["renderlayer"]) # list of families + # True if it should be rendered on farm, i.e. local integration is skipped + farm = attr.ib(default=False) # format settings multipartExr = attr.ib(default=False) # flag for multipart exrs diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index 739b2c8806..d5494cd8a4 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -6,6 +6,10 @@ import xml.etree.ElementTree import six import pyblish.plugin +import pyblish.api + +from openpype.lib import Logger +from openpype.settings import get_project_settings, get_system_settings class DiscoverResult: @@ -180,3 +184,92 @@ def publish_plugins_discover(paths=None): result.plugins = plugins return result + + +def filter_pyblish_plugins(plugins): + """Pyblish plugin filter which applies OpenPype settings. + + Apply OpenPype settings on discovered plugins. On plugins with an + implemented class method 'def apply_settings(cls, project_settings, + system_settings)' the method is called. Default behavior looks up + settings by current host name and plugin name. + + Args: + plugins (List[pyblish.plugin.Plugin]): Discovered plugins on which + settings are applied. + """ + + log = Logger.get_logger("filter_pyblish_plugins") + + # TODO: Don't use host from 'pyblish.api' but from defined host by us.
+ # - kept because on farm the used host is probably 'shell' which probably + # affects how settings are applied there + host = pyblish.api.current_host() + project_name = os.environ.get("AVALON_PROJECT") + + project_setting = get_project_settings(project_name) + system_settings = get_system_settings() + + # iterate over plugins + for plugin in plugins[:]: + if hasattr(plugin, "apply_settings"): + try: + # Use classmethod 'apply_settings' + # - can be used to target settings from custom settings place + # - skip default behavior when successful + plugin.apply_settings(project_setting, system_settings) + continue + + except Exception: + log.warning( + ( + "Failed to apply settings on plugin {}" + ).format(plugin.__name__), + exc_info=True + ) + + try: + config_data = ( + project_setting + [host] + ["publish"] + [plugin.__name__] + ) + except KeyError: + # host determined from path + file = os.path.normpath(inspect.getsourcefile(plugin)) + file = os.path.normpath(file) + + split_path = file.split(os.path.sep) + if len(split_path) < 4: + log.warning( + 'plugin path too short to extract host {}'.format(file) + ) + continue + + host_from_file = split_path[-4] + plugin_kind = split_path[-2] + + # TODO: change after all plugins are moved one level up + if host_from_file == "openpype": + host_from_file = "global" + + try: + config_data = ( + project_setting + [host_from_file] + [plugin_kind] + [plugin.__name__] + ) + except KeyError: + continue + + for option, value in config_data.items(): + if option == "enabled" and value is False: + log.info('removing plugin {}'.format(plugin.__name__)) + plugins.remove(plugin) + else: + log.info('setting {}:{} on plugin {}'.format( + option, value, plugin.__name__)) + + setattr(plugin, option, value) diff --git a/openpype/pipeline/template_data.py b/openpype/pipeline/template_data.py new file mode 100644 index 0000000000..824a25127c --- /dev/null +++ b/openpype/pipeline/template_data.py @@ -0,0 +1,228 @@ +from openpype.client import get_project, get_asset_by_name +from openpype.settings import get_system_settings +from openpype.lib.local_settings import get_openpype_username + + +def get_general_template_data(system_settings=None): + """General template data based on system settings or machine. + + Output contains formatting keys: + - 'studio[name]' - Studio name filled from system settings + - 'studio[code]' - Studio code filled from system settings + - 'user' - User's name using 'get_openpype_username' + + Args: + system_settings (Dict[str, Any]): System settings. + """ + + if not system_settings: + system_settings = get_system_settings() + studio_name = system_settings["general"]["studio_name"] + studio_code = system_settings["general"]["studio_code"] + return { + "studio": { + "name": studio_name, + "code": studio_code + }, + "user": get_openpype_username() + } + + +def get_project_template_data(project_doc): + """Extract data from project document that are used in templates. + + Project document must have 'name' and (at this moment) optional + key 'data.code'. + + Output contains formatting keys: + - 'project[name]' - Project name + - 'project[code]' - Project code + + Args: + project_doc (Dict[str, Any]): Queried project document. + + Returns: + Dict[str, Dict[str, str]]: Template data based on project document.
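Because this helper is a pure function over a trimmed document, its behaviour can be shown directly (the document values are placeholders):

```python
# Illustrative input/output; the document is trimmed to the fields the
# function reads ('name' and the optional 'data.code').
project_doc = {"name": "my_project", "data": {"code": "mpr"}}

data = get_project_template_data(project_doc)
assert data == {"project": {"name": "my_project", "code": "mpr"}}
```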
+ """ + + project_code = project_doc.get("data", {}).get("code") + return { + "project": { + "name": project_doc["name"], + "code": project_code + } + } + + +def get_asset_template_data(asset_doc, project_name): + """Extract data from asset document that are used in templates. + + Output dictionary contains keys: + - 'asset' - asset name + - 'hierarchy' - parent asset names joined with '/' + - 'parent' - direct parent name, project name used if is under project + + Required document fields: + Asset: 'name', 'data.parents' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + project_name (str): Is used for 'parent' key if asset doc does not have + any. + + Returns: + Dict[str, str]: Data that are based on asset document and can be used + in templates. + """ + + asset_parents = asset_doc["data"]["parents"] + hierarchy = "/".join(asset_parents) + if asset_parents: + parent_name = asset_parents[-1] + else: + parent_name = project_name + + return { + "asset": asset_doc["name"], + "hierarchy": hierarchy, + "parent": parent_name + } + + +def get_task_type(asset_doc, task_name): + """Get task type based on asset document and task name. + + Required document fields: + Asset: 'data.tasks' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + task_name (str): Task name which is under asset. + + Returns: + str: Task type name. + None: Task was not found on asset document. + """ + + asset_tasks_info = asset_doc["data"]["tasks"] + return asset_tasks_info.get(task_name, {}).get("type") + + +def get_task_template_data(project_doc, asset_doc, task_name): + """"Extract task specific data from project and asset documents. + + Required document fields: + Project: 'config.tasks' + Asset: 'data.tasks'. + + Args: + project_doc (Dict[str, Any]): Queried project document. + asset_doc (Dict[str, Any]): Queried asset document. + tas_name (str): Name of task for which data should be returned. + + Returns: + Dict[str, Dict[str, str]]: Template data + """ + + project_task_types = project_doc["config"]["tasks"] + task_type = get_task_type(asset_doc, task_name) + task_code = project_task_types.get(task_type, {}).get("short_name") + + return { + "task": { + "name": task_name, + "type": task_type, + "short": task_code, + } + } + + +def get_template_data( + project_doc, + asset_doc=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered documents and info. + + This function does not "auto fill" any values except system settings and + it's on purpose. + + Universal function to receive template data from passed arguments. Only + required argument is project document all other arguments are optional + and their values won't be added to template data if are not passed. + + Required document fields: + Project: 'name', 'data.code', 'config.tasks' + Asset: 'name', 'data.parents', 'data.tasks' + + Args: + project_doc (Dict[str, Any]): Mongo document of project from MongoDB. + asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]): Used to fill '{app}' key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed (may be slower). + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. 
+ """ + + template_data = get_general_template_data(system_settings) + template_data.update(get_project_template_data(project_doc)) + if asset_doc: + template_data.update(get_asset_template_data( + asset_doc, project_doc["name"] + )) + if task_name: + template_data.update(get_task_template_data( + project_doc, asset_doc, task_name + )) + + if host_name: + template_data["app"] = host_name + + return template_data + + +def get_template_data_with_names( + project_name, + asset_name=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered entity names and info. + + Copy of 'get_template_data' but based on entity names instead of documents. + Only difference is that documents are queried. + + Args: + project_name (str): Project name for which template data are + calculated. + asset_name (Union[str, None]): Asset name for which template data are + calculated. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]):Used to fill '{app}' key. + because workdir template may contain `{app}` key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed. + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. + """ + + project_doc = get_project( + project_name, fields=["name", "data.code", "config.tasks"] + ) + asset_doc = None + if asset_name: + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["name", "data.parents", "data.tasks"] + ) + return get_template_data( + project_doc, asset_doc, task_name, host_name, system_settings + ) diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index ec97b36954..eb383b16d9 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,6 +2,7 @@ import os import copy import logging +from openpype.client import get_project from . 
 from .plugin_discover import (
     discover,
@@ -85,13 +86,8 @@ class TemplateResolver(ThumbnailResolver):
             self.log.debug("Thumbnail entity does not have set template")
             return

-        project = self.dbcon.find_one(
-            {"type": "project"},
-            {
-                "name": True,
-                "data.code": True
-            }
-        )
+        project_name = self.dbcon.active_project()
+        project = get_project(project_name, fields=["name", "data.code"])

         template_data = copy.deepcopy(
             thumbnail_entity["data"].get("template_data") or {}
diff --git a/openpype/pipeline/workfile/__init__.py b/openpype/pipeline/workfile/__init__.py
new file mode 100644
index 0000000000..0aad29b6f9
--- /dev/null
+++ b/openpype/pipeline/workfile/__init__.py
@@ -0,0 +1,30 @@
+from .path_resolving import (
+    get_workfile_template_key_from_context,
+    get_workfile_template_key,
+    get_workdir_with_workdir_data,
+    get_workdir,
+
+    get_last_workfile_with_version,
+    get_last_workfile,
+
+    get_custom_workfile_template,
+    get_custom_workfile_template_by_string_context,
+)
+
+from .build_workfile import BuildWorkfile
+
+
+__all__ = (
+    "get_workfile_template_key_from_context",
+    "get_workfile_template_key",
+    "get_workdir_with_workdir_data",
+    "get_workdir",
+
+    "get_last_workfile_with_version",
+    "get_last_workfile",
+
+    "get_custom_workfile_template",
+    "get_custom_workfile_template_by_string_context",
+
+    "BuildWorkfile",
+)
diff --git a/openpype/pipeline/workfile/build_workfile.py b/openpype/pipeline/workfile/build_workfile.py
new file mode 100644
index 0000000000..bb6fcb4189
--- /dev/null
+++ b/openpype/pipeline/workfile/build_workfile.py
@@ -0,0 +1,693 @@
+import os
+import re
+import collections
+import json
+
+from openpype.client import (
+    get_asset_by_name,
+    get_subsets,
+    get_last_versions,
+    get_representations,
+)
+from openpype.settings import get_project_settings
+from openpype.lib import (
+    get_linked_assets,
+    filter_profiles,
+    Logger,
+)
+from openpype.pipeline import legacy_io
+from openpype.pipeline.load import (
+    discover_loader_plugins,
+    IncompatibleLoaderError,
+    load_container,
+)
+
+
+class BuildWorkfile:
+    """Wrapper for the workfile build process.
+
+    Load representations for current context by build presets. Build presets
+    are host related, since each host has its own loaders.
+    """
+
+    _log = None
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(self.__class__.__name__)
+        return self._log
+
+    @staticmethod
+    def map_subsets_by_family(subsets):
+        subsets_by_family = collections.defaultdict(list)
+        for subset in subsets:
+            family = subset["data"].get("family")
+            if not family:
+                families = subset["data"].get("families")
+                if not families:
+                    continue
+                family = families[0]
+
+            subsets_by_family[family].append(subset)
+        return subsets_by_family
+
+    def process(self):
+        """Main method of this wrapper.
+
+        Building of workfile is triggered and it is possible to implement
+        post processing of loaded containers if necessary.
+
+        Returns:
+            List[Dict[str, Any]]: Loaded containers during build.
+        """
+
+        return self.build_workfile()
+
+    def build_workfile(self):
+        """Prepare and load containers into workfile.
+
+        Loads latest versions of current and linked assets to workfile by
+        logic stored in Workfile profiles from presets. Profiles are set by
+        host, filtered by current task name and used by families.
+
+        Each family can specify representation names and loaders for
+        representations; the first successfully loaded representation is
+        returned as container.
+
+        At the end you'll get a list of loaded containers per each asset.
+
+        loaded_containers [{
+            "asset_entity": <AssetEntity1>,
+            "containers": [<Container1>, <Container2>, ...]
+        }, {
+            "asset_entity": <AssetEntity2>,
+            "containers": [<Container3>, ...]
+        }, {
+            ...
+        }]
+
+        Returns:
+            List[Dict[str, Any]]: Loaded containers during build.
+        """
+
+        loaded_containers = []
+
+        # Get current asset name and entity
+        project_name = legacy_io.active_project()
+        current_asset_name = legacy_io.Session["AVALON_ASSET"]
+        current_asset_entity = get_asset_by_name(
+            project_name, current_asset_name
+        )
+        # Skip if asset was not found
+        if not current_asset_entity:
+            print("Asset entity with name `{}` was not found".format(
+                current_asset_name
+            ))
+            return loaded_containers
+
+        # Prepare available loaders
+        loaders_by_name = {}
+        for loader in discover_loader_plugins():
+            loader_name = loader.__name__
+            if loader_name in loaders_by_name:
+                raise KeyError(
+                    "Duplicated loader name {0}!".format(loader_name)
+                )
+            loaders_by_name[loader_name] = loader
+
+        # Skip if there are no loaders
+        if not loaders_by_name:
+            self.log.warning("There are no registered loaders.")
+            return loaded_containers
+
+        # Get current task name
+        current_task_name = legacy_io.Session["AVALON_TASK"]
+
+        # Load workfile presets for task
+        self.build_presets = self.get_build_presets(
+            current_task_name, current_asset_entity
+        )
+
+        # Skip if there are no presets for task
+        if not self.build_presets:
+            self.log.warning(
+                "Current task `{}` does not have any loading preset.".format(
+                    current_task_name
+                )
+            )
+            return loaded_containers
+
+        # Get presets for loading current asset
+        current_context_profiles = self.build_presets.get("current_context")
+        # Get presets for loading linked assets
+        link_context_profiles = self.build_presets.get("linked_assets")
+        # Skip if both are missing
+        if not current_context_profiles and not link_context_profiles:
+            self.log.warning(
+                "Current task `{}` has empty loading preset.".format(
+                    current_task_name
+                )
+            )
+            return loaded_containers
+
+        elif not current_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any loading"
+                " preset for its context."
+            ).format(current_task_name))
+
+        elif not link_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any"
+                " loading preset for its linked assets."
+            ).format(current_task_name))
+
+        # Prepare assets to process by workfile presets
+        assets = []
+        current_asset_id = None
+        if current_context_profiles:
+            # Add current asset entity if preset has current context set
+            assets.append(current_asset_entity)
+            current_asset_id = current_asset_entity["_id"]
+
+        if link_context_profiles:
+            # Find and append linked assets if preset has set linked mapping
+            link_assets = get_linked_assets(current_asset_entity)
+            if link_assets:
+                assets.extend(link_assets)
+
+        # Skip if there are no assets. This can happen if only linked mapping
+        # is set and there are no links for this asset.
+        if not assets:
+            self.log.warning(
+                "Asset does not have linked assets. Nothing to process."
+            )
+            return loaded_containers
+
+        # Prepare entities from database for assets
+        prepared_entities = self._collect_last_version_repres(assets)
+
+        # Load containers by prepared entities and presets
+        # - Current asset containers
+        if current_asset_id and current_asset_id in prepared_entities:
+            current_context_data = prepared_entities.pop(current_asset_id)
+            loaded_data = self.load_containers_by_asset_data(
+                current_context_data, current_context_profiles, loaders_by_name
+            )
+            if loaded_data:
+                loaded_containers.append(loaded_data)
+
+        # - Linked assets containers
+        for linked_asset_data in prepared_entities.values():
+            loaded_data = self.load_containers_by_asset_data(
+                linked_asset_data, link_context_profiles, loaders_by_name
+            )
+            if loaded_data:
+                loaded_containers.append(loaded_data)
+
+        # Return list of loaded containers
+        return loaded_containers
+
+    def get_build_presets(self, task_name, asset_doc):
+        """Return presets to build workfile for task name.
+
+        Presets are loaded for the current project set in
+        io.Session["AVALON_PROJECT"], filtered by registered host
+        and entered task name.
+
+        Args:
+            task_name (str): Task name used for filtering build presets.
+            asset_doc (Dict[str, Any]): Asset document from MongoDB used to
+                resolve the task type.
+
+        Returns:
+            Dict[str, Any]: preset per entered task name
+        """
+
+        host_name = os.environ["AVALON_APP"]
+        project_settings = get_project_settings(
+            legacy_io.Session["AVALON_PROJECT"]
+        )
+
+        host_settings = project_settings.get(host_name) or {}
+        # Get presets for host
+        wb_settings = host_settings.get("workfile_builder")
+        if not wb_settings:
+            # backward compatibility
+            wb_settings = host_settings.get("workfile_build") or {}
+
+        builder_profiles = wb_settings.get("profiles")
+        if not builder_profiles:
+            return None
+
+        task_type = (
+            asset_doc
+            .get("data", {})
+            .get("tasks", {})
+            .get(task_name, {})
+            .get("type")
+        )
+        filter_data = {
+            "task_types": task_type,
+            "tasks": task_name
+        }
+        return filter_profiles(builder_profiles, filter_data)
+
+    def _filter_build_profiles(self, build_profiles, loaders_by_name):
+        """Filter build profiles by loaders and prepare process data.
+
+        Valid profile must have "loaders", "families" and "repre_names" keys
+        with valid values.
+        - "loaders" expects list of strings representing possible loaders.
+        - "families" expects list of strings for filtering
+          by main subset family.
+        - "repre_names" expects list of strings for filtering by
+          representation name.
+
+        Lowercased "families" and "repre_names" are prepared for each profile
+        with all required keys.
+
+        Args:
+            build_profiles (Dict[str, Any]): Profiles for building workfile.
+            loaders_by_name (Dict[str, LoaderPlugin]): Available loaders
+                per name.
+
+        Returns:
+            List[Dict[str, Any]]: Filtered and prepared profiles.
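+
+        Example:
+            A hypothetical valid profile (loader, family and representation
+            names are illustrative only):
+
+            {
+                "loaders": ["ReferenceLoader"],
+                "families": ["model"],
+                "repre_names": ["ma", "abc"]
+            }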
+ """ + + valid_profiles = [] + for profile in build_profiles: + # Check loaders + profile_loaders = profile.get("loaders") + if not profile_loaders: + self.log.warning(( + "Build profile has missing loaders configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check if any loader is available + loaders_match = False + for loader_name in profile_loaders: + if loader_name in loaders_by_name: + loaders_match = True + break + + if not loaders_match: + self.log.warning(( + "All loaders from Build profile are not available: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check families + profile_families = profile.get("families") + if not profile_families: + self.log.warning(( + "Build profile is missing families configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check representation names + profile_repre_names = profile.get("repre_names") + if not profile_repre_names: + self.log.warning(( + "Build profile is missing" + " representation names filtering: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Prepare lowered families and representation names + profile["families_lowered"] = [ + fam.lower() for fam in profile_families + ] + profile["repre_names_lowered"] = [ + name.lower() for name in profile_repre_names + ] + + valid_profiles.append(profile) + + return valid_profiles + + def _prepare_profile_for_subsets(self, subsets, profiles): + """Select profile for each subset by it's data. + + Profiles are filtered for each subset individually. + Profile is filtered by subset's family, optionally by name regex and + representation names set in profile. + It is possible to not find matching profile for subset, in that case + subset is skipped and it is possible that none of subsets have + matching profile. + + Args: + subsets (List[Dict[str, Any]]): Subset documents. + profiles (List[Dict[str, Any]]): Build profiles. + + Returns: + Dict[str, Any]: Profile by subset's id. + """ + + # Prepare subsets + subsets_by_family = self.map_subsets_by_family(subsets) + + profiles_per_subset_id = {} + for family, subsets in subsets_by_family.items(): + family_low = family.lower() + for profile in profiles: + # Skip profile if does not contain family + if family_low not in profile["families_lowered"]: + continue + + # Precompile name filters as regexes + profile_regexes = profile.get("subset_name_filters") + if profile_regexes: + _profile_regexes = [] + for regex in profile_regexes: + _profile_regexes.append(re.compile(regex)) + profile_regexes = _profile_regexes + + # TODO prepare regex compilation + for subset in subsets: + # Verify regex filtering (optional) + if profile_regexes: + valid = False + for pattern in profile_regexes: + if re.match(pattern, subset["name"]): + valid = True + break + + if not valid: + continue + + profiles_per_subset_id[subset["_id"]] = profile + + # break profiles loop on finding the first matching profile + break + return profiles_per_subset_id + + def load_containers_by_asset_data( + self, asset_entity_data, build_profiles, loaders_by_name + ): + """Load containers for entered asset entity by Build profiles. + + Args: + asset_entity_data (Dict[str, Any]): Prepared data with subsets, + last versions and representations for specific asset. + build_profiles (Dict[str, Any]): Build profiles. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + Dict[str, Any]: Output contains asset document + and loaded containers. 
+ """ + + # Make sure all data are not empty + if not asset_entity_data or not build_profiles or not loaders_by_name: + return + + asset_entity = asset_entity_data["asset_entity"] + + valid_profiles = self._filter_build_profiles( + build_profiles, loaders_by_name + ) + if not valid_profiles: + self.log.warning( + "There are not valid Workfile profiles. Skipping process." + ) + return + + self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) + + subsets_by_id = {} + version_by_subset_id = {} + repres_by_version_id = {} + for subset_id, in_data in asset_entity_data["subsets"].items(): + subset_entity = in_data["subset_entity"] + subsets_by_id[subset_entity["_id"]] = subset_entity + + version_data = in_data["version"] + version_entity = version_data["version_entity"] + version_by_subset_id[subset_id] = version_entity + repres_by_version_id[version_entity["_id"]] = ( + version_data["repres"] + ) + + if not subsets_by_id: + self.log.warning("There are not subsets for asset {0}".format( + asset_entity["name"] + )) + return + + profiles_per_subset_id = self._prepare_profile_for_subsets( + subsets_by_id.values(), valid_profiles + ) + if not profiles_per_subset_id: + self.log.warning("There are not valid subsets.") + return + + valid_repres_by_subset_id = collections.defaultdict(list) + for subset_id, profile in profiles_per_subset_id.items(): + profile_repre_names = profile["repre_names_lowered"] + + version_entity = version_by_subset_id[subset_id] + version_id = version_entity["_id"] + repres = repres_by_version_id[version_id] + for repre in repres: + repre_name_low = repre["name"].lower() + if repre_name_low in profile_repre_names: + valid_repres_by_subset_id[subset_id].append(repre) + + # DEBUG message + msg = "Valid representations for Asset: `{}`".format( + asset_entity["name"] + ) + for subset_id, repres in valid_repres_by_subset_id.items(): + subset = subsets_by_id[subset_id] + msg += "\n# Subset Name/ID: `{}`/{}".format( + subset["name"], subset_id + ) + for repre in repres: + msg += "\n## Repre name: `{}`".format(repre["name"]) + + self.log.debug(msg) + + containers = self._load_containers( + valid_repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ) + + return { + "asset_entity": asset_entity, + "containers": containers + } + + def _load_containers( + self, repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ): + """Real load by collected data happens here. + + Loading of representations per subset happens here. Each subset can + loads one representation. Loading is tried in specific order. + Representations are tried to load by names defined in configuration. + If subset has representation matching representation name each loader + is tried to load it until any is successful. If none of them was + successful then next representation name is tried. + Subset process loop ends when any representation is loaded or + all matching representations were already tried. + + Args: + repres_by_subset_id (Dict[str, Dict[str, Any]]): Available + representations mapped by their parent (subset) id. + subsets_by_id (Dict[str, Dict[str, Any]]): Subset documents + mapped by their id. + profiles_per_subset_id (Dict[str, Dict[str, Any]]): Build profiles + mapped by subset id. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + List[Dict[str, Any]]: Objects of loaded containers. + """ + + loaded_containers = [] + + # Get subset id order from build presets. 
+        build_presets = self.build_presets.get("current_context", [])
+        build_presets += self.build_presets.get("linked_assets", [])
+        subset_ids_ordered = []
+        for preset in build_presets:
+            for preset_family in preset["families"]:
+                for subset_id, subset in subsets_by_id.items():
+                    if preset_family not in subset["data"].get("families", []):
+                        continue
+
+                    subset_ids_ordered.append(subset_id)
+
+        # Order representations from subsets.
+        self.log.debug("repres_by_subset_id {}".format(repres_by_subset_id))
+        representations_ordered = []
+        representations = []
+        for ordered_id in subset_ids_ordered:
+            for subset_id, repres in repres_by_subset_id.items():
+                if repres in representations:
+                    continue
+
+                if ordered_id == subset_id:
+                    representations_ordered.append((subset_id, repres))
+                    representations.append(repres)
+
+        self.log.debug("representations {}".format(representations))
+
+        # Load ordered representations.
+        for subset_id, repres in representations_ordered:
+            subset_name = subsets_by_id[subset_id]["name"]
+
+            profile = profiles_per_subset_id[subset_id]
+            loaders_last_idx = len(profile["loaders"]) - 1
+            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
+
+            repre_by_low_name = {
+                repre["name"].lower(): repre for repre in repres
+            }
+
+            is_loaded = False
+            for repre_name_idx, profile_repre_name in enumerate(
+                profile["repre_names_lowered"]
+            ):
+                # Break iteration if representation was already loaded
+                if is_loaded:
+                    break
+
+                repre = repre_by_low_name.get(profile_repre_name)
+                if not repre:
+                    continue
+
+                for loader_idx, loader_name in enumerate(profile["loaders"]):
+                    if is_loaded:
+                        break
+
+                    loader = loaders_by_name.get(loader_name)
+                    if not loader:
+                        continue
+                    try:
+                        container = load_container(
+                            loader,
+                            repre["_id"],
+                            name=subset_name
+                        )
+                        loaded_containers.append(container)
+                        is_loaded = True
+
+                    except Exception as exc:
+                        if isinstance(exc, IncompatibleLoaderError):
+                            self.log.info((
+                                "Loader `{}` is not compatible with"
+                                " representation `{}`"
+                            ).format(loader_name, repre["name"]))
+
+                        else:
+                            self.log.error(
+                                "Unexpected error happened during loading",
+                                exc_info=True
+                            )
+
+                        msg = "Loading failed."
+                        if loader_idx < loaders_last_idx:
+                            msg += " Trying next loader."
+                        elif repre_name_idx < repre_names_last_idx:
+                            msg += " Trying next representation."
+                        else:
+                            msg += (
+                                " Loading of subset `{}` was not successful."
+                            ).format(subset_name)
+                        self.log.info(msg)
+
+        return loaded_containers
+
+    def _collect_last_version_repres(self, asset_docs):
+        """Collect subsets, versions and representations for asset_entities.
+
+        Args:
+            asset_docs (List[Dict[str, Any]]): Asset entities for which
+                we want to find data.
+
+        Returns:
+            Dict[str, Any]: collected entities
+
+        Example output:
+        ```
+        {
+            {Asset ID}: {
+                "asset_entity": <AssetEntity>,
+                "subsets": {
+                    {Subset ID}: {
+                        "subset_entity": <SubsetEntity>,
+                        "version": {
+                            "version_entity": <VersionEntity>,
+                            "repres": [
+                                <RepreEntity1>, <RepreEntity2>, ...
+                            ]
+                        }
+                    },
+                    ...
+                }
+            },
+            ...
+ } + output[asset_id]["subsets"][subset_id]["version"]["repres"] + ``` + """ + + output = {} + if not asset_docs: + return output + + asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs} + + project_name = legacy_io.active_project() + subsets = list(get_subsets( + project_name, asset_ids=asset_docs_by_ids.keys() + )) + subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} + + last_version_by_subset_id = get_last_versions( + project_name, subset_entity_by_ids.keys() + ) + last_version_docs_by_id = { + version["_id"]: version + for version in last_version_by_subset_id.values() + } + repre_docs = get_representations( + project_name, version_ids=last_version_docs_by_id.keys() + ) + + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = last_version_docs_by_id[version_id] + + subset_id = version_doc["parent"] + subset_doc = subset_entity_by_ids[subset_id] + + asset_id = subset_doc["parent"] + asset_doc = asset_docs_by_ids[asset_id] + + if asset_id not in output: + output[asset_id] = { + "asset_entity": asset_doc, + "subsets": {} + } + + if subset_id not in output[asset_id]["subsets"]: + output[asset_id]["subsets"][subset_id] = { + "subset_entity": subset_doc, + "version": { + "version_entity": version_doc, + "repres": [] + } + } + + output[asset_id]["subsets"][subset_id]["version"]["repres"].append( + repre_doc + ) + + return output diff --git a/openpype/pipeline/workfile/path_resolving.py b/openpype/pipeline/workfile/path_resolving.py new file mode 100644 index 0000000000..ed1d1d793e --- /dev/null +++ b/openpype/pipeline/workfile/path_resolving.py @@ -0,0 +1,464 @@ +import os +import re +import copy +import platform + +from openpype.client import get_project, get_asset_by_name +from openpype.settings import get_project_settings +from openpype.lib import ( + filter_profiles, + Logger, + StringTemplate, +) +from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data + + +def get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings=None +): + """Helper function to get template key for workfile template. + + Do the same as `get_workfile_template_key` but returns value for "session + context". + + Args: + asset_name(str): Name of asset document. + task_name(str): Task name for which is template key retrieved. + Must be available on asset document under `data.tasks`. + host_name(str): Name of host implementation for which is workfile + used. + project_name(str): Project name where asset and task is. + project_settings(Dict[str, Any]): Project settings for passed + 'project_name'. Not required at all but makes function faster. + """ + + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.tasks"] + ) + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + task_info = asset_tasks.get(task_name) or {} + task_type = task_info.get("type") + + return get_workfile_template_key( + task_type, host_name, project_name, project_settings + ) + + +def get_workfile_template_key( + task_type, host_name, project_name, project_settings=None +): + """Workfile template key which should be used to get workfile template. + + Function is using profiles from project settings to return right template + for passet task type and host name. + + Args: + task_type(str): Name of task type. + host_name(str): Name of host implementation (e.g. "maya", "nuke", ...) + project_name(str): Name of project in which context should look for + settings. 
+        project_settings(Dict[str, Any]): Prepared project settings for
+            project name. Optional, to make processing faster.
+    """
+
+    default = "work"
+    if not task_type or not host_name:
+        return default
+
+    if not project_settings:
+        project_settings = get_project_settings(project_name)
+
+    try:
+        profiles = (
+            project_settings
+            ["global"]
+            ["tools"]
+            ["Workfiles"]
+            ["workfile_template_profiles"]
+        )
+    except Exception:
+        profiles = []
+
+    if not profiles:
+        return default
+
+    profile_filter = {
+        "task_types": task_type,
+        "hosts": host_name
+    }
+    profile = filter_profiles(profiles, profile_filter)
+    if profile:
+        return profile["workfile_template"] or default
+    return default
+
+
+def get_workdir_with_workdir_data(
+    workdir_data,
+    project_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill workdir path from entered data and project's anatomy.
+
+    It is possible to pass only the project's name instead of the project's
+    anatomy, but one of them **must** be entered. It is preferred to enter
+    anatomy if it is available, as initialization of a new Anatomy object
+    may be time consuming.
+
+    Args:
+        workdir_data (Dict[str, Any]): Data to fill workdir template.
+        project_name (str): Project's name.
+        anatomy (Anatomy): Anatomy object for specific project. Faster
+            processing if is passed.
+        template_key (str): Key of work templates in anatomy templates. If not
+            passed `get_workfile_template_key_from_context` is used to get it.
+        project_settings(Dict[str, Any]): Prepared project settings for
+            project name. Optional, to make processing faster. It is used
+            only if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
+    """
+
+    if not anatomy:
+        anatomy = Anatomy(project_name)
+
+    if not template_key:
+        template_key = get_workfile_template_key(
+            workdir_data["task"]["type"],
+            workdir_data["app"],
+            workdir_data["project"]["name"],
+            project_settings
+        )
+
+    anatomy_filled = anatomy.format(workdir_data)
+    # Output is TemplateResult object which contain useful data
+    output = anatomy_filled[template_key]["folder"]
+    if output:
+        return output.normalized()
+    return output
+
+
+def get_workdir(
+    project_doc,
+    asset_doc,
+    task_name,
+    host_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill workdir path from entered data and project's anatomy.
+
+    Args:
+        project_doc (Dict[str, Any]): Mongo document of project from MongoDB.
+        asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB.
+        task_name (str): Task name for which are workdir data prepared.
+        host_name (str): Host for which the workdir is resolved. This is
+            required because workdir template may contain `{app}` key. In
+            `Session` it is stored under the `AVALON_APP` key.
+        anatomy (Anatomy): Optional argument. Anatomy object is created using
+            project name from `project_doc`. It is preferred to pass this
+            argument as initialization of a new Anatomy object may be time
+            consuming.
+        template_key (str): Key of work templates in anatomy templates.
+            Default value is defined in `get_workdir_with_workdir_data`.
+        project_settings(Dict[str, Any]): Prepared project settings for
+            project name. Optional, to make processing faster. It is used
+            only if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
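+
+    Example:
+        A minimal sketch; the documents would normally be queried via
+        'openpype.client' and all names here are hypothetical:
+
+        project_doc = get_project("demo_project")
+        asset_doc = get_asset_by_name("demo_project", "sh010")
+        workdir = get_workdir(project_doc, asset_doc, "modeling", "maya")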
+ """ + + if not anatomy: + anatomy = Anatomy(project_doc["name"]) + + workdir_data = get_template_data( + project_doc, asset_doc, task_name, host_name + ) + # Output is TemplateResult object which contain useful data + return get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + template_key, + project_settings + ) + + +def get_last_workfile_with_version( + workdir, file_template, fill_data, extensions +): + """Return last workfile version. + + Usign workfile template and it's filling data find most possible last + version of workfile which was created for the context. + + Functionality is fully based on knowing which keys are optional or what + values are expected as value. + + The last modified file is used if more files can be considered as + last workfile. + + Args: + workdir (str): Path to dir where workfiles are stored. + file_template (str): Template of file name. + fill_data (Dict[str, Any]): Data for filling template. + extensions (Iterable[str]): All allowed file extensions of workfile. + + Returns: + Tuple[Union[str, None], Union[int, None]]: Last workfile with version + if there is any workfile otherwise None for both. + """ + + if not os.path.exists(workdir): + return None, None + + dotted_extensions = set() + for ext in extensions: + if not ext.startswith("."): + ext = ".{}".format(ext) + dotted_extensions.add(ext) + + # Fast match on extension + filenames = [ + filename + for filename in os.listdir(workdir) + if os.path.splitext(filename)[-1] in dotted_extensions + ] + + # Build template without optionals, version to digits only regex + # and comment to any definable value. + # Escape extensions dot for regex + regex_exts = [ + "\\" + ext + for ext in dotted_extensions + ] + ext_expression = "(?:" + "|".join(regex_exts) + ")" + + # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end + file_template = re.sub(r"\.?{ext}", ext_expression, file_template) + # Replace optional keys with optional content regex + file_template = re.sub(r"<.*?>", r".*?", file_template) + # Replace `{version}` with group regex + file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) + file_template = re.sub(r"{comment.*?}", r".+?", file_template) + file_template = StringTemplate.format_strict_template( + file_template, fill_data + ) + + # Match with ignore case on Windows due to the Windows + # OS not being case-sensitive. This avoids later running + # into the error that the file did exist if it existed + # with a different upper/lower-case. 
+    kwargs = {}
+    if platform.system().lower() == "windows":
+        kwargs["flags"] = re.IGNORECASE
+
+    # Get highest version among existing matching files
+    version = None
+    output_filenames = []
+    for filename in sorted(filenames):
+        match = re.match(file_template, filename, **kwargs)
+        if not match:
+            continue
+
+        file_version = int(match.group(1))
+        if version is None or file_version > version:
+            output_filenames[:] = []
+            version = file_version
+
+        if file_version == version:
+            output_filenames.append(filename)
+
+    output_filename = None
+    if output_filenames:
+        if len(output_filenames) == 1:
+            output_filename = output_filenames[0]
+        else:
+            last_time = None
+            for _output_filename in output_filenames:
+                full_path = os.path.join(workdir, _output_filename)
+                mod_time = os.path.getmtime(full_path)
+                if last_time is None or last_time < mod_time:
+                    output_filename = _output_filename
+                    last_time = mod_time
+
+    return output_filename, version
+
+
+def get_last_workfile(
+    workdir, file_template, fill_data, extensions, full_path=False
+):
+    """Return last workfile filename.
+
+    Returns a file with version 1 if there is no workfile yet.
+
+    Args:
+        workdir(str): Path to dir where workfiles are stored.
+        file_template(str): Template of file name.
+        fill_data(Dict[str, Any]): Data for filling template.
+        extensions(Iterable[str]): All allowed file extensions of workfile.
+        full_path(bool): Full path to file is returned if set to True.
+
+    Returns:
+        str: Last or first workfile as filename or full path to filename.
+    """
+
+    filename, version = get_last_workfile_with_version(
+        workdir, file_template, fill_data, extensions
+    )
+    if filename is None:
+        data = copy.deepcopy(fill_data)
+        data["version"] = 1
+        data.pop("comment", None)
+        if not data.get("ext"):
+            data["ext"] = extensions[0]
+        data["ext"] = data["ext"].replace('.', '')
+        filename = StringTemplate.format_strict_template(file_template, data)
+
+    if full_path:
+        return os.path.normpath(os.path.join(workdir, filename))
+
+    return filename
+
+
+def get_custom_workfile_template(
+    project_doc,
+    asset_doc,
+    task_name,
+    host_name,
+    anatomy=None,
+    project_settings=None
+):
+    """Filter and fill workfile template profiles by passed context.
+
+    A custom workfile template can be used as the first version of workfiles.
+    The template is a file on disk which is set in settings. The expected
+    settings structure to have this feature enabled is:
+    project settings
+    |- <host name>
+      |- workfile_builder
+        |- create_first_version   - a bool which must be set to 'True'
+        |- custom_templates       - profiles based on task name/type which
+                                    point to a file which is copied as
+                                    the first workfile
+
+    It is expected that passed arguments are already queried documents of
+    project and asset as parents of processing task name.
+
+    Args:
+        project_doc (Dict[str, Any]): Project document from MongoDB.
+        asset_doc (Dict[str, Any]): Asset document from MongoDB.
+        task_name (str): Name of task for which templates are filtered.
+        host_name (str): Name of host.
+        anatomy (Anatomy): Optionally passed anatomy object for passed project
+            name.
+        project_settings(Dict[str, Any]): Preloaded project settings.
+
+    Returns:
+        str: Path to template or None if none of profiles match current
+            context. Existence of formatted path is not validated.
+        None: If no profile is matching context.
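+
+    Example:
+        A hypothetical profile (all values are illustrative only):
+
+        {
+            "task_types": ["Modeling"],
+            "path": {
+                "windows": "{root[work]}/{project[name]}/templates/model.ma",
+                "linux": "...",
+                "darwin": "..."
+            }
+        }
+
+        The platform specific path of the matching profile is formatted with
+        anatomy context data and returned.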
+ """ + + log = Logger.get_logger("CustomWorkfileResolve") + + project_name = project_doc["name"] + if project_settings is None: + project_settings = get_project_settings(project_name) + + host_settings = project_settings.get(host_name) + if not host_settings: + log.info("Host \"{}\" doesn't have settings".format(host_name)) + return None + + workfile_builder_settings = host_settings.get("workfile_builder") + if not workfile_builder_settings: + log.info(( + "Seems like old version of settings is used." + " Can't access custom templates in host \"{}\"." + ).format(host_name)) + return + + if not workfile_builder_settings["create_first_version"]: + log.info(( + "Project \"{}\" has turned off to create first workfile for" + " host \"{}\"" + ).format(project_name, host_name)) + return + + # Backwards compatibility + template_profiles = workfile_builder_settings.get("custom_templates") + if not template_profiles: + log.info( + "Custom templates are not filled. Skipping template copy." + ) + return + + if anatomy is None: + anatomy = Anatomy(project_name) + + # get project, asset, task anatomy context data + anatomy_context_data = get_template_data( + project_doc, asset_doc, task_name, host_name + ) + # add root dict + anatomy_context_data["root"] = anatomy.roots + + # get task type for the task in context + current_task_type = anatomy_context_data["task"]["type"] + + # get path from matching profile + matching_item = filter_profiles( + template_profiles, + {"task_types": current_task_type} + ) + # when path is available try to format it in case + # there are some anatomy template strings + if matching_item: + template = matching_item["path"][platform.system().lower()] + return StringTemplate.format_strict_template( + template, anatomy_context_data + ).normalized() + + return None + + +def get_custom_workfile_template_by_string_context( + project_name, + asset_name, + task_name, + host_name, + anatomy=None, + project_settings=None +): + """Filter and fill workfile template profiles by passed context. + + Passed context are string representations of project, asset and task. + Function will query documents of project and asset to be able use + `get_custom_workfile_template` for rest of logic. + + Args: + project_name(str): Project name. + asset_name(str): Asset name. + task_name(str): Task name. + host_name (str): Name of host. + anatomy(Anatomy): Optionally prepared anatomy object for passed + project. + project_settings(Dict[str, Any]): Preloaded project settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.) + None: If no profile is matching context. 
+ """ + + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) + + return get_custom_workfile_template( + project_doc, asset_doc, task_name, host_name, anatomy, project_settings + ) diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index 0794adfb67..8433816908 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -15,10 +15,8 @@ Provides: import json import pyblish.api -from openpype.lib import ( - get_system_general_anatomy_data -) from openpype.pipeline import legacy_io +from openpype.pipeline.template_data import get_template_data class CollectAnatomyContextData(pyblish.api.ContextPlugin): @@ -33,11 +31,15 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): "asset": "AssetName", "hierarchy": "path/to/asset", "task": "Working", + "user": "MeDespicable", + # Duplicated entry "username": "MeDespicable", + # Current host name + "app": "maya" + *** OPTIONAL *** - "app": "maya" # Current application base name - + mutliple keys from `datetimeData` # see it's collector + + mutliple keys from `datetimeData` (See it's collector) } """ @@ -45,52 +47,26 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): label = "Collect Anatomy Context Data" def process(self, context): + host_name = context.data["hostName"] + system_settings = context.data["system_settings"] project_entity = context.data["projectEntity"] - context_data = { - "project": { - "name": project_entity["name"], - "code": project_entity["data"].get("code") - }, - "username": context.data["user"], - "app": context.data["hostName"] - } - - context.data["anatomyData"] = context_data - - # add system general settings anatomy data - system_general_data = get_system_general_anatomy_data() - context_data.update(system_general_data) - - datetime_data = context.data.get("datetimeData") or {} - context_data.update(datetime_data) - asset_entity = context.data.get("assetEntity") + task_name = None if asset_entity: task_name = legacy_io.Session["AVALON_TASK"] - asset_tasks = asset_entity["data"]["tasks"] - task_type = asset_tasks.get(task_name, {}).get("type") + anatomy_data = get_template_data( + project_entity, asset_entity, task_name, host_name, system_settings + ) + anatomy_data.update(context.data.get("datetimeData") or {}) - project_task_types = project_entity["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") + username = context.data["user"] + anatomy_data["user"] = username + # Backwards compatibility for 'username' key + anatomy_data["username"] = username - asset_parents = asset_entity["data"]["parents"] - hierarchy = "/".join(asset_parents) - - parent_name = project_entity["name"] - if asset_parents: - parent_name = asset_parents[-1] - - context_data.update({ - "asset": asset_entity["name"], - "parent": parent_name, - "hierarchy": hierarchy, - "task": { - "name": task_name, - "type": task_type, - "short": task_code, - } - }) + # Store + context.data["anatomyData"] = anatomy_data self.log.info("Global anatomy Data collected") - self.log.debug(json.dumps(context_data, indent=4)) + self.log.debug(json.dumps(anatomy_data, indent=4)) diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/openpype/plugins/publish/collect_anatomy_object.py index b1415098b6..725cae2b14 100644 --- a/openpype/plugins/publish/collect_anatomy_object.py +++ 
@@ -1,29 +1,32 @@
 """Collect Anatomy object.

 Requires:
-    os.environ -> AVALON_PROJECT
+    context -> projectName

 Provides:
     context -> anatomy (openpype.pipeline.anatomy.Anatomy)
 """
-import os
+
 import pyblish.api

-from openpype.pipeline import Anatomy
+from openpype.pipeline import Anatomy, KnownPublishError


 class CollectAnatomyObject(pyblish.api.ContextPlugin):
-    """Collect Anatomy object into Context"""
+    """Collect Anatomy object into Context.
+
+    Order offset could be changed to '-0.45'.
+    """

     order = pyblish.api.CollectorOrder - 0.4
     label = "Collect Anatomy Object"

     def process(self, context):
-        project_name = os.environ.get("AVALON_PROJECT")
+        project_name = context.data.get("projectName")
         if project_name is None:
-            raise AssertionError(
-                "Environment `AVALON_PROJECT` is not set."
+            raise KnownPublishError((
+                "Project name is not set in 'projectName'."
                 " Could not initialize project's Anatomy."
-            )
+            ))

         context.data["anatomy"] = Anatomy(project_name)
diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_avalon_entities.py
index 6cd0d136e8..3b05b6ae98 100644
--- a/openpype/plugins/publish/collect_avalon_entities.py
+++ b/openpype/plugins/publish/collect_avalon_entities.py
@@ -1,35 +1,38 @@
 """Collect Anatomy and global anatomy data.

 Requires:
-    session -> AVALON_PROJECT, AVALON_ASSET
+    session -> AVALON_ASSET
+    context -> projectName

 Provides:
-    context -> projectEntity - project entity from database
-    context -> assetEntity - asset entity from database
+    context -> projectEntity - Project document from database.
+    context -> assetEntity - Asset document from database only if 'asset' is
+        set in context.
 """

 import pyblish.api

 from openpype.client import get_project, get_asset_by_name
-from openpype.pipeline import legacy_io
+from openpype.pipeline import legacy_io, KnownPublishError


 class CollectAvalonEntities(pyblish.api.ContextPlugin):
-    """Collect Anatomy into Context"""
+    """Collect Anatomy into Context."""

     order = pyblish.api.CollectorOrder - 0.1
     label = "Collect Avalon Entities"

     def process(self, context):
         legacy_io.install()
-        project_name = legacy_io.Session["AVALON_PROJECT"]
+        project_name = context.data["projectName"]
         asset_name = legacy_io.Session["AVALON_ASSET"]
         task_name = legacy_io.Session["AVALON_TASK"]

         project_entity = get_project(project_name)
-        assert project_entity, (
-            "Project '{0}' was not found."
-        ).format(project_name)
+        if not project_entity:
+            raise KnownPublishError(
+                "Project '{0}' was not found.".format(project_name)
+            )
         self.log.debug("Collected Project \"{}\"".format(project_entity))

         context.data["projectEntity"] = project_entity
diff --git a/openpype/plugins/publish/collect_current_context.py b/openpype/plugins/publish/collect_current_context.py
new file mode 100644
index 0000000000..7e42700d7d
--- /dev/null
+++ b/openpype/plugins/publish/collect_current_context.py
@@ -0,0 +1,47 @@
+"""
+Provides:
+    context -> projectName (str)
+    context -> asset (str)
+    context -> task (str)
+"""
+
+import pyblish.api
+from openpype.pipeline import legacy_io
+
+
+class CollectCurrentContext(pyblish.api.ContextPlugin):
+    """Collect project context into publish context data.
+
+    Plugin does not override any value if it is already set.
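+
+    Note:
+        Values may already be pre-filled by an earlier collector (for
+        example 'CollectFromCreateContext' sets 'projectName'); such values
+        are kept as-is.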
+ """ + + order = pyblish.api.CollectorOrder - 0.5 + label = "Collect Current context" + + def process(self, context): + # Make sure 'legacy_io' is intalled + legacy_io.install() + + # Check if values are already set + project_name = context.data.get("projectName") + asset_name = context.data.get("asset") + task_name = context.data.get("task") + if not project_name: + project_name = legacy_io.current_project() + context.data["projectName"] = project_name + + if not asset_name: + asset_name = legacy_io.Session.get("AVALON_ASSET") + context.data["asset"] = asset_name + + if not task_name: + task_name = legacy_io.Session.get("AVALON_TASK") + context.data["task"] = task_name + + # QUESTION should we be explicit with keys? (the same on instances) + # - 'asset' -> 'assetName' + # - 'task' -> 'taskName' + + self.log.info(( + "Collected project context\nProject: {}\nAsset: {}\nTask: {}" + ).format(project_name, asset_name, task_name)) diff --git a/openpype/plugins/publish/collect_datetime_data.py b/openpype/plugins/publish/collect_datetime_data.py index f46d616fb3..b3178ca3d2 100644 --- a/openpype/plugins/publish/collect_datetime_data.py +++ b/openpype/plugins/publish/collect_datetime_data.py @@ -9,7 +9,7 @@ from openpype.lib.dateutils import get_datetime_data class CollectDateTimeData(pyblish.api.ContextPlugin): - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 label = "Collect DateTime data" def process(self, context): diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index d2be633cbe..9236c698ed 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -19,6 +19,9 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): if not create_context: return + project_name = create_context.project_name + if project_name: + context.data["projectName"] = project_name for created_instance in create_context.instances: instance_data = created_instance.data_to_store() if instance_data["active"]: @@ -44,7 +47,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): "subset": subset, "asset": in_data["asset"], "task": in_data["task"], - "label": subset, + "label": in_data.get("label") or subset, "name": subset, "family": in_data["family"], "families": instance_families, diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index 91d5162d62..687397be8a 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -1,7 +1,5 @@ import pyblish.api -from openpype.pipeline import legacy_io - class CollectHierarchy(pyblish.api.ContextPlugin): """Collecting hierarchy from `parents`. 
@@ -20,7 +18,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): def process(self, context): temp_context = {} - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = context.data["projectName"] final_context = {} final_context[project_name] = {} final_context[project_name]['entity_type'] = 'Project' diff --git a/openpype/plugins/publish/collect_machine_name.py b/openpype/plugins/publish/collect_machine_name.py index 72ef68f8ed..8c25966031 100644 --- a/openpype/plugins/publish/collect_machine_name.py +++ b/openpype/plugins/publish/collect_machine_name.py @@ -11,7 +11,7 @@ import pyblish.api class CollectMachineName(pyblish.api.ContextPlugin): label = "Local Machine Name" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 hosts = ["*"] def process(self, context): diff --git a/openpype/plugins/publish/collect_modules.py b/openpype/plugins/publish/collect_modules.py index 2f6cb1ef0e..d76096bcd9 100644 --- a/openpype/plugins/publish/collect_modules.py +++ b/openpype/plugins/publish/collect_modules.py @@ -7,7 +7,7 @@ import pyblish.api class CollectModules(pyblish.api.ContextPlugin): """Collect OpenPype modules.""" - order = pyblish.api.CollectorOrder - 0.45 + order = pyblish.api.CollectorOrder - 0.5 label = "OpenPype Modules" def process(self, context): diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index c86e777850..40e89e29bc 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -23,7 +23,7 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin): label = "Collect OTIO Frame Ranges" order = pyblish.api.CollectorOrder - 0.08 families = ["shot", "clip"] - hosts = ["resolve", "hiero", "flame"] + hosts = ["resolve", "hiero", "flame", "traypublisher"] def process(self, instance): # get basic variables diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index fc6a9b50f2..9c19f8a78e 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -116,8 +116,10 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): # check in two way if it is sequence if hasattr(otio.schema, "ImageSequenceReference"): # for OpenTimelineIO 0.13 and newer - if isinstance(media_ref, - otio.schema.ImageSequenceReference): + if isinstance( + media_ref, + otio.schema.ImageSequenceReference + ): is_sequence = True else: # for OpenTimelineIO 0.12 and older @@ -139,11 +141,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): padding=media_ref.frame_zero_padding ) collection.indexes.update( - [i for i in range(a_frame_start_h, (a_frame_end_h + 1))]) + list(range(a_frame_start_h, (a_frame_end_h + 1))) + ) - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) else: # in case it is file sequence but not new OTIO schema # `ImageSequenceReference` @@ -152,9 +152,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): path, trimmed_media_range_h, metadata) self.staging_dir, collection = collection_data - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) + self.log.debug(collection) + repre = self._create_representation( + frame_start, frame_end, collection=collection) else: _trim = False dirname, filename = 
os.path.split(media_ref.target_url)
@@ -198,7 +198,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):

         if kwargs.get("collection"):
             collection = kwargs.get("collection")
-            files = [f for f in collection]
+            files = list(collection)
             ext = collection.format("{tail}")
             representation_data.update({
                 "name": ext[1:],
@@ -220,7 +220,5 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
             })

         if kwargs.get("trim") is True:
-            representation_data.update({
-                "tags": ["trim"]
-            })
+            representation_data["tags"] = ["trim"]
         return representation_data
diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py
index 670e57ed10..8f8d0a5eeb 100644
--- a/openpype/plugins/publish/collect_rendered_files.py
+++ b/openpype/plugins/publish/collect_rendered_files.py
@@ -1,7 +1,7 @@
 """Loads publishing context from json and continues in publish process.

 Requires:
-    anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11)
+    anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.4)

 Provides:
     context, instances -> All data from previous publishing process.
@@ -12,7 +12,7 @@ import json

 import pyblish.api

-from openpype.pipeline import legacy_io
+from openpype.pipeline import legacy_io, KnownPublishError


 class CollectRenderedFiles(pyblish.api.ContextPlugin):
@@ -20,7 +20,12 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
     This collector will try to find json files in provided
     `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context.

+    Note:
+        This collector should be split: the part which handles reading of
+        the file and its context from session data should run before anatomy
+        collection, so that instance creation dependent on anatomy can be
+        done here.
     """
+
     order = pyblish.api.CollectorOrder - 0.2
     # Keep "filesequence" for backwards compatibility of older jobs
     targets = ["filesequence", "farm"]
@@ -118,23 +123,20 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):

     def process(self, context):
         self._context = context

-        assert os.environ.get("OPENPYPE_PUBLISH_DATA"), (
-            "Missing `OPENPYPE_PUBLISH_DATA`")
+        if not os.environ.get("OPENPYPE_PUBLISH_DATA"):
+            raise KnownPublishError("Missing `OPENPYPE_PUBLISH_DATA`")
+
+        # QUESTION
+        # Do we support (or want to support) multiple files in the variable?
+        # - what if they have different context?
         paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep)

-        project_name = os.environ.get("AVALON_PROJECT")
-        if project_name is None:
-            raise AssertionError(
-                "Environment `AVALON_PROJECT` was not found."
-                "Could not set project `root` which may cause issues."
-            )
-
-        # TODO root filling should happen after collect Anatomy
+        # Using already collected Anatomy
+        anatomy = context.data["anatomy"]
         self.log.info("Getting root setting for project \"{}\"".format(
-            project_name
+            anatomy.project_name
         ))

-        anatomy = context.data["anatomy"]
         self.log.info("anatomy: {}".format(anatomy.roots))
         try:
             session_is_set = False
diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py
index 8bdf70b529..00f65b8b67 100644
--- a/openpype/plugins/publish/collect_resources_path.py
+++ b/openpype/plugins/publish/collect_resources_path.py
@@ -13,8 +13,6 @@ import copy

 import pyblish.api

-from openpype.pipeline import legacy_io
-

 class CollectResourcesPath(pyblish.api.InstancePlugin):
     """Generate directory path where the files and resources will be stored"""
@@ -58,7 +56,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
         "effect",
         "staticMesh",
         "skeletalMesh"
-
     ]

     def process(self, instance):
@@ -86,11 +83,10 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
         else:
             # solve deprecated situation when `folder` key is not underneath
             # `publish` anatomy
-            project_name = legacy_io.Session["AVALON_PROJECT"]
             self.log.warning((
                 "Deprecation warning: Anatomy does not have set `folder`"
                 " key underneath `publish` (in global or for project `{}`)."
-            ).format(project_name))
+            ).format(anatomy.project_name))

             file_path = anatomy_filled["publish"]["path"]

         # Directory
diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py
index 00c1748cdc..ed30a2f0f5 100644
--- a/openpype/plugins/publish/extract_otio_audio_tracks.py
+++ b/openpype/plugins/publish/extract_otio_audio_tracks.py
@@ -57,15 +57,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         audio_inputs.insert(0, empty)

         # create cmd
-        cmd = path_to_subprocess_arg(self.ffmpeg_path) + " "
-        cmd += self.create_cmd(audio_inputs)
-        cmd += path_to_subprocess_arg(audio_temp_fpath)
-
-        # run subprocess
-        self.log.debug("Executing: {}".format(cmd))
-        openpype.api.run_subprocess(
-            cmd, shell=True, logger=self.log
-        )
+        self.mix_audio(audio_inputs, audio_temp_fpath)

         # remove empty
         os.remove(empty["mediaPath"])
@@ -245,46 +237,80 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             "durationSec": max_duration_sec
         }

-    def create_cmd(self, inputs):
+    def mix_audio(self, audio_inputs, audio_temp_fpath):
         """Mix input audio tracks into a single file with ffmpeg.

         Args:
-            inputs (list): list of input dicts. Order mater.
+            audio_inputs (list): list of input dicts. Order matters.
+            audio_temp_fpath (str): Path to output audio file.
Returns: str: the command body - """ + + longest_input = 0 + for audio_input in audio_inputs: + audio_len = audio_input["durationSec"] + if audio_len > longest_input: + longest_input = audio_len + # create cmd segments - _inputs = "" - _filters = "-filter_complex \"" - _channels = "" - for index, input in enumerate(inputs): - input_format = input.copy() - input_format.update({"i": index}) - input_format["mediaPath"] = path_to_subprocess_arg( - input_format["mediaPath"] + input_args = [] + filters = [] + tag_names = [] + for index, audio_input in enumerate(audio_inputs): + input_args.extend([ + "-ss", str(audio_input["startSec"]), + "-t", str(audio_input["durationSec"]), + "-i", audio_input["mediaPath"] + ]) + + # Output tag of a filtered audio input + tag_name = "[r{}]".format(index) + tag_names.append(tag_name) + # Delay the audio input by the item's delay + filters.append("[{}]adelay={}:all=1{}".format( + index, audio_input["delayMilSec"], tag_name + )) + + # Mixing filter + # - dropout transition (when audio will get louder) is set to be + # higher than any input audio item + # - volume is set to number of inputs - each mix adds 1/n volume + # where n is input index (to get more info read ffmpeg docs and + # send a giftcard to contributor) + filters.append( + ( + "{}amix=inputs={}:duration=first:" + "dropout_transition={},volume={}[a]" + ).format( + "".join(tag_names), + len(audio_inputs), + (longest_input * 1000) + 1000, + len(audio_inputs), ) + ) - _inputs += ( - "-ss {startSec} " - "-t {durationSec} " - "-i {mediaPath} " - ).format(**input_format) + # Store filters to a file (separated by ',') + # - this is to avoid "too long" command issue in ffmpeg + with tempfile.NamedTemporaryFile( + delete=False, mode="w", suffix=".txt" + ) as tmp_file: + filters_tmp_filepath = tmp_file.name + tmp_file.write(",".join(filters)) - _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format( - **input_format) - _channels += "[r{}]".format(index) + args = [self.ffmpeg_path] + args.extend(input_args) + args.extend([ + "-filter_complex_script", filters_tmp_filepath, + "-map", "[a]" + ]) + args.append(audio_temp_fpath) - # merge all cmd segments together - cmd = _inputs + _filters + _channels - cmd += str( - "amix=inputs={inputs}:duration=first:" - "dropout_transition=1000,volume={inputs}[a]\" " - ).format(inputs=len(inputs)) - cmd += "-map \"[a]\" " + # run subprocess + self.log.debug("Executing: {}".format(args)) + openpype.api.run_subprocess(args, logger=self.log) - return cmd + os.remove(filters_tmp_filepath) def create_temp_file(self, name): """Create temp wav file diff --git a/openpype/plugins/publish/extract_otio_file.py b/openpype/plugins/publish/extract_otio_file.py index 3bd217d5d4..4d310ce109 100644 --- a/openpype/plugins/publish/extract_otio_file.py +++ b/openpype/plugins/publish/extract_otio_file.py @@ -12,7 +12,7 @@ class ExtractOTIOFile(openpype.api.Extractor): label = "Extract OTIO file" order = pyblish.api.ExtractorOrder - 0.45 families = ["workfile"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "traypublisher"] def process(self, instance): # create representation data diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 1b6e2a1d61..e16f324e0a 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -360,6 +360,7 @@ class ExtractReview(pyblish.api.InstancePlugin): os.unlink(f) new_repre.update({ + "fps": temp_data["fps"], "name": "{}_{}".format(output_name, output_ext),
"outputName": output_name, "outputDef": output_def, @@ -1209,7 +1210,6 @@ class ExtractReview(pyblish.api.InstancePlugin): # Get instance data pixel_aspect = temp_data["pixel_aspect"] - if reformat_in_baking: self.log.debug(( "Using resolution from input. It is already " @@ -1229,6 +1229,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # - settings value can't have None but has value of 0 output_width = output_def.get("width") or output_width or None output_height = output_def.get("height") or output_height or None + # Force to use input resolution if output resolution was not defined + # in settings. Resolution from instance is not used when + # 'use_input_res' is set to 'True'. + use_input_res = False # Overscal color overscan_color_value = "black" @@ -1240,6 +1244,17 @@ class ExtractReview(pyblish.api.InstancePlugin): ) self.log.debug("Overscan color: `{}`".format(overscan_color_value)) + # Scale input to have proper pixel aspect ratio + # - scale width by the pixel aspect ratio + scale_pixel_aspect = output_def.get("scale_pixel_aspect", True) + if scale_pixel_aspect and pixel_aspect != 1: + # Change input width after pixel aspect + input_width = int(input_width * pixel_aspect) + use_input_res = True + filters.append(( + "scale={}x{}:flags=lanczos".format(input_width, input_height) + )) + # Convert overscan value video filters overscan_crop = output_def.get("overscan_crop") overscan = OverscanCrop( @@ -1250,13 +1265,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # resolution by it's values if overscan_crop_filters: filters.extend(overscan_crop_filters) + # Change input resolution after overscan crop input_width = overscan.width() input_height = overscan.height() - # Use output resolution as inputs after cropping to skip usage of - # instance data resolution - if output_width is None or output_height is None: - output_width = input_width - output_height = input_height + use_input_res = True # Make sure input width and height is not an odd number input_width_is_odd = bool(input_width % 2 != 0) @@ -1282,8 +1294,10 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("input_width: `{}`".format(input_width)) self.log.debug("input_height: `{}`".format(input_height)) - # Use instance resolution if output definition has not set it. 
- if output_width is None or output_height is None: + # Use instance resolution if output definition has not set it + # - use instance resolution only if there were no scale changes + # that may massively affect output ('use_input_res') + if not use_input_res and (output_width is None or output_height is None): output_width = temp_data["resolution_width"] output_height = temp_data["resolution_height"] @@ -1325,7 +1339,6 @@ class ExtractReview(pyblish.api.InstancePlugin): output_width == input_width and output_height == input_height and not letter_box_enabled - and pixel_aspect == 1 ): self.log.debug( "Output resolution is same as input's" @@ -1335,66 +1348,16 @@ class ExtractReview(pyblish.api.InstancePlugin): new_repre["resolutionHeight"] = input_height return filters - # defining image ratios - input_res_ratio = ( - (float(input_width) * pixel_aspect) / input_height - ) - output_res_ratio = float(output_width) / float(output_height) - self.log.debug("input_res_ratio: `{}`".format(input_res_ratio)) - self.log.debug("output_res_ratio: `{}`".format(output_res_ratio)) - - # Round ratios to 2 decimal places for comparing - input_res_ratio = round(input_res_ratio, 2) - output_res_ratio = round(output_res_ratio, 2) - - # get scale factor - scale_factor_by_width = ( - float(output_width) / (input_width * pixel_aspect) - ) - scale_factor_by_height = ( - float(output_height) / input_height - ) - - self.log.debug( - "scale_factor_by_with: `{}`".format(scale_factor_by_width) - ) - self.log.debug( - "scale_factor_by_height: `{}`".format(scale_factor_by_height) - ) - # scaling none square pixels and 1920 width - if ( - input_height != output_height - or input_width != output_width - or pixel_aspect != 1 - ): - if input_res_ratio < output_res_ratio: - self.log.debug( - "Input's resolution ratio is lower then output's" - ) - width_scale = int(input_width * scale_factor_by_height) - width_half_pad = int((output_width - width_scale) / 2) - height_scale = output_height - height_half_pad = 0 - else: - self.log.debug("Input is heigher then output") - width_scale = output_width - width_half_pad = 0 - height_scale = int(input_height * scale_factor_by_width) - height_half_pad = int((output_height - height_scale) / 2) - - self.log.debug("width_scale: `{}`".format(width_scale)) - self.log.debug("width_half_pad: `{}`".format(width_half_pad)) - self.log.debug("height_scale: `{}`".format(height_scale)) - self.log.debug("height_half_pad: `{}`".format(height_half_pad)) - + if input_height != output_height or input_width != output_width: filters.extend([ - "scale={}x{}:flags=lanczos".format( - width_scale, height_scale - ), - "pad={}:{}:{}:{}:{}".format( + ( + "scale={}x{}" + ":flags=lanczos" + ":force_original_aspect_ratio=decrease" + ).format(output_width, output_height), + "pad={}:{}:(ow-iw)/2:(oh-ih)/2:{}".format( output_width, output_height, - width_half_pad, height_half_pad, overscan_color_value ), "setsar=1" diff --git a/openpype/plugins/publish/extract_thumbnail.py b/openpype/plugins/publish/extract_thumbnail.py index c154275322..14b43beae8 100644 --- a/openpype/plugins/publish/extract_thumbnail.py +++ b/openpype/plugins/publish/extract_thumbnail.py @@ -19,7 +19,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): order = pyblish.api.ExtractorOrder families = [ "imagesequence", "render", "render2d", "prerender", - "source", "plate", "take" + "source", "clip", "take" ] hosts = ["shell", "fusion", "resolve", "traypublisher"] enabled = False diff --git
a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/plugins/publish/extract_trim_video_audio.py similarity index 74% rename from openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py rename to openpype/plugins/publish/extract_trim_video_audio.py index 51dc84e9a2..06817c4b5a 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py +++ b/openpype/plugins/publish/extract_trim_video_audio.py @@ -14,7 +14,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): # must be before `ExtractThumbnailSP` order = pyblish.api.ExtractorOrder - 0.01 label = "Extract Trim Video/Audio" - hosts = ["standalonepublisher"] + hosts = ["standalonepublisher", "traypublisher"] families = ["clip", "trimming"] # make sure it is enabled only if at least both families are available @@ -40,6 +40,21 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): fps = instance.data["fps"] video_file_path = instance.data["editorialSourcePath"] extensions = instance.data.get("extensions", ["mov"]) + output_file_type = instance.data.get("outputFileType") + reviewable = "review" in instance.data["families"] + + frame_start = int(instance.data["frameStart"]) + frame_end = int(instance.data["frameEnd"]) + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + + clip_start_h = float(instance.data["clipInH"]) + _dur = instance.data["clipDuration"] + handle_dur = (handle_start + handle_end) + clip_dur_h = float(_dur + handle_dur) + + if output_file_type: + extensions = [output_file_type] for ext in extensions: self.log.info("Processing ext: `{}`".format(ext)) @@ -49,16 +64,10 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): clip_trimed_path = os.path.join( staging_dir, instance.data["name"] + ext) - # # check video file metadata - # input_data = plib.get_ffprobe_streams(video_file_path)[0] - # self.log.debug(f"__ input_data: `{input_data}`") - - start = float(instance.data["clipInH"]) - dur = float(instance.data["clipDurationH"]) if ext == ".wav": # offset time as ffmpeg is having bug - start += 0.5 + clip_start_h += 0.5 # remove "review" from families instance.data["families"] = [ fml for fml in instance.data["families"] @@ -67,9 +76,9 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): ffmpeg_args = [ ffmpeg_path, - "-ss", str(start / fps), + "-ss", str(clip_start_h / fps), "-i", video_file_path, - "-t", str(dur / fps) + "-t", str(clip_dur_h / fps) ] if ext in [".mov", ".mp4"]: ffmpeg_args.extend([ @@ -98,14 +107,15 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): "ext": ext[1:], "files": os.path.basename(clip_trimed_path), "stagingDir": staging_dir, - "frameStart": int(instance.data["frameStart"]), - "frameEnd": int(instance.data["frameEnd"]), - "frameStartFtrack": int(instance.data["frameStartH"]), - "frameEndFtrack": int(instance.data["frameEndH"]), + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartFtrack": frame_start - handle_start, + "frameEndFtrack": frame_end + handle_end, "fps": fps, + "tags": [] } - if ext in [".mov", ".mp4"]: + if ext in [".mov", ".mp4"] and reviewable: repre.update({ "thumbnail": True, "tags": ["review", "ftrackreview", "delete"]}) diff --git a/openpype/plugins/publish/help/validate_containers.xml b/openpype/plugins/publish/help/validate_containers.xml new file mode 100644 index 0000000000..5d18bb4c19 --- /dev/null +++ b/openpype/plugins/publish/help/validate_containers.xml @@ -0,0 +1,23 @@ + + + +Not up-to-date assets + +## Outdated 
containers found + +Scene contains one or more outdated loaded containers, e.g. versions loaded into the scene by the Loader are not the latest. + +### How to repair? + +Use 'Scene Inventory' and update all highlighted old containers to the latest version, OR + refresh Publish and switch the 'Validate Containers' toggle on the 'Options' tab. + + WARNING: Skipping this validator will result in publishing (and probably rendering) old versions of loaded assets. + + +### __Detailed Info__ (optional) + +This validates whether you're working with the latest versions of published content loaded into your scene. This protects you from using outdated versions of an asset. + + + \ No newline at end of file diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py index cac212b7e2..f99c718f8a 100644 --- a/openpype/plugins/publish/integrate.py +++ b/openpype/plugins/publish/integrate.py @@ -5,16 +5,24 @@ import copy import clique import six +from openpype.client.operations import ( + OperationsSession, + new_subset_document, + new_version_doc, + new_representation_doc, + prepare_subset_update_data, + prepare_version_update_data, + prepare_representation_update_data, +) from bson.objectid import ObjectId -from pymongo import DeleteMany, ReplaceOne, InsertOne, UpdateOne import pyblish.api -import openpype.api from openpype.client import ( get_representations, get_subset_by_name, get_version_by_name, ) +from openpype.lib import source_hash from openpype.lib.profiles_filtering import filter_profiles from openpype.lib.file_transaction import FileTransaction from openpype.pipeline import legacy_io @@ -23,41 +31,6 @@ from openpype.pipeline.publish import KnownPublishError log = logging.getLogger(__name__) -def assemble(files): - """Convenience `clique.assemble` wrapper for files of a single collection. - - Unlike `clique.assemble` this wrapper does not allow more than a single - Collection nor any remainder files. Errors will be raised when not only - a single collection is assembled. - - Returns: - clique.Collection: A single sequence Collection - - Raises: - ValueError: Error is raised when files do not result in a single - collected Collection. - - """ - # todo: move this to lib? - # Get the sequence as a collection. The files must be of a single - # sequence and have no remainder outside of the collections. - patterns = [clique.PATTERNS["frames"]] - collections, remainder = clique.assemble(files, - minimum_items=1, - patterns=patterns) - if not collections: - raise ValueError("No collections found in files: " - "{}".format(files)) - if remainder: - raise ValueError("Files found not detected as part" - " of a sequence: {}".format(remainder)) - if len(collections) > 1: - raise ValueError("Files in sequence are not part of a" - " single sequence collection: " - "{}".format(collections)) - return collections[0] - - def get_instance_families(instance): """Get all families of the instance""" # todo: move this to lib? @@ -78,12 +51,6 @@ def get_frame_padded(frame, padding): return "{frame:0{padding}d}".format(padding=padding, frame=frame) -def get_first_frame_padded(collection): - """Return first frame as padded number from `clique.Collection`""" - start_frame = next(iter(collection.indexes)) - return get_frame_padded(start_frame, padding=collection.padding) - - class IntegrateAsset(pyblish.api.InstancePlugin): """Register publish in the database and transfer files to destinations.
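Because this file drops pymongo's `ReplaceOne`/`InsertOne`/`UpdateOne`/`DeleteMany` in favour of `OperationsSession`, a toy model of the queue-then-commit pattern may help readers coming from the old code. This stub is not the real class from `openpype.client.operations`; it only mirrors the four calls the diff uses:

```python
# Toy stand-in for OperationsSession, for illustration only: operations
# are queued in memory and nothing reaches the database until commit().
class ToyOperationsSession:
    def __init__(self):
        self._operations = []

    def create_entity(self, project_name, entity_type, document):
        self._operations.append(
            ("create", project_name, entity_type, document))

    def update_entity(self, project_name, entity_type, entity_id,
                      update_data):
        self._operations.append(
            ("update", project_name, entity_type, entity_id, update_data))

    def delete_entity(self, project_name, entity_type, entity_id):
        self._operations.append(
            ("delete", project_name, entity_type, entity_id))

    def commit(self):
        # The real session converts queued operations into database
        # writes here; deferring them until after the file transaction
        # keeps the window small for a concurrent publish to claim the
        # same version number.
        for operation in self._operations:
            print("would execute:", operation)
        self._operations.clear()
```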
@@ -168,7 +135,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # the database even if not used by the destination template db_representation_context_keys = [ "project", "asset", "task", "subset", "version", "representation", - "family", "hierarchy", "username" + "family", "hierarchy", "username", "output" ] skip_host_families = [] @@ -288,9 +255,12 @@ class IntegrateAsset(pyblish.api.InstancePlugin): template_name = self.get_template_name(instance) - subset, subset_writes = self.prepare_subset(instance, project_name) - version, version_writes = self.prepare_version( - instance, subset, project_name + op_session = OperationsSession() + subset = self.prepare_subset( + instance, op_session, project_name + ) + version = self.prepare_version( + instance, op_session, subset, project_name ) instance.data["versionEntity"] = version @@ -340,7 +310,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Transaction to reduce the chances of another publish trying to # publish to the same version number since that chance can greatly # increase if the file transaction takes a long time. - legacy_io.bulk_write(subset_writes + version_writes) + op_session.commit() + self.log.info("Subset {subset[name]} and Version {version[name]} " "written to database..".format(subset=subset, version=version)) @@ -372,49 +343,49 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Finalize the representations now the published files are integrated # Get 'files' info for representations and its attached resources - representation_writes = [] new_repre_names_low = set() for prepared in prepared_representations: - representation = prepared["representation"] + repre_doc = prepared["representation"] + repre_update_data = prepared["repre_doc_update_data"] transfers = prepared["transfers"] destinations = [dst for src, dst in transfers] - representation["files"] = self.get_files_info( + repre_doc["files"] = self.get_files_info( destinations, sites=sites, anatomy=anatomy ) # Add the version resource file infos to each representation - representation["files"] += resource_file_infos + repre_doc["files"] += resource_file_infos # Set up representation for writing to the database. 
Since # we *might* be overwriting an existing entry if the version # already existed we'll use ReplaceOnce with `upsert=True` - representation_writes.append(ReplaceOne( - filter={"_id": representation["_id"]}, - replacement=representation, - upsert=True - )) + if repre_update_data is None: + op_session.create_entity( + project_name, repre_doc["type"], repre_doc + ) + else: + op_session.update_entity( + project_name, + repre_doc["type"], + repre_doc["_id"], + repre_update_data + ) - new_repre_names_low.add(representation["name"].lower()) + new_repre_names_low.add(repre_doc["name"].lower()) # Delete any existing representations that didn't get any new data # if the instance is not set to append mode if not instance.data.get("append", False): - delete_names = set() for name, existing_repres in existing_repres_by_name.items(): if name not in new_repre_names_low: # We add the exact representation name because `name` is # lowercase for name matching only and not in the database - delete_names.add(existing_repres["name"]) - if delete_names: - representation_writes.append(DeleteMany( - filter={ - "parent": version["_id"], - "name": {"$in": list(delete_names)} - } - )) + op_session.delete_entity( + project_name, "representation", existing_repres["_id"] + ) - # Write representations to the database - legacy_io.bulk_write(representation_writes) + self.log.debug("{}".format(op_session.to_data())) + op_session.commit() # Backwards compatibility # todo: can we avoid the need to store this? @@ -425,13 +396,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin): self.log.info("Registered {} representations" "".format(len(prepared_representations))) - def prepare_subset(self, instance, project_name): + def prepare_subset(self, instance, op_session, project_name): asset_doc = instance.data["assetEntity"] subset_name = instance.data["subset"] + family = instance.data["family"] self.log.debug("Subset: {}".format(subset_name)) # Get existing subset if it exists - subset_doc = get_subset_by_name( + existing_subset_doc = get_subset_by_name( project_name, subset_name, asset_doc["_id"] ) @@ -444,69 +416,79 @@ class IntegrateAsset(pyblish.api.InstancePlugin): if subset_group: data["subsetGroup"] = subset_group - bulk_writes = [] - if subset_doc is None: + subset_id = None + if existing_subset_doc: + subset_id = existing_subset_doc["_id"] + subset_doc = new_subset_document( + subset_name, family, asset_doc["_id"], data, subset_id + ) + + if existing_subset_doc is None: # Create a new subset self.log.info("Subset '%s' not found, creating ..." % subset_name) - subset_doc = { - "_id": ObjectId(), - "schema": "openpype:subset-3.0", - "type": "subset", - "name": subset_name, - "data": data, - "parent": asset_doc["_id"] - } - bulk_writes.append(InsertOne(subset_doc)) + op_session.create_entity( + project_name, subset_doc["type"], subset_doc + ) else: # Update existing subset data with new data and set in database. 
# We also change the found subset in-place so we don't need to # re-query the subset afterwards subset_doc["data"].update(data) - bulk_writes.append(UpdateOne( - {"type": "subset", "_id": subset_doc["_id"]}, - {"$set": { - "data": subset_doc["data"] - }} - )) + update_data = prepare_subset_update_data( + existing_subset_doc, subset_doc + ) + op_session.update_entity( + project_name, + subset_doc["type"], + subset_doc["_id"], + update_data + ) self.log.info("Prepared subset: {}".format(subset_name)) - return subset_doc, bulk_writes + return subset_doc - def prepare_version(self, instance, subset_doc, project_name): + def prepare_version(self, instance, op_session, subset_doc, project_name): version_number = instance.data["version"] - version_doc = { - "schema": "openpype:version-3.0", - "type": "version", - "parent": subset_doc["_id"], - "name": version_number, - "data": self.create_version_data(instance) - } - existing_version = get_version_by_name( project_name, version_number, subset_doc["_id"], fields=["_id"] ) + version_id = None + if existing_version: + version_id = existing_version["_id"] + + version_data = self.create_version_data(instance) + version_doc = new_version_doc( + version_number, + subset_doc["_id"], + version_data, + version_id + ) if existing_version: self.log.debug("Updating existing version ...") - version_doc["_id"] = existing_version["_id"] + update_data = prepare_version_update_data( + existing_version, version_doc + ) + op_session.update_entity( + project_name, + version_doc["type"], + version_doc["_id"], + update_data + ) else: self.log.debug("Creating new version ...") - version_doc["_id"] = ObjectId() - - bulk_writes = [ReplaceOne( - filter={"_id": version_doc["_id"]}, - replacement=version_doc, - upsert=True - )] + op_session.create_entity( + project_name, version_doc["type"], version_doc + ) self.log.info("Prepared version: v{0:03d}".format(version_doc["name"])) - return version_doc, bulk_writes + return version_doc def prepare_representation(self, repre, template_name, @@ -517,20 +499,22 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # pre-flight validations if repre["ext"].startswith("."): - raise ValueError("Extension must not start with a dot '.': " - "{}".format(repre["ext"])) + raise KnownPublishError(( + "Extension must not start with a dot '.': {}" + ).format(repre["ext"])) if repre.get("transfers"): - raise ValueError("Representation is not allowed to have transfers" - "data before integration. They are computed in " - "the integrator" - "Got: {}".format(repre["transfers"])) + raise KnownPublishError(( + "Representation is not allowed to have transfers" + "data before integration. They are computed in " + "the integrator. 
Got: {}" + ).format(repre["transfers"])) # create template data for Anatomy template_data = copy.deepcopy(instance.data["anatomyData"]) # required representation keys - files = repre['files'] + files = repre["files"] template_data["representation"] = repre["name"] template_data["ext"] = repre["ext"] @@ -546,95 +530,119 @@ class IntegrateAsset(pyblish.api.InstancePlugin): }.items(): # Allow to take value from representation # if not found also consider instance.data - if key in repre: - value = repre[key] - elif key in instance.data: - value = instance.data[key] - else: - continue - template_data[anatomy_key] = value + value = repre.get(key) + if value is None: + value = instance.data.get(key) - if repre.get('stagingDir'): - stagingdir = repre['stagingDir'] - else: + if value is not None: + template_data[anatomy_key] = value + + stagingdir = repre.get("stagingDir") + if not stagingdir: # Fall back to instance staging dir if not explicitly # set for representation in the instance - self.log.debug("Representation uses instance staging dir: " - "{}".format(instance_stagingdir)) + self.log.debug(( + "Representation uses instance staging dir: {}" + ).format(instance_stagingdir)) stagingdir = instance_stagingdir + if not stagingdir: - raise ValueError("No staging directory set for representation: " - "{}".format(repre)) + raise KnownPublishError( + "No staging directory set for representation: {}".format(repre) + ) self.log.debug("Anatomy template name: {}".format(template_name)) - anatomy = instance.context.data['anatomy'] - template = os.path.normpath(anatomy.templates[template_name]["path"]) + anatomy = instance.context.data["anatomy"] + publish_template_category = anatomy.templates[template_name] + template = os.path.normpath(publish_template_category["path"]) is_udim = bool(repre.get("udim")) + is_sequence_representation = isinstance(files, (list, tuple)) if is_sequence_representation: # Collection of files (sequence) - assert not any(os.path.isabs(fname) for fname in files), ( - "Given file names contain full paths" - ) + if any(os.path.isabs(fname) for fname in files): + raise KnownPublishError("Given file names contain full paths") - src_collection = assemble(files) + src_collections, remainders = clique.assemble(files) + if len(files) < 2 or len(src_collections) != 1 or remainders: + raise KnownPublishError(( + "Files of representation does not contain proper" + " sequence files.\nCollected collections: {}" + "\nCollected remainders: {}" + ).format( + ", ".join([str(col) for col in src_collections]), + ", ".join([str(rem) for rem in remainders]) + )) - # If the representation has `frameStart` set it renumbers the - # frame indices of the published collection. It will start from - # that `frameStart` index instead. Thus if that frame start - # differs from the collection we want to shift the destination - # frame indices from the source collection. + src_collection = src_collections[0] destination_indexes = list(src_collection.indexes) - destination_padding = len(get_first_frame_padded(src_collection)) - if repre.get("frameStart") is not None and not is_udim: - index_frame_start = int(repre.get("frameStart")) - - render_template = anatomy.templates[template_name] - # todo: should we ALWAYS manage the frame padding even when not - # having `frameStart` set? 
- frame_start_padding = int( - render_template.get( - "frame_padding", - render_template.get("padding") - ) + # Use last frame for minimum padding + # - that should cover both 'udim' and 'frame' minimum padding + destination_padding = len(str(destination_indexes[-1])) + if not is_udim: + # Change padding for frames if template has defined higher + # padding. + template_padding = int( + publish_template_category["frame_padding"] ) + if template_padding > destination_padding: + destination_padding = template_padding - # Shift destination sequence to the start frame - src_start_frame = next(iter(src_collection.indexes)) - shift = index_frame_start - src_start_frame - if shift: + # If the representation has `frameStart` set it renumbers the + # frame indices of the published collection. It will start from + # that `frameStart` index instead. Thus if that frame start + # differs from the collection we want to shift the destination + # frame indices from the source collection. + repre_frame_start = repre.get("frameStart") + if repre_frame_start is not None: + index_frame_start = int(repre["frameStart"]) + # Shift destination sequence to the start frame destination_indexes = [ - frame + shift for frame in destination_indexes + index_frame_start + idx + for idx in range(len(destination_indexes)) ] - destination_padding = frame_start_padding # To construct the destination template with anatomy we require # a Frame or UDIM tile set for the template data. We use the first # index of the destination for that because that could've shifted # from the source indexes, etc. - first_index_padded = get_frame_padded(frame=destination_indexes[0], - padding=destination_padding) - if is_udim: - # UDIM representations handle ranges in a different manner - template_data["udim"] = first_index_padded - else: - template_data["frame"] = first_index_padded + first_index_padded = get_frame_padded( + frame=destination_indexes[0], + padding=destination_padding + ) # Construct destination collection from template - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled[template_name]["path"] - repre_context = template_filled.used_values - self.log.debug("Template filled: {}".format(str(template_filled))) - dst_collection = assemble([os.path.normpath(template_filled)]) + repre_context = None + dst_filepaths = [] + for index in destination_indexes: + if is_udim: + template_data["udim"] = index + else: + template_data["frame"] = index + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + dst_filepaths.append(template_filled) + if repre_context is None: + self.log.debug( + "Template filled: {}".format(str(template_filled)) + ) + repre_context = template_filled.used_values + + # Make sure context contains frame + # NOTE: Frame would not be available only if template does not + # contain '{frame}' in template -> Do we want support it? + if not is_udim: + repre_context["frame"] = first_index_padded # Update the destination indexes and padding - dst_collection.indexes.clear() - dst_collection.indexes.update(set(destination_indexes)) + dst_collection = clique.assemble(dst_filepaths)[0][0] dst_collection.padding = destination_padding - assert ( - len(src_collection.indexes) == len(dst_collection.indexes) - ), "This is a bug" + if len(src_collection.indexes) != len(dst_collection.indexes): + raise KnownPublishError(( + "This is a bug. 
Source sequence frames length" + " does not match integration frames length" + )) # Multiple file transfers transfers = [] @@ -645,9 +653,13 @@ class IntegrateAsset(pyblish.api.InstancePlugin): else: # Single file fname = files - assert not os.path.isabs(fname), ( - "Given file name is a full path" - ) + if os.path.isabs(fname): + self.log.error( + "Filename in representation is filepath {}".format(fname) + ) + raise KnownPublishError( + "This is a bug. Representation file name is full path" + ) # Manage anatomy template data template_data.pop("frame", None) @@ -677,9 +689,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Also add these values to the context even if not used by the # destination template value = template_data.get(key) - if not value: - continue - repre_context[key] = template_data[key] + if value is not None: + repre_context[key] = value # Explicitly store the full list even though template data might # have a different value because it uses just a single udim tile @@ -688,47 +699,34 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Use previous representation's id if there is a name match existing = existing_repres_by_name.get(repre["name"].lower()) + repre_id = None if existing: repre_id = existing["_id"] - else: - repre_id = ObjectId() - # Backwards compatibility: # Store first transferred destination as published path data - # todo: can we remove this? - # todo: We shouldn't change data that makes its way back into - # instance.data[] until we know the publish actually succeeded - # otherwise `published_path` might not actually be valid? + # - used primarily for reviews that are integrated to custom modules + # TODO we should probably store all integrated files + # related to the representation? published_path = transfers[0][1] - repre["published_path"] = published_path # Backwards compatibility + repre["published_path"] = published_path # todo: `repre` is not the actual `representation` entity # we should simplify/clarify difference between data above # and the actual representation entity for the database data = repre.get("data", {}) - data.update({'path': published_path, 'template': template}) - representation = { - "_id": repre_id, - "schema": "openpype:representation-2.0", - "type": "representation", - "parent": version["_id"], - "name": repre['name'], - "data": data, - - # Imprint shortcut to context for performance reasons. 
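The `if not value` to `if value is not None` change above is subtle enough to deserve a demonstration: falsy but meaningful values would previously be dropped from the representation context.

```python
# Why the diff checks "is not None" instead of truthiness: falsy but
# meaningful values (frame 0, an empty output name) must survive.
# The keys below are illustrative stand-ins for template data.
template_data = {"frame": 0, "output": "", "username": None}

kept_truthy = {k: v for k, v in template_data.items() if v}
kept_not_none = {k: v for k, v in template_data.items() if v is not None}

print(kept_truthy)    # {} - frame 0 and the empty output name are lost
print(kept_not_none)  # {'frame': 0, 'output': ''}
```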
- "context": repre_context - } - - # todo: simplify/streamline which additional data makes its way into - # the representation context - if repre.get("outputName"): - representation["context"]["output"] = repre['outputName'] - - if is_sequence_representation and repre.get("frameStart") is not None: - representation['context']['frame'] = template_data["frame"] + data.update({"path": published_path, "template": template}) + repre_doc = new_representation_doc( + repre["name"], version["_id"], repre_context, data, repre_id + ) + update_data = None + if repre_id is not None: + update_data = prepare_representation_update_data( + existing, repre_doc + ) return { - "representation": representation, + "representation": repre_doc, + "repre_doc_update_data": update_data, "anatomy_data": template_data, "transfers": transfers, # todo: avoid the need for 'published_files' used by Integrate Hero @@ -786,7 +784,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): version_data[key] = instance.data[key] # Include instance.data[versionData] directly - version_data_instance = instance.data.get('versionData') + version_data_instance = instance.data.get("versionData") if version_data_instance: version_data.update(version_data_instance) @@ -826,6 +824,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): def get_profile_filter_criteria(self, instance): """Return filter criteria for `filter_profiles`""" + # Anatomy data is pre-filled by Collectors anatomy_data = instance.data["anatomyData"] @@ -856,6 +855,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): path: modified path if possible, or unmodified path + warning logged """ + success, rootless_path = anatomy.find_root_template_from_path(path) if success: path = rootless_path @@ -877,6 +877,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): output_resources: array of dictionaries to be added to 'files' key in representation """ + file_infos = [] for file_path in destinations: file_info = self.prepare_file_info(file_path, anatomy, sites=sites) @@ -896,10 +897,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin): Returns: dict: file info dictionary """ + return { "_id": ObjectId(), "path": self.get_rootless_path(anatomy, path), "size": os.path.getsize(path), - "hash": openpype.api.source_hash(path), + "hash": source_hash(path), "sites": sites } diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 5f97a9bd41..7d698ff98d 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -71,7 +71,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): template_key = self._get_template_key(instance) anatomy = instance.context.data["anatomy"] - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = anatomy.project_name if template_key not in anatomy.templates: self.log.warning(( "!!! 
Anatomy of project \"{}\" does not have set" @@ -313,13 +313,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): } repre_context = template_filled.used_values for key in self.db_representation_context_keys: - if ( - key in repre_context or - key not in anatomy_data - ): - continue - - repre_context[key] = anatomy_data[key] + value = anatomy_data.get(key) + if value is not None: + repre_context[key] = value # Prepare new repre repre = copy.deepcopy(repre_info["representation"]) @@ -454,7 +450,6 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ) if bulk_writes: - project_name = legacy_io.Session["AVALON_PROJECT"] legacy_io.database[project_name].bulk_write( bulk_writes ) @@ -517,11 +512,10 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(template_data) # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." - ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled[template_key]["path"] # Directory diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index fd50858a91..8ae0dd2d60 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -39,9 +39,8 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - project_name = legacy_io.Session["AVALON_PROJECT"] - anatomy = instance.context.data["anatomy"] + project_name = anatomy.project_name if "publish" not in anatomy.templates: self.log.warning("Anatomy is missing the \"publish\" key!") return diff --git a/openpype/plugins/publish/start_timer.py b/openpype/plugins/publish/start_timer.py deleted file mode 100644 index 112d92bef0..0000000000 --- a/openpype/plugins/publish/start_timer.py +++ /dev/null @@ -1,14 +0,0 @@ -import pyblish.api - -from openpype.lib import change_timer_to_current_context - - -class StartTimer(pyblish.api.ContextPlugin): - label = "Start Timer" - order = pyblish.api.IntegratorOrder + 1 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - change_timer_to_current_context() diff --git a/openpype/plugins/publish/stop_timer.py b/openpype/plugins/publish/stop_timer.py deleted file mode 100644 index 414e43a3c4..0000000000 --- a/openpype/plugins/publish/stop_timer.py +++ /dev/null @@ -1,17 +0,0 @@ -import os -import requests - -import pyblish.api - - -class StopTimer(pyblish.api.ContextPlugin): - label = "Stop Timer" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) diff --git a/openpype/plugins/publish/validate_containers.py b/openpype/plugins/publish/validate_containers.py index b2a3ed9b79..79759450e1 100644 --- a/openpype/plugins/publish/validate_containers.py +++ b/openpype/plugins/publish/validate_containers.py @@ -1,5 +1,9 @@ import pyblish.api from openpype.pipeline.load import any_outdated_containers +from openpype.pipeline import ( + 
PublishXmlValidationError, + OptionalPyblishPluginMixin +) class ShowInventory(pyblish.api.Action): @@ -14,7 +18,9 @@ class ShowInventory(pyblish.api.Action): host_tools.show_scene_inventory() -class ValidateContainers(pyblish.api.ContextPlugin): +class ValidateContainers(OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin): + """Containers must be updated to the latest version on publish.""" label = "Validate Containers" @@ -24,5 +30,9 @@ actions = [ShowInventory] def process(self, context): + if not self.is_active(context.data): + return + if any_outdated_containers(): - raise ValueError("There are outdated containers in the scene.") + msg = "There are outdated containers in the scene." + raise PublishXmlValidationError(self, msg) diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index 702e87b58d..694788c414 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -19,7 +19,8 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): "hiero", "standalonepublisher", "resolve", - "flame" + "flame", + "traypublisher" ] def process(self, context): diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index 124eacbe39..a447aa916b 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -15,6 +15,7 @@ from openpype.lib.remote_publish import ( fail_batch, find_variant_key, get_task_data, + get_timeout, IN_PROGRESS_STATUS ) @@ -170,7 +171,7 @@ class PypeCommands: log.info("Publish finished.") @staticmethod - def remotepublishfromapp(project, batch_path, host_name, + def remotepublishfromapp(project_name, batch_path, host_name, user_email, targets=None): """Opens installed variant of 'host' and run remote publish there. @@ -189,8 +190,8 @@ class PypeCommands: Runs publish process as user would, in automatic fashion. Args: - project (str): project to publish (only single context is expected - per call of remotepublish + project_name (str): project to publish (only single context is + expected per call of remotepublish) batch_path (str): Path batch folder. Contains subfolders with resources (workfile, another subfolder 'renders' etc.) host_name (str): 'photoshop' @@ -222,10 +223,17 @@ class PypeCommands: batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS})) if len(batches_in_progress) > 1: - fail_batch(_id, batches_in_progress, dbcon) + running_batches = [str(batch["_id"]) + for batch in batches_in_progress + if batch["_id"] != _id] + msg = "There are still running batches {}\n".
\ + format("\n".join(running_batches)) + msg += "Ask admin to check them and reprocess current batch" + fail_batch(_id, dbcon, msg) print("Another batch running, probably stuck, ask admin for help") - asset, task_name, _ = get_batch_asset_task_info(task_data["context"]) + asset_name, task_name, task_type = get_batch_asset_task_info( + task_data["context"]) application_manager = ApplicationManager() found_variant_key = find_variant_key(application_manager, host_name) @@ -233,8 +241,8 @@ class PypeCommands: # must have for proper launch of app env = get_app_environments_for_context( - project, - asset, + project_name, + asset_name, task_name, app_name ) @@ -262,15 +270,22 @@ class PypeCommands: data = { "last_workfile_path": workfile_path, "start_last_workfile": True, - "project_name": project, - "asset_name": asset, + "project_name": project_name, + "asset_name": asset_name, "task_name": task_name } launched_app = application_manager.launch(app_name, **data) + timeout = get_timeout(project_name, host_name, task_type) + + time_start = time.time() while launched_app.poll() is None: time.sleep(0.5) + if time.time() - time_start > timeout: + launched_app.terminate() + msg = "Timeout reached" + fail_batch(_id, dbcon, msg) @staticmethod def remotepublish(project, batch_path, user_email, targets=None): diff --git a/openpype/resources/app_icons/shotgrid.png b/openpype/resources/app_icons/shotgrid.png new file mode 100644 index 0000000000..6d0cc047f9 Binary files /dev/null and b/openpype/resources/app_icons/shotgrid.png differ diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 245fc665f0..fc22f060a2 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -3,6 +3,8 @@ import re import sys import logging +from openpype.client import get_asset_by_name, get_versions + # Pipeline imports from openpype.hosts.fusion import api import openpype.hosts.fusion.api.lib as fusion_lib @@ -15,13 +17,10 @@ from openpype.pipeline import ( legacy_io, ) -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Update Slap Comp") -self = sys.modules[__name__] -self._project = None - def _format_version_folder(folder): """Format a version folder based on the filepath @@ -131,8 +130,8 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) - versions = list(versions) + project_name = legacy_io.active_project() + versions = list(get_versions(project_name, version_ids=version_ids)) start = min(v["data"]["frameStart"] for v in versions) end = max(v["data"]["frameEnd"] for v in versions) @@ -162,15 +161,10 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = legacy_io.find_one({"type": "asset", "name": asset_name}) + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find '%s' in the database" % asset_name - # Get current project - self._project = legacy_io.find_one({ - "type": "project", - "name": legacy_io.Session["AVALON_PROJECT"] - }) - # Go to comp if not filepath: current_comp = api.get_current_comp() diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json 
index 70cda68cb4..9847e58cfa 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -301,7 +301,9 @@ "traypublisher" ], "families": [ - "plate" + "plate", + "review", + "audio" ], "task_types": [], "tasks": [], @@ -432,6 +434,9 @@ "enabled": false, "custom_attribute_keys": [] }, + "IntegrateHierarchyToFtrack": { + "create_task_status_profiles": [] + }, "IntegrateFtrackNote": { "enabled": true, "note_template": "{intent}: {comment}", @@ -447,6 +452,9 @@ "enabled": false, "ftrack_custom_attributes": {} }, + "IntegrateFtrackComponentOverwrite": { + "enabled": true + }, "IntegrateFtrackInstance": { "family_mapping": { "camera": "cam", diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index e509db2791..0ff9363ba7 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -85,6 +85,7 @@ ], "width": 0, "height": 0, + "scale_pixel_aspect": true, "bg_color": [ 0, 0, diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index c96acbff6d..ce9cd4d606 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -31,6 +31,37 @@ } ] }, + "RenderSettings": { + "apply_render_settings": true, + "default_render_image_folder": "renders", + "aov_separator": "underscore", + "reset_current_frame": false, + "arnold_renderer": { + "image_prefix": "maya///_", + "image_format": "exr", + "multilayer_exr": true, + "tiled": true, + "aov_list": [], + "additional_options": [] + }, + "vray_renderer": { + "image_prefix": "maya///", + "engine": "1", + "image_format": "png", + "aov_list": [], + "additional_options": [] + }, + "redshift_renderer": { + "image_prefix": "maya///", + "primary_gi_engine": "0", + "secondary_gi_engine": "0", + "image_format": "iff", + "multilayer_exr": true, + "force_combine": true, + "aov_list": [], + "additional_options": [] + } + }, "create": { "CreateLook": { "enabled": true, @@ -43,9 +74,7 @@ "enabled": true, "defaults": [ "Main" - ], - "aov_separator": "underscore", - "default_render_image_folder": "renders" + ] }, "CreateUnrealStaticMesh": { "enabled": true, @@ -70,6 +99,20 @@ "enabled": true, "publish_mip_map": true }, + "CreateAnimation": { + "enabled": true, + "write_color_sets": false, + "defaults": [ + "Main" + ] + }, + "CreatePointCache": { + "enabled": true, + "write_color_sets": false, + "defaults": [ + "Main" + ] + }, "CreateMultiverseUsd": { "enabled": true, "defaults": [ @@ -88,12 +131,6 @@ "Main" ] }, - "CreateAnimation": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateAss": { "enabled": true, "defaults": [ @@ -132,12 +169,6 @@ "Sculpt" ] }, - "CreatePointCache": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateRenderSetup": { "enabled": true, "defaults": [ diff --git a/openpype/settings/defaults/project_settings/shotgrid.json b/openpype/settings/defaults/project_settings/shotgrid.json new file mode 100644 index 0000000000..83b6f69074 --- /dev/null +++ b/openpype/settings/defaults/project_settings/shotgrid.json @@ -0,0 +1,22 @@ +{ + "shotgrid_project_id": 0, + "shotgrid_server": "", + "event": { + "enabled": false + }, + "fields": { + "asset": { + "type": "sg_asset_type" + }, + "sequence": { + "episode_link": "episode" + }, + "shot": { + "episode_link": "sg_episode", + "sequence_link": 
"sg_sequence" + }, + "task": { + "step": "step" + } + } +} diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json index 8bf3e3b306..5db2a79772 100644 --- a/openpype/settings/defaults/project_settings/traypublisher.json +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -236,9 +236,70 @@ "extensions": [] } ], + "editorial_creators": { + "editorial_simple": { + "default_variants": [ + "Main" + ], + "clip_name_tokenizer": { + "_sequence_": "(sc\\d{3})", + "_shot_": "(sh\\d{3})" + }, + "shot_rename": { + "enabled": true, + "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}" + }, + "shot_hierarchy": { + "enabled": true, + "parents_path": "{project}/{folder}/{sequence}", + "parents": [ + { + "type": "Project", + "name": "project", + "value": "{project[name]}" + }, + { + "type": "Folder", + "name": "folder", + "value": "shots" + }, + { + "type": "Sequence", + "name": "sequence", + "value": "{_sequence_}" + } + ] + }, + "shot_add_tasks": {}, + "family_presets": [ + { + "family": "review", + "variant": "Reference", + "review": true, + "output_file_type": ".mp4" + }, + { + "family": "plate", + "variant": "", + "review": false, + "output_file_type": ".mov" + }, + { + "family": "audio", + "variant": "", + "review": false, + "output_file_type": ".wav" + } + ] + } + }, "BatchMovieCreator": { - "default_variants": ["Main"], - "default_tasks": ["Compositing"], + "default_variants": [ + "Main" + ], + "default_tasks": [ + "Compositing" + ], "extensions": [ ".mov" ] diff --git a/openpype/settings/defaults/project_settings/unreal.json b/openpype/settings/defaults/project_settings/unreal.json index dad61cd1f0..c5f5cdf719 100644 --- a/openpype/settings/defaults/project_settings/unreal.json +++ b/openpype/settings/defaults/project_settings/unreal.json @@ -1,4 +1,5 @@ { + "level_sequences_for_layouts": false, "project_setup": { "dev_mode": true } diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/openpype/settings/defaults/project_settings/webpublisher.json index 77168c25e6..cba472514e 100644 --- a/openpype/settings/defaults/project_settings/webpublisher.json +++ b/openpype/settings/defaults/project_settings/webpublisher.json @@ -1,4 +1,13 @@ { + "timeout_profiles": [ + { + "hosts": [ + "photoshop" + ], + "task_types": [], + "timeout": 600 + } + ], "publish": { "CollectPublishedFiles": { "task_type_to_family": { diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index 8cd4114cb0..c84d23d3fc 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -26,13 +26,14 @@ "linux": [] }, "intent": { + "allow_empty_intent": true, + "empty_intent_label": "", "items": { - "-": "-", "wip": "WIP", "final": "Final", "test": "Test" }, - "default": "-" + "default": "" }, "custom_attributes": { "show": { @@ -135,6 +136,13 @@ "enabled": false, "server": "" }, + "shotgrid": { + "enabled": false, + "leecher_manager_url": "http://127.0.0.1:3000", + "leecher_backend_url": "http://127.0.0.1:8090", + "filter_projects_by_login": true, + "shotgrid_settings": {} + }, "timers_manager": { "enabled": true, "auto_stop": true, diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py index a173e2454f..b2cb2204f4 100644 --- a/openpype/settings/entities/__init__.py +++ b/openpype/settings/entities/__init__.py @@ -107,6 
+107,7 @@ from .enum_entity import ( TaskTypeEnumEntity, DeadlineUrlEnumEntity, AnatomyTemplatesEnumEntity, + ShotgridUrlEnumEntity ) from .list_entity import ListEntity @@ -171,6 +172,7 @@ __all__ = ( "ToolsEnumEntity", "TaskTypeEnumEntity", "DeadlineUrlEnumEntity", + "ShotgridUrlEnumEntity", "AnatomyTemplatesEnumEntity", "ListEntity", diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py index 03998677ce..defe4aa1f0 100644 --- a/openpype/settings/entities/enum_entity.py +++ b/openpype/settings/entities/enum_entity.py @@ -1,10 +1,7 @@ import copy from .input_entities import InputEntity from .exceptions import EntitySchemaError -from .lib import ( - NOT_SET, - STRING_TYPE -) +from .lib import NOT_SET, STRING_TYPE class BaseEnumEntity(InputEntity): @@ -26,7 +23,7 @@ class BaseEnumEntity(InputEntity): for item in self.enum_items: key = tuple(item.keys())[0] if key in enum_keys: - reason = "Key \"{}\" is more than once in enum items.".format( + reason = 'Key "{}" is more than once in enum items.'.format( key ) raise EntitySchemaError(self, reason) @@ -34,7 +31,7 @@ class BaseEnumEntity(InputEntity): enum_keys.add(key) if not isinstance(key, STRING_TYPE): - reason = "Key \"{}\" has invalid type {}, expected {}.".format( + reason = 'Key "{}" has invalid type {}, expected {}.'.format( key, type(key), STRING_TYPE ) raise EntitySchemaError(self, reason) @@ -59,7 +56,7 @@ class BaseEnumEntity(InputEntity): for item in check_values: if item not in self.valid_keys: raise ValueError( - "{} Invalid value \"{}\". Expected one of: {}".format( + '{} Invalid value "{}". Expected one of: {}'.format( self.path, item, self.valid_keys ) ) @@ -84,7 +81,7 @@ class EnumEntity(BaseEnumEntity): self.valid_keys = set(all_keys) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) value_on_not_set = [] if enum_default: if not isinstance(enum_default, list): @@ -109,7 +106,7 @@ class EnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -152,6 +149,7 @@ class HostsEnumEntity(BaseEnumEntity): Host name is not the same as application name. Host name defines implementation instead of application name. """ + schema_types = ["hosts-enum"] all_host_names = [ "aftereffects", @@ -211,7 +209,7 @@ class HostsEnumEntity(BaseEnumEntity): self.valid_keys = valid_keys if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: for key in valid_keys: @@ -219,7 +217,7 @@ class HostsEnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -227,14 +225,10 @@ class HostsEnumEntity(BaseEnumEntity): def schema_validations(self): if self.hosts_filter: enum_len = len(self.enum_items) - if ( - enum_len == 0 - or (enum_len == 1 and self.use_empty_value) - ): - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) + if enum_len == 0 or (enum_len == 1 and self.use_empty_value): + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) reason = ( "All host names were removed after applying" " host filters. 
{}" @@ -247,24 +241,25 @@ class HostsEnumEntity(BaseEnumEntity): invalid_filters.add(item) if invalid_filters: - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) - expected_hosts = ", ".join([ - '"{}"'.format(item) - for item in self.all_host_names - ]) - self.log.warning(( - "Host filters containt invalid host names:" - " \"{}\" Expected values are {}" - ).format(joined_filters, expected_hosts)) + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) + expected_hosts = ", ".join( + ['"{}"'.format(item) for item in self.all_host_names] + ) + self.log.warning( + ( + "Host filters containt invalid host names:" + ' "{}" Expected values are {}' + ).format(joined_filters, expected_hosts) + ) super(HostsEnumEntity, self).schema_validations() class AppsEnumEntity(BaseEnumEntity): """Enum of applications for project anatomy attributes.""" + schema_types = ["apps-enum"] def _item_initialization(self): @@ -272,7 +267,7 @@ class AppsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -353,7 +348,7 @@ class ToolsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -410,10 +405,10 @@ class TaskTypeEnumEntity(BaseEnumEntity): def _item_initialization(self): self.multiselection = self.schema_data.get("multiselection", True) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) self.value_on_not_set = "" self.enum_items = [] @@ -508,7 +503,8 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): enum_items_list = [] for server_name, url_entity in deadline_urls_entity.items(): enum_items_list.append( - {server_name: "{}: {}".format(server_name, url_entity.value)}) + {server_name: "{}: {}".format(server_name, url_entity.value)} + ) valid_keys.add(server_name) return enum_items_list, valid_keys @@ -531,6 +527,50 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): self._current_value = tuple(self.valid_keys)[0] +class ShotgridUrlEnumEntity(BaseEnumEntity): + schema_types = ["shotgrid_url-enum"] + + def _item_initialization(self): + self.multiselection = False + + self.enum_items = [] + self.valid_keys = set() + + self.valid_value_types = (STRING_TYPE,) + self.value_on_not_set = "" + + # GUI attribute + self.placeholder = self.schema_data.get("placeholder") + + def _get_enum_values(self): + shotgrid_settings = self.get_entity_from_path( + "system_settings/modules/shotgrid/shotgrid_settings" + ) + + valid_keys = set() + enum_items_list = [] + for server_name, settings in shotgrid_settings.items(): + enum_items_list.append( + { + server_name: "{}: {}".format( + server_name, settings["shotgrid_url"].value + ) + } + ) + valid_keys.add(server_name) + return enum_items_list, valid_keys + + def set_override_state(self, *args, **kwargs): + super(ShotgridUrlEnumEntity, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + if not self.valid_keys: + self._current_value = "" + + elif self._current_value not in self.valid_keys: + self._current_value = tuple(self.valid_keys)[0] + + class 
class AnatomyTemplatesEnumEntity(BaseEnumEntity): schema_types = ["anatomy-templates-enum"] diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index 6c07209de3..80b1baad1b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -62,6 +62,10 @@ "type": "schema", "name": "schema_project_ftrack" }, + { + "type": "schema", + "name": "schema_project_shotgrid" + }, { "type": "schema", "name": "schema_project_kitsu" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index e008fd85ee..3f472c6c6a 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -841,6 +841,44 @@ } ] }, + { + "type": "dict", + "key": "IntegrateHierarchyToFtrack", + "label": "Integrate Hierarchy to ftrack", + "is_group": true, + "collapsible": true, + "children": [ + { + "type": "label", + "label": "Set task status on new task creation. Ftrack's default status is used otherwise." + }, + { + "type": "list", + "key": "create_task_status_profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "text", + "key": "status_name", + "label": "Status name" + } + ] + } + } + ] + }, { + "type": "dict", + "collapsible": true, @@ -930,6 +968,21 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackComponentOverwrite", + "label": "IntegrateFtrackComponentOverwrite", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "key": "IntegrateFtrackInstance", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json index 40e98b0333..cb380194a7 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json @@ -57,6 +57,10 @@ "type": "schema", "name": "schema_scriptsmenu" }, + { + "type": "schema", + "name": "schema_maya_render_settings" + }, { "type": "schema", "name": "schema_maya_create" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json new file mode 100644 index 0000000000..4faeca89f3 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json @@ -0,0 +1,98 @@ +{ + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "is_file": true, + "children": [ + { + "type": "number", + "key": "shotgrid_project_id", + "label": "Shotgrid project id" + }, + { + "type": "shotgrid_url-enum", + "key": "shotgrid_server", + "label": "Shotgrid Server" + }, + { + "type": "dict", + "key": "event", + "label": "Event Handler", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, + { + "type": "dict", + "key": 
"fields", + "label": "Fields Template", + "collapsible": true, + "children": [ + { + "type": "dict", + "key": "asset", + "label": "Asset", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "type", + "label": "Asset Type" + } + ] + }, + { + "type": "dict", + "key": "sequence", + "label": "Sequence", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + } + ] + }, + { + "type": "dict", + "key": "shot", + "label": "Shot", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + }, + { + "type": "text", + "key": "sequence_link", + "label": "Sequence link" + } + ] + }, + { + "type": "dict", + "key": "task", + "label": "Task", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "step", + "label": "Step link" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json index 8f0f864dc2..7c61aeed50 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -84,7 +84,197 @@ ] } }, - { + { + "type": "dict", + "collapsible": true, + "key": "editorial_creators", + "label": "Editorial creator plugins", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "editorial_simple", + "label": "Editorial simple creator", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "splitter" + }, + { + "type": "collapsible-wrap", + "label": "Shot metadata creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "clip_name_tokenizer", + "label": "Clip name tokenizer", + "type": "dict-modifiable", + "highlight_content": true, + "tooltip": "Using Regex expression to create tokens. \nThose can be used later in \"Shot rename\" creator \nor \"Shot hierarchy\". \n\nTokens should be decorated with \"_\" on each side", + "object_type": { + "type": "text" + } + }, + { + "type": "dict", + "key": "shot_rename", + "label": "Shot rename", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "shot_rename_template", + "label": "Shot rename template", + "tooltip":"Template only supports Anatomy keys and Tokens \nfrom \"Clip name tokenizer\"" + } + ] + }, + { + "type": "dict", + "key": "shot_hierarchy", + "label": "Shot hierarchy", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "parents_path", + "label": "Parents path template", + "tooltip": "Using keys from \"Token to parent convertor\" or tokens directly" + }, + { + "key": "parents", + "label": "Token to parent convertor", + "type": "list", + "highlight_content": true, + "tooltip": "The left side is key to be used in template. 
\nThe right is value build from Tokens comming from \n\"Clip name tokenizer\"", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "type", + "label": "Parent type", + "enum_items": [ + {"Project": "Project"}, + {"Folder": "Folder"}, + {"Episode": "Episode"}, + {"Sequence": "Sequence"} + ] + }, + { + "type": "text", + "key": "name", + "label": "Parent token name", + "tooltip": "Unique name used in \"Parent path template\"" + }, + { + "type": "text", + "key": "value", + "label": "Parent name value", + "tooltip": "Template where any text, Anatomy keys and Tokens could be used" + } + ] + } + } + ] + }, + { + "key": "shot_add_tasks", + "label": "Add tasks to shot", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "task-types-enum", + "key": "type", + "label": "Task type", + "multiselection": false + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Shot's subset creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "family_presets", + "label": "Family presets", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "family", + "label": "Family", + "enum_items": [ + {"review": "review"}, + {"plate": "plate"}, + {"audio": "audio"} + ] + }, + { + "type": "text", + "key": "variant", + "label": "Variant", + "placeholder": "< Inherited >" + }, + { + "type": "boolean", + "key": "review", + "label": "Review", + "default": true + }, + { + "type": "enum", + "key": "output_file_type", + "label": "Integrating file type", + "enum_items": [ + {".mp4": "MP4"}, + {".mov": "MOV"}, + {".wav": "WAV"} + ] + } + ] + } + } + ] + } + ] + } + ] + }, + { "type": "dict", "collapsible": true, "key": "BatchMovieCreator", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json index 4e197e9fc8..d26b5c1ccf 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json @@ -5,6 +5,11 @@ "label": "Unreal Engine", "is_file": true, "children": [ + { + "type": "boolean", + "key": "level_sequences_for_layouts", + "label": "Generate level sequences when loading layouts" + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json index b76a0fa844..2ef7a05b21 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json @@ -5,6 +5,38 @@ "label": "Web Publisher", "is_file": true, "children": [ + { + "type": "list", + "collapsible": true, + "use_label_wrap": true, + "key": "timeout_profiles", + "label": "Timeout profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum", + "multiselection": true + }, + { + "type": "separator" + }, + { + "type": "number", + "key": "timeout", + "label": "Timeout (sec)" + } + ] + } + }, { "type": "dict", "collapsible": true, diff --git 
a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index b9d0b7daba..e1aa230b49 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -319,6 +319,15 @@ "minimum": 0, "maximum": 100000 }, + { + "type": "label", + "label": "Rescale input when its pixel aspect ratio is not 1. Useful for anamorphic reviews." + }, + { + "key": "scale_pixel_aspect", + "label": "Scale pixel aspect", + "type": "boolean" + }, { "type": "label", "label": "Background color is used only when input have transparency and Alpha is higher than 0." diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 09287a8b50..431add28df 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -29,42 +29,9 @@ } ] }, - { - "type": "dict", - "collapsible": true, - "key": "CreateRender", - "label": "Create Render", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "list", - "key": "defaults", - "label": "Default Subsets", - "object_type": "text" - }, - { - "key": "aov_separator", - "label": "AOV Separator character", - "type": "enum", - "multiselection": false, - "default": "underscore", - "enum_items": [ - {"dash": "- (dash)"}, - {"underscore": "_ (underscore)"}, - {"dot": ". (dot)"} - ] - }, - { - "type": "text", - "key": "default_render_image_folder", - "label": "Default render image folder" - } - ] + { + "type": "schema", + "name": "schema_maya_create_render" }, { "type": "dict", @@ -143,6 +110,57 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CreateAnimation", + "label": "Create Animation", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreatePointCache", + "label": "Create Point Cache", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { "type": "schema_template", "name": "template_create_plugin", @@ -159,10 +177,6 @@ "key": "CreateMultiverseUsdOver", "label": "Create Multiverse USD Override" }, - { - "key": "CreateAnimation", - "label": "Create Animation" - }, { "key": "CreateAss", "label": "Create Ass" @@ -187,10 +201,6 @@ "key": "CreateModel", "label": "Create Model" }, - { - "key": "CreatePointCache", - "label": "Create Cache" - }, { "key": "CreateRenderSetup", "label": "Create Render Setup" diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json new file mode 100644 index 0000000000..68ad7ad63d --- 
/dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json @@ -0,0 +1,20 @@ +{ + "type": "dict", + "collapsible": true, + "key": "CreateRender", + "label": "Create Render", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json new file mode 100644 index 0000000000..af197604f8 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json @@ -0,0 +1,418 @@ +{ + "type": "dict", + "collapsible": true, + "key": "RenderSettings", + "label": "Render Settings", + "children": [ + { + "type": "boolean", + "key": "apply_render_settings", + "label": "Apply Render Settings on creation" + }, + { + "type": "text", + "key": "default_render_image_folder", + "label": "Default render image folder" + }, + { + "key": "aov_separator", + "label": "AOV Separator character", + "type": "enum", + "multiselection": false, + "default": "underscore", + "enum_items": [ + {"dash": "- (dash)"}, + {"underscore": "_ (underscore)"}, + {"dot": ". (dot)"} + ] + }, + { + "key": "reset_current_frame", + "label": "Reset Current Frame", + "type": "boolean" + }, + { + "type": "dict", + "collapsible": true, + "key": "arnold_renderer", + "label": "Arnold Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"jpeg": "jpeg"}, + {"png": "png"}, + {"deepexr": "deep exr"}, + {"tif": "tif"}, + {"exr": "exr"}, + {"maya": "maya"}, + {"mtoa_shaders": "mtoa_shaders"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "tiled", + "label": "Tiled (tif, exr)", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"ID": "ID"}, + {"N": "N"}, + {"P": "P"}, + {"Pref": "Pref"}, + {"RGBA": "RGBA"}, + {"Z": "Z"}, + {"albedo": "albedo"}, + {"background": "background"}, + {"coat": "coat"}, + {"coat_albedo": "coat_albedo"}, + {"coat_direct": "coat_direct"}, + {"coat_indirect": "coat_indirect"}, + {"cputime": "cputime"}, + {"crypto_asset": "crypto_asset"}, + {"crypto_material": "cypto_material"}, + {"crypto_object": "crypto_object"}, + {"diffuse": "diffuse"}, + {"diffuse_albedo": "diffuse_albedo"}, + {"diffuse_direct": "diffuse_direct"}, + {"diffuse_indirect": "diffuse_indirect"}, + {"direct": "direct"}, + {"emission": "emission"}, + {"highlight": "highlight"}, + {"indirect": "indirect"}, + {"motionvector": "motionvector"}, + {"opacity": "opacity"}, + {"raycount": "raycount"}, + {"rim_light": "rim_light"}, + {"shadow": "shadow"}, + {"shadow_diff": "shadow_diff"}, + {"shadow_mask": "shadow_mask"}, + {"shadow_matte": "shadow_matte"}, + {"sheen": "sheen"}, + {"sheen_albedo": "sheen_albedo"}, + {"sheen_direct": "sheen_direct"}, + {"sheen_indirect": "sheen_indirect"}, + {"specular": "specular"}, + {"specular_albedo": "specular_albedo"}, + {"specular_direct": 
"specular_direct"}, + {"specular_indirect": "specular_indirect"}, + {"sss": "sss"}, + {"sss_albedo": "sss_albedo"}, + {"sss_direct": "sss_direct"}, + {"sss_indirect": "sss_indirect"}, + {"transmission": "transmission"}, + {"transmission_albedo": "transmission_albedo"}, + {"transmission_direct": "transmission_direct"}, + {"transmission_indirect": "transmission_indirect"}, + {"volume": "volume"}, + {"volume_Z": "volume_Z"}, + {"volume_albedo": "volume_albedo"}, + {"volume_direct": "volume_direct"}, + {"volume_indirect": "volume_indirect"}, + {"volume_opacity": "volume_opacity"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like AASamples" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "vray_renderer", + "label": "V-Ray Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "engine", + "label": "Production Engine", + "type": "enum", + "multiselection": false, + "defaults": "1", + "enum_items": [ + {"1": "V-Ray"}, + {"2": "V-Ray GPU"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"png": "png"}, + {"jpg": "jpg"}, + {"vrimg": "vrimg"}, + {"hdr": "hdr"}, + {"exr": "exr"}, + {"exr (multichannel)": "exr (multichannel)"}, + {"exr (deep)": "exr (deep)"}, + {"tga": "tga"}, + {"bmp": "bmp"}, + {"sgi": "sgi"} + ] + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"atmosphereChannel": "atmosphere"}, + {"backgroundChannel": "background"}, + {"bumpNormalsChannel": "bumpnormals"}, + {"causticsChannel": "caustics"}, + {"coatFilterChannel": "coat_filter"}, + {"coatGlossinessChannel": "coatGloss"}, + {"coatReflectionChannel": "coat_reflection"}, + {"vrayCoatChannel": "coat_specular"}, + {"CoverageChannel": "coverage"}, + {"cryptomatteChannel": "cryptomatte"}, + {"customColor": "custom_color"}, + {"drBucketChannel": "DR"}, + {"denoiserChannel": "denoiser"}, + {"diffuseChannel": "diffuse"}, + {"ExtraTexElement": "extraTex"}, + {"giChannel": "GI"}, + {"LightMixElement": "None"}, + {"lightingChannel": "lighting"}, + {"LightingAnalysisChannel": "LightingAnalysis"}, + {"materialIDChannel": "materialID"}, + {"MaterialSelectElement": "materialSelect"}, + {"matteShadowChannel": "matteShadow"}, + {"MultiMatteElement": "multimatte"}, + {"multimatteIDChannel": "multimatteID"}, + {"normalsChannel": "normals"}, + {"nodeIDChannel": "objectId"}, + {"objectSelectChannel": "objectSelect"}, + {"rawCoatFilterChannel": "raw_coat_filter"}, + {"rawCoatReflectionChannel": "raw_coat_reflection"}, + {"rawDiffuseFilterChannel": "rawDiffuseFilter"}, + {"rawGiChannel": "rawGI"}, + {"rawLightChannel": "rawLight"}, + {"rawReflectionChannel": "rawReflection"}, + {"rawReflectionFilterChannel": "rawReflectionFilter"}, + {"rawRefractionChannel": "rawRefraction"}, + {"rawRefractionFilterChannel": "rawRefractionFilter"}, + {"rawShadowChannel": "rawShadow"}, + {"rawSheenFilterChannel": "raw_sheen_filter"}, + {"rawSheenReflectionChannel": "raw_sheen_reflection"}, + {"rawTotalLightChannel": "rawTotalLight"}, + {"reflectIORChannel": "reflIOR"}, + {"reflectChannel": 
"reflect"}, + {"reflectionFilterChannel": "reflectionFilter"}, + {"reflectGlossinessChannel": "reflGloss"}, + {"refractChannel": "refract"}, + {"refractionFilterChannel": "refractionFilter"}, + {"refractGlossinessChannel": "refrGloss"}, + {"renderIDChannel": "renderId"}, + {"FastSSS2Channel": "SSS"}, + {"sampleRateChannel": "sampleRate"}, + {"samplerInfo": "samplerInfo"}, + {"selfIllumChannel": "selfIllum"}, + {"shadowChannel": "shadow"}, + {"sheenFilterChannel": "sheen_filter"}, + {"sheenGlossinessChannel": "sheenGloss"}, + {"sheenReflectionChannel": "sheen_reflection"}, + {"vraySheenChannel": "sheen_specular"}, + {"specularChannel": "specular"}, + {"Toon": "Toon"}, + {"toonLightingChannel": "toonLighting"}, + {"toonSpecularChannel": "toonSpecular"}, + {"totalLightChannel": "totalLight"}, + {"unclampedColorChannel": "unclampedColor"}, + {"VRScansPaintMaskChannel": "VRScansPaintMask"}, + {"VRScansZoneMaskChannel": "VRScansZoneMask"}, + {"velocityChannel": "velocity"}, + {"zdepthChannel": "zDepth"}, + {"LightSelectElement": "lightselect"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like aaFilterSize" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "redshift_renderer", + "label": "Redshift Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "primary_gi_engine", + "label": "Primary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "secondary_gi_engine", + "label": "Secondary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"iff": "Maya IFF"}, + {"exr": "OpenEXR"}, + {"tif": "TIFF"}, + {"png": "PNG"}, + {"tga": "Targa"}, + {"jpg": "JPEG"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "force_combine", + "label": "Force combine beauty and AOVs", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< none >"}, + {"AO": "Ambient Occlusion"}, + {"Background": "Background"}, + {"Beauty": "Beauty"}, + {"BumpNormals": "Bump Normals"}, + {"Caustics": "Caustics"}, + {"CausticsRaw": "Caustics Raw"}, + {"Cryptomatte": "Cryptomatte"}, + {"Custom": "Custom"}, + {"Z": "Depth"}, + {"DiffuseFilter": "Diffuse Filter"}, + {"DiffuseLighting": "Diffuse Lighting"}, + {"DiffuseLightingRaw": "Diffuse Lighting Raw"}, + {"Emission": "Emission"}, + {"GI": "Global Illumination"}, + {"GIRaw": "Global Illumination Raw"}, + {"Matte": "Matte"}, + {"MotionVectors": "Ambient Occlusion"}, + {"N": "Normals"}, + {"ID": "ObjectID"}, + {"ObjectBumpNormal": "Object-Space Bump Normals"}, + {"ObjectPosition": "Object-Space Positions"}, + {"PuzzleMatte": "Puzzle Matte"}, + {"Reflections": "Reflections"}, + {"ReflectionsFilter": "Reflections 
Filter"}, + {"ReflectionsRaw": "Reflections Raw"}, + {"Refractions": "Refractions"}, + {"RefractionsFilter": "Refractions Filter"}, + {"RefractionsRaw": "Refractions Filter"}, + {"Shadows": "Shadows"}, + {"SpecularLighting": "Specular Lighting"}, + {"SSS": "Sub Surface Scatter"}, + {"SSSRaw": "Sub Surface Scatter Raw"}, + {"TotalDiffuseLightingRaw": "Total Diffuse Lighting Raw"}, + {"TotalTransLightingRaw": "Total Translucency Filter"}, + {"TransTint": "Translucency Filter"}, + {"TransGIRaw": "Translucency Lighting Raw"}, + {"VolumeFogEmission": "Volume Fog Emission"}, + {"VolumeFogTint": "Volume Fog Tint"}, + {"VolumeLighting": "Volume Lighting"}, + {"P": "World Position"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like reflectionMaxTraceDepth" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json index 484fbf9d07..a4b28f47bc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json @@ -13,6 +13,9 @@ { "ftrackreview": "Add review to Ftrack" }, + { + "shotgridreview": "Add review to Shotgrid" + }, { "delete": "Delete output" }, diff --git a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json index 654ddf2938..7c5774415c 100644 --- a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json +++ b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json @@ -50,8 +50,15 @@ "is_group": true, "children": [ { - "type": "label", - "label": "Intent" + "type": "boolean", + "key": "allow_empty_intent", + "label": "Allow empty intent" + }, + { + "type": "text", + "key": "empty_intent_label", + "label": "Empty item label", + "placeholder": "< Not set >" }, { "type": "dict-modifiable", @@ -64,7 +71,8 @@ { "key": "default", "type": "text", - "label": "Default Intent" + "label": "Default Intent", + "placeholder": "< First available >" }, { "type": "separator" diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json index d22b9016a7..952b38040c 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_modules.json +++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json @@ -48,6 +48,60 @@ "type": "schema", "name": "schema_kitsu" }, + { + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "leecher_manager_url", + "label": "Shotgrid Leecher Manager URL" + }, + { + "type": "text", + "key": "leecher_backend_url", + "label": "Shotgrid Leecher Backend URL" + }, + { + "type": "boolean", + "key": "filter_projects_by_login", + "label": "Filter projects by SG login" + }, + { + "type": "dict-modifiable", + "key": "shotgrid_settings", + "label": "Shotgrid Servers", + "object_type": { + "type": "dict", + 
"children": [ + { + "key": "shotgrid_url", + "label": "Server URL", + "type": "text" + }, + { + "key": "shotgrid_script_name", + "label": "Script Name", + "type": "text" + }, + { + "key": "shotgrid_script_key", + "label": "Script api key", + "type": "text" + } + ] + } + } + ] + }, { "type": "dict", "key": "timers_manager", diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index a5174bd804..3ce44ea6c8 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -272,15 +272,15 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): # update availability on active site when version changes if self.sync_server.enabled and version_doc: - query = self._repre_per_version_pipeline( + repre_info = self.sync_server.get_repre_info_for_versions( + project_name, [version_doc["_id"]], self.active_site, self.remote_site ) - docs = list(self.dbcon.aggregate(query)) - if docs: - repre = docs.pop() - version_doc["data"].update(self._get_repre_dict(repre)) + if repre_info: + version_doc["data"].update( + self._get_repre_dict(repre_info[0])) self.set_version(index, version_doc) @@ -478,16 +478,16 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): for _subset_id, doc in last_versions_by_subset_id.items(): version_ids.add(doc["_id"]) - query = self._repre_per_version_pipeline( + repres = self.sync_server.get_repre_info_for_versions( + project_name, list(version_ids), self.active_site, self.remote_site ) - - for doc in self.dbcon.aggregate(query): + for repre in repres: if self._doc_fetching_stop: return doc["active_provider"] = self.active_provider doc["remote_provider"] = self.remote_provider - repre_info[doc["_id"]] = doc + repre_info[repre["_id"]] = repre self._doc_payload = { "asset_docs_by_id": asset_docs_by_id, @@ -827,83 +827,6 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): return data - def _repre_per_version_pipeline(self, version_ids, - active_site, remote_site): - query = [ - {"$match": {"parent": {"$in": version_ids}, - "type": "representation", - "files.sites.name": {"$exists": 1}}}, - {"$unwind": "$files"}, - {'$addFields': { - 'order_local': { - '$filter': { - 'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', active_site]} - } - } - }}, - {'$addFields': { - 'order_remote': { - '$filter': { - 'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', remote_site]} - } - } - }}, - {'$addFields': { - 'progress_local': {"$arrayElemAt": [{ - '$cond': [ - {'$size': "$order_local.progress"}, - "$order_local.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_local.created_dt"}, - [1], - [0] - ]} - ]}, - 0 - ]} - }}, - {'$addFields': { - 'progress_remote': {"$arrayElemAt": [{ - '$cond': [ - {'$size': "$order_remote.progress"}, - "$order_remote.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_remote.created_dt"}, - [1], - [0] - ]} - ]}, - 0 - ]} - }}, - {'$group': { # first group by repre - '_id': '$_id', - 'parent': {'$first': '$parent'}, - 'avail_ratio_local': { - '$first': { - '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] - } - }, - 'avail_ratio_remote': { - '$first': { - '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] - } - } - }}, - {'$group': { # second group by parent, eg version_id - '_id': '$parent', - 'repre_count': {'$sum': 1}, # total representations - # fully available representation for site - 'avail_repre_local': {'$sum': "$avail_ratio_local"}, - 'avail_repre_remote': {'$sum': 
"$avail_ratio_remote"}, - }}, - ] - return query - class GroupMemberFilterProxyModel(QtCore.QSortFilterProxyModel): """Provide the feature of filtering group by the acceptance of members diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index 13e18b3757..2d8b4b048d 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -434,7 +434,8 @@ class SubsetWidget(QtWidgets.QWidget): # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. - available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) if self.tool_name: available_loaders = lib.remove_tool_name_from_loaders( available_loaders, self.tool_name @@ -566,12 +567,12 @@ class SubsetWidget(QtWidgets.QWidget): # Trigger project_name = self.dbcon.active_project() - subset_names_by_version_id = collections.defaultdict(set) + subset_name_by_version_id = dict() for item in items: version_id = item["version_document"]["_id"] - subset_names_by_version_id[version_id].add(item["subset"]) + subset_name_by_version_id[version_id] = item["subset"] - version_ids = set(subset_names_by_version_id.keys()) + version_ids = set(subset_name_by_version_id.keys()) repre_docs = get_representations( project_name, representation_names=[representation_name], @@ -583,14 +584,15 @@ class SubsetWidget(QtWidgets.QWidget): for repre_doc in repre_docs: repre_ids.append(repre_doc["_id"]) + # keep only version ids without representation with that name version_id = repre_doc["parent"] - if version_id not in version_ids: - version_ids.remove(version_id) + version_ids.discard(version_id) - for version_id in version_ids: + if version_ids: + # report versions that didn't have valid representation joined_subset_names = ", ".join([ - '"{}"'.format(subset) - for subset in subset_names_by_version_id[version_id] + '"{}"'.format(subset_name_by_version_id[version_id]) + for version_id in version_ids ]) self.echo("Subsets {} don't have representation '{}'".format( joined_subset_names, representation_name @@ -1330,7 +1332,8 @@ class RepresentationWidget(QtWidgets.QWidget): selected_side = self._get_selected_side(point_index, rows) # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. 
- available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) filtered_loaders = [] for loader in available_loaders: diff --git a/openpype/tools/pyblish_pype/model.py b/openpype/tools/pyblish_pype/model.py index 2931a379b3..31aa63677e 100644 --- a/openpype/tools/pyblish_pype/model.py +++ b/openpype/tools/pyblish_pype/model.py @@ -86,7 +86,7 @@ class IntentModel(QtGui.QStandardItemModel): First and default value is {"< Not Set >": None} """ - default_item = {"< Not Set >": None} + default_empty_label = "< Not set >" def __init__(self, parent=None): super(IntentModel, self).__init__(parent) @@ -102,27 +102,39 @@ class IntentModel(QtGui.QStandardItemModel): self._item_count = 0 self.default_index = 0 - intents_preset = ( + intent_settings = ( get_system_settings() .get("modules", {}) .get("ftrack", {}) .get("intent", {}) ) - default = intents_preset.get("default") - items = intents_preset.get("items", {}) + items = intent_settings.get("items", {}) if not items: return - for idx, item_value in enumerate(items.keys()): + allow_empty_intent = intent_settings.get("allow_empty_intent", True) + empty_intent_label = ( + intent_settings.get("empty_intent_label") + or self.default_empty_label + ) + listed_items = list(items.items()) + if allow_empty_intent: + listed_items.insert(0, ("", empty_intent_label)) + + default = intent_settings.get("default") + + for idx, item in enumerate(listed_items): + item_value = item[0] if item_value == default: self.default_index = idx break - self.add_items(items) + self._add_items(listed_items) - def add_items(self, items): - for value, label in items.items(): + def _add_items(self, items): + for item in items: + value, label = item new_item = QtGui.QStandardItem() new_item.setData(label, QtCore.Qt.DisplayRole) new_item.setData(value, Roles.IntentItemValue) diff --git a/openpype/tools/pyblish_pype/window.py b/openpype/tools/pyblish_pype/window.py index 78590259bc..e167405325 100644 --- a/openpype/tools/pyblish_pype/window.py +++ b/openpype/tools/pyblish_pype/window.py @@ -523,6 +523,7 @@ class Window(QtWidgets.QDialog): instance_item.setData(enable_value, Roles.IsEnabledRole) def _add_intent_to_context(self): + context_value = None if ( self.intent_model.has_items and "intent" not in self.controller.context.data @@ -530,11 +531,17 @@ class Window(QtWidgets.QDialog): idx = self.intent_model.index(self.intent_box.currentIndex(), 0) intent_value = self.intent_model.data(idx, Roles.IntentItemValue) intent_label = self.intent_model.data(idx, QtCore.Qt.DisplayRole) + if intent_value: + context_value = { + "value": intent_value, + "label": intent_label + } - self.controller.context.data["intent"] = { - "value": intent_value, - "label": intent_label - } + # Unset intent if is set to empty value + if context_value is None: + self.controller.context.data.pop("intent", None) + else: + self.controller.context.data["intent"] = context_value def on_instance_toggle(self, index, state=None): """An item is requesting to be toggled""" diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py index 63d181b2d6..e0e43aaba7 100644 --- a/openpype/tools/sceneinventory/view.py +++ b/openpype/tools/sceneinventory/view.py @@ -551,16 +551,16 @@ class SceneInventoryView(QtWidgets.QTreeView): "toggle": selection_model.Toggle, }[options.get("mode", "select")] - for item in iter_model_rows(model, 0): - item = item.data(InventoryModel.ItemRole) + for index in 
iter_model_rows(model, 0): + item = index.data(InventoryModel.ItemRole) if item.get("isGroupNode"): continue name = item.get("objectName") if name in object_names: - self.scrollTo(item) # Ensure item is visible + self.scrollTo(index) # Ensure item is visible flags = select_mode | selection_model.Rows - selection_model.select(item, flags) + selection_model.select(index, flags) object_names.remove(name) diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py index 34692b7102..a4109c511e 100644 --- a/openpype/tools/workfiles/files_widget.py +++ b/openpype/tools/workfiles/files_widget.py @@ -12,7 +12,6 @@ from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils.delegates import PrettyTimeDelegate from openpype.lib import ( emit_event, - get_workfile_template_key, create_workdir_extra_folders, ) from openpype.lib.avalon_context import ( @@ -24,6 +23,8 @@ from openpype.pipeline import ( legacy_io, Anatomy, ) +from openpype.pipeline.workfile import get_workfile_template_key + from .model import ( WorkAreaFilesModel, PublishFilesModel, diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py index b62fd2c889..cded4eb1a5 100644 --- a/openpype/tools/workfiles/save_as_dialog.py +++ b/openpype/tools/workfiles/save_as_dialog.py @@ -5,18 +5,12 @@ import logging from Qt import QtWidgets, QtCore -from openpype.client import ( - get_project, - get_asset_by_name, -) -from openpype.lib import ( - get_last_workfile_with_version, - get_workdir_data, -) from openpype.pipeline import ( registered_host, legacy_io, ) +from openpype.pipeline.workfile import get_last_workfile_with_version +from openpype.pipeline.template_data import get_template_data_with_names from openpype.tools.utils import PlaceholderLineEdit log = logging.getLogger(__name__) @@ -30,16 +24,10 @@ def build_workfile_data(session): asset_name = session["AVALON_ASSET"] task_name = session["AVALON_TASK"] host_name = session["AVALON_APP"] - project_doc = get_project( - project_name, fields=["name", "data.code", "config.tasks"] - ) - asset_doc = get_asset_by_name( - project_name, - asset_name, - fields=["name", "data.tasks", "data.parents"] - ) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data = get_template_data_with_names( + project_name, asset_name, task_name, host_name + ) data.update({ "version": 1, "comment": "", diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py index 0b0d67e589..de42b80d64 100644 --- a/openpype/tools/workfiles/window.py +++ b/openpype/tools/workfiles/window.py @@ -1,18 +1,20 @@ import os import datetime +import copy from Qt import QtCore, QtWidgets, QtGui from openpype.client import ( - get_asset_by_id, get_asset_by_name, get_workfile_info, ) +from openpype.client.operations import ( + OperationsSession, + new_workfile_info_doc, + prepare_workfile_info_update_data, +) from openpype import style from openpype import resources -from openpype.lib import ( - create_workfile_doc, - save_workfile_data_to_doc, -) +from openpype.pipeline import Anatomy from openpype.pipeline import legacy_io from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.tasks_widget import TasksWidget @@ -324,10 +326,23 @@ class Window(QtWidgets.QWidget): workfile_doc, data = self.side_panel.get_workfile_data() if not workfile_doc: filepath = self.files_widget._get_selected_filepath() - self._create_workfile_doc(filepath, force=True) - workfile_doc = 
self._get_current_workfile_doc() + workfile_doc = self._create_workfile_doc(filepath) - save_workfile_data_to_doc(workfile_doc, data, legacy_io) + new_workfile_doc = copy.deepcopy(workfile_doc) + new_workfile_doc["data"] = data + update_data = prepare_workfile_info_update_data( + workfile_doc, new_workfile_doc + ) + if not update_data: + return + + project_name = legacy_io.active_project() + + session = OperationsSession() + session.update_entity( + project_name, "workfile", workfile_doc["_id"], update_data + ) + session.commit() def _get_current_workfile_doc(self, filepath=None): if filepath is None: @@ -343,20 +358,32 @@ class Window(QtWidgets.QWidget): project_name, asset_id, task_name, filename ) - def _create_workfile_doc(self, filepath, force=False): - workfile_doc = None - if not force: - workfile_doc = self._get_current_workfile_doc(filepath) + def _create_workfile_doc(self, filepath): + workfile_doc = self._get_current_workfile_doc(filepath) + if workfile_doc: + return workfile_doc - if not workfile_doc: - workdir, filename = os.path.split(filepath) - asset_id = self.assets_widget.get_selected_asset_id() - project_name = legacy_io.active_project() - asset_doc = get_asset_by_id(project_name, asset_id) - task_name = self.tasks_widget.get_selected_task_name() - create_workfile_doc( - asset_doc, task_name, filename, workdir, legacy_io - ) + workdir, filename = os.path.split(filepath) + + project_name = legacy_io.active_project() + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + + anatomy = Anatomy(project_name) + success, rootless_dir = anatomy.find_root_template_from_path(workdir) + filepath = "/".join([ + os.path.normpath(rootless_dir).replace("\\", "/"), + filename + ]) + + workfile_doc = new_workfile_info_doc( + filename, asset_id, task_name, [filepath] + ) + + session = OperationsSession() + session.create_entity(project_name, "workfile", workfile_doc) + session.commit() + return workfile_doc def refresh(self): # Refresh asset widget diff --git a/openpype/version.py b/openpype/version.py index 5c39e9e630..9ae52e8370 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.12.2" +__version__ = "3.13.1-nightly.3" diff --git a/poetry.lock b/poetry.lock index 7221e191ff..919a352505 100644 --- a/poetry.lock +++ b/poetry.lock @@ -92,7 +92,14 @@ version = "1.4.4" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "main" optional = false -python-versions = "*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +develop = false + +[package.source] +type = "git" +url = "https://github.com/ActiveState/appdirs.git" +reference = "master" +resolved_reference = "193a2cbba58cce2542882fcedd0e49f6763672ed" [[package]] name = "arrow" @@ -221,7 +228,7 @@ python-versions = "~=3.7" [[package]] name = "certifi" -version = "2022.5.18.1" +version = "2022.6.15" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false @@ -456,19 +463,20 @@ python-versions = ">=3.7" [[package]] name = "ftrack-python-api" -version = "2.0.0" +version = "2.3.3" description = "Python API for ftrack." 
category = "main" optional = false -python-versions = ">=2.7.9, <4.0" +python-versions = ">=2.7.9, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, < 3.10" [package.dependencies] +appdirs = ">=1,<2" arrow = ">=0.4.4,<1" -clique = ">=1.2.0,<2" +clique = "1.6.1" future = ">=0.16.0,<1" pyparsing = ">=2.0,<3" requests = ">=2,<3" -six = ">=1,<2" +six = ">=1.13.0,<2" termcolor = ">=1.1.0,<2" websocket-client = ">=0.40.0,<1" @@ -1375,6 +1383,21 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +[[package]] +name = "shotgun-api3" +version = "3.3.3" +description = "Shotgun Python API" +category = "main" +optional = false +python-versions = "*" +develop = false + +[package.source] +type = "git" +url = "https://github.com/shotgunsoftware/python-api.git" +reference = "v3.3.3" +resolved_reference = "b9f066c0edbea6e0733242e18f32f75489064840" + [[package]] name = "six" version = "1.16.0" @@ -1812,10 +1835,7 @@ ansicon = [ {file = "ansicon-1.89.0-py2.py3-none-any.whl", hash = "sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec"}, {file = "ansicon-1.89.0.tar.gz", hash = "sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1"}, ] -appdirs = [ - {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, - {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, -] +appdirs = [] arrow = [ {file = "arrow-0.17.0-py2.py3-none-any.whl", hash = "sha256:e098abbd9af3665aea81bdd6c869e93af4feb078e98468dd351c383af187aac5"}, {file = "arrow-0.17.0.tar.gz", hash = "sha256:ff08d10cda1d36c68657d6ad20d74fbea493d980f8b2d45344e00d6ed2bf6ed4"}, @@ -1870,8 +1890,8 @@ cachetools = [ {file = "cachetools-5.2.0.tar.gz", hash = "sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757"}, ] certifi = [ - {file = "certifi-2022.5.18.1-py3-none-any.whl", hash = "sha256:f1d53542ee8cbedbe2118b5686372fb33c297fcd6379b050cca0ef13a597382a"}, - {file = "certifi-2022.5.18.1.tar.gz", hash = "sha256:9c5705e395cd70084351dd8ad5c41e65655e08ce46f2ec9cf6c2c08390f71eb7"}, + {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, + {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"}, ] cffi = [ {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, @@ -2137,10 +2157,7 @@ frozenlist = [ {file = "frozenlist-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:772965f773757a6026dea111a15e6e2678fbd6216180f82a48a40b27de1ee2ab"}, {file = "frozenlist-1.3.0.tar.gz", hash = "sha256:ce6f2ba0edb7b0c1d8976565298ad2deba6f8064d2bebb6ffce2ca896eb35b0b"}, ] -ftrack-python-api = [ - {file = "ftrack-python-api-2.0.0.tar.gz", hash = "sha256:dd6f02c31daf5a10078196dc9eac4671e4297c762fbbf4df98de668ac12281d9"}, - {file = "ftrack_python_api-2.0.0-py2.py3-none-any.whl", hash = "sha256:d0df0f2df4b53947272f95e179ec98b477ee425bf4217b37bb59030ad989771e"}, -] +ftrack-python-api = [] future = [ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, ] @@ -2820,6 +2837,7 @@ semver = [ {file = "semver-2.13.0-py2.py3-none-any.whl", hash = "sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4"}, {file = "semver-2.13.0.tar.gz", hash = 
"sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f"}, ] +shotgun-api3 = [] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, diff --git a/pyproject.toml b/pyproject.toml index 175e72be24..83ccf233d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.12.2" # OpenPype +version = "3.13.1-nightly.3" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" @@ -33,13 +33,14 @@ aiohttp = "^3.7" aiohttp_json_rpc = "*" # TVPaint server acre = { git = "https://github.com/pypeclub/acre.git" } opentimelineio = { version = "0.14.0.dev1", source = "openpype" } -appdirs = "^1.4.3" +appdirs = { git = "https://github.com/ActiveState/appdirs.git", branch = "master" } blessed = "^1.17" # openpype terminal formatting coolname = "*" clique = "1.6.*" Click = "^7" dnspython = "^2.1.0" -ftrack-python-api = "2.0.*" +ftrack-python-api = "^2.3.3" +shotgun_api3 = {git = "https://github.com/shotgunsoftware/python-api.git", rev = "v3.3.3"} gazu = "^0.8.28" google-api-python-client = "^1.12.8" # sync server google support (should be separate?) jsonschema = "^2.6.0" diff --git a/setup.py b/setup.py index 8b5a545c16..eab0187983 100644 --- a/setup.py +++ b/setup.py @@ -152,7 +152,7 @@ build_exe_options = dict( ) bdist_mac_options = dict( - bundle_name="OpenPype", + bundle_name=f"OpenPype {__version__}", iconfile=mac_icon_path ) diff --git a/start.py b/start.py index ace33ab92a..5cdffafb6e 100644 --- a/start.py +++ b/start.py @@ -103,6 +103,9 @@ import site import distutils.spawn from pathlib import Path + +silent_mode = False + # OPENPYPE_ROOT is variable pointing to build (or code) directory # WARNING `OPENPYPE_ROOT` must be defined before igniter import # - igniter changes cwd which cause that filepath of this script won't lead @@ -138,40 +141,44 @@ if sys.__stdout__: term = blessed.Terminal() def _print(message: str): + if silent_mode: + return if message.startswith("!!! "): - print("{}{}".format(term.orangered2("!!! "), message[4:])) + print(f'{term.orangered2("!!! ")}{message[4:]}') return if message.startswith(">>> "): - print("{}{}".format(term.aquamarine3(">>> "), message[4:])) + print(f'{term.aquamarine3(">>> ")}{message[4:]}') return if message.startswith("--- "): - print("{}{}".format(term.darkolivegreen3("--- "), message[4:])) + print(f'{term.darkolivegreen3("--- ")}{message[4:]}') return if message.startswith("*** "): - print("{}{}".format(term.gold("*** "), message[4:])) + print(f'{term.gold("*** ")}{message[4:]}') return if message.startswith(" - "): - print("{}{}".format(term.wheat(" - "), message[4:])) + print(f'{term.wheat(" - ")}{message[4:]}') return if message.startswith(" . "): - print("{}{}".format(term.tan(" . "), message[4:])) + print(f'{term.tan(" . ")}{message[4:]}') return if message.startswith(" - "): - print("{}{}".format(term.seagreen3(" - "), message[7:])) + print(f'{term.seagreen3(" - ")}{message[7:]}') return if message.startswith(" ! "): - print("{}{}".format(term.goldenrod(" ! "), message[7:])) + print(f'{term.goldenrod(" ! 
")}{message[7:]}') return if message.startswith(" * "): - print("{}{}".format(term.aquamarine1(" * "), message[7:])) + print(f'{term.aquamarine1(" * ")}{message[7:]}') return if message.startswith(" "): - print("{}{}".format(term.darkseagreen3(" "), message[4:])) + print(f'{term.darkseagreen3(" ")}{message[4:]}') return print(message) else: def _print(message: str): + if silent_mode: + return print(message) @@ -187,9 +194,8 @@ else: if "--headless" in sys.argv: os.environ["OPENPYPE_HEADLESS_MODE"] = "1" sys.argv.remove("--headless") -else: - if os.getenv("OPENPYPE_HEADLESS_MODE") != "1": - os.environ.pop("OPENPYPE_HEADLESS_MODE", None) +elif os.getenv("OPENPYPE_HEADLESS_MODE") != "1": + os.environ.pop("OPENPYPE_HEADLESS_MODE", None) # Enabled logging debug mode when "--debug" is passed if "--verbose" in sys.argv: @@ -203,8 +209,8 @@ if "--verbose" in sys.argv: value = sys.argv.pop(idx) else: raise RuntimeError(( - "Expect value after \"--verbose\" argument. {}" - ).format(expected_values)) + f"Expect value after \"--verbose\" argument. {expected_values}" + )) log_level = None low_value = value.lower() @@ -225,8 +231,9 @@ if "--verbose" in sys.argv: if log_level is None: raise RuntimeError(( - "Unexpected value after \"--verbose\" argument \"{}\". {}" - ).format(value, expected_values)) + "Unexpected value after \"--verbose\" " + f"argument \"{value}\". {expected_values}" + )) os.environ["OPENPYPE_LOG_LEVEL"] = str(log_level) @@ -242,13 +249,14 @@ from igniter.tools import ( get_openpype_global_settings, get_openpype_path_from_settings, validate_mongo_connection, - OpenPypeVersionNotFound + OpenPypeVersionNotFound, + OpenPypeVersionIncompatible ) # noqa from igniter.bootstrap_repos import OpenPypeVersion # noqa: E402 bootstrap = BootstrapRepos() silent_commands = {"run", "igniter", "standalonepublisher", - "extractenvironments"} + "extractenvironments", "version"} def list_versions(openpype_versions: list, local_version=None) -> None: @@ -270,8 +278,11 @@ def set_openpype_global_environments() -> None: general_env = get_general_environments() + # first resolve general environment because merge doesn't expect + # values to be list. 
+ # TODO: switch to OpenPype environment functions merged_env = acre.merge( - acre.parse(general_env), + acre.compute(acre.parse(general_env), cleanup=False), dict(os.environ) ) env = acre.compute( @@ -333,34 +344,33 @@ def run_disk_mapping_commands(settings): destination = destination.rstrip('/') source = source.rstrip('/') - if low_platform == "windows": - args = ["subst", destination, source] - elif low_platform == "darwin": - scr = "do shell script \"ln -s {} {}\" with administrator privileges".format(source, destination) # noqa: E501 + if low_platform == "darwin": + scr = f'do shell script "ln -s {source} {destination}" with administrator privileges' # noqa + args = ["osascript", "-e", scr] + elif low_platform == "windows": + args = ["subst", destination, source] else: args = ["sudo", "ln", "-s", source, destination] - _print("disk mapping args:: {}".format(args)) + _print(f"*** disk mapping arguments: {args}") try: if not os.path.exists(destination): output = subprocess.Popen(args) if output.returncode and output.returncode != 0: - exc_msg = "Executing was not successful: \"{}\"".format( - args) + exc_msg = f'Executing was not successful: "{args}"' raise RuntimeError(exc_msg) except TypeError as exc: - _print("Error {} in mapping drive {}, {}".format(str(exc), - source, - destination)) + _print( + f"Error {str(exc)} in mapping drive {source}, {destination}") raise def set_avalon_environments(): """Set avalon specific environments. - These are non modifiable environments for avalon workflow that must be set + These are non-modifiable environments for avalon workflow that must be set before avalon module is imported because avalon works with globals set with environment variables. """ @@ -505,7 +515,7 @@ def _process_arguments() -> tuple: ) if m and m.group('version'): use_version = m.group('version') - _print(">>> Requested version [ {} ]".format(use_version)) + _print(f">>> Requested version [ {use_version} ]") if "+staging" in use_version: use_staging = True break @@ -611,8 +621,8 @@ def _determine_mongodb() -> str: try: openpype_mongo = bootstrap.secure_registry.get_item( "openPypeMongo") - except ValueError: - raise RuntimeError("Missing MongoDB url") + except ValueError as e: + raise RuntimeError("Missing MongoDB url") from e return openpype_mongo @@ -684,40 +694,47 @@ def _find_frozen_openpype(use_version: str = None, # Specific version is defined if use_version.lower() == "latest": # Version says to use latest version - _print("Finding latest version defined by use version") + _print(">>> Finding latest version defined by use version") openpype_version = bootstrap.find_latest_openpype_version( - use_staging + use_staging, compatible_with=installed_version ) else: - _print("Finding specified version \"{}\"".format(use_version)) + _print(f">>> Finding specified version \"{use_version}\"") openpype_version = bootstrap.find_openpype_version( use_version, use_staging ) if openpype_version is None: raise OpenPypeVersionNotFound( - "Requested version \"{}\" was not found.".format( - use_version - ) + f"Requested version \"{use_version}\" was not found." 
            )
+        if not openpype_version.is_compatible(installed_version):
+            raise OpenPypeVersionIncompatible((
+                f"Requested version \"{use_version}\" is not compatible "
+                f"with installed version \"{installed_version}\""
+            ))
+
     elif studio_version is not None:
         # Studio has defined a version to use
-        _print("Finding studio version \"{}\"".format(studio_version))
+        _print(f">>> Finding studio version \"{studio_version}\"")
         openpype_version = bootstrap.find_openpype_version(
-            studio_version, use_staging
+            studio_version, use_staging, compatible_with=installed_version
         )
         if openpype_version is None:
             raise OpenPypeVersionNotFound((
-                "Requested OpenPype version \"{}\" defined by settings"
+                "Requested OpenPype version "
+                f"\"{studio_version}\" defined by settings"
                 " was not found."
-            ).format(studio_version))
+            ))

     else:
         # Default behavior to use latest version
-        _print("Finding latest version")
+        _print((
+            ">>> Finding latest version compatible "
+            f"with [ {installed_version} ]"))
         openpype_version = bootstrap.find_latest_openpype_version(
-            use_staging
+            use_staging, compatible_with=installed_version
         )
         if openpype_version is None:
             if use_staging:
@@ -798,7 +815,7 @@ def _bootstrap_from_code(use_version, use_staging):

     if getattr(sys, 'frozen', False):
         local_version = bootstrap.get_version(Path(_openpype_root))
-        switch_str = f" - will switch to {use_version}" if use_version else ""
+        switch_str = f" - will switch to {use_version}" if use_version and use_version != local_version else ""  # noqa
         _print(f"  - booting version: {local_version}{switch_str}")
         assert local_version
     else:
@@ -813,11 +830,8 @@
             use_version, use_staging
         )
         if version_to_use is None:
-            raise OpenPypeVersionNotFound(
-                "Requested version \"{}\" was not found.".format(
-                    use_version
-                )
-            )
+            raise OpenPypeVersionIncompatible(
+                f"Requested version \"{use_version}\" was not found.")
     else:
         # Staging version should be used
         version_to_use = bootstrap.find_latest_openpype_version(
@@ -903,7 +917,7 @@ def _boot_validate_versions(use_version, local_version):
         use_version, openpype_versions
     )
     valid, message = bootstrap.validate_openpype_version(version_path)
-    _print("{}{}".format(">>> " if valid else "!!! ", message))
+    _print(f'{">>> " if valid else "!!! "}{message}')


 def _boot_print_versions(use_staging, local_version, openpype_root):
@@ -914,13 +928,24 @@
         _print("--- This will list only staging versions detected.")
         _print("    To see other version, omit --use-staging argument.")

-    openpype_versions = bootstrap.find_openpype(include_zips=True,
-                                                staging=use_staging)
     if getattr(sys, 'frozen', False):
         local_version = bootstrap.get_version(Path(openpype_root))
     else:
         local_version = OpenPypeVersion.get_installed_version_str()

+    compatible_with = OpenPypeVersion(version=local_version)
+    if "--all" in sys.argv:
+        compatible_with = None
+        _print("--- Showing all versions (even those not compatible).")
+    else:
+        _print(("--- Showing only compatible versions "
+                f"with [ {compatible_with.major}.{compatible_with.minor} ]"))
+
+    openpype_versions = bootstrap.find_openpype(
+        include_zips=True,
+        staging=use_staging,
+        compatible_with=compatible_with)
+
     list_versions(openpype_versions, local_version)
@@ -937,6 +962,9 @@
 def boot():
     """Bootstrap OpenPype."""
+    global silent_mode
+    if any(arg in silent_commands for arg in sys.argv):
+        silent_mode = True

     # ------------------------------------------------------------------------
     # Set environment to OpenPype root path
@@ -1040,7 +1068,7 @@ def boot():
         if not result[0]:
             _print(f"!!! Invalid version: {result[1]}")
             sys.exit(1)
-        _print(f"--- version is valid")
+        _print("--- version is valid")
     else:
         try:
             version_path = _bootstrap_from_code(use_version, use_staging)
@@ -1113,8 +1141,12 @@ def boot():

 def get_info(use_staging=None) -> list:
     """Print additional information to console."""
-    from openpype.lib.mongo import get_default_components
-    from openpype.lib.log import PypeLogger
+    from openpype.client.mongo import get_default_components
+    try:
+        from openpype.lib.log import Logger
+    except ImportError:
+        # Backwards compatibility for 'PypeLogger'
+        from openpype.lib.log import PypeLogger as Logger

     components = get_default_components()
@@ -1141,14 +1173,14 @@
                     os.environ.get("MUSTER_REST_URL")))

     # Reinitialize
-    PypeLogger.initialize()
+    Logger.initialize()
     mongo_components = get_default_components()
     if mongo_components["host"]:
         inf.append(("Logging to MongoDB", mongo_components["host"]))
         inf.append(("  - port", mongo_components["port"] or ""))
-        inf.append(("  - database", PypeLogger.log_database_name))
-        inf.append(("  - collection", PypeLogger.log_collection_name))
+        inf.append(("  - database", Logger.log_database_name))
+        inf.append(("  - collection", Logger.log_collection_name))
         inf.append(("  - user", mongo_components["username"] or ""))
         if mongo_components["auth_db"]:
             inf.append(("  - auth source", mongo_components["auth_db"]))
@@ -1157,8 +1189,7 @@
     formatted = []
     for info in inf:
         padding = (maximum - len(info[0])) + 1
-        formatted.append(
-            "... {}:{}[ {} ]".format(info[0], " " * padding, info[1]))
+        formatted.append(f'... {info[0]}:{" " * padding}[ {info[1]} ]')

     return formatted

diff --git a/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py b/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py
new file mode 100644
index 0000000000..c882e0f9b2
--- /dev/null
+++ b/tests/integration/hosts/aftereffects/test_publish_in_aftereffects_multiframe.py
@@ -0,0 +1,64 @@
+import logging
+
+from tests.lib.assert_classes import DBAssert
+from tests.integration.hosts.aftereffects.lib import AfterEffectsTestClass
+
+log = logging.getLogger("test_publish_in_aftereffects")
+
+
+class TestPublishInAfterEffects(AfterEffectsTestClass):
+    """Basic test case for publishing in AfterEffects
+
+    Should publish 5 frames
+    """
+    PERSIST = True
+
+    TEST_FILES = [
+        ("12aSDRjthn4X3yw83gz_0FZJcRRiVDEYT",
+         "test_aftereffects_publish_multiframe.zip",
+         "")
+    ]
+
+    APP = "aftereffects"
+    APP_VARIANT = ""
+
+    APP_NAME = "{}/{}".format(APP, APP_VARIANT)
+
+    TIMEOUT = 120  # publish timeout
+
+    def test_db_asserts(self, dbcon, publish_finished):
+        """Host and input data dependent expected results in DB."""
+        print("test_db_asserts")
+        failures = []
+
+        failures.append(DBAssert.count_of_types(dbcon, "version", 2))
+
+        failures.append(
+            DBAssert.count_of_types(dbcon, "version", 0, name={"$ne": 1}))
+
+        failures.append(
+            DBAssert.count_of_types(dbcon, "subset", 1,
+                                    name="imageMainBackgroundcopy"))
+
+        failures.append(
+            DBAssert.count_of_types(dbcon, "subset", 1,
+                                    name="workfileTest_task"))
+
+        failures.append(
+            DBAssert.count_of_types(dbcon, "subset", 1,
+                                    name="reviewTesttask"))
+
+        failures.append(
+            DBAssert.count_of_types(dbcon, "representation", 4))
+
+        additional_args = {"context.subset": "renderTestTaskDefault",
+                           "context.ext": "png"}
+        failures.append(
+            DBAssert.count_of_types(dbcon, "representation", 1,
+                                    additional_args=additional_args))
+
+        assert not any(failures)
+
+
+if __name__ == "__main__":
+    test_case = TestPublishInAfterEffects()
diff --git a/tests/lib/testing_classes.py b/tests/lib/testing_classes.py
index f991f02227..2b4d7deb48 100644
--- a/tests/lib/testing_classes.py
+++ b/tests/lib/testing_classes.py
@@ -314,30 +314,22 @@ class PublishTest(ModuleUnitTest):

         Compares only presence, not size nor content!
         """
-        published_dir_base = download_test_data
-        published_dir = os.path.join(output_folder_url,
-                                     self.PROJECT,
-                                     self.ASSET,
-                                     self.TASK,
-                                     "**")
-        expected_dir_base = os.path.join(published_dir_base,
+        published_dir_base = output_folder_url
+        expected_dir_base = os.path.join(download_test_data,
                                          "expected")
-        expected_dir = os.path.join(expected_dir_base,
-                                    self.PROJECT,
-                                    self.ASSET,
-                                    self.TASK,
-                                    "**")
-        print("Comparing published:'{}' : expected:'{}'".format(published_dir,
-                                                                expected_dir))
-        published = set(f.replace(published_dir_base, '') for f in
-                        glob.glob(published_dir, recursive=True) if
-                        f != published_dir_base and os.path.exists(f))
-        expected = set(f.replace(expected_dir_base, '') for f in
-                       glob.glob(expected_dir, recursive=True) if
-                       f != expected_dir_base and os.path.exists(f))
-        not_matched = expected.difference(published)
-        assert not not_matched, "Missing {} files".format(not_matched)
+        print("Comparing published:'{}' : expected:'{}'".format(
+            published_dir_base, expected_dir_base))
+        published = set(f.replace(published_dir_base, '') for f in
+                        glob.glob(published_dir_base + "\\**", recursive=True)
+                        if f != published_dir_base and os.path.exists(f))
+        expected = set(f.replace(expected_dir_base, '') for f in
+                       glob.glob(expected_dir_base + "\\**", recursive=True)
+                       if f != expected_dir_base and os.path.exists(f))
+
+        not_matched = expected.symmetric_difference(published)
+        assert not not_matched, "Missing {} files".format(
+            "\n".join(sorted(not_matched)))


 class HostFixtures(PublishTest):
diff --git a/tools/build.sh b/tools/build.sh
index 79fb748cd5..fa2c580648 100755
--- a/tools/build.sh
+++ b/tools/build.sh
@@ -193,15 +193,15 @@ if [ "$disable_submodule_update" == 1 ]; then
 if [[ "$OSTYPE" == "darwin"* ]]; then
     # fix code signing issue
-    codesign --remove-signature "$openpype_root/build/OpenPype.app/Contents/MacOS/lib/Python"
+    codesign --remove-signature "$openpype_root/build/OpenPype $openpype_version.app/Contents/MacOS/lib/Python"
     if command -v create-dmg > /dev/null 2>&1; then
         create-dmg \
-            --volname "OpenPype Installer" \
+            --volname "OpenPype $openpype_version Installer" \
             --window-pos 200 120 \
             --window-size 600 300 \
             --app-drop-link 100 50 \
-            "$openpype_root/build/OpenPype-Installer.dmg" \
-            "$openpype_root/build/OpenPype.app"
+            "$openpype_root/build/OpenPype-Installer-$openpype_version.dmg" \
+            "$openpype_root/build/OpenPype $openpype_version.app"
     else
         echo -e "${BIYellow}!!!${RST} ${BIWhite}create-dmg${RST} command is not available."
     fi
diff --git a/tools/build_dependencies.py b/tools/build_dependencies.py
index d3566dd289..d186ead881 100644
--- a/tools/build_dependencies.py
+++ b/tools/build_dependencies.py
@@ -29,6 +29,7 @@ import shutil
 import blessed
 import enlighten
 import time
+import re

 term = blessed.Terminal()
@@ -52,7 +53,7 @@ def _print(msg: str, type: int = 0) -> None:
     else:
         header = term.darkolivegreen3("--- ")

-    print("{}{}".format(header, msg))
+    print(f"{header}{msg}")


 def count_folders(path: Path) -> int:
@@ -95,16 +96,22 @@
 assert site_pkg, "No venv site-packages are found."
 _print(f"Working with: {site_pkg}", 2)

 openpype_root = Path(os.path.dirname(__file__)).parent

+version = {}
+with open(openpype_root / "openpype" / "version.py") as fp:
+    exec(fp.read(), version)
+
+version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"])
+openpype_version = version_match[1]

 # create full path
 if platform.system().lower() == "darwin":
     build_dir = openpype_root.joinpath(
         "build",
-        "OpenPype.app",
+        f"OpenPype {openpype_version}.app",
         "Contents",
         "MacOS")
 else:
-    build_subdir = "exe.{}-{}".format(get_platform(), sys.version[0:3])
+    build_subdir = f"exe.{get_platform()}-{sys.version[:3]}"
     build_dir = openpype_root / "build" / build_subdir

 _print(f"Using build at {build_dir}", 2)
diff --git a/tools/create_zip.py b/tools/create_zip.py
index 2fc351469a..6392428f58 100644
--- a/tools/create_zip.py
+++ b/tools/create_zip.py
@@ -61,7 +61,7 @@ def _print(msg: str, message_type: int = 0) -> None:
     else:
         header = term.darkolivegreen3("--- ")

-    print("{}{}".format(header, msg))
+    print(f"{header}{msg}")


 if __name__ == "__main__":
diff --git a/vendor/configs/OpenColorIO-Configs b/vendor/configs/OpenColorIO-Configs
new file mode 160000
index 0000000000..0bb079c08b
--- /dev/null
+++ b/vendor/configs/OpenColorIO-Configs
@@ -0,0 +1 @@
+Subproject commit 0bb079c08be410030669cbf5f19ff869b88af953
diff --git a/website/docs/artist_hosts_unreal.md b/website/docs/artist_hosts_unreal.md
index 1ff09893e3..45a0c8bb6f 100644
--- a/website/docs/artist_hosts_unreal.md
+++ b/website/docs/artist_hosts_unreal.md
@@ -8,6 +8,20 @@ sidebar_label: Unreal

 OpenPype supports Unreal in similar ways as in other DCCs Yet there are few specific you need to be aware of.

+### Creating the Unreal project
+
+Selecting a task and opening it with Unreal will generate the Unreal project, if it hasn't been created before.
+By default, OpenPype includes the plugin that will be built together with the project.
+
+Alternatively, the environment variable `"OPENPYPE_UNREAL_PLUGIN"` can be set to the path of a compiled version of the plugin.
+The version of the compiled plugin must match the version of Unreal with which the project is being created.
+
+:::note
+Unreal version 5.0 onwards requires the following environment variable:
+
+`"UE_PYTHONPATH": "{PYTHONPATH}"`
+:::
+
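+For illustration, a minimal sketch of preparing these variables in Python before launching Unreal through OpenPype (the plugin path is a hypothetical example; point it at your own compiled build):
+
+```python
+import os
+
+# Hypothetical path to a plugin build matching your Unreal version.
+os.environ["OPENPYPE_UNREAL_PLUGIN"] = "D:/plugins/UE_5.0/OpenPype"
+# Unreal 5.0 onwards: expose the current PYTHONPATH to the engine.
+os.environ["UE_PYTHONPATH"] = os.environ.get("PYTHONPATH", "")
+```
+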
 ### Project naming
 Unreal doesn't support project names starting with non-alphabetic character. So names like `123_myProject` are
@@ -15,9 +29,9 @@
 invalid. If OpenPype detects such name it automatically prepends letter **P** to

 ## OpenPype global tools

-OpenPype global tools can be found in *Window* main menu:
+OpenPype global tools can be found in Unreal's toolbar and in the *Tools* main menu:

-![Unreal OpenPype Menu](assets/unreal-avalon_tools.jpg)
+![Unreal OpenPype Menu](assets/unreal_openpype_tools.png)

 - [Create](artist_tools.md#creator)
 - [Load](artist_tools.md#loader)
@@ -31,10 +45,118 @@

 To import Static Mesh model, just choose **OpenPype β†’ Load ...** and select your mesh. Static meshes are transferred as FBX files as specified in [Unreal Engine 4 Static Mesh Pipeline](https://docs.unrealengine.com/en-US/Engine/Content/Importing/FBX/StaticMeshes/index.html). This action will create new folder with subset name (`unrealStaticMeshMain_CON` for example) and put all data into it. Inside, you can find:

-![Unreal Container Content](assets/unreal-container.jpg)
+![Unreal Container Content](assets/unreal_container.jpg)

-In this case there is **lambert1**, material pulled from Maya when this static mesh was published, **unrealStaticMeshCube** is the geometry itself, **unrealStaticMeshCube_CON** is a *AssetContainer* type and is there to mark this directory as Avalon Container (to track changes) and to hold OpenPype metadata.
+In this case there is **lambert1**, a material pulled from Maya when this static mesh was published, **antennaA_modelMain** is the geometry itself, and **modelMain_v002_CON** is an *AssetContainer* type that marks this directory as an Avalon Container (to track changes) and holds OpenPype metadata.

 ### Publishing

-Publishing of Static Mesh works in similar ways. Select your mesh in *Content Browser* and **OpenPype β†’ Create ...**. This will create folder named by subset you've chosen - for example **unrealStaticMeshDefault_INS**. It this folder is that mesh and *Avalon Publish Instance* asset marking this folder as publishable instance and holding important metadata on it. If you want to publish this instance, go **OpenPype β†’ Publish ...**
\ No newline at end of file
+Publishing of Static Mesh works in similar ways. Select your mesh in *Content Browser* and **OpenPype β†’ Create ...**. This will create a folder named by the subset you've chosen - for example **unrealStaticMeshDefault_INS**. In this folder is that mesh and an *Avalon Publish Instance* asset marking this folder as a publishable instance and holding important metadata on it. If you want to publish this instance, go **OpenPype β†’ Publish ...**
+
+## Layout
+
+There are two different layout options in Unreal, depending on the type of project you are working on.
+One only imports the layout, and saves it in a level.
+The other uses [Master Sequences](https://docs.unrealengine.com/4.27/en-US/AnimatingObjects/Sequencer/Overview/TracksShot/) to track the whole level sequence hierarchy.
+You can choose in the Project Settings if you want to generate the level sequences.
+
+![Unreal OP Settings Level Sequence](assets/unreal_setting_level_sequence.png)
+
+### Loading
+
+To load a layout, click on the OpenPype icon in Unreal's toolbar and select **Load**.
+
+![Unreal OP Tools Load](assets/unreal_openpype_tools_load.png)
+
+Select the task on the left, then right click on the layout asset and select **Load Layout**.
+
+![Unreal Layout Load](assets/unreal_load_layout.png)
+
+If you need to load multiple layouts, you can select more than one task on the left and load them together.
+
+![Unreal Layout Load Batch](assets/unreal_load_layout_batch.png)
+
+### Navigating the project
+
+The layout will be imported in the directory `/Content/OpenPype` and split into two subfolders:
+- *Assets*, which will contain all the rigs and models contained in the layout;
+- *Asset name* (in the following example, *episode 2*), a folder named as the **asset** of the current **task**.
+
+![Unreal Layout Loading Result](assets/unreal_layout_loading_result.png)
+
+If you chose to generate the level sequences, in the second folder you will find the master level for the task (usually an episode), the level sequence and the folders for all the scenes in the episode.
+Otherwise, you will find the level generated for the loaded layout.
+
+#### Layout without level sequences
+
+In the layout folder, you will find the level with the imported layout and an object of *AssetContainer* type. The latter is there to mark this directory as an Avalon Container (to track changes) and to hold OpenPype metadata.
+
+![Unreal Layout Loading No Sequence](assets/unreal_layout_loading_no_sequence.png)
+
+The layout level will, and should, contain only the data included in the layout. To add lighting or other elements, like an environment, you have to create a master level and add the layout level as a [streaming level](https://docs.unrealengine.com/5.0/en-US/level-streaming-in-unreal-engine/).
+
+Create the master level and open it. Then, open the *Levels* window (from the menu **Windows β†’ Levels**). Click on **Levels β†’ Add Existing** and select the layout level and the other levels you wish to include in the scene. The following example shows a master level to which a light level and the layout level have been added.
+
+![Unreal Add Level](assets/unreal_add_level.png)
+![Unreal Level List](assets/unreal_level_list_no_sequences.png)
+
+#### Layout with level sequences
+
+In the episode folder, you will find the master level for the episode, the master level sequence and the folders for all the scenes in the episode.
+
+After opening the master level, open the *Levels* window (from the menu **Windows β†’ Levels**), and you will see the list of the levels of each shot of the episode for which a layout has been loaded.
+
+![Unreal Level List](assets/unreal_level_list.png)
+
+If it has not been added already, you will need to add the environment to the level. Click on **Levels β†’ Add Existing** and select the level with the environment (check with the studio where it is located).
+
+![Unreal Add Level](assets/unreal_add_level.png)
+
+After adding the environment level to the master level, you will need to set it as always loaded by right clicking it and selecting **Change Streaming Method β†’ Always Loaded**.
+
+![Unreal Level Streaming Method](assets/unreal_level_streaming_method.png)
+
+### Update layouts
+
+To manage loaded layouts, click on the OpenPype icon in Unreal's toolbar and select **Manage**.
+
+![Unreal OP Tools Manage](assets/unreal_openpype_tools_manage.png)
+
+You will get a list of all the assets that have been loaded in the project.
+The version number will be in red if it isn't the latest version. Right click on the element and select **Update** if you need to update the layout.
+
+:::note
+**DO NOT** update rigs or models imported with a layout. Update only the layout.
+:::
+
+## Rendering
+
+:::note
+Rendering requires a layout loaded with the option to create the level sequences turned **on**.
+:::
+
+To render and publish an episode, a scene or a shot, you will need to create a publish instance. The publish instance for rendering is based on one level sequence. That means that if you want to render the whole episode, you will need to create it for the level sequence of the episode, but if you want to render just one shot, you will need to create it for that shot.
+
+Navigate to the folder that contains the level sequence you need to render. Select the level sequence, click on the OpenPype icon in Unreal's toolbar, and select **Create**.
+
+![Unreal OP Tools Create](assets/unreal_openpype_tools_create.png)
+
+In the Instance Creator, select **Unreal - Render**, give it a name, and click **Create**.
+
+![Unreal OP Instance Creator](assets/unreal_create_render.png)
+
+The render instance will be created in `/Content/OpenPype/PublishInstances`.
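+
+To double-check which instances exist, here is a short sketch that should work from Unreal's Python console (assuming the editor's Python plugin is enabled; note that `/Content/...` paths appear as `/Game/...` in asset paths):
+
+```python
+import unreal
+
+# List every asset under the publish instances folder.
+for path in unreal.EditorAssetLibrary.list_assets(
+        "/Game/OpenPype/PublishInstances", recursive=True):
+    print(path)
+```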
+
+Select the instance you need to render, then click on the OpenPype icon in Unreal's toolbar and select **Render**. You can render more than one instance at a time, if needed. Just select all the instances you need to render before selecting **Render** from the OpenPype menu.
+
+![Unreal OP Tools Render](assets/unreal_openpype_tools_render.png)
+
+Once the render is finished, click on the OpenPype icon in Unreal's toolbar and select **Publish**.
+
+![Unreal OP Tools Publish](assets/unreal_openpype_tools_publish.png)
+
+On the left, you will see the render instances. They will be automatically reorganised to have an instance for each shot. So, for example, if you have created the render instance for the whole episode, you will have an instance for each shot in the episode.
+
+![Unreal Publish Render](assets/unreal_publish_render.png)
+
+Click on the play button in the bottom right, and it will start the publishing process.
diff --git a/website/docs/assets/unreal-avalon_tools.jpg b/website/docs/assets/unreal-avalon_tools.jpg
deleted file mode 100644
index 531fbe516a..0000000000
Binary files a/website/docs/assets/unreal-avalon_tools.jpg and /dev/null differ
diff --git a/website/docs/assets/unreal-container.jpg b/website/docs/assets/unreal-container.jpg
deleted file mode 100644
index f0c0a61e95..0000000000
Binary files a/website/docs/assets/unreal-container.jpg and /dev/null differ
diff --git a/website/docs/assets/unreal_add_level.png b/website/docs/assets/unreal_add_level.png
new file mode 100644
index 0000000000..caeef03d10
Binary files /dev/null and b/website/docs/assets/unreal_add_level.png differ
diff --git a/website/docs/assets/unreal_container.jpg b/website/docs/assets/unreal_container.jpg
new file mode 100644
index 0000000000..0fda640b00
Binary files /dev/null and b/website/docs/assets/unreal_container.jpg differ
diff --git a/website/docs/assets/unreal_create_render.png b/website/docs/assets/unreal_create_render.png
new file mode 100644
index 0000000000..2e3ef20b35
Binary files /dev/null and b/website/docs/assets/unreal_create_render.png differ
diff --git a/website/docs/assets/unreal_layout_loading_no_sequence.png b/website/docs/assets/unreal_layout_loading_no_sequence.png
new file mode 100644
index 0000000000..ed05d77f53
Binary files /dev/null and b/website/docs/assets/unreal_layout_loading_no_sequence.png differ
diff --git a/website/docs/assets/unreal_layout_loading_result.png b/website/docs/assets/unreal_layout_loading_result.png
new file mode 100644
index 0000000000..55b329110b
Binary files /dev/null and b/website/docs/assets/unreal_layout_loading_result.png differ
diff --git a/website/docs/assets/unreal_level_list.png b/website/docs/assets/unreal_level_list.png
new file mode 100644
index 0000000000..2fc0c1bfc7
Binary files /dev/null and b/website/docs/assets/unreal_level_list.png differ
diff --git a/website/docs/assets/unreal_level_list_no_sequences.png b/website/docs/assets/unreal_level_list_no_sequences.png
new file mode 100644
index 0000000000..7ed912b68b
Binary files /dev/null and b/website/docs/assets/unreal_level_list_no_sequences.png differ
diff --git a/website/docs/assets/unreal_level_streaming_method.png b/website/docs/assets/unreal_level_streaming_method.png
new file mode 100644
index 0000000000..8f817abd2e
Binary files /dev/null and b/website/docs/assets/unreal_level_streaming_method.png differ
diff --git a/website/docs/assets/unreal_level_streaming_method_no_sequences.png b/website/docs/assets/unreal_level_streaming_method_no_sequences.png
new file mode 100644
index 0000000000..77a2754ded
Binary files /dev/null and b/website/docs/assets/unreal_level_streaming_method_no_sequences.png differ
diff --git a/website/docs/assets/unreal_load_layout.png b/website/docs/assets/unreal_load_layout.png
new file mode 100644
index 0000000000..ffad60ae9b
Binary files /dev/null and b/website/docs/assets/unreal_load_layout.png differ
diff --git a/website/docs/assets/unreal_load_layout_batch.png b/website/docs/assets/unreal_load_layout_batch.png
new file mode 100644
index 0000000000..dd2f2f3e8f
Binary files /dev/null and b/website/docs/assets/unreal_load_layout_batch.png differ
diff --git a/website/docs/assets/unreal_openpype_tools.png b/website/docs/assets/unreal_openpype_tools.png
new file mode 100644
index 0000000000..bf7d850ab2
Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools.png differ
diff --git a/website/docs/assets/unreal_openpype_tools_create.png b/website/docs/assets/unreal_openpype_tools_create.png
new file mode 100644
index 0000000000..9cfb95f2a1
Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_create.png differ
diff --git a/website/docs/assets/unreal_openpype_tools_load.png b/website/docs/assets/unreal_openpype_tools_load.png
new file mode 100644
index 0000000000..4909feac3b
Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_load.png differ
diff --git a/website/docs/assets/unreal_openpype_tools_manage.png b/website/docs/assets/unreal_openpype_tools_manage.png
new file mode 100644
index 0000000000..af7b182842
Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_manage.png differ
diff --git a/website/docs/assets/unreal_openpype_tools_publish.png b/website/docs/assets/unreal_openpype_tools_publish.png
new file mode 100644
index 0000000000..ab4c10c4ca
Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_publish.png differ
diff --git a/website/docs/assets/unreal_openpype_tools_render.png b/website/docs/assets/unreal_openpype_tools_render.png
new file mode 100644
index 0000000000..377dc2951e
Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_render.png differ
diff --git a/website/docs/assets/unreal_publish_render.png b/website/docs/assets/unreal_publish_render.png
new file mode 100644
index 0000000000..674b0ac30e
Binary files /dev/null and b/website/docs/assets/unreal_publish_render.png differ
diff --git a/website/docs/assets/unreal_setting_level_sequence.png b/website/docs/assets/unreal_setting_level_sequence.png
new file mode 100644
index 0000000000..5a8adc6257
Binary files /dev/null and b/website/docs/assets/unreal_setting_level_sequence.png differ
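
A note on the version-compatibility checks in the bootstrap changes above: version discovery now passes `compatible_with=installed_version`, and `--list-versions` filters on the installed `major.minor` pair. A hypothetical sketch of that rule (a toy stand-in for illustration, not the actual `OpenPypeVersion` implementation):

```python
from dataclasses import dataclass


@dataclass
class Version:
    """Toy stand-in for OpenPypeVersion, for illustration only."""
    major: int
    minor: int
    patch: int

    def is_compatible(self, other: "Version") -> bool:
        # Patch level may differ freely; major.minor must match.
        return (self.major, self.minor) == (other.major, other.minor)


installed = Version(3, 13, 0)
assert Version(3, 13, 1).is_compatible(installed)      # 3.13.x vs 3.13.x
assert not Version(3, 12, 2).is_compatible(installed)  # 3.12 vs 3.13
```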