diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000..dfd89cdb3c
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,7 @@
+[submodule "tools/modules/powershell/BurntToast"]
+    path = tools/modules/powershell/BurntToast
+    url = https://github.com/Windos/BurntToast.git
+
+[submodule "tools/modules/powershell/PSWriteColor"]
+    path = tools/modules/powershell/PSWriteColor
+    url = https://github.com/EvotecIT/PSWriteColor.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 003ca72a24..e8da885473 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,99 @@
 # Changelog
 
+## [3.12.2-nightly.2](https://github.com/pypeclub/OpenPype/tree/HEAD)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...HEAD)
+
+**🚀 Enhancements**
+
+- General: Interactive console in cli [\#3526](https://github.com/pypeclub/OpenPype/pull/3526)
+- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516)
+- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509)
+- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506)
+- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505)
+- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502)
+- General: Event system [\#3499](https://github.com/pypeclub/OpenPype/pull/3499)
+- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498)
+- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497)
+- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486)
+- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469)
+
+**🐛 Bug fixes**
+
+- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525)
+- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523)
+- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519)
+- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514)
+- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513)
+- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510)
+- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504)
+- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501)
+
+**🔀 Refactored code**
+
+- General: Client docstrings cleanup [\#3529](https://github.com/pypeclub/OpenPype/pull/3529)
+- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495)
+
+## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.1-nightly.6...3.12.1)
+
+### 📖 Documentation
+
+- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441)
+
+**🆕 New features**
+
+- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433)
+
+**🚀 Enhancements**
+
+- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494)
+- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487)
+- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484)
+- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476)
+- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475)
+- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465)
+- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445)
+- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426)
+- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425)
+- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400)
+
+**🐛 Bug fixes**
+
+- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493)
+- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491)
+- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485)
+- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479)
+- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478)
+- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474)
+- Kitsu: bugfix with sync-service ans publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473)
+- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470)
+- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468)
+- Resolve: removed few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464)
+- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462)
+- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455)
+- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450)
+- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447)
+- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443)
+- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427)
+
+**🔀 Refactored code**
+
+- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461)
+- Maya: Re-use `maintained\_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460)
+- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459)
+- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458)
+- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457)
+- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454)
+- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446)
+- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442)
+- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436)
+- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435)
+
 ## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28)
 
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.1...3.12.0)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.12.0-nightly.3...3.12.0)
 
 ### 📖 Documentation
 
@@ -13,10 +104,6 @@
 
 - Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422)
 - Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411)
-- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366)
-- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357)
-- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350)
-- Maya: Allow more data to be published along camera 🎥 [\#3304](https://github.com/pypeclub/OpenPype/pull/3304)
 
 **🐛 Bug fixes**
 
@@ -27,11 +114,6 @@
 - Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407)
 - General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398)
 - Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392)
-- TVPaint: Make sure exit code is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382)
-- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381)
-- Harmony: added unc path to zifile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372)
-- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355)
-- Nuke: Load full model hierarchy by default [\#3328](https://github.com/pypeclub/OpenPype/pull/3328)
 
 **🔀 Refactored code**
 
@@ -41,97 +123,15 @@
 - Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395)
 - Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393)
 - Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391)
-- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385)
-- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378)
-- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376)
-- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375)
-- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374)
-- TVPaint: Use client query functions [\#3340](https://github.com/pypeclub/OpenPype/pull/3340)
-- Ftrack: Use client query functions [\#3339](https://github.com/pypeclub/OpenPype/pull/3339)
-- Standalone Publisher: Use client query functions [\#3330](https://github.com/pypeclub/OpenPype/pull/3330)
-
-**Merged pull requests:**
-
-- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371)
-- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369)
 
 ## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20)
 
 [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.11.1-nightly.1...3.11.1)
 
-**🆕 New features**
-
-- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346)
-- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344)
-
-**🚀 Enhancements**
-
-- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367)
-- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354)
-- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352)
-- Ftrack: Open browser from tray [\#3320](https://github.com/pypeclub/OpenPype/pull/3320)
-
-**🐛 Bug fixes**
-
-- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377)
-- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368)
-- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364)
-- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363)
-- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361)
-- AE- fix validate\_scene\_settings and renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358)
-- deadline: fixing misidentification of revieables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356)
-- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351)
-- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347)
-- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345)
-- Deadline: added OPENPYPE\_MONGO to filter [\#3336](https://github.com/pypeclub/OpenPype/pull/3336)
-- Nuke: fixing farm publishing if review is disabled [\#3306](https://github.com/pypeclub/OpenPype/pull/3306)
-
-**🔀 Refactored code**
-
-- Webpublisher: Use client query functions [\#3333](https://github.com/pypeclub/OpenPype/pull/3333)
-
 ## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17)
 
 [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.11.0-nightly.4...3.11.0)
 
-### 📖 Documentation
-
-- Documentation: Add app key to template documentation [\#3299](https://github.com/pypeclub/OpenPype/pull/3299)
-- doc: adding royal render and multiverse to the web site [\#3285](https://github.com/pypeclub/OpenPype/pull/3285)
-
-**🚀 Enhancements**
-
-- Settings: Settings can be extracted from UI [\#3323](https://github.com/pypeclub/OpenPype/pull/3323)
-- updated poetry installation source [\#3316](https://github.com/pypeclub/OpenPype/pull/3316)
-- Ftrack: Action to easily create daily review session [\#3310](https://github.com/pypeclub/OpenPype/pull/3310)
-- TVPaint: Extractor use mark in/out range to render [\#3309](https://github.com/pypeclub/OpenPype/pull/3309)
-- Ftrack: Delivery action can work on ReviewSessions [\#3307](https://github.com/pypeclub/OpenPype/pull/3307)
-- Maya: Look assigner UI improvements [\#3298](https://github.com/pypeclub/OpenPype/pull/3298)
-- Ftrack: Action to transfer values of hierarchical attributes [\#3284](https://github.com/pypeclub/OpenPype/pull/3284)
-
-**🐛 Bug fixes**
-
-- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342)
-- Houdini: Fix Houdini VDB manage update wrong file attribute name [\#3322](https://github.com/pypeclub/OpenPype/pull/3322)
-- Nuke: anatomy compatibility issue hacks [\#3321](https://github.com/pypeclub/OpenPype/pull/3321)
-- hiero: otio p3 compatibility issue - metadata on effect use update 3.11 [\#3314](https://github.com/pypeclub/OpenPype/pull/3314)
-- General: Vendorized modules for Python 2 and update poetry lock [\#3305](https://github.com/pypeclub/OpenPype/pull/3305)
-- Fix - added local targets to install host [\#3303](https://github.com/pypeclub/OpenPype/pull/3303)
-- Settings: Add missing default settings for nuke gizmo [\#3301](https://github.com/pypeclub/OpenPype/pull/3301)
-- Maya: Fix swaped width and height in reviews [\#3300](https://github.com/pypeclub/OpenPype/pull/3300)
-- Maya: point cache publish handles Maya instances [\#3297](https://github.com/pypeclub/OpenPype/pull/3297)
-- Global: extract review slate issues [\#3286](https://github.com/pypeclub/OpenPype/pull/3286)
-- Webpublisher: return only active projects in ProjectsEndpoint [\#3281](https://github.com/pypeclub/OpenPype/pull/3281)
-
-**🔀 Refactored code**
-
-- Blender: Use client query functions [\#3331](https://github.com/pypeclub/OpenPype/pull/3331)
-- General: Define query functions [\#3288](https://github.com/pypeclub/OpenPype/pull/3288)
-
-**Merged pull requests:**
-
-- Maya: add pointcache family to gpu cache loader [\#3318](https://github.com/pypeclub/OpenPype/pull/3318)
-
 ## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26)
 
 [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.10.0-nightly.6...3.10.0)
diff --git a/inno_setup.iss b/inno_setup.iss
index ead9907955..fa050ef1d6 100644
--- a/inno_setup.iss
+++ b/inno_setup.iss
@@ -18,7 +18,8 @@ AppPublisher=Orbi Tools s.r.o
 AppPublisherURL=http://pype.club
 AppSupportURL=http://pype.club
 AppUpdatesURL=http://pype.club
-DefaultDirName={autopf}\{#MyAppName}
+DefaultDirName={autopf}\{#MyAppName}\{#AppVer}
+UsePreviousAppDir=no
 DisableProgramGroupPage=yes
 OutputBaseFilename={#MyAppName}-{#AppVer}-install
 AllowCancelDuringInstall=yes
@@ -27,7 +28,7 @@ AllowCancelDuringInstall=yes
 PrivilegesRequiredOverridesAllowed=dialog
 SetupIconFile=igniter\openpype.ico
 OutputDir=build\
-Compression=lzma
+Compression=lzma2
 SolidCompression=yes
 
 WizardStyle=modern
@@ -37,6 +38,11 @@ Name: "english"; MessagesFile: "compiler:Default.isl"
 [Tasks]
 Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
 
+[InstallDelete]
+; clean everything in previous installation folder
+Type: filesandordirs; Name: "{app}\*"
+
+
 [Files]
 Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
 ; NOTE: Don't use "Flags: ignoreversion" on any shared system files
diff --git a/openpype/api.py b/openpype/api.py
index 9ce745b653..fac2ae572b 100644
--- a/openpype/api.py
+++ b/openpype/api.py
@@ -15,7 +15,6 @@ from .lib import (
     run_subprocess,
     version_up,
     get_asset,
-    get_hierarchy,
     get_workdir_data,
     get_version_from_path,
     get_last_version_from_path,
@@ -101,7 +100,6 @@ __all__ = [
     # get contextual data
     "version_up",
     "get_asset",
-    "get_hierarchy",
     "get_workdir_data",
     "get_version_from_path",
     "get_last_version_from_path",
diff --git a/openpype/cli.py b/openpype/cli.py
index 2aa4a46929..9a2dfaa141 100644
--- a/openpype/cli.py
+++ b/openpype/cli.py
@@ -2,7 +2,7 @@
 """Package for handling pype command line arguments."""
 import os
 import sys
-
+import code
 import click
 
 # import sys
@@ -424,3 +424,22 @@
 def unpack_project(zipfile, root):
     """Create a package of project with all files and database dump."""
     PypeCommands().unpack_project(zipfile, root)
+
+
+@main.command()
+def interactive():
+    """Interactive (Python-like) console.
+
+    Helpful command, not only for development, to work directly with the
+    python interpreter.
+
+    Warning:
+        Executable 'openpype_gui' on windows won't work.
+    """
+
+    from openpype.version import __version__
+
+    banner = "OpenPype {}\nPython {} on {}".format(
+        __version__, sys.version, sys.platform
+    )
+    code.interact(banner)
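For reference, a minimal sketch of what this command wires up: `code.interact` from the standard library opens a REPL inside the current process. The `local` mapping below is an illustrative extension only; the PR itself passes just a banner.

```python
import code
import sys

# Build a banner similar to the one the new CLI command prints.
banner = "OpenPype X.Y.Z\nPython {} on {}".format(sys.version, sys.platform)

# Start an interactive session; 'local' seeds the REPL namespace
# (hypothetical convenience, not part of the PR).
code.interact(banner, local={"sys": sys})
```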
diff --git a/openpype/client/__init__.py b/openpype/client/__init__.py
index e3b4ef5132..97e6755d09 100644
--- a/openpype/client/__init__.py
+++ b/openpype/client/__init__.py
@@ -1,6 +1,7 @@
 from .entities import (
     get_projects,
     get_project,
+    get_whole_project,
 
     get_asset_by_id,
     get_asset_by_name,
@@ -29,15 +30,19 @@ from .entities import (
     get_representations,
     get_representation_parents,
     get_representations_parents,
+    get_archived_representations,
 
     get_thumbnail,
     get_thumbnails,
     get_thumbnail_id_from_source,
+
+    get_workfile_info,
 )
 
 __all__ = (
     "get_projects",
     "get_project",
+    "get_whole_project",
 
     "get_asset_by_id",
     "get_asset_by_name",
@@ -66,8 +71,11 @@ __all__ = (
     "get_representations",
     "get_representation_parents",
     "get_representations_parents",
+    "get_archived_representations",
 
     "get_thumbnail",
     "get_thumbnails",
     "get_thumbnail_id_from_source",
+
+    "get_workfile_info",
 )
diff --git a/openpype/client/entities.py b/openpype/client/entities.py
index 28cd994254..e7eeadcf48 100644
--- a/openpype/client/entities.py
+++ b/openpype/client/entities.py
@@ -15,12 +15,15 @@ from bson.objectid import ObjectId
 from openpype.lib.mongo import OpenPypeMongoConnection
 
 
-def _get_project_connection(project_name=None):
+def _get_project_database():
     db_name = os.environ.get("AVALON_DB") or "avalon"
-    mongodb = OpenPypeMongoConnection.get_mongo_client()[db_name]
-    if project_name:
-        return mongodb[project_name]
-    return mongodb
+    return OpenPypeMongoConnection.get_mongo_client()[db_name]
+
+
+def _get_project_connection(project_name):
+    if not project_name:
+        raise ValueError("Invalid project name {}".format(str(project_name)))
+    return _get_project_database()[project_name]
 
 
 def _prepare_fields(fields, required_fields=None):
@@ -55,7 +58,7 @@ def _convert_ids(in_ids):
 
 
 def get_projects(active=True, inactive=False, fields=None):
-    mongodb = _get_project_connection()
+    mongodb = _get_project_database()
     for project_name in mongodb.collection_names():
         if project_name in ("system.indexes",):
             continue
@@ -94,13 +97,28 @@ def get_project(project_name, active=True, inactive=False, fields=None):
     return conn.find_one(query_filter, _prepare_fields(fields))
 
 
+def get_whole_project(project_name):
+    """Receive all documents from project.
+
+    Helper that can be used to get all documents from a whole project, for
+    example for backups.
+
+    Returns:
+        Cursor: Query cursor as iterable which returns all documents from
+            project collection.
+    """
+
+    conn = _get_project_connection(project_name)
+    return conn.find({})
+
+
 def get_asset_by_id(project_name, asset_id, fields=None):
     """Receive asset data by it's id.
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        asset_id (str|ObjectId): Asset's id.
-        fields (list[str]): Fields that should be returned. All fields are
+        asset_id (Union[str, ObjectId]): Asset's id.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -123,7 +141,7 @@ def get_asset_by_name(project_name, asset_name, fields=None):
     Args:
         project_name (str): Name of project where to look for queried entities.
         asset_name (str): Asset's name.
-        fields (list[str]): Fields that should be returned. All fields are
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -160,12 +178,13 @@ def _get_assets(
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        asset_ids (list[str|ObjectId]): Asset ids that should be found.
-        asset_names (list[str]): Name assets that should be found.
-        parent_ids (list[str|ObjectId]): Parent asset ids.
+        asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should
+            be found.
+        asset_names (Iterable[str]): Name assets that should be found.
+        parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids.
         standard (bool): Query standart assets (type 'asset').
         archived (bool): Query archived assets (type 'archived_asset').
-        fields (list[str]): Fields that should be returned. All fields are
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -226,11 +245,12 @@ def get_assets(
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        asset_ids (list[str|ObjectId]): Asset ids that should be found.
-        asset_names (list[str]): Name assets that should be found.
-        parent_ids (list[str|ObjectId]): Parent asset ids.
+        asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should
+            be found.
+        asset_names (Iterable[str]): Name assets that should be found.
+        parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids.
         archived (bool): Add also archived assets.
-        fields (list[str]): Fields that should be returned. All fields are
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -265,10 +285,11 @@ def get_archived_assets(
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        asset_ids (list[str|ObjectId]): Asset ids that should be found.
-        asset_names (list[str]): Name assets that should be found.
-        parent_ids (list[str|ObjectId]): Parent asset ids.
-        fields (list[str]): Fields that should be returned. All fields are
+        asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should
+            be found.
+        asset_names (Iterable[str]): Name assets that should be found.
+        parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -286,10 +307,11 @@ def get_asset_ids_with_subsets(project_name, asset_ids=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        asset_ids (list[str|ObjectId]): Look only for entered asset ids.
+        asset_ids (Iterable[Union[str, ObjectId]]): Look only for entered
+            asset ids.
 
     Returns:
-        List[ObjectId]: Asset ids that have existing subsets.
+        Iterable[ObjectId]: Asset ids that have existing subsets.
     """
 
     subset_query = {
@@ -327,8 +349,8 @@ def get_subset_by_id(project_name, subset_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        subset_id (str|ObjectId): Id of subset which should be found.
-        fields (list[str]): Fields that should be returned. All fields are
+        subset_id (Union[str, ObjectId]): Id of subset which should be found.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -351,8 +373,8 @@ def get_subset_by_name(project_name, subset_name, asset_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
         subset_name (str): Name of subset.
-        asset_id (str|ObjectId): Id of parent asset.
-        fields (list[str]): Fields that should be returned. All fields are
+        asset_id (Union[str, ObjectId]): Id of parent asset.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -381,6 +403,7 @@ def get_subsets(
     subset_ids=None,
     subset_names=None,
     asset_ids=None,
+    names_by_asset_ids=None,
     archived=False,
     fields=None
 ):
@@ -390,13 +413,16 @@ def get_subsets(
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        subset_ids (list[str|ObjectId]): Subset ids that should be queried.
+        subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should be
+            queried. Filter ignored if 'None' is passed.
+        subset_names (Iterable[str]): Subset names that should be queried.
             Filter ignored if 'None' is passed.
-        subset_names (list[str]): Subset names that should be queried.
-            Filter ignored if 'None' is passed.
-        asset_ids (list[str|ObjectId]): Asset ids under which should look for
-            the subsets. Filter ignored if 'None' is passed.
-        fields (list[str]): Fields that should be returned. All fields are
+        asset_ids (Iterable[Union[str, ObjectId]]): Asset ids under which
+            should look for the subsets. Filter ignored if 'None' is passed.
+        names_by_asset_ids (dict[ObjectId, List[str]]): Complex filtering
+            using asset ids and list of subset names under the asset.
+        archived (bool): Look for archived subsets too.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -429,6 +455,18 @@ def get_subsets(
             return []
         query_filter["name"] = {"$in": list(subset_names)}
 
+    if names_by_asset_ids is not None:
+        or_query = []
+        for asset_id, names in names_by_asset_ids.items():
+            if asset_id and names:
+                or_query.append({
+                    "parent": _convert_id(asset_id),
+                    "name": {"$in": list(names)}
+                })
+        if not or_query:
+            return []
+        query_filter["$or"] = or_query
+
     conn = _get_project_connection(project_name)
     return conn.find(query_filter, _prepare_fields(fields))
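The new `names_by_asset_ids` filter above builds an `$or` query from asset-id/name pairs. A hypothetical call site, with placeholder ids and names:

```python
from bson.objectid import ObjectId
from openpype.client import get_subsets

# Placeholder asset ids mapped to the subset names wanted under each.
names_by_asset_ids = {
    ObjectId("507f1f77bcf86cd799439011"): ["modelMain", "rigMain"],
    ObjectId("507f1f77bcf86cd799439012"): ["modelMain"],
}
subsets = list(get_subsets(
    "demo_project",
    names_by_asset_ids=names_by_asset_ids,
    fields=["_id", "name", "parent"],
))
```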
@@ -438,8 +476,8 @@ def get_subset_families(project_name, subset_ids=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        subset_ids (list[str|ObjectId]): Subset ids that should be queried.
-            All subsets from project are used if 'None' is passed.
+        subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should
+            be queried. All subsets from project are used if 'None' is passed.
 
     Returns:
         set[str]: Main families of matching subsets.
@@ -474,8 +512,8 @@ def get_version_by_id(project_name, version_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        version_id (str|ObjectId): Id of version which should be found.
-        fields (list[str]): Fields that should be returned. All fields are
+        version_id (Union[str, ObjectId]): Id of version which should be found.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -501,8 +539,8 @@ def get_version_by_name(project_name, version, subset_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
         version (int): name of version entity (it's version).
-        subset_id (str|ObjectId): Id of version which should be found.
-        fields (list[str]): Fields that should be returned. All fields are
+        subset_id (Union[str, ObjectId]): Id of version which should be found.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -587,14 +625,14 @@ def get_versions(
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        version_ids (list[str|ObjectId]): Version ids that will be queried.
+        version_ids (Iterable[Union[str, ObjectId]]): Version ids that will
+            be queried. Filter ignored if 'None' is passed.
+        subset_ids (Iterable[str]): Subset ids that will be queried.
             Filter ignored if 'None' is passed.
-        subset_ids (list[str]): Subset ids that will be queried.
-            Filter ignored if 'None' is passed.
-        versions (list[int]): Version names (as integers).
+        versions (Iterable[int]): Version names (as integers).
             Filter ignored if 'None' is passed.
         hero (bool): Look also for hero versions.
-        fields (list[str]): Fields that should be returned. All fields are
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -617,8 +655,9 @@ def get_hero_version_by_subset_id(project_name, subset_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        subset_id (str|ObjectId): Subset id under which is hero version.
-        fields (list[str]): Fields that should be returned. All fields are
+        subset_id (Union[str, ObjectId]): Subset id under which
+            is hero version.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -647,8 +686,8 @@ def get_hero_version_by_id(project_name, version_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        version_id (str|ObjectId): Hero version id.
-        fields (list[str]): Fields that should be returned. All fields are
+        version_id (Union[str, ObjectId]): Hero version id.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -682,11 +721,11 @@ def get_hero_versions(
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        subset_ids (list[str|ObjectId]): Subset ids for which should look for
-            hero versions. Filter ignored if 'None' is passed.
-        version_ids (list[str|ObjectId]): Hero version ids. Filter ignored if
-            'None' is passed.
-        fields (list[str]): Fields that should be returned. All fields are
+        subset_ids (Iterable[Union[str, ObjectId]]): Subset ids for which
+            should look for hero versions. Filter ignored if 'None' is passed.
+        version_ids (Iterable[Union[str, ObjectId]]): Hero version ids. Filter
+            ignored if 'None' is passed.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -712,13 +751,13 @@ def get_output_link_versions(project_name, version_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        version_id (str|ObjectId): Version id which can be used as input link
-            for other versions.
-        fields (list[str]): Fields that should be returned. All fields are
+        version_id (Union[str, ObjectId]): Version id which can be used
+            as input link for other versions.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
-        Cursor|list: Iterable cursor yielding versions that are used as input
+        Iterable: Iterable cursor yielding versions that are used as input
             links for passed version.
     """
 
@@ -739,7 +778,10 @@ def get_last_versions(project_name, subset_ids, fields=None):
     """Latest versions for entered subset_ids.
 
     Args:
-        subset_ids (list): List of subset ids.
+        project_name (str): Name of project where to look for queried entities.
+        subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids.
+        fields (Iterable[str]): Fields that should be returned. All fields are
+            returned if 'None' is passed.
 
     Returns:
         dict[ObjectId, int]: Key is subset id and value is last version name.
 
     if not subset_ids:
         return {}
 
-    _pipeline = [
+    if fields is not None:
+        fields = list(fields)
+        if not fields:
+            return {}
+
+    # Avoid double query if only name and _id are requested
+    name_needed = False
+    limit_query = False
+    if fields:
+        fields_s = set(fields)
+        if "name" in fields_s:
+            name_needed = True
+            fields_s.remove("name")
+
+        for field in ("_id", "parent"):
+            if field in fields_s:
+                fields_s.remove(field)
+        limit_query = len(fields_s) == 0
+
+    group_item = {
+        "_id": "$parent",
+        "_version_id": {"$last": "$_id"}
+    }
+    # Add name if name is needed (only for limit query)
+    if name_needed:
+        group_item["name"] = {"$last": "$name"}
+
+    aggregation_pipeline = [
         # Find all versions of those subsets
         {"$match": {
             "type": "version",
@@ -758,16 +827,24 @@
         # Sorting versions all together
         {"$sort": {"name": 1}},
         # Group them by "parent", but only take the last
-        {"$group": {
-            "_id": "$parent",
-            "_version_id": {"$last": "$_id"}
-        }}
+        {"$group": group_item}
     ]
 
     conn = _get_project_connection(project_name)
+    aggregate_result = conn.aggregate(aggregation_pipeline)
+    if limit_query:
+        output = {}
+        for item in aggregate_result:
+            subset_id = item["_id"]
+            item_data = {"_id": item["_version_id"], "parent": subset_id}
+            if name_needed:
+                item_data["name"] = item["name"]
+            output[subset_id] = item_data
+        return output
+
     version_ids = [
         doc["_version_id"]
-        for doc in conn.aggregate(_pipeline)
+        for doc in aggregate_result
     ]
 
     fields = _prepare_fields(fields, ["parent"])
@@ -787,8 +864,8 @@ def get_last_version_by_subset_id(project_name, subset_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        subset_id (str|ObjectId): Id of version which should be found.
-        fields (list[str]): Fields that should be returned. All fields are
+        subset_id (Union[str, ObjectId]): Id of version which should be found.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -817,10 +894,10 @@ def get_last_version_by_subset_name(
     Args:
         project_name (str): Name of project where to look for queried entities.
         subset_name (str): Name of subset.
-        asset_id (str|ObjectId): Asset id which is parent of passed
+        asset_id (Union[str, ObjectId]): Asset id which is parent of passed
             subset name.
         asset_name (str): Asset name which is parent of passed subset name.
-        fields (list[str]): Fields that should be returned. All fields are
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -851,8 +928,8 @@ def get_representation_by_id(project_name, representation_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        representation_id (str|ObjectId): Representation id.
-        fields (list[str]): Fields that should be returned. All fields are
+        representation_id (Union[str, ObjectId]): Representation id.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -864,7 +941,7 @@
     if not representation_id:
         return None
 
-    repre_types = ["representation", "archived_representations"]
+    repre_types = ["representation", "archived_representation"]
     query_filter = {
         "type": {"$in": repre_types}
     }
@@ -884,8 +961,8 @@ def get_representation_by_name(
 
     Args:
         project_name (str): Name of project where to look for queried entities.
         representation_name (str): Representation name.
-        version_id (str|ObjectId): Id of parent version entity.
-        fields (list[str]): Fields that should be returned. All fields are
+        version_id (Union[str, ObjectId]): Id of parent version entity.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -908,43 +985,26 @@ def get_representation_by_name(
     return conn.find_one(query_filter, _prepare_fields(fields))
 
 
-def get_representations(
+def _get_representations(
     project_name,
-    representation_ids=None,
-    representation_names=None,
-    version_ids=None,
-    extensions=None,
-    names_by_version_ids=None,
-    archived=False,
-    fields=None
+    representation_ids,
+    representation_names,
+    version_ids,
+    extensions,
+    names_by_version_ids,
+    standard,
+    archived,
+    fields
 ):
-    """Representaion entities data from one project filtered by filters.
-
-    Filters are additive (all conditions must pass to return subset).
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        representation_ids (list[str|ObjectId]): Representation ids used as
-            filter. Filter ignored if 'None' is passed.
-        representation_names (list[str]): Representations names used as filter.
-            Filter ignored if 'None' is passed.
-        version_ids (list[str]): Subset ids used as parent filter. Filter
-            ignored if 'None' is passed.
-        extensions (list[str]): Filter by extension of main representation
-            file (without dot).
-        names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering
-            using version ids and list of names under the version.
-        archived (bool): Output will also contain archived representations.
-        fields (list[str]): Fields that should be returned. All fields are
-            returned if 'None' is passed.
-
-    Returns:
-        Cursor: Iterable cursor yielding all matching representations.
-    """
-
-    repre_types = ["representation"]
+    repre_types = []
+    if standard:
+        repre_types.append("representation")
     if archived:
-        repre_types.append("archived_representations")
+        repre_types.append("archived_representation")
+
+    if not repre_types:
+        return []
+
     if len(repre_types) == 1:
         query_filter = {"type": repre_types[0]}
     else:
@@ -989,6 +1049,99 @@
     return conn.find(query_filter, _prepare_fields(fields))
 
 
+def get_representations(
+    project_name,
+    representation_ids=None,
+    representation_names=None,
+    version_ids=None,
+    extensions=None,
+    names_by_version_ids=None,
+    archived=False,
+    standard=True,
+    fields=None
+):
+    """Representation entities data from one project filtered by filters.
+
+    Filters are additive (all conditions must pass to return subset).
+
+    Args:
+        project_name (str): Name of project where to look for queried entities.
+        representation_ids (Iterable[Union[str, ObjectId]]): Representation ids
+            used as filter. Filter ignored if 'None' is passed.
+        representation_names (Iterable[str]): Representations names used
+            as filter. Filter ignored if 'None' is passed.
+        version_ids (Iterable[str]): Subset ids used as parent filter. Filter
+            ignored if 'None' is passed.
+        extensions (Iterable[str]): Filter by extension of main representation
+            file (without dot).
+        names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering
+            using version ids and list of names under the version.
+        archived (bool): Output will also contain archived representations.
+        standard (bool): Query standard representations when True.
+        fields (Iterable[str]): Fields that should be returned. All fields are
+            returned if 'None' is passed.
+
+    Returns:
+        Cursor: Iterable cursor yielding all matching representations.
+    """
+
+    return _get_representations(
+        project_name=project_name,
+        representation_ids=representation_ids,
+        representation_names=representation_names,
+        version_ids=version_ids,
+        extensions=extensions,
+        names_by_version_ids=names_by_version_ids,
+        standard=True,
+        archived=archived,
+        fields=fields
+    )
+
+
+def get_archived_representations(
+    project_name,
+    representation_ids=None,
+    representation_names=None,
+    version_ids=None,
+    extensions=None,
+    names_by_version_ids=None,
+    fields=None
+):
+    """Archived representation entities data from project with applied filters.
+
+    Filters are additive (all conditions must pass to return subset).
+
+    Args:
+        project_name (str): Name of project where to look for queried entities.
+        representation_ids (Iterable[Union[str, ObjectId]]): Representation ids
+            used as filter. Filter ignored if 'None' is passed.
+        representation_names (Iterable[str]): Representations names used
+            as filter. Filter ignored if 'None' is passed.
+        version_ids (Iterable[str]): Subset ids used as parent filter. Filter
+            ignored if 'None' is passed.
+        extensions (Iterable[str]): Filter by extension of main representation
+            file (without dot).
+        names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering
+            using version ids and list of names under the version.
+        fields (Iterable[str]): Fields that should be returned. All fields are
+            returned if 'None' is passed.
+
+    Returns:
+        Cursor: Iterable cursor yielding all matching representations.
+    """
+
+    return _get_representations(
+        project_name=project_name,
+        representation_ids=representation_ids,
+        representation_names=representation_names,
+        version_ids=version_ids,
+        extensions=extensions,
+        names_by_version_ids=names_by_version_ids,
+        standard=False,
+        archived=True,
+        fields=fields
+    )
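Hypothetical call sites for the split above. The public wrappers keep the old keyword signature while `standard`/`archived` decide which document types the shared `_get_representations` helper matches; the project name below is a placeholder.

```python
from openpype.client import (
    get_representations,
    get_archived_representations,
)

project_name = "demo_project"  # placeholder project

# Active representations only (default 'standard=True', 'archived=False').
active = list(get_representations(
    project_name,
    representation_names=["exr", "abc"],
    fields=["_id", "name", "parent"],
))

# Archived representations go through the dedicated wrapper, which
# queries the 'archived_representation' type.
archived = list(get_archived_representations(project_name))
```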
 
 
 def get_representations_parents(project_name, representations):
     """Prepare parents of representation entities.
 
     Each item of returned dictionary contains version, subset, asset
     and project in that order.
 
@@ -997,7 +1150,7 @@ def get_representations_parents(project_name, representations):
     Args:
         project_name (str): Name of project where to look for queried entities.
-        representations (list[dict]): Representation entities with at least
+        representations (List[dict]): Representation entities with at least
             '_id' and 'parent' keys.
 
     Returns:
@@ -1009,8 +1162,10 @@
     versions_by_subset_id = collections.defaultdict(list)
     subsets_by_subset_id = {}
     subsets_by_asset_id = collections.defaultdict(list)
+    output = {}
     for representation in representations:
         repre_id = representation["_id"]
+        output[repre_id] = (None, None, None, None)
         version_id = representation["parent"]
         repres_by_version_id[version_id].append(representation)
 
@@ -1040,7 +1195,6 @@
 
     project = get_project(project_name)
 
-    output = {}
     for version_id, representations in repres_by_version_id.items():
         asset = None
         subset = None
@@ -1080,7 +1234,7 @@
     parents_by_repre_id = get_representations_parents(
         project_name, [representation]
     )
-    return parents_by_repre_id.get(repre_id)
+    return parents_by_repre_id[repre_id]
 
 
 def get_thumbnail_id_from_source(project_name, src_type, src_id):
@@ -1089,7 +1243,7 @@
     Args:
         project_name (str): Name of project where to look for queried entities.
         src_type (str): Type of source entity ('asset', 'version').
-        src_id (str|objectId): Id of source entity.
+        src_id (Union[str, ObjectId]): Id of source entity.
 
     Returns:
         ObjectId: Thumbnail id assigned to entity.
@@ -1116,8 +1270,9 @@ def get_thumbnails(project_name, thumbnail_ids, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        thumbnail_ids (list[str|ObjectId]): Ids of thumbnail entities.
-        fields (list[str]): Fields that should be returned. All fields are
+        thumbnail_ids (Iterable[Union[str, ObjectId]]): Ids of thumbnail
+            entities.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -1142,8 +1297,8 @@ def get_thumbnail(project_name, thumbnail_id, fields=None):
 
     Args:
         project_name (str): Name of project where to look for queried entities.
-        thumbnail_id (str|ObjectId): Id of thumbnail entity.
-        fields (list[str]): Fields that should be returned. All fields are
+        thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity.
+        fields (Iterable[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
 
     Returns:
@@ -1158,6 +1313,37 @@
     return conn.find_one(query_filter, _prepare_fields(fields))
 
 
+def get_workfile_info(
+    project_name, asset_id, task_name, filename, fields=None
+):
+    """Document with workfile information.
+
+    Warning:
+        The query is based on filename and context, which does not mean it
+        will always find the right and expected result. The information has
+        limited usage and it is not recommended to use it as a source of
+        truth about a workfile.
+
+    Args:
+        project_name (str): Name of project where to look for queried entities.
+        asset_id (Union[str, ObjectId]): Id of asset entity.
+        task_name (str): Task name on asset.
+        filename (str): Workfile filename.
+        fields (Iterable[str]): Fields that should be returned. All fields are
+            returned if 'None' is passed.
+    """
+
+    if not asset_id or not task_name or not filename:
+        return None
+
+    query_filter = {
+        "type": "workfile",
+        "parent": _convert_id(asset_id),
+        "task_name": task_name,
+        "filename": filename
+    }
+    conn = _get_project_connection(project_name)
+    return conn.find_one(query_filter, _prepare_fields(fields))
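A hypothetical lookup using the new helper. All context values below are placeholders; per the guard clause above, the function returns `None` when asset id, task name or filename is missing.

```python
from openpype.client import get_workfile_info

workfile_doc = get_workfile_info(
    "demo_project",                        # placeholder project
    asset_id="507f1f77bcf86cd799439011",   # placeholder ObjectId string
    task_name="modeling",
    filename="demo_asset_modeling_v001.ma",
    fields=["filename", "data"],
)
if workfile_doc is None:
    print("No workfile document matched this context")
```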
- - subset - - subsets - - version - - versions - - last version - - representations - - linked representations (new function get_linked_ids_for_representations) Update: - workfile data -- openpype/lib/plugin_tools.py - Query: - - asset - openpype/lib/project_backpack.py - Query: - - project - - everything from mongo Update: - project -- openpype/lib/usdlib.py - Query: - - project - - asset - -## Pipeline -- openpype/pipeline/load/utils.py - Query: - - project - - assets - - subsets - - version - - versions - - representation - - representations -- openpype/pipeline/mongodb.py - Query: - - project -- openpype/pipeline/thumbnail.py - Query: - - project - -## Hosts -### Aftereffects -- openpype/hosts/aftereffects/plugins/create/workfile_creator.py - Query: - - asset - -### Blender -- openpype/hosts/blender/api/pipeline.py - Query: - - asset -- openpype/hosts/blender/plugins/publish/extract_layout.py - Query: - - representation - -### Celaction -- openpype/hosts/celaction/plugins/publish/collect_audio.py - Query: - - subsets - - last versions - - representations - -### Fusion -- openpype/hosts/fusion/api/lib.py - Query: - - asset - - subset - - version - - representation -- openpype/hosts/fusion/plugins/load/load_sequence.py - Query: - - version -- openpype/hosts/fusion/scripts/fusion_switch_shot.py - Query: - - project - - asset - - versions -- openpype/hosts/fusion/utility_scripts/switch_ui.py - Query: - - assets - -### Harmony -- openpype/hosts/harmony/api/pipeline.py - Query: - - representation - -### Hiero -- openpype/hosts/hiero/api/lib.py - Query: - - project - - version - - versions - - representation -- openpype/hosts/hiero/api/tags.py - Query: - - task types - - assets -- openpype/hosts/hiero/plugins/load/load_clip.py - Query: - - version - - versions -- openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py - Query: - - assets - -### Houdini -- openpype/hosts/houdini/api/lib.py - Query: - - asset -- openpype/hosts/houdini/api/usd.py - Query: - - asset -- openpype/hosts/houdini/plugins/create/create_hda.py - Query: - - asset - - subsets -- openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py - Query: - - asset - - subset -- openpype/hosts/houdini/plugins/publish/extract_usd_layered.py - Query: - - asset - - subset - - version - - representation -- openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py - Query: - - asset - - subset -- openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py - Query: - - project - - asset - -### Maya -- openpype/hosts/maya/api/action.py - Query: - - asset -- openpype/hosts/maya/api/commands.py - Query: - - asset - - project -- openpype/hosts/maya/api/lib.py - Query: - - project - - asset - - subset - - subsets - - version - - representation -- openpype/hosts/maya/api/setdress.py - Query: - - version - - representation -- openpype/hosts/maya/plugins/inventory/import_modelrender.py - Query: - - representation -- openpype/hosts/maya/plugins/load/load_audio.py - Query: - - asset - - subset - - version -- openpype/hosts/maya/plugins/load/load_image_plane.py - Query: - - asset - - subset - - version -- openpype/hosts/maya/plugins/load/load_look.py - Query: - - representation -- openpype/hosts/maya/plugins/load/load_vrayproxy.py - Query: - - representation -- openpype/hosts/maya/plugins/load/load_yeti_cache.py - Query: - - representation -- openpype/hosts/maya/plugins/publish/collect_review.py - Query: - - subsets -- openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py - Query: 
- - assets -- openpype/hosts/maya/plugins/publish/validate_node_ids_related.py - Query: - - asset -- openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py - Query: - - asset - - subset - -### Nuke -- openpype/hosts/nuke/api/command.py - Query: - - project - - asset -- openpype/hosts/nuke/api/lib.py - Query: - - project - - asset - - version - - versions - - representation -- openpype/hosts/nuke/plugins/load/load_backdrop.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_camera_abc.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_clip.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_effects_ip.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_effects.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_gizmo_ip.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_gizmo.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_image.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_model.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/load/load_script_precomp.py - Query: - - version - - versions -- openpype/hosts/nuke/plugins/publish/collect_reads.py - Query: - - asset -- openpype/hosts/nuke/plugins/publish/precollect_instances.py - Query: - - asset -- openpype/hosts/nuke/plugins/publish/precollect_writes.py - Query: - - representation -- openpype/hosts/nuke/plugins/publish/validate_script.py - Query: - - asset - - project - -### Photoshop -- openpype/hosts/photoshop/plugins/create/workfile_creator.py - Query: - - asset - -### Resolve -- openpype/hosts/resolve/plugins/load/load_clip.py - Query: - - version - - versions - -### Standalone publisher -- openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py - Query: - - asset -- openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py - Query: - - assets -- openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py - Query: - - project - - asset -- openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py - Query: - - assets - -### TVPaint -- openpype/hosts/tvpaint/api/pipeline.py - Query: - - project - - asset -- openpype/hosts/tvpaint/plugins/load/load_workfile.py - Query: - - project - - asset -- openpype/hosts/tvpaint/plugins/publish/collect_instances.py - Query: - - asset -- openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py - Query: - - asset -- openpype/hosts/tvpaint/plugins/publish/collect_workfile.py - Query: - - asset - -### Unreal -- openpype/hosts/unreal/plugins/load/load_camera.py - Query: - - asset - - assets -- openpype/hosts/unreal/plugins/load/load_layout.py - Query: - - asset - - assets -- openpype/hosts/unreal/plugins/publish/extract_layout.py - Query: - - representation - -### Webpublisher -- openpype/hosts/webpublisher/webserver_service/webpublish_routes.py - Query: - - assets -- openpype/hosts/webpublisher/plugins/publish/collect_published_files.py - Query: - - last versions - -## Tools -openpype/tools/assetlinks/widgets.py -- SimpleLinkView - Query: - - get_versions - - get_subsets - - get_assets - - get_output_link_versions - -openpype/tools/creator/window.py -- CreatorWindow - Query: - - get_asset_by_name - - get_subsets - -openpype/tools/launcher/models.py -- LauncherModel - Query: - - get_project - - get_assets - -openpype/tools/libraryloader/app.py -- LibraryLoaderWindow - Query: - - 
get_project - -openpype/tools/loader/app.py -- LoaderWindow - Query: - - get_project -- show - Query: - - get_projects - -openpype/tools/loader/model.py -- SubsetsModel - Query: - - get_assets - - get_subsets - - get_last_versions - - get_versions - - get_hero_versions - - get_version_by_name -- RepresentationModel - Query: - - get_representations - - sync server specific queries (separated into multiple functions?) - - NOT REPLACED - -openpype/tools/loader/widgets.py -- FamilyModel - Query: - - get_subset_families -- VersionTextEdit - Query: - - get_subset_by_id - - get_version_by_id -- SubsetWidget - Query: - - get_subsets - - get_representations - Update: - - Subset groups (combination of asset id and subset names) -- RepresentationWidget - Query: - - get_subsets - - get_versions - - get_representations -- ThumbnailWidget - Query: - - get_thumbnail_id_from_source - - get_thumbnail - -openpype/tools/mayalookassigner/app.py -- MayaLookAssignerWindow - Query: - - get_last_version_by_subset_id - -openpype/tools/mayalookassigner/commands.py -- create_items_from_nodes - Query: - - get_asset_by_id - -openpype/tools/mayalookassigner/vray_proxies.py -- get_look_relationships - Query: - - get_representation_by_name -- load_look - Query: - - get_representation_by_name -- vrayproxy_assign_look - Query: - - get_last_version_by_subset_name - -openpype/tools/project_manager/project_manager/model.py -- HierarchyModel - Query: - - get_asset_ids_with_subsets - - get_project - - get_assets - -openpype/tools/project_manager/project_manager/view.py -- ProjectDocCache - Query: - - get_project - -openpype/tools/project_manager/project_manager/widgets.py -- CreateProjectDialog - Query: - - get_projects - -openpype/tools/publisher/widgets/create_dialog.py -- CreateDialog - Query: - - get_asset_by_name - - get_subsets - -openpype/tools/publisher/control.py -- AssetDocsCache - Query: - - get_assets - -openpype/tools/sceneinventory/model.py -- InventoryModel - Query: - - get_asset_by_id - - get_subset_by_id - - get_version_by_id - - get_last_version_by_subset_id - - get_representation - -openpype/tools/sceneinventory/switch_dialog.py -- SwitchAssetDialog - Query: - - get_asset_by_name - - get_assets - - get_subset_by_name - - get_subsets - - get_versions - - get_hero_versions - - get_last_versions - - get_representations - -openpype/tools/sceneinventory/view.py -- SceneInventoryView - Query: - - get_version_by_id - - get_versions - - get_hero_versions - - get_representation_by_id - - get_representations - -openpype/tools/standalonepublish/widgets/model_asset.py -- AssetModel - Query: - - get_assets - -openpype/tools/standalonepublish/widgets/widget_asset.py -- AssetWidget - Query: - - get_project - - get_asset_by_id - -openpype/tools/standalonepublish/widgets/widget_family.py -- FamilyWidget - Query: - - get_asset_by_name - - get_subset_by_name - - get_subsets - - get_last_version_by_subset_id - -openpype/tools/standalonepublish/app.py -- Window - Query: - - get_asset_by_id - -openpype/tools/texture_copy/app.py -- TextureCopy - Query: - - get_project - - get_asset_by_name - -openpype/tools/workfiles/files_widget.py -- FilesWidget - Query: - - get_asset_by_id - -openpype/tools/workfiles/model.py -- PublishFilesModel - Query: - - get_subsets - - get_versions - - get_representations - -openpype/tools/workfiles/save_as_dialog.py -- build_workfile_data - Query: - - get_project - - get_asset_by_name - -openpype/tools/workfiles/window.py -- Window - Query: - - get_asset_by_id - - get_asset_by_name - 
-openpype/tools/utils/assets_widget.py
-- AssetModel
-  Query:
-  - get_project
-  - get_assets
-
-openpype/tools/utils/delegates.py
-- VersionDelegate
-  Query:
-  - get_versions
-  - get_hero_versions
-
-openpype/tools/utils/lib.py
-- GroupsConfig
-  Query:
-  - get_project
-- FamilyConfigCache
-  Query:
-  - get_asset_by_name
-
-openpype/tools/utils/tasks_widget.py
-- TasksModel
-  Query:
-  - get_project
-  - get_asset_by_id
-"""
diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py
index ea5e290d6f..6577e37cbe 100644
--- a/openpype/hooks/pre_global_host_data.py
+++ b/openpype/hooks/pre_global_host_data.py
@@ -1,11 +1,10 @@
-from openpype.api import Anatomy
 from openpype.lib import (
     PreLaunchHook,
     EnvironmentPrepData,
     prepare_app_environments,
     prepare_context_environments
 )
-from openpype.pipeline import AvalonMongoDB
+from openpype.pipeline import AvalonMongoDB, Anatomy


 class GlobalHostDataHook(PreLaunchHook):
diff --git a/openpype/host/__init__.py b/openpype/host/__init__.py
new file mode 100644
index 0000000000..84a2fa930a
--- /dev/null
+++ b/openpype/host/__init__.py
@@ -0,0 +1,13 @@
+from .host import (
+    HostBase,
+    IWorkfileHost,
+    ILoadHost,
+    INewPublisher,
+)
+
+__all__ = (
+    "HostBase",
+    "IWorkfileHost",
+    "ILoadHost",
+    "INewPublisher",
+)
diff --git a/openpype/host/host.py b/openpype/host/host.py
new file mode 100644
index 0000000000..48907e7ec7
--- /dev/null
+++ b/openpype/host/host.py
@@ -0,0 +1,524 @@
+import logging
+import contextlib
+from abc import ABCMeta, abstractproperty, abstractmethod
+
+import six
+
+# NOTE can't import 'typing' because of issues in Maya 2020
+# - shiboken crashes on 'typing' module import
+
+
+class MissingMethodsError(ValueError):
+    """Exception raised when a host is missing methods required for a
+    specific workflow.
+
+    Args:
+        host (HostBase): Host implementation that is missing the methods.
+        missing_methods (list[str]): List of missing methods.
+    """
+
+    def __init__(self, host, missing_methods):
+        joined_missing = ", ".join(
+            ['"{}"'.format(item) for item in missing_methods]
+        )
+        message = (
+            "Host \"{}\" is missing methods {}".format(
+                host.name, joined_missing
+            )
+        )
+        super(MissingMethodsError, self).__init__(message)
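For illustration, a minimal sketch of how `MissingMethodsError` surfaces to callers. The `_LegacyStub` class is hypothetical; only its `name` attribute is read when the error message is built:

```python
from openpype.host.host import MissingMethodsError


class _LegacyStub(object):
    # Hypothetical stand-in for a host; only 'name' is used by the error.
    name = "legacy_stub"


try:
    raise MissingMethodsError(_LegacyStub(), ["open_file", "save_file"])
except MissingMethodsError as exc:
    # -> Host "legacy_stub" is missing methods "open_file", "save_file"
    print(exc)
```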
+
+
+@six.add_metaclass(ABCMeta)
+class HostBase(object):
+    """Base of host implementation class.
+
+    A host is the pipeline implementation of a DCC application. This class
+    should help to identify what must/should/can be implemented for
+    specific functionality.
+
+    Compared to the 'avalon' concept: it covers what was previously loose
+    functions in a host implementation folder. The host implementation
+    should primarily care about adding the ability of creation (marking
+    subsets to be published) and optionally about referencing published
+    representations as containers.
+
+    A host may need to extend some functionality, like working with
+    workfiles or loading. Not all host implementations may allow that, so
+    the logic can be extended by implementing functions for that purpose.
+    There are prepared interfaces to identify what must be implemented to
+    be able to use that functionality.
+    - the current statement is that it is not required to inherit from the
+      interfaces, but all of their methods are validated (only their
+      existence!)
+
+    # Installation of host before (avalon concept):
+    ```python
+    from openpype.pipeline import install_host
+    import openpype.hosts.maya.api as host
+
+    install_host(host)
+    ```
+
+    # Installation of host now:
+    ```python
+    from openpype.pipeline import install_host
+    from openpype.hosts.maya.api import MayaHost
+
+    host = MayaHost()
+    install_host(host)
+    ```
+
+    Todo:
+        - move content of 'install_host' to a method of this class
+            - register host object
+            - install legacy_io
+            - install global plugin paths
+            - store registered plugin paths to this object
+        - handle current context (project, asset, task)
+            - this must be done in many separate steps
+        - have its own object of host tools instead of using globals
+
+    This implementation will probably change over time as more
+    functionality and responsibility are added.
+    """
+
+    _log = None
+
+    def __init__(self):
+        """Initialization of host.
+
+        Register DCC callbacks, host specific plugin paths, targets etc.
+        (Part of what 'install' did in the 'avalon' concept.)
+
+        Note:
+            At this moment the global "installation" must happen before
+            host installation. Because of this current limitation it is
+            recommended to implement an 'install' method which is
+            triggered after the global 'install'.
+        """
+
+        pass
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = logging.getLogger(self.__class__.__name__)
+        return self._log
+
+    @abstractproperty
+    def name(self):
+        """Host name."""
+
+        pass
+
+    def get_current_context(self):
+        """Get current context information.
+
+        This method should be used to get the current context of the host.
+        Its usage can be crucial for host implementations in DCCs where
+        multiple workfiles can be opened at the same time and a change of
+        context can't be caught properly.
+
+        Default implementation returns values from 'legacy_io.Session'.
+
+        Returns:
+            dict: Context with 3 keys 'project_name', 'asset_name' and
+                'task_name'. All of them can be 'None'.
+        """
+
+        from openpype.pipeline import legacy_io
+
+        if not legacy_io.is_installed():
+            legacy_io.install()
+
+        return {
+            "project_name": legacy_io.Session["AVALON_PROJECT"],
+            "asset_name": legacy_io.Session["AVALON_ASSET"],
+            "task_name": legacy_io.Session["AVALON_TASK"]
+        }
+
+    def get_context_title(self):
+        """Context title shown for UI purposes.
+
+        Should return the current context title if possible.
+
+        Note:
+            This method is used only for UI purposes, so it is possible to
+            return some logical title for contextless cases.
+            It is not meant for a "Context menu" label.
+
+        Returns:
+            str: Context title.
+            None: Default title is used based on UI implementation.
+        """
+
+        # Use current context to fill the context title
+        current_context = self.get_current_context()
+        project_name = current_context["project_name"]
+        asset_name = current_context["asset_name"]
+        task_name = current_context["task_name"]
+        items = []
+        if project_name:
+            items.append(project_name)
+            if asset_name:
+                items.append(asset_name)
+                if task_name:
+                    items.append(task_name)
+        if items:
+            return "/".join(items)
+        return None
+
+    @contextlib.contextmanager
+    def maintained_selection(self):
+        """Run some functionality while keeping the current selection.
+
+        This is DCC specific. Some DCCs may not allow implementing this
+        ability, which is why the default implementation is an empty
+        context manager.
+
+        Yields:
+            None: Yields when ready; the selection is restored at the end.
+        """
+
+        try:
+            yield
+        finally:
+            pass
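To make the contract above concrete, here is a minimal sketch of a `HostBase` subclass. The context values are hypothetical and `get_current_context` is overridden so the example does not require a configured `legacy_io` session:

```python
from openpype.host import HostBase


class MinimalHost(HostBase):
    """Sketch only: 'name' is the single required override."""

    name = "minimal"

    def get_current_context(self):
        # Stubbed so the example runs without a pipeline session.
        return {
            "project_name": "demo_project",   # hypothetical values
            "asset_name": "sh010",
            "task_name": "compositing",
        }


host = MinimalHost()
print(host.get_context_title())  # -> demo_project/sh010/compositing
```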
+
+
+@six.add_metaclass(ABCMeta)
+class ILoadHost:
+    """Implementation requirements to be able to use references of
+    representations.
+
+    The load plugins can do referencing even without implementing the
+    methods here, but switching and removal of containers would not be
+    possible.
+
+    Questions:
+    - Is the container query a dependency of the host or of load plugins?
+    - Should this be directly in HostBase?
+    - How to find out if referencing is available?
+    - Do we need to know that?
+    """
+
+    @staticmethod
+    def get_missing_load_methods(host):
+        """Look for missing methods on an "old type" host implementation.
+
+        The method is used for validation of implemented functions related
+        to loading. It checks only the existence of methods.
+
+        Args:
+            host (Union[ModuleType, HostBase]): Object of host where to
+                look for required methods.
+
+        Returns:
+            list[str]: Missing method implementations for loading workflow.
+        """
+
+        if isinstance(host, ILoadHost):
+            return []
+
+        required = ["ls"]
+        missing = []
+        for name in required:
+            if not hasattr(host, name):
+                missing.append(name)
+        return missing
+
+    @staticmethod
+    def validate_load_methods(host):
+        """Validate implemented methods of an "old type" host for the load
+        workflow.
+
+        Args:
+            host (Union[ModuleType, HostBase]): Object of host to validate.
+
+        Raises:
+            MissingMethodsError: If there are missing methods on the host
+                implementation.
+        """
+        missing = ILoadHost.get_missing_load_methods(host)
+        if missing:
+            raise MissingMethodsError(host, missing)
+
+    @abstractmethod
+    def get_containers(self):
+        """Retrieve referenced containers from the scene.
+
+        This can be implemented in hosts where referencing can be used.
+
+        Returns:
+            list[dict]: Information about loaded containers.
+        """
+
+        pass
+
+    # --- Deprecated method names ---
+    def ls(self):
+        """Deprecated variant of 'get_containers'.
+
+        Todo:
+            Remove when all usages are replaced.
+        """
+
+        return self.get_containers()
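The duck-typed validation described in `ILoadHost` can be sketched like this. The `legacy_host` module is hypothetical and stands in for an "old type" host implementation:

```python
import types

from openpype.host import ILoadHost
from openpype.host.host import MissingMethodsError

# Hypothetical "old type" host: a plain module exposing loader functions.
legacy_host = types.ModuleType("legacy_host")
legacy_host.name = "legacy_host"
legacy_host.ls = lambda: []  # the containers query expected for loading

# Only the existence of required names is checked, not inheritance.
assert ILoadHost.get_missing_load_methods(legacy_host) == []

del legacy_host.ls
try:
    ILoadHost.validate_load_methods(legacy_host)
except MissingMethodsError as exc:
    print(exc)  # reports the missing "ls" method
```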
+ """ + + pass + + @abstractmethod + def open_workfile(self, filepath): + """Open passed filepath in the host. + + Args: + filepath (str): Path to workfile. + """ + + pass + + @abstractmethod + def get_current_workfile(self): + """Retreive path to current opened file. + + Returns: + str: Path to file which is currently opened. + None: If nothing is opened. + """ + + return None + + def workfile_has_unsaved_changes(self): + """Currently opened scene is saved. + + Not all hosts can know if current scene is saved because the API of + DCC does not support it. + + Returns: + bool: True if scene is saved and False if has unsaved + modifications. + None: Can't tell if workfiles has modifications. + """ + + return None + + def work_root(self, session): + """Modify workdir per host. + + Default implementation keeps workdir untouched. + + Warnings: + We must handle this modification with more sofisticated way because + this can't be called out of DCC so opening of last workfile + (calculated before DCC is launched) is complicated. Also breaking + defined work template is not a good idea. + Only place where it's really used and can make sense is Maya. There + workspace.mel can modify subfolders where to look for maya files. + + Args: + session (dict): Session context data. + + Returns: + str: Path to new workdir. + """ + + return session["AVALON_WORKDIR"] + + # --- Deprecated method names --- + def file_extensions(self): + """Deprecated variant of 'get_workfile_extensions'. + + Todo: + Remove when all usages are replaced. + """ + return self.get_workfile_extensions() + + def save_file(self, dst_path=None): + """Deprecated variant of 'save_workfile'. + + Todo: + Remove when all usages are replaced. + """ + + self.save_workfile() + + def open_file(self, filepath): + """Deprecated variant of 'open_workfile'. + + Todo: + Remove when all usages are replaced. + """ + + return self.open_workfile(filepath) + + def current_file(self): + """Deprecated variant of 'get_current_workfile'. + + Todo: + Remove when all usages are replaced. + """ + + return self.get_current_workfile() + + def has_unsaved_changes(self): + """Deprecated variant of 'workfile_has_unsaved_changes'. + + Todo: + Remove when all usages are replaced. + """ + + return self.workfile_has_unsaved_changes() + + +class INewPublisher: + """Functions related to new creation system in new publisher. + + New publisher is not storing information only about each created instance + but also some global data. At this moment are data related only to context + publish plugins but that can extend in future. + """ + + @staticmethod + def get_missing_publish_methods(host): + """Look for missing methods on "old type" host implementation. + + Method is used for validation of implemented functions related to + new publish creation. Checks only existence of methods. + + Args: + Union[ModuleType, HostBase]: Host module where to look for + required methods. + + Returns: + list[str]: Missing method implementations for new publsher + workflow. + """ + + if isinstance(host, INewPublisher): + return [] + + required = [ + "get_context_data", + "update_context_data", + ] + missing = [] + for name in required: + if not hasattr(host, name): + missing.append(name) + return missing + + @staticmethod + def validate_publish_methods(host): + """Validate implemented methods of "old type" host. + + Args: + Union[ModuleType, HostBase]: Host module to validate. + + Raises: + MissingMethodsError: If there are missing methods on host + implementation. 
+ """ + missing = INewPublisher.get_missing_publish_methods(host) + if missing: + raise MissingMethodsError(host, missing) + + @abstractmethod + def get_context_data(self): + """Get global data related to creation-publishing from workfile. + + These data are not related to any created instance but to whole + publishing context. Not saving/returning them will cause that each + reset of publishing resets all values to default ones. + + Context data can contain information about enabled/disabled publish + plugins or other values that can be filled by artist. + + Returns: + dict: Context data stored using 'update_context_data'. + """ + + pass + + @abstractmethod + def update_context_data(self, data, changes): + """Store global context data to workfile. + + Called when some values in context data has changed. + + Without storing the values in a way that 'get_context_data' would + return them will each reset of publishing cause loose of filled values + by artist. Best practice is to store values into workfile, if possible. + + Args: + data (dict): New data as are. + changes (dict): Only data that has been changed. Each value has + tuple with '(, )' value. + """ + + pass diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 215c148f37..1019709dd6 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -17,11 +17,8 @@ class RenderCreator(Creator): create_allow_context_change = True - def __init__( - self, create_context, system_settings, project_settings, headless=False - ): - super(RenderCreator, self).__init__(create_context, system_settings, - project_settings, headless) + def __init__(self, project_settings, *args, **kwargs): + super(RenderCreator, self).__init__(project_settings, *args, **kwargs) self._default_variants = (project_settings["aftereffects"] ["create"] ["RenderCreator"] diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index 97b3175c57..bb199a61f7 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -6,8 +6,8 @@ import attr import pyblish.api from openpype.settings import get_project_settings -from openpype.lib import abstract_collect_render -from openpype.lib.abstract_collect_render import RenderInstance +from openpype.pipeline import publish +from openpype.pipeline.publish import RenderInstance from openpype.hosts.aftereffects.api import get_stub @@ -25,7 +25,7 @@ class AERenderInstance(RenderInstance): file_name = attr.ib(default=None) -class CollectAERender(abstract_collect_render.AbstractCollectRender): +class CollectAERender(publish.AbstractCollectRender): order = pyblish.api.CollectorOrder + 0.405 label = "Collect After Effects Render Layers" diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index 93d81145bc..ea405b028e 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -93,7 +93,7 @@ def set_start_end_frames(): # Default scene settings frameStart = scene.frame_start frameEnd = scene.frame_end - fps = scene.render.fps + fps = scene.render.fps / scene.render.fps_base resolution_x = scene.render.resolution_x resolution_y = scene.render.resolution_y @@ -116,7 +116,8 @@ def set_start_end_frames(): scene.frame_start = frameStart 
scene.frame_end = frameEnd - scene.render.fps = fps + scene.render.fps = round(fps) + scene.render.fps_base = round(fps) / fps scene.render.resolution_x = resolution_x scene.render.resolution_y = resolution_y diff --git a/openpype/hosts/blender/hooks/pre_pyside_install.py b/openpype/hosts/blender/hooks/pre_pyside_install.py index a37f8f0379..e5f66d2a26 100644 --- a/openpype/hosts/blender/hooks/pre_pyside_install.py +++ b/openpype/hosts/blender/hooks/pre_pyside_install.py @@ -1,6 +1,7 @@ import os import re import subprocess +from platform import system from openpype.lib import PreLaunchHook @@ -13,12 +14,9 @@ class InstallPySideToBlender(PreLaunchHook): For pipeline implementation is required to have Qt binding installed in blender's python packages. - - Prelaunch hook can work only on Windows right now. """ app_groups = ["blender"] - platforms = ["windows"] def execute(self): # Prelaunch hook is not crucial @@ -34,25 +32,28 @@ class InstallPySideToBlender(PreLaunchHook): # Get blender's python directory version_regex = re.compile(r"^[2-3]\.[0-9]+$") + platform = system().lower() executable = self.launch_context.executable.executable_path - if os.path.basename(executable).lower() != "blender.exe": + expected_executable = "blender" + if platform == "windows": + expected_executable += ".exe" + + if os.path.basename(executable).lower() != expected_executable: self.log.info(( - "Executable does not lead to blender.exe file. Can't determine" - " blender's python to check/install PySide2." + f"Executable does not lead to {expected_executable} file." + "Can't determine blender's python to check/install PySide2." )) return - executable_dir = os.path.dirname(executable) + versions_dir = os.path.dirname(executable) + if platform == "darwin": + versions_dir = os.path.join( + os.path.dirname(versions_dir), "Resources" + ) version_subfolders = [] - for name in os.listdir(executable_dir): - fullpath = os.path.join(name, executable_dir) - if not os.path.isdir(fullpath): - continue - - if not version_regex.match(name): - continue - - version_subfolders.append(name) + for dir_entry in os.scandir(versions_dir): + if dir_entry.is_dir() and version_regex.match(dir_entry.name): + version_subfolders.append(dir_entry.name) if not version_subfolders: self.log.info( @@ -72,16 +73,21 @@ class InstallPySideToBlender(PreLaunchHook): version_subfolder = version_subfolders[0] - pythond_dir = os.path.join( - os.path.dirname(executable), - version_subfolder, - "python" - ) + python_dir = os.path.join(versions_dir, version_subfolder, "python") + python_lib = os.path.join(python_dir, "lib") + python_version = "python" + + if platform != "windows": + for dir_entry in os.scandir(python_lib): + if dir_entry.is_dir() and dir_entry.name.startswith("python"): + python_lib = dir_entry.path + python_version = dir_entry.name + break # Change PYTHONPATH to contain blender's packages as first python_paths = [ - os.path.join(pythond_dir, "lib"), - os.path.join(pythond_dir, "lib", "site-packages"), + python_lib, + os.path.join(python_lib, "site-packages"), ] python_path = self.launch_context.env.get("PYTHONPATH") or "" for path in python_path.split(os.pathsep): @@ -91,7 +97,15 @@ class InstallPySideToBlender(PreLaunchHook): self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths) # Get blender's python executable - python_executable = os.path.join(pythond_dir, "bin", "python.exe") + python_bin = os.path.join(python_dir, "bin") + if platform == "windows": + python_executable = os.path.join(python_bin, "python.exe") + else: 
+ python_executable = os.path.join(python_bin, python_version) + # Check for python with enabled 'pymalloc' + if not os.path.exists(python_executable): + python_executable += "m" + if not os.path.exists(python_executable): self.log.warning( "Couldn't find python executable for blender. {}".format( @@ -106,7 +120,15 @@ class InstallPySideToBlender(PreLaunchHook): return # Install PySide2 in blender's python - self.install_pyside_windows(python_executable) + if platform == "windows": + result = self.install_pyside_windows(python_executable) + else: + result = self.install_pyside(python_executable) + + if result: + self.log.info("Successfully installed PySide2 module to blender.") + else: + self.log.warning("Failed to install PySide2 module to blender.") def install_pyside_windows(self, python_executable): """Install PySide2 python module to blender's python. @@ -144,19 +166,41 @@ class InstallPySideToBlender(PreLaunchHook): lpDirectory=os.path.dirname(python_executable) ) process_handle = process_info["hProcess"] - obj = win32event.WaitForSingleObject( - process_handle, win32event.INFINITE - ) + win32event.WaitForSingleObject(process_handle, win32event.INFINITE) returncode = win32process.GetExitCodeProcess(process_handle) - if returncode == 0: - self.log.info( - "Successfully installed PySide2 module to blender." - ) - return + return returncode == 0 except pywintypes.error: pass - self.log.warning("Failed to install PySide2 module to blender.") + def install_pyside(self, python_executable): + """Install PySide2 python module to blender's python.""" + try: + # Parameters + # - use "-m pip" as module pip to install PySide2 and argument + # "--ignore-installed" is to force install module to blender's + # site-packages and make sure it is binary compatible + args = [ + python_executable, + "-m", + "pip", + "install", + "--ignore-installed", + "PySide2", + ] + process = subprocess.Popen( + args, stdout=subprocess.PIPE, universal_newlines=True + ) + process.communicate() + return process.returncode == 0 + except PermissionError: + self.log.warning( + "Permission denied with command:" + "\"{}\".".format(" ".join(args)) + ) + except OSError as error: + self.log.warning(f"OS error has occurred: \"{error}\".") + except subprocess.SubprocessError: + pass def is_pyside_installed(self, python_executable): """Check if PySide2 module is in blender's pip list. @@ -169,7 +213,7 @@ class InstallPySideToBlender(PreLaunchHook): args = [python_executable, "-m", "pip", "list"] process = subprocess.Popen(args, stdout=subprocess.PIPE) stdout, _ = process.communicate() - lines = stdout.decode().split("\r\n") + lines = stdout.decode().split(os.linesep) # Second line contain dashes that define maximum length of module name. # Second column of dashes define maximum length of module version. 
package_dashes, *_ = lines[1].split(" ") diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index e0a7297381..b12f2f9690 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -2,7 +2,7 @@ import os import flame from pprint import pformat import openpype.hosts.flame.api as opfapi - +from openpype.lib import StringTemplate class LoadClip(opfapi.ClipLoader): """Load a subset to timeline as clip @@ -22,7 +22,7 @@ class LoadClip(opfapi.ClipLoader): # settings reel_group_name = "OpenPype_Reels" reel_name = "Loaded" - clip_name_template = "{asset}_{subset}_{output}" + clip_name_template = "{asset}_{subset}<_{output}>" def load(self, context, name, namespace, options): @@ -36,8 +36,8 @@ class LoadClip(opfapi.ClipLoader): version_data = version.get("data", {}) version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) - clip_name = self.clip_name_template.format( - **context["representation"]["context"]) + clip_name = StringTemplate(self.clip_name_template).format( + context["representation"]["context"]) # TODO: settings in imageio # convert colorspace with ocio to flame mapping diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py index 5de3226035..fb4a3dc6e9 100644 --- a/openpype/hosts/flame/plugins/load/load_clip_batch.py +++ b/openpype/hosts/flame/plugins/load/load_clip_batch.py @@ -2,6 +2,7 @@ import os import flame from pprint import pformat import openpype.hosts.flame.api as opfapi +from openpype.lib import StringTemplate class LoadClipBatch(opfapi.ClipLoader): @@ -21,7 +22,7 @@ class LoadClipBatch(opfapi.ClipLoader): # settings reel_name = "OP_LoadedReel" - clip_name_template = "{asset}_{subset}_{output}" + clip_name_template = "{asset}_{subset}<_{output}>" def load(self, context, name, namespace, options): @@ -39,8 +40,8 @@ class LoadClipBatch(opfapi.ClipLoader): if not context["representation"]["context"].get("output"): self.clip_name_template.replace("output", "representation") - clip_name = self.clip_name_template.format( - **context["representation"]["context"]) + clip_name = StringTemplate(self.clip_name_template).format( + context["representation"]["context"]) # TODO: settings in imageio # convert colorspace with ocio to flame mapping diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py index da9553cc2a..b59107f155 100644 --- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -323,6 +323,8 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin): def _get_shot_task_dir_path(self, instance, task_data): project_doc = instance.data["projectEntity"] asset_entity = instance.data["assetEntity"] + anatomy = instance.context.data["anatomy"] return get_workdir( - project_doc, asset_entity, task_data["name"], "flame") + project_doc, asset_entity, task_data["name"], "flame", anatomy + ) diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index 29f3a3a3eb..001eb636ee 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -3,9 +3,16 @@ import sys import re import contextlib -from bson.objectid import ObjectId from Qt import QtGui +from openpype.client import ( + get_asset_by_name, + get_subset_by_name, + get_last_version_by_subset_id, + 
get_representation_by_id, + get_representation_by_name, + get_representation_parents, +) from openpype.pipeline import ( switch_container, legacy_io, @@ -93,13 +100,16 @@ def switch_item(container, raise ValueError("Must have at least one change provided to switch.") # Collect any of current asset, subset and representation if not provided - # so we can use the original name from those. + # so we can use the original name from those. + project_name = legacy_io.active_project() if any(not x for x in [asset_name, subset_name, representation_name]): - _id = ObjectId(container["representation"]) - representation = legacy_io.find_one({ - "type": "representation", "_id": _id - }) - version, subset, asset, project = legacy_io.parenthood(representation) + repre_id = container["representation"] + representation = get_representation_by_id(project_name, repre_id) + repre_parent_docs = get_representation_parents(representation) + if repre_parent_docs: + version, subset, asset, _ = repre_parent_docs + else: + version = subset = asset = None if asset_name is None: asset_name = asset["name"] @@ -111,39 +121,26 @@ def switch_item(container, representation_name = representation["name"] # Find the new one - asset = legacy_io.find_one({ - "name": asset_name, - "type": "asset" - }) + asset = get_asset_by_name(project_name, asset_name, fields=["_id"]) assert asset, ("Could not find asset in the database with the name " "'%s'" % asset_name) - subset = legacy_io.find_one({ - "name": subset_name, - "type": "subset", - "parent": asset["_id"] - }) + subset = get_subset_by_name( + project_name, subset_name, asset["_id"], fields=["_id"] + ) assert subset, ("Could not find subset in the database with the name " "'%s'" % subset_name) - version = legacy_io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[('name', -1)] + version = get_last_version_by_subset_id( + project_name, subset["_id"], fields=["_id"] ) - assert version, "Could not find a version for {}.{}".format( asset_name, subset_name ) - representation = legacy_io.find_one({ - "name": representation_name, - "type": "representation", - "parent": version["_id"]} + representation = get_representation_by_name( + project_name, representation_name, version["_id"] ) - assert representation, ("Could not find representation in the database " "with the name '%s'" % representation_name) diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py index b860abd88b..abd0f4e411 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/openpype/hosts/fusion/plugins/load/load_sequence.py @@ -1,6 +1,7 @@ import os import contextlib +from openpype.client import get_version_by_id from openpype.pipeline import ( load, legacy_io, @@ -123,7 +124,7 @@ def loader_shift(loader, frame, relative=True): class FusionLoadSequence(load.LoaderPlugin): """Load image sequence into Fusion""" - families = ["imagesequence", "review", "render"] + families = ["imagesequence", "review", "render", "plate"] representations = ["*"] label = "Load sequence" @@ -211,10 +212,8 @@ class FusionLoadSequence(load.LoaderPlugin): path = self._get_first_image(root) # Get start frame from version data - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version = get_version_by_id(project_name, representation["parent"]) start = version["data"].get("frameStart") if start is None: self.log.warning("Missing start frame for updated version" 
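The hunks above and below repeat one refactor: ad-hoc Mongo filters through `legacy_io` are replaced by the typed query helpers from `openpype.client`, which take the project name explicitly and allow field projections. A hedged sketch of the pattern, assuming a configured session; the asset name is hypothetical:

```python
from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io

project_name = legacy_io.active_project()

# Before: untyped filter through the shared connection
# asset = legacy_io.find_one({"type": "asset", "name": "sh010"})

# After: explicit project scope and an optional field projection
asset = get_asset_by_name(project_name, "sh010", fields=["_id", "name"])
```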
diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index 704f420796..52a157c56e 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -4,6 +4,11 @@ import sys import logging # Pipeline imports +from openpype.client import ( + get_project, + get_asset_by_name, + get_versions, +) from openpype.pipeline import ( legacy_io, install_host, @@ -164,9 +169,9 @@ def update_frame_range(comp, representations): """ - version_ids = [r["parent"] for r in representations] - versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) - versions = list(versions) + project_name = legacy_io.active_project() + version_ids = {r["parent"] for r in representations} + versions = list(get_versions(project_name, version_ids)) versions = [v for v in versions if v["data"].get("frameStart", None) is not None] @@ -203,11 +208,12 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = legacy_io.find_one({"type": "asset", "name": asset_name}) + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find '%s' in the database" % asset_name # Get current project - self._project = legacy_io.find_one({"type": "project"}) + self._project = get_project(project_name) # Go to comp if not filepath: diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index 70eb3d0a19..01d55db647 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -7,6 +7,7 @@ from Qt import QtWidgets, QtCore import qtawesome as qta +from openpype.client import get_assets from openpype import style from openpype.pipeline import ( install_host, @@ -142,7 +143,7 @@ class App(QtWidgets.QWidget): # Clear any existing items self._assets.clear() - asset_names = [a["name"] for a in self.collect_assets()] + asset_names = self.collect_asset_names() completer = QtWidgets.QCompleter(asset_names) self._assets.setCompleter(completer) @@ -165,8 +166,14 @@ class App(QtWidgets.QWidget): items = glob.glob("{}/*.comp".format(directory)) return items - def collect_assets(self): - return list(legacy_io.find({"type": "asset"}, {"name": True})) + def collect_asset_names(self): + project_name = legacy_io.active_project() + asset_docs = get_assets(project_name, fields=["name"]) + asset_names = { + asset_doc["name"] + for asset_doc in asset_docs + } + return list(asset_names) def populate_comp_box(self, files): """Ensure we display the filename only but the path is stored as well diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py index 3e9e680efd..f6b26eb3e8 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py @@ -4,11 +4,10 @@ from pathlib import Path import attr -import openpype.lib -import openpype.lib.abstract_collect_render -from openpype.lib.abstract_collect_render import RenderInstance from openpype.lib import get_formatted_current_time from openpype.pipeline import legacy_io +from openpype.pipeline import publish +from openpype.pipeline.publish import RenderInstance import openpype.hosts.harmony.api as harmony @@ -20,8 +19,7 @@ class 
HarmonyRenderInstance(RenderInstance): leadingZeros = attr.ib(default=3) -class CollectFarmRender(openpype.lib.abstract_collect_render. - AbstractCollectRender): +class CollectFarmRender(publish.AbstractCollectRender): """Gather all publishable renders.""" # https://docs.toonboom.com/help/harmony-17/premium/reference/node/output/write-node-image-formats.html diff --git a/openpype/hosts/hiero/api/launchforhiero.py b/openpype/hosts/hiero/api/launchforhiero.py new file mode 100644 index 0000000000..5f7dbe23c9 --- /dev/null +++ b/openpype/hosts/hiero/api/launchforhiero.py @@ -0,0 +1,85 @@ +import logging + +from scriptsmenu import scriptsmenu +from Qt import QtWidgets + + +log = logging.getLogger(__name__) + + +def _hiero_main_window(): + """Return Hiero's main window""" + for obj in QtWidgets.QApplication.topLevelWidgets(): + if (obj.inherits('QMainWindow') and + obj.metaObject().className() == 'Foundry::UI::DockMainWindow'): + return obj + raise RuntimeError('Could not find HieroWindow instance') + + +def _hiero_main_menubar(): + """Retrieve the main menubar of the Hiero window""" + hiero_window = _hiero_main_window() + menubar = [i for i in hiero_window.children() if isinstance( + i, + QtWidgets.QMenuBar + )] + + assert len(menubar) == 1, "Error, could not find menu bar!" + return menubar[0] + + +def find_scripts_menu(title, parent): + """ + Check if the menu exists with the given title in the parent + + Args: + title (str): the title name of the scripts menu + + parent (QtWidgets.QMenuBar): the menubar to check + + Returns: + QtWidgets.QMenu or None + + """ + + menu = None + search = [i for i in parent.children() if + isinstance(i, scriptsmenu.ScriptsMenu) + and i.title() == title] + if search: + assert len(search) < 2, ("Multiple instances of menu '{}' " + "in menu bar".format(title)) + menu = search[0] + + return menu + + +def main(title="Scripts", parent=None, objectName=None): + """Build the main scripts menu in Hiero + + Args: + title (str): name of the menu in the application + + parent (QtWidgets.QtMenuBar): the parent object for the menu + + objectName (str): custom objectName for scripts menu + + Returns: + scriptsmenu.ScriptsMenu instance + + """ + hieromainbar = parent or _hiero_main_menubar() + try: + # check menu already exists + menu = find_scripts_menu(title, hieromainbar) + if not menu: + log.info("Attempting to build menu ...") + object_name = objectName or title.lower() + menu = scriptsmenu.ScriptsMenu(title=title, + parent=hieromainbar, + objectName=object_name) + except Exception as e: + log.error(e) + return + + return menu diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index 8c8c31bc4c..2f66f3ddd7 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -19,8 +19,9 @@ from openpype.client import ( get_last_versions, get_representations, ) -from openpype.pipeline import legacy_io -from openpype.api import (Logger, Anatomy, get_anatomy_settings) +from openpype.settings import get_anatomy_settings +from openpype.pipeline import legacy_io, Anatomy +from openpype.api import Logger from . import tags try: diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index e262abec00..541a1f1f92 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -9,6 +9,7 @@ from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools from . 
import tags +from openpype.settings import get_project_settings log = Logger.get_logger(__name__) @@ -41,6 +42,7 @@ def menu_install(): Installing menu into Hiero """ + from Qt import QtGui from . import ( publish, launch_workfiles_app, reload_config, @@ -138,3 +140,30 @@ def menu_install(): exeprimental_action.triggered.connect( lambda: host_tools.show_experimental_tools_dialog(parent=main_window) ) + + +def add_scripts_menu(): + try: + from . import launchforhiero + except ImportError: + + log.warning( + "Skipping studio.menu install, because " + "'scriptsmenu' module seems unavailable." + ) + return + + # load configuration of custom menu + project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) + config = project_settings["hiero"]["scriptsmenu"]["definition"] + _menu = project_settings["hiero"]["scriptsmenu"]["name"] + + if not config: + log.warning("Skipping studio menu, no definition found.") + return + + # run the launcher for Hiero menu + studio_menu = launchforhiero.main(title=_menu.title()) + + # apply configuration + studio_menu.build_from_configuration(studio_menu, config) diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index 9b628ec70b..b243a38b06 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -48,6 +48,7 @@ def install(): # install menu menu.menu_install() + menu.add_scripts_menu() # register hiero events events.register_hiero_events() diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py index 202287f1c3..d7d1c79d73 100644 --- a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py @@ -5,8 +5,7 @@ import husdoutputprocessors.base as base import colorbleed.usdlib as usdlib from openpype.client import get_asset_by_name -from openpype.api import Anatomy -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, Anatomy class AvalonURIOutputProcessor(base.OutputProcessorBase): diff --git a/openpype/hosts/maya/api/__init__.py b/openpype/hosts/maya/api/__init__.py index 5d76bf0f04..a6c5f50e1a 100644 --- a/openpype/hosts/maya/api/__init__.py +++ b/openpype/hosts/maya/api/__init__.py @@ -5,11 +5,11 @@ Anything that isn't defined here is INTERNAL and unreliable for external use. 
""" from .pipeline import ( - install, uninstall, ls, containerise, + MayaHost, ) from .plugin import ( Creator, @@ -40,11 +40,11 @@ from .lib import ( __all__ = [ - "install", "uninstall", "ls", "containerise", + "MayaHost", "Creator", "Loader", diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index de9a9da911..e4221978c0 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -1908,7 +1908,7 @@ def iter_parents(node): """ while True: split = node.rsplit("|", 1) - if len(split) == 1: + if len(split) == 1 or not split[0]: return node = split[0] @@ -2522,12 +2522,30 @@ def load_capture_preset(data=None): temp_options2['multiSampleEnable'] = False temp_options2['multiSampleCount'] = preset[id][key] + if key == 'renderDepthOfField': + temp_options2['renderDepthOfField'] = preset[id][key] + if key == 'ssaoEnable': if preset[id][key] is True: temp_options2['ssaoEnable'] = True else: temp_options2['ssaoEnable'] = False + if key == 'ssaoSamples': + temp_options2['ssaoSamples'] = preset[id][key] + + if key == 'ssaoAmount': + temp_options2['ssaoAmount'] = preset[id][key] + + if key == 'ssaoRadius': + temp_options2['ssaoRadius'] = preset[id][key] + + if key == 'hwFogDensity': + temp_options2['hwFogDensity'] = preset[id][key] + + if key == 'ssaoFilterRadius': + temp_options2['ssaoFilterRadius'] = preset[id][key] + if key == 'alphaCut': temp_options2['transparencyAlgorithm'] = 5 temp_options2['transparencyQuality'] = 1 @@ -2535,6 +2553,48 @@ def load_capture_preset(data=None): if key == 'headsUpDisplay': temp_options['headsUpDisplay'] = True + if key == 'fogging': + temp_options['fogging'] = preset[id][key] or False + + if key == 'hwFogStart': + temp_options2['hwFogStart'] = preset[id][key] + + if key == 'hwFogEnd': + temp_options2['hwFogEnd'] = preset[id][key] + + if key == 'hwFogAlpha': + temp_options2['hwFogAlpha'] = preset[id][key] + + if key == 'hwFogFalloff': + temp_options2['hwFogFalloff'] = int(preset[id][key]) + + if key == 'hwFogColorR': + temp_options2['hwFogColorR'] = preset[id][key] + + if key == 'hwFogColorG': + temp_options2['hwFogColorG'] = preset[id][key] + + if key == 'hwFogColorB': + temp_options2['hwFogColorB'] = preset[id][key] + + if key == 'motionBlurEnable': + if preset[id][key] is True: + temp_options2['motionBlurEnable'] = True + else: + temp_options2['motionBlurEnable'] = False + + if key == 'motionBlurSampleCount': + temp_options2['motionBlurSampleCount'] = preset[id][key] + + if key == 'motionBlurShutterOpenFraction': + temp_options2['motionBlurShutterOpenFraction'] = preset[id][key] + + if key == 'lineAAEnable': + if preset[id][key] is True: + temp_options2['lineAAEnable'] = True + else: + temp_options2['lineAAEnable'] = False + else: temp_options[str(key)] = preset[id][key] @@ -2544,7 +2604,24 @@ def load_capture_preset(data=None): 'gpuCacheDisplayFilter', 'multiSample', 'ssaoEnable', - 'textureMaxResolution' + 'ssaoSamples', + 'ssaoAmount', + 'ssaoFilterRadius', + 'ssaoRadius', + 'hwFogStart', + 'hwFogEnd', + 'hwFogAlpha', + 'hwFogFalloff', + 'hwFogColorR', + 'hwFogColorG', + 'hwFogColorB', + 'hwFogDensity', + 'textureMaxResolution', + 'motionBlurEnable', + 'motionBlurSampleCount', + 'motionBlurShutterOpenFraction', + 'lineAAEnable', + 'renderDepthOfField' ]: temp_options.pop(key, None) @@ -3213,3 +3290,209 @@ def parent_nodes(nodes, parent=None): node[0].setParent(node[1]) if delete_parent: pm.delete(parent_node) + + +@contextlib.contextmanager +def maintained_time(): + ct = cmds.currentTime(query=True) + try: + 
+        yield
+    finally:
+        cmds.currentTime(ct, edit=True)
+
+
+def iter_visible_nodes_in_range(nodes, start, end):
+    """Yield nodes that are visible in start-end frame range.
+
+    - Ignores intermediateObjects completely.
+    - Considers animated visibility attributes + upstream visibilities.
+
+    This is optimized for large scenes where some nodes in the parent
+    hierarchy might have some input connections to the visibilities,
+    e.g. key, driven keys, connections to other attributes, etc.
+
+    This only does a single time step to `start` if current frame is
+    not inside frame range since the assumption is made that changing
+    a frame isn't so slow that it beats querying all visibility
+    plugs through MDGContext on another frame.
+
+    Args:
+        nodes (list): List of node names to consider.
+        start (int, float): Start frame.
+        end (int, float): End frame.
+
+    Returns:
+        list: List of node names. These will be long full path names so
+            might have a longer name than the input nodes.
+
+    """
+    # States we consider per node
+    VISIBLE = 1  # always visible
+    INVISIBLE = 0  # always invisible
+    ANIMATED = -1  # animated visibility
+
+    # Ensure integers
+    start = int(start)
+    end = int(end)
+
+    # Consider only non-intermediate dag nodes and use the "long" names.
+    nodes = cmds.ls(nodes, long=True, noIntermediate=True, type="dagNode")
+    if not nodes:
+        return
+
+    with maintained_time():
+        # Go to first frame of the range if the current time is outside
+        # the queried range so we can directly query all visible nodes on
+        # that frame.
+        current_time = cmds.currentTime(query=True)
+        if not (start <= current_time <= end):
+            cmds.currentTime(start)
+
+        visible = cmds.ls(nodes, long=True, visible=True)
+        for node in visible:
+            yield node
+        if len(visible) == len(nodes) or start == end:
+            # All nodes are visible on the start frame, so they are at
+            # least visible once inside the frame range.
+            return
+
+    # For the invisible ones check whether their visibility and/or
+    # any of their parents' visibility attributes are animated. If so,
+    # they might become visible on other frames in the range.
+    def memodict(f):
+        """Memoization decorator for a function taking a single argument.
+
+        See: http://code.activestate.com/recipes/
+             578231-probably-the-fastest-memoization-decorator-in-the-/
+        """
+
+        class memodict(dict):
+            def __missing__(self, key):
+                ret = self[key] = f(key)
+                return ret
+
+        return memodict().__getitem__
+
+    @memodict
+    def get_state(node):
+        plug = node + ".visibility"
+        connections = cmds.listConnections(plug,
+                                           source=True,
+                                           destination=False)
+        if connections:
+            return ANIMATED
+        else:
+            return VISIBLE if cmds.getAttr(plug) else INVISIBLE
+
+    visible = set(visible)
+    invisible = [node for node in nodes if node not in visible]
+    always_invisible = set()
+    # Iterate over the nodes by short to long names to iterate the highest
+    # in hierarchy nodes first. So the collected data can be used from the
+    # cache for parent queries in next iterations.
+    node_dependencies = dict()
+    for node in sorted(invisible, key=len):
+
+        state = get_state(node)
+        if state == INVISIBLE:
+            always_invisible.add(node)
+            continue
+
+        # If not always invisible by itself we should go through and check
+        # the parents to see if any of them are always invisible. For those
+        # that are "ANIMATED" we consider that this node is dependent on
+        # that attribute, we store them as dependency.
+        dependencies = set()
+        if state == ANIMATED:
+            dependencies.add(node)
+
+        traversed_parents = list()
+        for parent in iter_parents(node):
+
+            if parent in always_invisible or get_state(parent) == INVISIBLE:
+                # When parent is always invisible then consider this parent,
+                # this node we started from and any of the parents we
+                # have traversed in-between to be *always invisible*
+                always_invisible.add(parent)
+                always_invisible.add(node)
+                always_invisible.update(traversed_parents)
+                break
+
+            # If we have traversed the parent before and its visibility
+            # was dependent on animated visibilities then we can just
+            # extend its dependencies to those of this node and break
+            # further iteration upwards.
+            parent_dependencies = node_dependencies.get(parent, None)
+            if parent_dependencies is not None:
+                dependencies.update(parent_dependencies)
+                break
+
+            state = get_state(parent)
+            if state == ANIMATED:
+                dependencies.add(parent)
+
+            traversed_parents.append(parent)
+
+        if node not in always_invisible and dependencies:
+            node_dependencies[node] = dependencies
+
+    if not node_dependencies:
+        return
+
+    # Now we only have to check the visibilities for nodes that have
+    # animated visibility dependencies upstream. The fastest way to check
+    # these visibility attributes across different frames is with Python
+    # api 2.0 so we do that.
+    @memodict
+    def get_visibility_mplug(node):
+        """Return api 2.0 MPlug with cached memoize decorator"""
+        sel = om.MSelectionList()
+        sel.add(node)
+        dag = sel.getDagPath(0)
+        return om.MFnDagNode(dag).findPlug("visibility", True)
+
+    @contextlib.contextmanager
+    def dgcontext(mtime):
+        """MDGContext context manager"""
+        context = om.MDGContext(mtime)
+        try:
+            previous = context.makeCurrent()
+            yield context
+        finally:
+            previous.makeCurrent()
+
+    # We skip the first frame as we already used that frame to check for
+    # overall visibilities. And end+1 to include the end frame.
+    scene_units = om.MTime.uiUnit()
+    for frame in range(start + 1, end + 1):
+        mtime = om.MTime(frame, unit=scene_units)
+
+        # Build little cache so we don't query the same MPlug's value
+        # again if it was checked on this frame and also is a dependency
+        # for another node
+        frame_visibilities = {}
+        with dgcontext(mtime) as context:
+            for node, dependencies in list(node_dependencies.items()):
+                for dependency in dependencies:
+                    dependency_visible = frame_visibilities.get(dependency,
+                                                                None)
+                    if dependency_visible is None:
+                        mplug = get_visibility_mplug(dependency)
+                        dependency_visible = mplug.asBool(context)
+                        frame_visibilities[dependency] = dependency_visible
+
+                    if not dependency_visible:
+                        # One dependency is not visible, thus the
+                        # node is not visible.
+                        break
+
+                else:
+                    # All dependencies are visible.
+                    yield node
+                    # Remove node with dependencies for next frame
+                    # iterations because it was visible at least once.
+                    node_dependencies.pop(node)
+
+        # If no more nodes to process break the frame iterations.
+ if not node_dependencies: + break diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py index 2d3bda5245..123b934428 100644 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ b/openpype/hosts/maya/api/lib_renderproducts.py @@ -1087,7 +1087,7 @@ class RenderProductsRenderman(ARenderProducts): "d_tiff": "tif" } - displays = get_displays()["displays"] + displays = get_displays(override_dst="render")["displays"] for name, display in displays.items(): enabled = display["params"]["enable"]["value"] if not enabled: @@ -1106,9 +1106,33 @@ class RenderProductsRenderman(ARenderProducts): display["driverNode"]["type"], "exr") for camera in cameras: - product = RenderProduct(productName=aov_name, - ext=extensions, - camera=camera) + # Create render product and set it as multipart only on + # display types supporting it. In all other cases, Renderman + # will create separate output per channel. + if display["driverNode"]["type"] in ["d_openexr", "d_deepexr", "d_tiff"]: # noqa + product = RenderProduct( + productName=aov_name, + ext=extensions, + camera=camera, + multipart=True + ) + else: + # this code should handle the case where no multipart + # capable format is selected. But since it involves + # shady logic to determine what channel become what + # lets not do that as all productions will use exr anyway. + """ + for channel in display['params']['displayChannels']['value']: # noqa + product = RenderProduct( + productName="{}_{}".format(aov_name, channel), + ext=extensions, + camera=camera, + multipart=False + ) + """ + raise UnsupportedImageFormatException( + "Only exr, deep exr and tiff formats are supported.") + products.append(product) return products @@ -1201,3 +1225,7 @@ class UnsupportedRendererException(Exception): Raised when requesting data from unsupported renderer. """ + + +class UnsupportedImageFormatException(Exception): + """Custom exception to report unsupported output image format.""" diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index d9276ddf4a..d08e8d1926 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -1,13 +1,15 @@ import os -import sys import errno import logging +import contextlib from maya import utils, cmds, OpenMaya import maya.api.OpenMaya as om import pyblish.api +from openpype.settings import get_project_settings +from openpype.host import HostBase, IWorkfileHost, ILoadHost import openpype.hosts.maya from openpype.tools.utils import host_tools from openpype.lib import ( @@ -28,6 +30,14 @@ from openpype.pipeline import ( ) from openpype.hosts.maya.lib import copy_workspace_mel from . 
import menu, lib +from .workio import ( + open_file, + save_file, + file_extensions, + has_unsaved_changes, + work_root, + current_file +) log = logging.getLogger("openpype.hosts.maya") @@ -40,49 +50,121 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") AVALON_CONTAINERS = ":AVALON_CONTAINERS" -self = sys.modules[__name__] -self._ignore_lock = False -self._events = {} +class MayaHost(HostBase, IWorkfileHost, ILoadHost): + name = "maya" -def install(): - from openpype.settings import get_project_settings + def __init__(self): + super(MayaHost, self).__init__() + self._op_events = {} - project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) - # process path mapping - dirmap_processor = MayaDirmap("maya", project_settings) - dirmap_processor.process_dirmap() + def install(self): + project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) + # process path mapping + dirmap_processor = MayaDirmap("maya", project_settings) + dirmap_processor.process_dirmap() - pyblish.api.register_plugin_path(PUBLISH_PATH) - pyblish.api.register_host("mayabatch") - pyblish.api.register_host("mayapy") - pyblish.api.register_host("maya") + pyblish.api.register_plugin_path(PUBLISH_PATH) + pyblish.api.register_host("mayabatch") + pyblish.api.register_host("mayapy") + pyblish.api.register_host("maya") - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - log.info(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) + self.log.info(PUBLISH_PATH) - log.info("Installing callbacks ... ") - register_event_callback("init", on_init) + self.log.info("Installing callbacks ... ") + register_event_callback("init", on_init) - if lib.IS_HEADLESS: - log.info(("Running in headless mode, skipping Maya " - "save/open/new callback installation..")) + if lib.IS_HEADLESS: + self.log.info(( + "Running in headless mode, skipping Maya save/open/new" + " callback installation.." 
+ )) - return + return - _set_project() - _register_callbacks() + _set_project() + self._register_callbacks() - menu.install() + menu.install() - register_event_callback("save", on_save) - register_event_callback("open", on_open) - register_event_callback("new", on_new) - register_event_callback("before.save", on_before_save) - register_event_callback("taskChanged", on_task_changed) - register_event_callback("workfile.save.before", before_workfile_save) + register_event_callback("save", on_save) + register_event_callback("open", on_open) + register_event_callback("new", on_new) + register_event_callback("before.save", on_before_save) + register_event_callback("taskChanged", on_task_changed) + register_event_callback("workfile.save.before", before_workfile_save) + + def open_workfile(self, filepath): + return open_file(filepath) + + def save_workfile(self, filepath=None): + return save_file(filepath) + + def work_root(self, session): + return work_root(session) + + def get_current_workfile(self): + return current_file() + + def workfile_has_unsaved_changes(self): + return has_unsaved_changes() + + def get_workfile_extensions(self): + return file_extensions() + + def get_containers(self): + return ls() + + @contextlib.contextmanager + def maintained_selection(self): + with lib.maintained_selection(): + yield + + def _register_callbacks(self): + for handler, event in self._op_events.copy().items(): + if event is None: + continue + + try: + OpenMaya.MMessage.removeCallback(event) + self._op_events[handler] = None + except RuntimeError as exc: + self.log.info(exc) + + self._op_events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save + ) + + self._op_events[_before_scene_save] = ( + OpenMaya.MSceneMessage.addCheckCallback( + OpenMaya.MSceneMessage.kBeforeSaveCheck, + _before_scene_save + ) + ) + + self._op_events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterNew, _on_scene_new + ) + + self._op_events[_on_maya_initialized] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kMayaInitialized, + _on_maya_initialized + ) + ) + + self._op_events[_on_scene_open] = OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterOpen, _on_scene_open + ) + + self.log.info("Installed event handler _on_scene_save..") + self.log.info("Installed event handler _before_scene_save..") + self.log.info("Installed event handler _on_scene_new..") + self.log.info("Installed event handler _on_maya_initialized..") + self.log.info("Installed event handler _on_scene_open..") def _set_project(): @@ -106,44 +188,6 @@ def _set_project(): cmds.workspace(workdir, openWorkspace=True) -def _register_callbacks(): - for handler, event in self._events.copy().items(): - if event is None: - continue - - try: - OpenMaya.MMessage.removeCallback(event) - self._events[handler] = None - except RuntimeError as e: - log.info(e) - - self._events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save - ) - - self._events[_before_scene_save] = OpenMaya.MSceneMessage.addCheckCallback( - OpenMaya.MSceneMessage.kBeforeSaveCheck, _before_scene_save - ) - - self._events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterNew, _on_scene_new - ) - - self._events[_on_maya_initialized] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kMayaInitialized, _on_maya_initialized - ) - - self._events[_on_scene_open] = 
OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterOpen, _on_scene_open - ) - - log.info("Installed event handler _on_scene_save..") - log.info("Installed event handler _before_scene_save..") - log.info("Installed event handler _on_scene_new..") - log.info("Installed event handler _on_maya_initialized..") - log.info("Installed event handler _on_scene_open..") - - def _on_maya_initialized(*args): emit_event("init") @@ -475,7 +519,6 @@ def on_task_changed(): workdir = legacy_io.Session["AVALON_WORKDIR"] if os.path.exists(workdir): log.info("Updating Maya workspace for task change to %s", workdir) - _set_project() # Set Maya fileDialog's start-dir to /scenes diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index f05893a7b4..9280805945 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -9,8 +9,8 @@ from openpype.pipeline import ( LoaderPlugin, get_representation_path, AVALON_CONTAINER_ID, + Anatomy, ) -from openpype.api import Anatomy from openpype.settings import get_project_settings from .pipeline import containerise from . import lib diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index bea8f154b1..159bfe9eb3 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -296,12 +296,9 @@ def update_package_version(container, version): assert current_representation is not None, "This is a bug" - repre_parents = get_representation_parents( - project_name, current_representation + version_doc, subset_doc, asset_doc, project_doc = ( + get_representation_parents(project_name, current_representation) ) - version_doc = subset_doc = asset_doc = project_doc = None - if repre_parents: - version_doc, subset_doc, asset_doc, project_doc = repre_parents if version == -1: new_version = get_last_version_by_subset_id( diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index fbf3399f61..ba51ffa009 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -15,6 +15,8 @@ class CreateReview(plugin.Creator): keepImages = False isolate = False imagePlane = True + Width = 0 + Height = 0 transparency = [ "preset", "simple", @@ -33,6 +35,8 @@ class CreateReview(plugin.Creator): for key, value in animation_data.items(): data[key] = value + data["review_width"] = self.Width + data["review_height"] = self.Height data["isolate"] = self.isolate data["keepImages"] = self.keepImages data["imagePlane"] = self.imagePlane diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py b/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py new file mode 100644 index 0000000000..d458c5abda --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py @@ -0,0 +1,139 @@ +import os + +from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) +# TODO aiVolume doesn't automatically set velocity fps correctly, set manual? 
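+# A possible manual workaround for the TODO above (a sketch only, not part
+# of this loader; 'velocityFps' mirrors Arnold's 'velocity_fps' volume
+# parameter, but the Maya attribute name is an assumption and may differ
+# between MtoA versions):
+#
+#     from maya import cmds, mel
+#     scene_fps = mel.eval("currentTimeUnitToFPS()")
+#     cmds.setAttr("{}.velocityFps".format(grid_node), scene_fps)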
+
+
+class LoadVDBtoArnold(load.LoaderPlugin):
+    """Load OpenVDB for Arnold in aiVolume"""
+
+    families = ["vdbcache"]
+    representations = ["vdb"]
+
+    label = "Load VDB to Arnold"
+    icon = "cloud"
+    color = "orange"
+
+    def load(self, context, name, namespace, data):
+
+        from maya import cmds
+        from openpype.hosts.maya.api.pipeline import containerise
+        from openpype.hosts.maya.api.lib import unique_namespace
+
+        try:
+            family = context["representation"]["context"]["family"]
+        except (KeyError, ValueError):
+            family = "vdbcache"
+
+        # Check if the Arnold plugin (mtoa) is available
+        try:
+            cmds.loadPlugin("mtoa", quiet=True)
+        except Exception as exc:
+            self.log.error("Encountered exception:\n%s" % exc)
+            return
+
+        asset = context['asset']
+        asset_name = asset["name"]
+        namespace = namespace or unique_namespace(
+            asset_name + "_",
+            prefix="_" if asset_name[0].isdigit() else "",
+            suffix="_",
+        )
+
+        # Root group
+        label = "{}:{}".format(namespace, name)
+        root = cmds.group(name=label, empty=True)
+
+        settings = get_project_settings(os.environ['AVALON_PROJECT'])
+        colors = settings['maya']['load']['colors']
+
+        c = colors.get(family)
+        if c is not None:
+            cmds.setAttr(root + ".useOutlinerColor", 1)
+            cmds.setAttr(root + ".outlinerColor",
+                (float(c[0]) / 255),
+                (float(c[1]) / 255),
+                (float(c[2]) / 255)
+            )
+
+        # Create the aiVolume shape
+        grid_node = cmds.createNode("aiVolume",
+                                    name="{}Shape".format(root),
+                                    parent=root)
+
+        self._set_path(grid_node,
+                       path=self.fname,
+                       representation=context["representation"])
+
+        # Lock the shape node so the user can't delete the transform/shape
+        # as if it was referenced
+        cmds.lockNode(grid_node, lock=True)
+
+        nodes = [root, grid_node]
+        self[:] = nodes
+
+        return containerise(
+            name=name,
+            namespace=namespace,
+            nodes=nodes,
+            context=context,
+            loader=self.__class__.__name__)
+
+    def update(self, container, representation):
+
+        from maya import cmds
+
+        path = get_representation_path(representation)
+
+        # Find the aiVolume node
+        members = cmds.sets(container['objectName'], query=True)
+        grid_nodes = cmds.ls(members, type="aiVolume", long=True)
+        assert len(grid_nodes) == 1, "This is a bug"
+
+        # Update the aiVolume file path
+        self._set_path(grid_nodes[0], path=path, representation=representation)
+
+        # Update container representation
+        cmds.setAttr(container["objectName"] + ".representation",
+                     str(representation["_id"]),
+                     type="string")
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+
+        from maya import cmds
+
+        # Get all members of the avalon container, ensure they are unlocked
+        # and delete everything
+        members = cmds.sets(container['objectName'], query=True)
+        cmds.lockNode(members, lock=False)
+        cmds.delete([container['objectName']] + members)
+
+        # Clean up the namespace
+        try:
+            cmds.namespace(removeNamespace=container['namespace'],
+                           deleteNamespaceContent=True)
+        except RuntimeError:
+            pass
+
+    @staticmethod
+    def _set_path(grid_node,
+                  path,
+                  representation):
+        """Apply the settings for the VDB path to the aiVolume node"""
+        from maya import cmds
+
+        if not os.path.exists(path):
+            raise RuntimeError("Path does not exist: %s" % path)
+
+        is_sequence = bool(representation["context"].get("frame"))
+        cmds.setAttr(grid_node + ".useFrameExtension", is_sequence)
+
+        # Set file path
+        cmds.setAttr(grid_node + ".filename", path, type="string")
diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py
index 70bd9d22e2..c6a69dfe35 100644
--- a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py
+++ b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py
@@ -1,11 +1,21 @@
 import os
 
 from openpype.api import get_project_settings
-from openpype.pipeline import load
+from openpype.pipeline import (
+    load,
+    get_representation_path
+)
 
 
 class LoadVDBtoRedShift(load.LoaderPlugin):
-    """Load OpenVDB in a Redshift Volume Shape"""
+    """Load OpenVDB in a Redshift Volume Shape
+
+    Note that the RedshiftVolumeShape is created without a RedshiftVolume
+    shader assigned. To get the Redshift volume to render correctly, assign
+    a RedshiftVolume shader (in the Hypershade) and set the density, scatter
+    and emission channels to the channel names of the volumes in the VDB file.
+
+    """
 
     families = ["vdbcache"]
     representations = ["vdb"]
@@ -55,7 +65,7 @@ class LoadVDBtoRedShift(load.LoaderPlugin):
 
         # Root group
         label = "{}:{}".format(namespace, name)
-        root = cmds.group(name=label, empty=True)
+        root = cmds.createNode("transform", name=label)
 
         settings = get_project_settings(os.environ['AVALON_PROJECT'])
         colors = settings['maya']['load']['colors']
@@ -74,9 +84,9 @@ class LoadVDBtoRedShift(load.LoaderPlugin):
                                        name="{}RVSShape".format(label),
                                        parent=root)
 
-        cmds.setAttr("{}.fileName".format(volume_node),
-                     self.fname,
-                     type="string")
+        self._set_path(volume_node,
+                       path=self.fname,
+                       representation=context["representation"])
 
         nodes = [root, volume_node]
         self[:] = nodes
@@ -87,3 +97,56 @@ class LoadVDBtoRedShift(load.LoaderPlugin):
             nodes=nodes,
             context=context,
             loader=self.__class__.__name__)
+
+    def update(self, container, representation):
+        from maya import cmds
+
+        path = get_representation_path(representation)
+
+        # Find the RedshiftVolumeShape
+        members = cmds.sets(container['objectName'], query=True)
+        grid_nodes = cmds.ls(members, type="RedshiftVolumeShape", long=True)
+        assert len(grid_nodes) == 1, "This is a bug"
+
+        # Update the RedshiftVolumeShape file path
+        self._set_path(grid_nodes[0], path=path, representation=representation)
+
+        # Update container representation
+        cmds.setAttr(container["objectName"] + ".representation",
+                     str(representation["_id"]),
+                     type="string")
+
+    def remove(self, container):
+        from maya import cmds
+
+        # Get all members of the avalon container, ensure they are unlocked
+        # and delete everything
+        members = cmds.sets(container['objectName'], query=True)
+        cmds.lockNode(members, lock=False)
+        cmds.delete([container['objectName']] + members)
+
+        # Clean up the namespace
+        try:
+            cmds.namespace(removeNamespace=container['namespace'],
+                           deleteNamespaceContent=True)
+        except RuntimeError:
+            pass
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    @staticmethod
+    def _set_path(grid_node,
+                  path,
+                  representation):
+        """Apply the settings for the VDB path to the RedshiftVolumeShape"""
+        from maya import cmds
+
+        if not os.path.exists(path):
+            raise RuntimeError("Path does not exist: %s" % path)
+
+        is_sequence = bool(representation["context"].get("frame"))
+        cmds.setAttr(grid_node + ".useFrameExtension", is_sequence)
+
+        # Set file path
+        cmds.setAttr(grid_node + ".fileName", path, type="string")
diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py
index 15b89ad53c..eb872c2935 100644
--- a/openpype/hosts/maya/plugins/publish/collect_review.py
+++ b/openpype/hosts/maya/plugins/publish/collect_review.py
@@ -71,6 +71,8 @@ class CollectReview(pyblish.api.InstancePlugin):
         data['handles'] =
instance.data.get('handles', None) data['step'] = instance.data['step'] data['fps'] = instance.data['fps'] + data['review_width'] = instance.data['review_width'] + data['review_height'] = instance.data['review_height'] data["isolate"] = instance.data["isolate"] cmds.setAttr(str(instance) + '.active', 1) self.log.debug('data {}'.format(instance.context[i].data)) diff --git a/openpype/hosts/maya/plugins/publish/extract_animation.py b/openpype/hosts/maya/plugins/publish/extract_animation.py deleted file mode 100644 index abe5ed3bf5..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_animation.py +++ /dev/null @@ -1,100 +0,0 @@ -import os - -from maya import cmds - -import openpype.api -from openpype.hosts.maya.api.lib import ( - extract_alembic, - suspended_refresh, - maintained_selection -) - - -class ExtractAnimation(openpype.api.Extractor): - """Produce an alembic of just point positions and normals. - - Positions and normals, uvs, creases are preserved, but nothing more, - for plain and predictable point caches. - - Plugin can run locally or remotely (on a farm - if instance is marked with - "farm" it will be skipped in local processing, but processed on farm) - """ - - label = "Extract Animation" - hosts = ["maya"] - families = ["animation"] - targets = ["local", "remote"] - - def process(self, instance): - if instance.data.get("farm"): - self.log.debug("Should be processed on farm, skipping.") - return - - # Collect the out set nodes - out_sets = [node for node in instance if node.endswith("out_SET")] - if len(out_sets) != 1: - raise RuntimeError("Couldn't find exactly one out_SET: " - "{0}".format(out_sets)) - out_set = out_sets[0] - roots = cmds.sets(out_set, query=True) - - # Include all descendants - nodes = roots + cmds.listRelatives(roots, - allDescendents=True, - fullPath=True) or [] - - # Collect the start and end including handles - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - self.log.info("Extracting animation..") - dirname = self.staging_dir(instance) - - parent_dir = self.staging_dir(instance) - filename = "{name}.abc".format(**instance.data) - path = os.path.join(parent_dir, filename) - - options = { - "step": instance.data.get("step", 1.0) or 1.0, - "attr": ["cbId"], - "writeVisibility": True, - "writeCreases": True, - "uvWrite": True, - "selection": True, - "worldSpace": instance.data.get("worldSpace", True), - "writeColorSets": instance.data.get("writeColorSets", False), - "writeFaceSets": instance.data.get("writeFaceSets", False) - } - - if not instance.data.get("includeParentHierarchy", True): - # Set the root nodes if we don't want to include parents - # The roots are to be considered the ones that are the actual - # direct members of the set - options["root"] = roots - - if int(cmds.about(version=True)) >= 2017: - # Since Maya 2017 alembic supports multiple uv sets - write them. 
- options["writeUVSets"] = True - - with suspended_refresh(): - with maintained_selection(): - cmds.select(nodes, noExpand=True) - extract_alembic(file=path, - startFrame=float(start), - endFrame=float(end), - **options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": dirname, - } - instance.data["representations"].append(representation) - - instance.context.data["cleanupFullPaths"].append(path) - - self.log.info("Extracted {} to {}".format(instance, dirname)) diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py index 4110ad474d..b744bfd0fe 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py +++ b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py @@ -30,7 +30,7 @@ class ExtractCameraAlembic(openpype.api.Extractor): # get cameras members = instance.data['setMembers'] - cameras = cmds.ls(members, leaf=True, shapes=True, long=True, + cameras = cmds.ls(members, leaf=True, long=True, dag=True, type="camera") # validate required settings @@ -61,10 +61,30 @@ class ExtractCameraAlembic(openpype.api.Extractor): if bake_to_worldspace: job_str += ' -worldSpace' - for member in member_shapes: - self.log.info(f"processing {member}") + + # if baked, drop the camera hierarchy to maintain + # clean output and backwards compatibility + camera_root = cmds.listRelatives( + camera, parent=True, fullPath=True)[0] + job_str += ' -root {0}'.format(camera_root) + + for member in members: + descendants = cmds.listRelatives(member, + allDescendents=True, + fullPath=True) or [] + shapes = cmds.ls(descendants, shapes=True, + noIntermediate=True, long=True) + cameras = cmds.ls(shapes, type="camera", long=True) + if cameras: + if not set(shapes) - set(cameras): + continue + self.log.warning(( + "Camera hierarchy contains additional geometry. 
" + "Extraction will fail.") + ) transform = cmds.listRelatives( - member, parent=True, fullPath=True)[0] + member, parent=True, fullPath=True) + transform = transform[0] if transform else member job_str += ' -root {0}'.format(transform) job_str += ' -file "{0}"'.format(path) diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py index 1cb30e65ea..8d6c4b5f3c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py +++ b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py @@ -172,18 +172,19 @@ class ExtractCameraMayaScene(openpype.api.Extractor): dag=True, shapes=True, long=True) + + attrs = {"backgroundColorR": 0.0, + "backgroundColorG": 0.0, + "backgroundColorB": 0.0, + "overscan": 1.0} + # Fix PLN-178: Don't allow background color to be non-black - for cam in cmds.ls( + for cam, (attr, value) in itertools.product(cmds.ls( baked_camera_shapes, type="camera", dag=True, - shapes=True, long=True): - attrs = {"backgroundColorR": 0.0, - "backgroundColorG": 0.0, - "backgroundColorB": 0.0, - "overscan": 1.0} - for attr, value in attrs.items(): - plug = "{0}.{1}".format(cam, attr) - unlock(plug) - cmds.setAttr(plug, value) + long=True), attrs.items()): + plug = "{0}.{1}".format(cam, attr) + unlock(plug) + cmds.setAttr(plug, value) self.log.info("Performing extraction..") cmds.select(cmds.ls(members, dag=True, diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index ba939d5428..233a0b60c2 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -1,6 +1,7 @@ import os import glob import contextlib + import clique import capture @@ -50,8 +51,32 @@ class ExtractPlayblast(openpype.api.Extractor): ['override_viewport_options'] ) preset = lib.load_capture_preset(data=self.capture_preset) - + # Grab capture presets from the project settings + capture_presets = self.capture_preset + # Set resolution variables from capture presets + width_preset = capture_presets["Resolution"]["width"] + height_preset = capture_presets["Resolution"]["height"] + # Set resolution variables from asset values + asset_data = instance.data["assetEntity"]["data"] + asset_width = asset_data.get("resolutionWidth") + asset_height = asset_data.get("resolutionHeight") + review_instance_width = instance.data.get("review_width") + review_instance_height = instance.data.get("review_height") preset['camera'] = camera + + # Tests if project resolution is set, + # if it is a value other than zero, that value is + # used, if not then the asset resolution is + # used + if review_instance_width and review_instance_height: + preset['width'] = review_instance_width + preset['height'] = review_instance_height + elif width_preset and height_preset: + preset['width'] = width_preset + preset['height'] = height_preset + elif asset_width and asset_height: + preset['width'] = asset_width + preset['height'] = asset_height preset['start_frame'] = start preset['end_frame'] = end camera_option = preset.get("camera_option", {}) @@ -90,7 +115,7 @@ class ExtractPlayblast(openpype.api.Extractor): else: preset["viewport_options"] = {"imagePlane": image_plane} - with maintained_time(): + with lib.maintained_time(): filename = preset.get("filename", "%TEMP%") # Force viewer to False in call to capture because we have our own @@ -153,12 +178,3 @@ class 
ExtractPlayblast(openpype.api.Extractor): 'camera_name': camera_node_name } instance.data["representations"].append(representation) - - -@contextlib.contextmanager -def maintained_time(): - ct = cmds.currentTime(query=True) - try: - yield - finally: - cmds.currentTime(ct, edit=True) diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py index c4c8610ebb..bf6feecef3 100644 --- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py @@ -6,7 +6,8 @@ import openpype.api from openpype.hosts.maya.api.lib import ( extract_alembic, suspended_refresh, - maintained_selection + maintained_selection, + iter_visible_nodes_in_range ) @@ -32,7 +33,7 @@ class ExtractAlembic(openpype.api.Extractor): self.log.debug("Should be processed on farm, skipping.") return - nodes = instance[:] + nodes, roots = self.get_members_and_roots(instance) # Collect the start and end including handles start = float(instance.data.get("frameStartHandle", 1)) @@ -45,10 +46,6 @@ class ExtractAlembic(openpype.api.Extractor): attr_prefixes = instance.data.get("attrPrefix", "").split(";") attr_prefixes = [value for value in attr_prefixes if value.strip()] - # Get extra export arguments - writeColorSets = instance.data.get("writeColorSets", False) - writeFaceSets = instance.data.get("writeFaceSets", False) - self.log.info("Extracting pointcache..") dirname = self.staging_dir(instance) @@ -62,8 +59,8 @@ class ExtractAlembic(openpype.api.Extractor): "attrPrefix": attr_prefixes, "writeVisibility": True, "writeCreases": True, - "writeColorSets": writeColorSets, - "writeFaceSets": writeFaceSets, + "writeColorSets": instance.data.get("writeColorSets", False), + "writeFaceSets": instance.data.get("writeFaceSets", False), "uvWrite": True, "selection": True, "worldSpace": instance.data.get("worldSpace", True) @@ -73,12 +70,22 @@ class ExtractAlembic(openpype.api.Extractor): # Set the root nodes if we don't want to include parents # The roots are to be considered the ones that are the actual # direct members of the set - options["root"] = instance.data.get("setMembers") + options["root"] = roots if int(cmds.about(version=True)) >= 2017: # Since Maya 2017 alembic supports multiple uv sets - write them. options["writeUVSets"] = True + if instance.data.get("visibleOnly", False): + # If we only want to include nodes that are visible in the frame + # range then we need to do our own check. Alembic's `visibleOnly` + # flag does not filter out those that are only hidden on some + # frames as it counts "animated" or "connected" visibilities as + # if it's always visible. 
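+            # Conceptually the helper behaves like this naive sketch
+            # (illustration only - the real implementation in
+            # openpype.hosts.maya.api.lib is smarter than brute-force
+            # per-frame queries):
+            #
+            #     for node in nodes:
+            #         if any(cmds.getAttr(node + ".visibility", time=frame)
+            #                for frame in range(int(start), int(end) + 1)):
+            #             yield node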
+ nodes = list(iter_visible_nodes_in_range(nodes, + start=start, + end=end)) + with suspended_refresh(): with maintained_selection(): cmds.select(nodes, noExpand=True) @@ -101,3 +108,28 @@ class ExtractAlembic(openpype.api.Extractor): instance.context.data["cleanupFullPaths"].append(path) self.log.info("Extracted {} to {}".format(instance, dirname)) + + def get_members_and_roots(self, instance): + return instance[:], instance.data.get("setMembers") + + +class ExtractAnimation(ExtractAlembic): + label = "Extract Animation" + families = ["animation"] + + def get_members_and_roots(self, instance): + + # Collect the out set nodes + out_sets = [node for node in instance if node.endswith("out_SET")] + if len(out_sets) != 1: + raise RuntimeError("Couldn't find exactly one out_SET: " + "{0}".format(out_sets)) + out_set = out_sets[0] + roots = cmds.sets(out_set, query=True) + + # Include all descendants + nodes = roots + cmds.listRelatives(roots, + allDescendents=True, + fullPath=True) or [] + + return nodes, roots diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index c2cefc56f1..4f28aa167c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -1,5 +1,4 @@ import os -import contextlib import glob import capture @@ -28,7 +27,6 @@ class ExtractThumbnail(openpype.api.Extractor): camera = instance.data['review_camera'] - capture_preset = "" capture_preset = ( instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast']['capture_preset'] ) @@ -60,7 +58,29 @@ class ExtractThumbnail(openpype.api.Extractor): "overscan": 1.0, "depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)), } - + capture_presets = capture_preset + # Set resolution variables from capture presets + width_preset = capture_presets["Resolution"]["width"] + height_preset = capture_presets["Resolution"]["height"] + # Set resolution variables from asset values + asset_data = instance.data["assetEntity"]["data"] + asset_width = asset_data.get("resolutionWidth") + asset_height = asset_data.get("resolutionHeight") + review_instance_width = instance.data.get("review_width") + review_instance_height = instance.data.get("review_height") + # Tests if project resolution is set, + # if it is a value other than zero, that value is + # used, if not then the asset resolution is + # used + if review_instance_width and review_instance_height: + preset['width'] = review_instance_width + preset['height'] = review_instance_height + elif width_preset and height_preset: + preset['width'] = width_preset + preset['height'] = height_preset + elif asset_width and asset_height: + preset['width'] = asset_width + preset['height'] = asset_height stagingDir = self.staging_dir(instance) filename = "{0}".format(instance.name) path = os.path.join(stagingDir, filename) @@ -81,9 +101,7 @@ class ExtractThumbnail(openpype.api.Extractor): if preset.pop("isolate_view", False) and instance.data.get("isolate"): preset["isolate"] = instance.data["setMembers"] - with maintained_time(): - filename = preset.get("filename", "%TEMP%") - + with lib.maintained_time(): # Force viewer to False in call to capture because we have our own # viewer opening call to allow a signal to trigger between # playblast and viewer @@ -152,12 +170,3 @@ class ExtractThumbnail(openpype.api.Extractor): filepath = max(files, key=os.path.getmtime) return filepath - - -@contextlib.contextmanager -def maintained_time(): - 
ct = cmds.currentTime(query=True) - try: - yield - finally: - cmds.currentTime(ct, edit=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py index 87712a4cea..5f6faddbe7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py @@ -51,18 +51,7 @@ class ValidateCameraContents(pyblish.api.InstancePlugin): raise RuntimeError("No cameras found in empty instance.") if not cls.validate_shapes: - cls.log.info("Not validating shapes in the content.") - - for member in members: - parents = cmds.ls(member, long=True)[0].split("|")[1:-1] - parents_long_named = [ - "|".join(parents[:i]) for i in range(1, 1 + len(parents)) - ] - if cameras[0] in parents_long_named: - cls.log.error( - "{} is parented under camera {}".format( - member, cameras[0])) - invalid.extend(member) + cls.log.info("not validating shapes in the content") return invalid # non-camera shapes diff --git a/openpype/hosts/maya/plugins/publish/validate_frame_range.py b/openpype/hosts/maya/plugins/publish/validate_frame_range.py index 98b5b4d79b..c51766379e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_frame_range.py +++ b/openpype/hosts/maya/plugins/publish/validate_frame_range.py @@ -27,6 +27,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): "yeticache"] optional = True actions = [openpype.api.RepairAction] + exclude_families = [] def process(self, instance): context = instance.context @@ -56,7 +57,9 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): # compare with data on instance errors = [] - + if [ef for ef in self.exclude_families + if instance.data["family"] in ef]: + return if(inst_start != frame_start_handle): errors.append("Instance start frame [ {} ] doesn't " "match the one set on instance [ {} ]: " diff --git a/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py b/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py index d70096ee45..04cc9ab5fb 100644 --- a/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py +++ b/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py @@ -6,7 +6,7 @@ from openpype.pipeline import PublishXmlValidationError class ValidateReviewSubsetUniqueness(pyblish.api.ContextPlugin): - """Validates that nodes has common root.""" + """Validates that review subset has unique name.""" order = openpype.api.ValidateContentsOrder hosts = ["maya"] @@ -17,7 +17,7 @@ class ValidateReviewSubsetUniqueness(pyblish.api.ContextPlugin): subset_names = [] for instance in context: - self.log.info("instance:: {}".format(instance.data)) + self.log.debug("Instance: {}".format(instance.data)) if instance.data.get('publish'): subset_names.append(instance.data.get('subset')) diff --git a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py b/openpype/hosts/maya/plugins/publish/validate_setdress_root.py index 0b4842d208..8e23a7c04f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py +++ b/openpype/hosts/maya/plugins/publish/validate_setdress_root.py @@ -4,8 +4,7 @@ import openpype.api class ValidateSetdressRoot(pyblish.api.InstancePlugin): - """ - """ + """Validate if set dress top root node is published.""" order = openpype.api.ValidateContentsOrder label = "SetDress Root" diff --git a/openpype/hosts/maya/plugins/publish/validate_visible_only.py b/openpype/hosts/maya/plugins/publish/validate_visible_only.py new 
file mode 100644 index 0000000000..59a7f976ab --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_visible_only.py @@ -0,0 +1,51 @@ +import pyblish.api + +import openpype.api +from openpype.hosts.maya.api.lib import iter_visible_nodes_in_range +import openpype.hosts.maya.api.action + + +class ValidateAlembicVisibleOnly(pyblish.api.InstancePlugin): + """Validates at least a single node is visible in frame range. + + This validation only validates if the `visibleOnly` flag is enabled + on the instance - otherwise the validation is skipped. + + """ + order = openpype.api.ValidateContentsOrder + 0.05 + label = "Alembic Visible Only" + hosts = ["maya"] + families = ["pointcache", "animation"] + actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + + def process(self, instance): + + if not instance.data.get("visibleOnly", False): + self.log.debug("Visible only is disabled. Validation skipped..") + return + + invalid = self.get_invalid(instance) + if invalid: + start, end = self.get_frame_range(instance) + raise RuntimeError("No visible nodes found in " + "frame range {}-{}.".format(start, end)) + + @classmethod + def get_invalid(cls, instance): + + if instance.data["family"] == "animation": + # Special behavior to use the nodes in out_SET + nodes = instance.data["out_hierarchy"] + else: + nodes = instance[:] + + start, end = cls.get_frame_range(instance) + if not any(iter_visible_nodes_in_range(nodes, start, end)): + # Return the nodes we have considered so the user can identify + # them with the select invalid action + return nodes + + @staticmethod + def get_frame_range(instance): + data = instance.data + return data["frameStartHandle"], data["frameEndHandle"] diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py index a3ab483add..10e68c2ddb 100644 --- a/openpype/hosts/maya/startup/userSetup.py +++ b/openpype/hosts/maya/startup/userSetup.py @@ -1,10 +1,11 @@ import os from openpype.api import get_project_settings from openpype.pipeline import install_host -from openpype.hosts.maya import api +from openpype.hosts.maya.api import MayaHost from maya import cmds -install_host(api) +host = MayaHost() +install_host(host) print("starting OpenPype usersetup") diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 45a1e72703..9b24c9fb38 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -10,6 +10,7 @@ from collections import OrderedDict import clique import nuke +from Qt import QtCore, QtWidgets from openpype.client import ( get_project, @@ -20,21 +21,24 @@ from openpype.client import ( ) from openpype.api import ( Logger, - Anatomy, BuildWorkfile, get_version_from_path, - get_anatomy_settings, get_workdir_data, get_asset, get_current_project_settings, ) from openpype.tools.utils import host_tools +from openpype.lib import env_value_to_bool from openpype.lib.path_tools import HostDirmap -from openpype.settings import get_project_settings +from openpype.settings import ( + get_project_settings, + get_anatomy_settings, +) from openpype.modules import ModulesManager from openpype.pipeline import ( discover_legacy_creator_plugins, legacy_io, + Anatomy, ) from . 
import gizmo_menu

@@ -61,7 +65,10 @@ class Context:
     main_window = None
     context_label = None
     project_name = os.getenv("AVALON_PROJECT")
+    # Workfile related code
     workfiles_launched = False
+    workfiles_tool_timer = None
+    # Seems unused
     _project_doc = None

@@ -2382,12 +2389,19 @@ def select_nodes(nodes):

 def launch_workfiles_app():
-    '''Function letting start workfiles after start of host
-    '''
-    from openpype.lib import (
-        env_value_to_bool
-    )
-    from .pipeline import get_main_window
+    """Show workfiles tool on nuke launch.
+
+    Trigger to show the workfiles tool on application launch. Can be executed
+    only once; all subsequent calls are ignored.
+
+    Showing the workfiles tool is deferred until after application
+    initialization using QTimer.
+    """
+
+    if Context.workfiles_launched:
+        return
+
+    Context.workfiles_launched = True

     # get all important settings
     open_at_start = env_value_to_bool(
@@ -2398,10 +2412,40 @@ def launch_workfiles_app():
     if not open_at_start:
         return

-    if not Context.workfiles_launched:
-        Context.workfiles_launched = True
-        main_window = get_main_window()
-        host_tools.show_workfiles(parent=main_window)
+    # Show workfiles tool using a timer
+    # - this will probably be triggered during initialization, in which case
+    #   the application is not able to show UIs, so it must be deferred
+    #   using a timer
+    # - the timer is processed once initialization ends and the application
+    #   starts to process events
+    timer = QtCore.QTimer()
+    timer.timeout.connect(_launch_workfile_app)
+    timer.setInterval(100)
+    Context.workfiles_tool_timer = timer
+    timer.start()
+
+
+def _launch_workfile_app():
+    # Safeguard to not show window when application is still starting up
+    # or is already closing down.
+    closing_down = QtWidgets.QApplication.closingDown()
+    starting_up = QtWidgets.QApplication.startingUp()
+
+    # Stop the timer if the application finished starting up or is
+    # closing down
+    if closing_down or not starting_up:
+        Context.workfiles_tool_timer.stop()
+        Context.workfiles_tool_timer = None
+
+    # Skip if application is starting up or closing down
+    if starting_up or closing_down:
+        return
+
+    # Make sure on top is enabled on first show so the window is not hidden
+    # under the main nuke window
+    # - this happened on CentOS 7 because the focus of nuke changes to the
+    #   main window after showing, due to initialization, which moves the
+    #   workfiles tool under it
+    host_tools.show_workfiles(parent=None, on_top=True)


 def process_workfile_builder():
diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py
index 2785eb65cd..0afc56d2f7 100644
--- a/openpype/hosts/nuke/api/pipeline.py
+++ b/openpype/hosts/nuke/api/pipeline.py
@@ -120,8 +120,9 @@ def install():
     nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
     nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")
     nuke.addOnCreate(process_workfile_builder, nodeClass="Root")
-    nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")
+
     _install_menu()
+    launch_workfiles_app()


 def uninstall():
@@ -141,6 +142,14 @@ def uninstall():
     _uninstall_menu()


+def _show_workfiles():
+    # Make sure parent is not set
+    # - this makes the Workfiles tool a separate window, which avoids
+    #   issues with reopening
+    # - it is possible to explicitly change the on top flag of the tool
+    host_tools.show_workfiles(parent=None, on_top=False)
+
+
 def _install_menu():
     # uninstall original avalon menu
     main_window = get_main_window()
@@ -157,7 +166,7 @@ def _install_menu():
     menu.addSeparator()
     menu.addCommand(
         "Work Files...",
-        lambda:
host_tools.show_workfiles(parent=main_window) + _show_workfiles ) menu.addSeparator() diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index d177e6ba76..b2dc4a52d7 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -54,20 +54,28 @@ class LoadClip(plugin.NukeLoader): script_start = int(nuke.root()["first_frame"].value()) # option gui - defaults = { - "start_at_workfile": True + options_defaults = { + "start_at_workfile": True, + "add_retime": True } - options = [ - qargparse.Boolean( - "start_at_workfile", - help="Load at workfile start frame", - default=True - ) - ] - node_name_template = "{class_name}_{ext}" + @classmethod + def get_options(cls, *args): + return [ + qargparse.Boolean( + "start_at_workfile", + help="Load at workfile start frame", + default=cls.options_defaults["start_at_workfile"] + ), + qargparse.Boolean( + "add_retime", + help="Load with retime", + default=cls.options_defaults["add_retime"] + ) + ] + @classmethod def get_representations(cls): return ( @@ -86,7 +94,10 @@ class LoadClip(plugin.NukeLoader): file = self.fname.replace("\\", "/") start_at_workfile = options.get( - "start_at_workfile", self.defaults["start_at_workfile"]) + "start_at_workfile", self.options_defaults["start_at_workfile"]) + + add_retime = options.get( + "add_retime", self.options_defaults["add_retime"]) version = context['version'] version_data = version.get("data", {}) @@ -151,7 +162,7 @@ class LoadClip(plugin.NukeLoader): data_imprint = {} for k in add_keys: if k == 'version': - data_imprint.update({k: context["version"]['name']}) + data_imprint[k] = context["version"]['name'] elif k == 'colorspace': colorspace = repre["data"].get(k) colorspace = colorspace or version_data.get(k) @@ -159,10 +170,13 @@ class LoadClip(plugin.NukeLoader): if used_colorspace: data_imprint["used_colorspace"] = used_colorspace else: - data_imprint.update( - {k: context["version"]['data'].get(k, str(None))}) + data_imprint[k] = context["version"]['data'].get( + k, str(None)) - data_imprint.update({"objectName": read_name}) + data_imprint["objectName"] = read_name + + if add_retime and version_data.get("retime", None): + data_imprint["addRetime"] = True read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) @@ -174,7 +188,7 @@ class LoadClip(plugin.NukeLoader): loader=self.__class__.__name__, data=data_imprint) - if version_data.get("retime", None): + if add_retime and version_data.get("retime", None): self._make_retimes(read_node, version_data) self.set_as_member(read_node) @@ -198,7 +212,12 @@ class LoadClip(plugin.NukeLoader): read_node = nuke.toNode(container['objectName']) file = get_representation_path(representation).replace("\\", "/") - start_at_workfile = bool("start at" in read_node['frame_mode'].value()) + start_at_workfile = "start at" in read_node['frame_mode'].value() + + add_retime = [ + key for key in read_node.knobs().keys() + if "addRetime" in key + ] project_name = legacy_io.active_project() version_doc = get_version_by_id(project_name, representation["parent"]) @@ -286,7 +305,7 @@ class LoadClip(plugin.NukeLoader): "updated to version: {}".format(version_doc.get("name")) ) - if version_data.get("retime", None): + if add_retime and version_data.get("retime", None): self._make_retimes(read_node, version_data) else: self.clear_members(read_node) diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py 
b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
index 5ea7c352b9..fc16e189fb 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
@@ -104,7 +104,10 @@ class ExtractReviewDataMov(openpype.api.Extractor):
                 self, instance, o_name, o_data["extension"],
                 multiple_presets)

-            if "render.farm" in families:
+            if (
+                "render.farm" in families or
+                "prerender.farm" in families
+            ):
                 if "review" in instance.data["families"]:
                     instance.data["families"].remove("review")
diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py
index e0c4bdb953..99ade4cf9b 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py
@@ -4,6 +4,7 @@ import nuke
 import copy

 import pyblish.api
+import six

 import openpype
 from openpype.hosts.nuke.api import (
@@ -12,7 +13,6 @@ from openpype.hosts.nuke.api import (
     get_view_process_node
 )

-
 class ExtractSlateFrame(openpype.api.Extractor):
     """Extracts movie and thumbnail with baked in luts

@@ -152,6 +152,7 @@ class ExtractSlateFrame(openpype.api.Extractor):
         self.log.debug("__ first_frame: {}".format(first_frame))
         self.log.debug("__ slate_first_frame: {}".format(slate_first_frame))

+        above_slate_node = slate_node.dependencies().pop()
         # fallback if files do not exist
         if self._check_frames_exists(instance):
             # Read node
@@ -164,8 +165,16 @@
             r_node["colorspace"].setValue(instance.data["colorspace"])
             previous_node = r_node
             temporary_nodes = [previous_node]
+
+            # add a CopyMetaData node for correct frame metadata
+            cm_node = nuke.createNode("CopyMetaData")
+            cm_node.setInput(0, previous_node)
+            cm_node.setInput(1, above_slate_node)
+            previous_node = cm_node
+            temporary_nodes.append(cm_node)
+
         else:
-            previous_node = slate_node.dependencies().pop()
+            previous_node = above_slate_node
             temporary_nodes = []

         # only create colorspace baking if toggled on
@@ -236,6 +245,48 @@
             int(slate_first_frame)
         )

+        # Add file to representation files
+        # - get write node
+        write_node = instance.data["writeNode"]
+        # - evaluate filepaths for first frame and slate frame
+        first_filename = os.path.basename(
+            write_node["file"].evaluate(first_frame))
+        slate_filename = os.path.basename(
+            write_node["file"].evaluate(slate_first_frame))
+
+        # Find matching representation based on first filename
+        matching_repre = None
+        is_sequence = None
+        for repre in instance.data["representations"]:
+            files = repre["files"]
+            if (
+                not isinstance(files, six.string_types)
+                and first_filename in files
+            ):
+                matching_repre = repre
+                is_sequence = True
+                break
+
+            elif files == first_filename:
+                matching_repre = repre
+                is_sequence = False
+                break
+
+        if not matching_repre:
+            self.log.info((
+                "Matching representation was not found."
+                " Representation files were not filled with slate."
+            ))
+            return
+
+        # Add frame to matching representation files
+        if not is_sequence:
+            matching_repre["files"] = [first_filename, slate_filename]
+        elif slate_filename not in matching_repre["files"]:
+            matching_repre["files"].insert(0, slate_filename)
+
+        self.log.info("Added slate frame to representation files")
+
     def add_comment_slate_node(self, instance, node):
         comment = instance.context.data.get("comment")
diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py
index 049958bd07..a97f34b370 100644
--- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py
+++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py
@@ -35,6 +35,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
         if node is None:
             return

+        instance.data["writeNode"] = node
         self.log.debug("checking instance: {}".format(instance))

         # Determine defined file type
diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py
index c4717bd370..93ccdaf812 100644
--- a/openpype/hosts/resolve/api/lib.py
+++ b/openpype/hosts/resolve/api/lib.py
@@ -319,14 +319,13 @@ def get_current_timeline_items(
     selected_track_count = timeline.GetTrackCount(track_type)

     # loop all tracks and get items
-    _clips = dict()
+    _clips = {}
     for track_index in range(1, (int(selected_track_count) + 1)):
         _track_name = timeline.GetTrackName(track_type, track_index)

         # filter out all unmatched track names
-        if track_name:
-            if _track_name not in track_name:
-                continue
+        if track_name and _track_name not in track_name:
+            continue

         timeline_items = timeline.GetItemListInTrack(
             track_type, track_index)
@@ -348,12 +347,8 @@
             "index": clip_index
         }
         ti_color = ti.GetClipColor()
-        if filter is True:
-            if selecting_color in ti_color:
-                selected_clips.append(data)
-        else:
+        if (filter and selecting_color in ti_color) or not filter:
             selected_clips.append(data)
-
     return selected_clips
diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py
index 8e1436021c..49b478fb3b 100644
--- a/openpype/hosts/resolve/api/plugin.py
+++ b/openpype/hosts/resolve/api/plugin.py
@@ -506,7 +506,7 @@ class Creator(LegacyCreator):
         super(Creator, self).__init__(*args, **kwargs)
         from openpype.api import get_current_project_settings
         resolve_p_settings = get_current_project_settings().get("resolve")
-        self.presets = dict()
+        self.presets = {}
         if resolve_p_settings:
             self.presets = resolve_p_settings["create"].get(
                 self.__class__.__name__, {})
diff --git a/openpype/hosts/resolve/plugins/create/create_shot_clip.py b/openpype/hosts/resolve/plugins/create/create_shot_clip.py
index 62d5557a50..dbf10c5163 100644
--- a/openpype/hosts/resolve/plugins/create/create_shot_clip.py
+++ b/openpype/hosts/resolve/plugins/create/create_shot_clip.py
@@ -116,12 +116,13 @@ class CreateShotClip(resolve.Creator):
                 "order": 0},
             "vSyncTrack": {
                 "value": gui_tracks,  # noqa
-                "type": "QComboBox",
-                "label": "Hero track",
-                "target": "ui",
-                "toolTip": "Select driving track name which should be mastering all others",  # noqa
-                "order": 1}
+                "type": "QComboBox",
+                "label": "Hero track",
+                "target": "ui",
+                "toolTip": "Select driving track name which should be mastering all others",  # noqa
+                "order": 1
+            }
         }
     },
     "publishSettings": {
         "type": "section",
@@ -172,28 +173,31 @@
             "target": "ui",
             "order": 4,
             "value": {
-                "workfileFrameStart": {
-                    "value": 1001,
-                    "type": "QSpinBox",
-                    "label": "Workfiles Start Frame",
-                    "target": "tag",
- "toolTip": "Set workfile starting frame number", # noqa - "order": 0}, - "handleStart": { - "value": 0, - "type": "QSpinBox", - "label": "Handle start (head)", - "target": "tag", - "toolTip": "Handle at start of clip", # noqa - "order": 1}, - "handleEnd": { - "value": 0, - "type": "QSpinBox", - "label": "Handle end (tail)", - "target": "tag", - "toolTip": "Handle at end of clip", # noqa - "order": 2}, - } + "workfileFrameStart": { + "value": 1001, + "type": "QSpinBox", + "label": "Workfiles Start Frame", + "target": "tag", + "toolTip": "Set workfile starting frame number", # noqa + "order": 0 + }, + "handleStart": { + "value": 0, + "type": "QSpinBox", + "label": "Handle start (head)", + "target": "tag", + "toolTip": "Handle at start of clip", # noqa + "order": 1 + }, + "handleEnd": { + "value": 0, + "type": "QSpinBox", + "label": "Handle end (tail)", + "target": "tag", + "toolTip": "Handle at end of clip", # noqa + "order": 2 + } + } } } @@ -229,8 +233,10 @@ class CreateShotClip(resolve.Creator): v_sync_track = widget.result["vSyncTrack"]["value"] # sort selected trackItems by - sorted_selected_track_items = list() - unsorted_selected_track_items = list() + sorted_selected_track_items = [] + unsorted_selected_track_items = [] + print("_____ selected ______") + print(self.selected) for track_item_data in self.selected: if track_item_data["track"]["name"] in v_sync_track: sorted_selected_track_items.append(track_item_data) @@ -253,10 +259,10 @@ class CreateShotClip(resolve.Creator): "sq_frame_start": sq_frame_start, "sq_markers": sq_markers } - + print(kwargs) for i, track_item_data in enumerate(sorted_selected_track_items): self.rename_index = i - + self.log.info(track_item_data) # convert track item to timeline media pool item track_item = resolve.PublishClip( self, track_item_data, **kwargs).convert() diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index cf88b14e81..190a5a7206 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -1,6 +1,10 @@ from copy import deepcopy from importlib import reload +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.hosts import resolve from openpype.pipeline import ( get_representation_path, @@ -19,7 +23,7 @@ class LoadClip(resolve.TimelineItemLoader): """ families = ["render2d", "source", "plate", "render", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264", ".mov"] + representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264", "mov"] label = "Load as clip" order = -10 @@ -96,10 +100,8 @@ class LoadClip(resolve.TimelineItemLoader): namespace = container['namespace'] timeline_item_data = resolve.get_pype_timeline_item_by_name(namespace) timeline_item = timeline_item_data["clip"]["item"] - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version = get_version_by_id(project_name, representation["parent"]) version_data = version.get("data", {}) version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) @@ -138,19 +140,22 @@ class LoadClip(resolve.TimelineItemLoader): @classmethod def set_item_color(cls, timeline_item, version): - # define version name version_name = version.get("name", None) # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = 
max(versions) + project_name = legacy_io.active_project() + last_version_doc = get_last_version_by_subset_id( + project_name, + version["parent"], + fields=["name"] + ) + if last_version_doc: + last_version = last_version_doc["name"] + else: + last_version = None # set clip colour - if version_name == max_version: + if version_name == last_version: timeline_item.SetClipColor(cls.clip_color_last) else: timeline_item.SetClipColor(cls.clip_color) diff --git a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py index a58f288770..53e67aee0e 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py @@ -30,7 +30,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): "asset": asset, "subset": "{}{}".format(asset, subset.capitalize()), "item": project, - "family": "workfile" + "family": "workfile", + "families": [] } # create instance with workfile diff --git a/openpype/hosts/traypublisher/api/__init__.py b/openpype/hosts/traypublisher/api/__init__.py index c461c0c526..4e7284b09a 100644 --- a/openpype/hosts/traypublisher/api/__init__.py +++ b/openpype/hosts/traypublisher/api/__init__.py @@ -1,20 +1,8 @@ from .pipeline import ( - install, - ls, - - set_project_name, - get_context_title, - get_context_data, - update_context_data, + TrayPublisherHost, ) __all__ = ( - "install", - "ls", - - "set_project_name", - "get_context_title", - "get_context_data", - "update_context_data", + "TrayPublisherHost", ) diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py index 954a0bae47..2d9db7801e 100644 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ b/openpype/hosts/traypublisher/api/pipeline.py @@ -9,6 +9,8 @@ from openpype.pipeline import ( register_creator_plugin_path, legacy_io, ) +from openpype.host import HostBase, INewPublisher + ROOT_DIR = os.path.dirname(os.path.dirname( os.path.abspath(__file__) @@ -17,6 +19,35 @@ PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish") CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create") +class TrayPublisherHost(HostBase, INewPublisher): + name = "traypublisher" + + def install(self): + os.environ["AVALON_APP"] = self.name + legacy_io.Session["AVALON_APP"] = self.name + + pyblish.api.register_host("traypublisher") + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_creator_plugin_path(CREATE_PATH) + + def get_context_title(self): + return HostContext.get_project_name() + + def get_context_data(self): + return HostContext.get_context_data() + + def update_context_data(self, data, changes): + HostContext.save_context_data(data, changes) + + def set_project_name(self, project_name): + # TODO Deregister project specific plugins and register new project + # plugins + os.environ["AVALON_PROJECT"] = project_name + legacy_io.Session["AVALON_PROJECT"] = project_name + legacy_io.install() + HostContext.set_project_name(project_name) + + class HostContext: _context_json_path = None @@ -150,32 +181,3 @@ def get_context_data(): def update_context_data(data, changes): HostContext.save_context_data(data) - - -def get_context_title(): - return HostContext.get_project_name() - - -def ls(): - """Probably will never return loaded containers.""" - return [] - - -def install(): - """This is called before a project is known. - - Project is defined with 'set_project_name'. 
- """ - os.environ["AVALON_APP"] = "traypublisher" - - pyblish.api.register_host("traypublisher") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_creator_plugin_path(CREATE_PATH) - - -def set_project_name(project_name): - # TODO Deregister project specific plugins and register new project plugins - os.environ["AVALON_PROJECT"] = project_name - legacy_io.Session["AVALON_PROJECT"] = project_name - legacy_io.install() - HostContext.set_project_name(project_name) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py index 202664cfc6..9b9425855e 100644 --- a/openpype/hosts/traypublisher/api/plugin.py +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -1,8 +1,8 @@ +from openpype.lib.attribute_definitions import FileDef from openpype.pipeline import ( Creator, CreatedInstance ) -from openpype.lib import FileDef from .pipeline import ( list_instances, @@ -12,6 +12,29 @@ from .pipeline import ( ) +IMAGE_EXTENSIONS = [ + ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal", + ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits", + ".flic", ".flif", ".fpx", ".gif", ".hdri", ".hevc", ".icer", + ".icns", ".ico", ".cur", ".ics", ".ilbm", ".jbig", ".jbig2", + ".jng", ".jpeg", ".jpeg-ls", ".jpeg", ".2000", ".jpg", ".xr", + ".jpeg", ".xt", ".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd", + ".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf", + ".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras", + ".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep", + ".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf", + ".xpm", ".xwd" +] +VIDEO_EXTENSIONS = [ + ".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".f4a", ".f4b", + ".f4p", ".f4v", ".flv", ".gif", ".gifv", ".m2v", ".m4p", ".m4v", + ".mkv", ".mng", ".mov", ".mp2", ".mp4", ".mpe", ".mpeg", ".mpg", + ".mpv", ".mxf", ".nsv", ".ogg", ".ogv", ".qt", ".rm", ".rmvb", + ".roq", ".svi", ".vob", ".webm", ".wmv", ".yuv" +] +REVIEW_EXTENSIONS = IMAGE_EXTENSIONS + VIDEO_EXTENSIONS + + class TrayPublishCreator(Creator): create_allow_context_change = True host_name = "traypublisher" @@ -37,6 +60,21 @@ class TrayPublishCreator(Creator): # Use same attributes as for instance attrobites return self.get_instance_attr_defs() + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into "workfile" of traypublisher and also add it + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. 
+ """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + # Add instance to current context + self._add_instance_to_context(new_instance) + class SettingsCreator(TrayPublishCreator): create_allow_context_change = True @@ -58,19 +96,27 @@ class SettingsCreator(TrayPublishCreator): data["settings_creator"] = True # Create new instance new_instance = CreatedInstance(self.family, subset_name, data, self) - # Host implementation of storing metadata about instance - HostContext.add_instance(new_instance.data_to_store()) - # Add instance to current context - self._add_instance_to_context(new_instance) + + self._store_new_instance(new_instance) def get_instance_attr_defs(self): return [ FileDef( - "filepath", + "representation_files", folders=False, extensions=self.extensions, allow_sequences=self.allow_sequences, - label="Filepath", + single_item=not self.allow_multiple_items, + label="Representations", + ), + FileDef( + "reviewable", + folders=False, + extensions=REVIEW_EXTENSIONS, + allow_sequences=True, + single_item=True, + label="Reviewable representations", + extensions_label="Single reviewable item" ) ] @@ -92,6 +138,7 @@ class SettingsCreator(TrayPublishCreator): "detailed_description": item_data["detailed_description"], "extensions": item_data["extensions"], "allow_sequences": item_data["allow_sequences"], + "allow_multiple_items": item_data["allow_multiple_items"], "default_variants": item_data["default_variants"] } ) diff --git a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py new file mode 100644 index 0000000000..c5f0d6b75e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py @@ -0,0 +1,216 @@ +import copy +import os +import re + +from openpype.client import get_assets, get_asset_by_name +from openpype.lib import ( + FileDef, + BoolDef, + get_subset_name_with_asset_doc, + TaskNotSetError, +) +from openpype.pipeline import ( + CreatedInstance, + CreatorError +) + +from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator + + +class BatchMovieCreator(TrayPublishCreator): + """Creates instances from movie file(s). + + Intended for .mov files, but should work for any video file. + Doesn't handle image sequences though. 
+ """ + identifier = "render_movie_batch" + label = "Batch Movies" + family = "render" + description = "Publish batch of video files" + + create_allow_context_change = False + version_regex = re.compile(r"^(.+)_v([0-9]+)$") + + def __init__(self, project_settings, *args, **kwargs): + super(BatchMovieCreator, self).__init__(project_settings, + *args, **kwargs) + creator_settings = ( + project_settings["traypublisher"]["BatchMovieCreator"] + ) + self.default_variants = creator_settings["default_variants"] + self.default_tasks = creator_settings["default_tasks"] + self.extensions = creator_settings["extensions"] + + def get_icon(self): + return "fa.file" + + def create(self, subset_name, data, pre_create_data): + file_paths = pre_create_data.get("filepath") + if not file_paths: + return + + for file_info in file_paths: + instance_data = copy.deepcopy(data) + file_name = file_info["filenames"][0] + filepath = os.path.join(file_info["directory"], file_name) + instance_data["creator_attributes"] = {"filepath": filepath} + + asset_doc, version = self.get_asset_doc_from_file_name( + file_name, self.project_name) + + subset_name, task_name = self._get_subset_and_task( + asset_doc, data["variant"], self.project_name) + + instance_data["task"] = task_name + instance_data["asset"] = asset_doc["name"] + + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, + instance_data, self) + self._store_new_instance(new_instance) + + def get_asset_doc_from_file_name(self, source_filename, project_name): + """Try to parse out asset name from file name provided. + + Artists might provide various file name formats. + Currently handled: + - chair.mov + - chair_v001.mov + - my_chair_to_upload.mov + """ + version = None + asset_name = os.path.splitext(source_filename)[0] + # Always first check if source filename is in assets + matching_asset_doc = self._get_asset_by_name_case_not_sensitive( + project_name, asset_name) + + if matching_asset_doc is None: + matching_asset_doc, version = ( + self._parse_with_version(project_name, asset_name)) + + if matching_asset_doc is None: + matching_asset_doc = self._parse_containing(project_name, + asset_name) + + if matching_asset_doc is None: + raise CreatorError( + "Cannot guess asset name from {}".format(source_filename)) + + return matching_asset_doc, version + + def _parse_with_version(self, project_name, asset_name): + """Try to parse asset name from a file name containing version too + + Eg. 'chair_v001.mov' >> 'chair', 1 + """ + self.log.debug(( + "Asset doc by \"{}\" was not found, trying version regex." 
+        ).format(asset_name))
+
+        matching_asset_doc = version_number = None
+
+        regex_result = self.version_regex.findall(asset_name)
+        if regex_result:
+            _asset_name, _version_number = regex_result[0]
+            matching_asset_doc = self._get_asset_by_name_case_not_sensitive(
+                project_name, _asset_name)
+            if matching_asset_doc:
+                version_number = int(_version_number)
+
+        return matching_asset_doc, version_number
+
+    def _parse_containing(self, project_name, asset_name):
+        """Look if the file name contains any existing asset name."""
+        for asset_doc in get_assets(project_name, fields=["name"]):
+            if asset_doc["name"].lower() in asset_name.lower():
+                return get_asset_by_name(project_name, asset_doc["name"])
+
+    def _get_subset_and_task(self, asset_doc, variant, project_name):
+        """Create subset name according to the standard template process."""
+        task_name = self._get_task_name(asset_doc)
+
+        try:
+            subset_name = get_subset_name_with_asset_doc(
+                self.family,
+                variant,
+                task_name,
+                asset_doc,
+                project_name
+            )
+        except TaskNotSetError:
+            # Create instance with fake task
+            # - instance will be marked as invalid so it can't be published,
+            #   but the user has the ability to change it
+            # NOTE: This expects that there is no task 'Undefined' on asset
+            task_name = "Undefined"
+            subset_name = get_subset_name_with_asset_doc(
+                self.family,
+                variant,
+                task_name,
+                asset_doc,
+                project_name
+            )
+
+        return subset_name, task_name
+
+    def _get_task_name(self, asset_doc):
+        """Get applicable task from 'asset_doc'."""
+        available_task_names = {}
+        asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
+        for task_name in asset_tasks.keys():
+            available_task_names[task_name.lower()] = task_name
+
+        task_name = None
+        for _task_name in self.default_tasks:
+            _task_name_low = _task_name.lower()
+            if _task_name_low in available_task_names:
+                task_name = available_task_names[_task_name_low]
+                break
+
+        return task_name
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef(
+                "add_review_family",
+                default=True,
+                label="Review"
+            )
+        ]
+
+    def get_pre_create_attr_defs(self):
+        # Use same attributes as for instance attributes
+        return [
+            FileDef(
+                "filepath",
+                folders=False,
+                single_item=False,
+                extensions=self.extensions,
+                label="Filepath"
+            ),
+            BoolDef(
+                "add_review_family",
+                default=True,
+                label="Review"
+            )
+        ]
+
+    def get_detail_description(self):
+        return """# Publish a batch of .mov files to multiple assets.
+
+        File names must contain only the asset name, or asset name + version
+        (eg. 'chair.mov' or 'chair_v001.mov'; a name like `my_chair_v001.mov`
+        is not really safe).
+        """
+
+    def _get_asset_by_name_case_not_sensitive(self, project_name, asset_name):
+        """Handle more cases in file names."""
+        asset_name = re.compile(asset_name, re.IGNORECASE)
+
+        assets = list(get_assets(project_name, asset_names=[asset_name]))
+        if assets:
+            if len(assets) > 1:
+                self.log.warning("Too many records found for {}".format(
+                    asset_name))
+                return
+
+            return assets.pop()
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py
new file mode 100644
index 0000000000..f37e04d1c9
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py
@@ -0,0 +1,47 @@
+import os
+
+import pyblish.api
+from openpype.pipeline import OpenPypePyblishPluginMixin
+
+
+class CollectMovieBatch(
+    pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin
+):
+    """Collect file URL for batch movies and create representation.
+
+    Adds the 'review' family to the instance and a 'review' tag to the
+    representation, based on the value of the toggle button on the creator.
+    """
+
+    label = "Collect Movie Batch Files"
+    order = pyblish.api.CollectorOrder
+
+    hosts = ["traypublisher"]
+
+    def process(self, instance):
+        if instance.data.get("creator_identifier") != "render_movie_batch":
+            return
+
+        creator_attributes = instance.data["creator_attributes"]
+
+        file_url = creator_attributes["filepath"]
+        file_name = os.path.basename(file_url)
+        _, ext = os.path.splitext(file_name)
+
+        repre = {
+            "name": ext[1:],
+            "ext": ext[1:],
+            "files": file_name,
+            "stagingDir": os.path.dirname(file_url),
+            "tags": []
+        }
+
+        if creator_attributes["add_review_family"]:
+            repre["tags"].append("review")
+            instance.data["families"].append("review")
+
+        instance.data["representations"].append(repre)
+
+        instance.data["source"] = file_url
+
+        self.log.debug("instance.data {}".format(instance.data))
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py b/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py
deleted file mode 100644
index 965e251527..0000000000
--- a/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import pyblish.api
-from openpype.lib import BoolDef
-from openpype.pipeline import OpenPypePyblishPluginMixin
-
-
-class CollectReviewFamily(
-    pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin
-):
-    """Add review family."""
-
-    label = "Collect Review Family"
-    order = pyblish.api.CollectorOrder - 0.49
-
-    hosts = ["traypublisher"]
-    families = [
-        "image",
-        "render",
-        "plate",
-        "review"
-    ]
-
-    def process(self, instance):
-        values = self.get_attr_values_from_data(instance.data)
-        if values.get("add_review_family"):
-            instance.data["families"].append("review")
-
-    @classmethod
-    def get_attribute_defs(cls):
-        return [
-            BoolDef("add_review_family", label="Review", default=True)
-        ]
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py
index b2be43c701..c0ae694c3c 100644
--- a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py
@@ -1,9 +1,31 @@
 import os
+import tempfile
+
+import clique
 
 import pyblish.api
 
 
 class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
-    """Collect data for instances created by settings creators."""
+    """Collect data for instances created by settings creators.
+
+    Plugin creates representations for simple instances based
+    on the 'representation_files' attribute stored on instance data.
+
+    There is also a possibility to have a reviewable representation which
+    can be stored under the 'reviewable' attribute on instance data. If a
+    representation was already created with the same files as 'reviewable'
+    contains, that representation is reused for review.
+
+    Representations can be marked for review, in which case the 'review'
+    family is also added to the instance families. Only one representation
+    can be marked for review, so the **first** representation that has an
+    extension available in '_review_extensions' is used for review.
+
+    The instance 'source' is the path of the last representation created
+    from 'representation_files'.
+
+    Also sets the staging directory on the instance. That is probably never
+    used because each created representation has its own staging dir.
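+
+    Example of one created representation (illustrative values only)::
+
+        {
+            "ext": "exr",
+            "name": "exr",
+            "stagingDir": "/path/to/input/dir",
+            "files": ["render.0001.exr", "render.0002.exr"],
+            "tags": []
+        }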
+ """ label = "Collect Settings Simple Instances" order = pyblish.api.CollectorOrder - 0.49 @@ -14,37 +36,193 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): if not instance.data.get("settings_creator"): return - if "families" not in instance.data: - instance.data["families"] = [] + instance_label = instance.data["name"] + # Create instance's staging dir in temp + tmp_folder = tempfile.mkdtemp(prefix="traypublisher_") + instance.data["stagingDir"] = tmp_folder + instance.context.data["cleanupFullPaths"].append(tmp_folder) - if "representations" not in instance.data: - instance.data["representations"] = [] - repres = instance.data["representations"] + self.log.debug(( + "Created temp staging directory for instance {}. {}" + ).format(instance_label, tmp_folder)) + + # Store filepaths for validation of their existence + source_filepaths = [] + # Make sure there are no representations with same name + repre_names_counter = {} + # Store created names for logging + repre_names = [] + # Store set of filepaths per each representation + representation_files_mapping = [] + source = self._create_main_representations( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + + self._create_review_representation( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + + instance.data["source"] = source + instance.data["sourceFilepaths"] = list(set(source_filepaths)) + + self.log.debug( + ( + "Created Simple Settings instance \"{}\"" + " with {} representations: {}" + ).format( + instance_label, + len(instance.data["representations"]), + ", ".join(repre_names) + ) + ) + + def _create_main_representations( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + creator_attributes = instance.data["creator_attributes"] + filepath_items = creator_attributes["representation_files"] + if not isinstance(filepath_items, list): + filepath_items = [filepath_items] + + source = None + for filepath_item in filepath_items: + # Skip if filepath item does not have filenames + if not filepath_item["filenames"]: + continue + + filepaths = { + os.path.join(filepath_item["directory"], filename) + for filename in filepath_item["filenames"] + } + source_filepaths.extend(filepaths) + + source = self._calculate_source(filepaths) + representation = self._create_representation_data( + filepath_item, repre_names_counter, repre_names + ) + instance.data["representations"].append(representation) + representation_files_mapping.append( + (filepaths, representation, source) + ) + return source + + def _create_review_representation( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + # Skip review representation creation if there are no representations + # created for "main" part + # - review representation must not be created in that case so + # validation can care about it + if not representation_files_mapping: + self.log.warning(( + "There are missing source representations." + " Creation of review representation was skipped." 
+            ))
+            return
 
         creator_attributes = instance.data["creator_attributes"]
-        filepath_item = creator_attributes["filepath"]
-        self.log.info(filepath_item)
-        filepaths = [
-            os.path.join(filepath_item["directory"], filename)
-            for filename in filepath_item["filenames"]
-        ]
+        review_file_item = creator_attributes["reviewable"]
+        filenames = review_file_item.get("filenames")
+        if not filenames:
+            self.log.debug((
+                "Filepath for review is not defined."
+                " Skipping review representation creation."
+            ))
+            return
 
-        instance.data["sourceFilepaths"] = filepaths
-        instance.data["stagingDir"] = filepath_item["directory"]
+        filepaths = {
+            os.path.join(review_file_item["directory"], filename)
+            for filename in filenames
+        }
+        source_filepaths.extend(filepaths)
+        # First try to find an existing representation with the same
+        #   filepaths so a new representation is not created just for review
+        review_representation = None
+        # Review path (only for logging)
+        review_path = None
+        for item in representation_files_mapping:
+            _filepaths, representation, repre_path = item
+            if _filepaths == filepaths:
+                review_representation = representation
+                review_path = repre_path
+                break
+
+        if review_representation is None:
+            self.log.debug("Creating new review representation")
+            review_path = self._calculate_source(filepaths)
+            review_representation = self._create_representation_data(
+                review_file_item, repre_names_counter, repre_names
+            )
+            instance.data["representations"].append(review_representation)
+
+        if "review" not in instance.data["families"]:
+            instance.data["families"].append("review")
+
+        review_representation["tags"].append("review")
+        self.log.debug("Representation {} was marked for review. {}".format(
+            review_representation["name"], review_path
+        ))
+
+    def _create_representation_data(
+        self, filepath_item, repre_names_counter, repre_names
+    ):
+        """Create new representation data based on a file item.
+
+        Args:
+            filepath_item (Dict[str, Any]): Item with information about
+                representation paths.
+            repre_names_counter (Dict[str, int]): Counter of used
+                representation names.
+            repre_names (List[str]): All used representation names. For
+                logging purposes.
+
+        Returns:
+            Dict: Prepared base representation data.
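+
+        Example (hypothetical duplicate name handling)::
+
+            # first item with "exr" files  -> {"name": "exr", ...}
+            # second item with "exr" files -> {"name": "exr_2", ...}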
+ """ filenames = filepath_item["filenames"] _, ext = os.path.splitext(filenames[0]) - ext = ext[1:] if len(filenames) == 1: filenames = filenames[0] - repres.append({ - "ext": ext, - "name": ext, + repre_name = repre_ext = ext[1:] + if repre_name not in repre_names_counter: + repre_names_counter[repre_name] = 2 + else: + counter = repre_names_counter[repre_name] + repre_names_counter[repre_name] += 1 + repre_name = "{}_{}".format(repre_name, counter) + repre_names.append(repre_name) + return { + "ext": repre_ext, + "name": repre_name, "stagingDir": filepath_item["directory"], - "files": filenames - }) + "files": filenames, + "tags": [] + } - self.log.debug("Created Simple Settings instance {}".format( - instance.data - )) + def _calculate_source(self, filepaths): + cols, rems = clique.assemble(filepaths) + if cols: + source = cols[0].format("{head}{padding}{tail}") + elif rems: + source = rems[0] + return source diff --git a/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml new file mode 100644 index 0000000000..933df1c7c5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml @@ -0,0 +1,15 @@ + + + +Invalid frame range + +## Invalid frame range + +Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames. + +### How to repair? + +Modify configuration in the database or tweak frame range in the workfile. + + + \ No newline at end of file diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py index c7302b1005..749199fbd3 100644 --- a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py +++ b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py @@ -3,8 +3,17 @@ import pyblish.api from openpype.pipeline import PublishValidationError -class ValidateWorkfilePath(pyblish.api.InstancePlugin): - """Validate existence of workfile instance existence.""" +class ValidateFilePath(pyblish.api.InstancePlugin): + """Validate existence of source filepaths on instance. + + Plugins looks into key 'sourceFilepaths' and validate if paths there + actually exist on disk. + + Also validate if the key is filled but is empty. In that case also + crashes so do not fill the key if unfilled value should not cause error. + + This is primarily created for Simple Creator instances. + """ label = "Validate Workfile" order = pyblish.api.ValidatorOrder - 0.49 @@ -14,12 +23,28 @@ class ValidateWorkfilePath(pyblish.api.InstancePlugin): def process(self, instance): if "sourceFilepaths" not in instance.data: self.log.info(( - "Can't validate source filepaths existence." + "Skipped validation of source filepaths existence." " Instance does not have collected 'sourceFilepaths'" )) return - filepaths = instance.data.get("sourceFilepaths") + family = instance.data["family"] + label = instance.data["name"] + filepaths = instance.data["sourceFilepaths"] + if not filepaths: + raise PublishValidationError( + ( + "Source filepaths of '{}' instance \"{}\" are not filled" + ).format(family, label), + "File not filled", + ( + "## Files were not filled" + "\nThis mean that you didn't enter any files into required" + " file input." 
+ "\n- Please refresh publishing and check instance" + " {}" + ).format(label) + ) not_found_files = [ filepath @@ -34,11 +59,7 @@ class ValidateWorkfilePath(pyblish.api.InstancePlugin): raise PublishValidationError( ( "Filepath of '{}' instance \"{}\" does not exist:\n{}" - ).format( - instance.data["family"], - instance.data["name"], - joined_paths - ), + ).format(family, label, joined_paths), "File not found", ( "## Files were not found\nFiles\n{}" diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py new file mode 100644 index 0000000000..947624100a --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py @@ -0,0 +1,75 @@ +import re + +import pyblish.api + +import openpype.api +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) + + +class ValidateFrameRange(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validating frame range of rendered files against state in DB.""" + + label = "Validate Frame Range" + hosts = ["traypublisher"] + families = ["render"] + order = openpype.api.ValidateContentsOrder + + optional = True + # published data might be sequence (.mov, .mp4) in that counting files + # doesnt make sense + check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga", + "gif", "svg"] + skip_timelines_check = [] # skip for specific task names (regex) + + def process(self, instance): + # Skip the instance if is not active by data on the instance + if not self.is_active(instance.data): + return + + if (self.skip_timelines_check and + any(re.search(pattern, instance.data["task"]) + for pattern in self.skip_timelines_check)): + self.log.info("Skipping for {} task".format(instance.data["task"])) + + asset_doc = instance.data["assetEntity"] + asset_data = asset_doc["data"] + frame_start = asset_data["frameStart"] + frame_end = asset_data["frameEnd"] + handle_start = asset_data["handleStart"] + handle_end = asset_data["handleEnd"] + duration = (frame_end - frame_start + 1) + handle_start + handle_end + + repres = instance.data.get("representations") + if not repres: + self.log.info("No representations, skipping.") + return + + first_repre = repres[0] + ext = first_repre['ext'].replace(".", '') + + if not ext or ext.lower() not in self.check_extensions: + self.log.warning("Cannot check for extension {}".format(ext)) + return + + files = first_repre["files"] + if isinstance(files, str): + files = [files] + frames = len(files) + + msg = ( + "Frame duration from DB:'{}' doesn't match number of files:'{}'" + " Please change frame range for Asset or limit no. of files" + ). format(int(duration), frames) + + formatting_data = {"duration": duration, + "found": frames} + if frames != duration: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + self.log.debug("Valid ranges expected '{}' - found '{}'". 
+                       format(int(duration), frames))
diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py
index 462f12abf0..c6dc765a27 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py
+++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py
@@ -10,8 +10,8 @@ from openpype.lib import (
 from openpype.pipeline import (
     registered_host,
     legacy_io,
+    Anatomy,
 )
-from openpype.api import Anatomy
 from openpype.hosts.tvpaint.api import lib, pipeline, plugin
 
 
diff --git a/openpype/hosts/unreal/api/rendering.py b/openpype/hosts/unreal/api/rendering.py
index b2732506fc..29e4747f6e 100644
--- a/openpype/hosts/unreal/api/rendering.py
+++ b/openpype/hosts/unreal/api/rendering.py
@@ -2,7 +2,7 @@ import os
 
 import unreal
 
-from openpype.api import Anatomy
+from openpype.pipeline import Anatomy
 from openpype.hosts.unreal.api import pipeline
 
 
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Content/Python/__init__.py b/openpype/hosts/unreal/integration/UE_5.0/Content/Python/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Content/__init__.py b/openpype/hosts/unreal/integration/UE_5.0/Content/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py
index 9fb45ea7a7..cb28f4bf60 100644
--- a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py
+++ b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py
@@ -3,7 +3,7 @@ from pathlib import Path
 
 import unreal
 
-from openpype.api import Anatomy
+from openpype.pipeline import Anatomy
 from openpype.hosts.unreal.api import pipeline
 import pyblish.api
 
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index 8d4e733b7d..fb52a9aca7 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -120,7 +120,6 @@ from .avalon_context import (
     is_latest,
     any_outdated,
     get_asset,
-    get_hierarchy,
     get_linked_assets,
     get_latest_version,
     get_system_general_anatomy_data,
@@ -292,7 +291,6 @@ __all__ = [
     "is_latest",
     "any_outdated",
     "get_asset",
-    "get_hierarchy",
     "get_linked_assets",
     "get_latest_version",
     "get_system_general_anatomy_data",
diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py
index 3d81f6d794..e4ff87aa0f 100644
--- a/openpype/lib/abstract_collect_render.py
+++ b/openpype/lib/abstract_collect_render.py
@@ -1,269 +1,33 @@
 # -*- coding: utf-8 -*-
-"""Collect render template.
+"""Content was moved to 'openpype.pipeline.publish.abstract_collect_render'.
 
-TODO: use @dataclass when times come.
+Please change your imports as soon as possible.
+
+File will probably be removed in OpenPype 3.14.*
 """
-from abc import abstractmethod
-
-import attr
-import six
-
-import pyblish.api
-
-from openpype.pipeline import legacy_io
-
-from .abstract_metaplugins import AbstractMetaContextPlugin
+
+import warnings
+from openpype.pipeline.publish import AbstractCollectRender, RenderInstance
 
 
-@attr.s
-class RenderInstance(object):
-    """Data collected by collectors.
-
-    This data class later on passed to collected instances.
-    Those attributes are required later on.
- - """ - - # metadata - version = attr.ib() # instance version - time = attr.ib() # time of instance creation (get_formatted_current_time) - source = attr.ib() # path to source scene file - label = attr.ib() # label to show in GUI - subset = attr.ib() # subset name - task = attr.ib() # task name - asset = attr.ib() # asset name (AVALON_ASSET) - attachTo = attr.ib() # subset name to attach render to - setMembers = attr.ib() # list of nodes/members producing render output - publish = attr.ib() # bool, True to publish instance - name = attr.ib() # instance name - - # format settings - resolutionWidth = attr.ib() # resolution width (1920) - resolutionHeight = attr.ib() # resolution height (1080) - pixelAspect = attr.ib() # pixel aspect (1.0) - - # time settings - frameStart = attr.ib() # start frame - frameEnd = attr.ib() # start end - frameStep = attr.ib() # frame step - - handleStart = attr.ib(default=None) # start frame - handleEnd = attr.ib(default=None) # start frame - - # for software (like Harmony) where frame range cannot be set by DB - # handles need to be propagated if exist - ignoreFrameHandleCheck = attr.ib(default=False) - - # -------------------- - # With default values - # metadata - renderer = attr.ib(default="") # renderer - can be used in Deadline - review = attr.ib(default=False) # generate review from instance (bool) - priority = attr.ib(default=50) # job priority on farm - - family = attr.ib(default="renderlayer") - families = attr.ib(default=["renderlayer"]) # list of families - - # format settings - multipartExr = attr.ib(default=False) # flag for multipart exrs - convertToScanline = attr.ib(default=False) # flag for exr conversion - - tileRendering = attr.ib(default=False) # bool: treat render as tiles - tilesX = attr.ib(default=0) # number of tiles in X - tilesY = attr.ib(default=0) # number of tiles in Y - - # submit_publish_job - toBeRenderedOn = attr.ib(default=None) - deadlineSubmissionJob = attr.ib(default=None) - anatomyData = attr.ib(default=None) - outputDir = attr.ib(default=None) - context = attr.ib(default=None) - - @frameStart.validator - def check_frame_start(self, _, value): - """Validate if frame start is not larger then end.""" - if value > self.frameEnd: - raise ValueError("frameStart must be smaller " - "or equal then frameEnd") - - @frameEnd.validator - def check_frame_end(self, _, value): - """Validate if frame end is not less then start.""" - if value < self.frameStart: - raise ValueError("frameEnd must be smaller " - "or equal then frameStart") - - @tilesX.validator - def check_tiles_x(self, _, value): - """Validate if tile x isn't less then 1.""" - if not self.tileRendering: - return - if value < 1: - raise ValueError("tile X size cannot be less then 1") - - if value == 1 and self.tilesY == 1: - raise ValueError("both tiles X a Y sizes are set to 1") - - @tilesY.validator - def check_tiles_y(self, _, value): - """Validate if tile y isn't less then 1.""" - if not self.tileRendering: - return - if value < 1: - raise ValueError("tile Y size cannot be less then 1") - - if value == 1 and self.tilesX == 1: - raise ValueError("both tiles X a Y sizes are set to 1") +class CollectRenderDeprecated(DeprecationWarning): + pass -@six.add_metaclass(AbstractMetaContextPlugin) -class AbstractCollectRender(pyblish.api.ContextPlugin): - """Gather all publishable render layers from renderSetup.""" +warnings.simplefilter("always", CollectRenderDeprecated) +warnings.warn( + ( + "Content of 'abstract_collect_render' was moved." 
+ "\nUsing deprecated source of 'abstract_collect_render'. Content was" + " move to 'openpype.pipeline.publish.abstract_collect_render'." + " Please change your imports as soon as possible." + ), + category=CollectRenderDeprecated, + stacklevel=4 +) - order = pyblish.api.CollectorOrder + 0.01 - label = "Collect Render" - sync_workfile_version = False - def __init__(self, *args, **kwargs): - """Constructor.""" - super(AbstractCollectRender, self).__init__(*args, **kwargs) - self._file_path = None - self._asset = legacy_io.Session["AVALON_ASSET"] - self._context = None - - def process(self, context): - """Entry point to collector.""" - self._context = context - for instance in context: - # make sure workfile instance publishing is enabled - try: - if "workfile" in instance.data["families"]: - instance.data["publish"] = True - # TODO merge renderFarm and render.farm - if ("renderFarm" in instance.data["families"] or - "render.farm" in instance.data["families"]): - instance.data["remove"] = True - except KeyError: - # be tolerant if 'families' is missing. - pass - - self._file_path = context.data["currentFile"].replace("\\", "/") - - render_instances = self.get_instances(context) - for render_instance in render_instances: - exp_files = self.get_expected_files(render_instance) - assert exp_files, "no file names were generated, this is bug" - - # if we want to attach render to subset, check if we have AOV's - # in expectedFiles. If so, raise error as we cannot attach AOV - # (considered to be subset on its own) to another subset - if render_instance.attachTo: - assert isinstance(exp_files, list), ( - "attaching multiple AOVs or renderable cameras to " - "subset is not supported" - ) - - frame_start_render = int(render_instance.frameStart) - frame_end_render = int(render_instance.frameEnd) - if (render_instance.ignoreFrameHandleCheck or - int(context.data['frameStartHandle']) == frame_start_render - and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501 - - handle_start = context.data['handleStart'] - handle_end = context.data['handleEnd'] - frame_start = context.data['frameStart'] - frame_end = context.data['frameEnd'] - frame_start_handle = context.data['frameStartHandle'] - frame_end_handle = context.data['frameEndHandle'] - else: - handle_start = 0 - handle_end = 0 - frame_start = frame_start_render - frame_end = frame_end_render - frame_start_handle = frame_start_render - frame_end_handle = frame_end_render - - data = { - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartHandle": frame_start_handle, - "frameEndHandle": frame_end_handle, - "byFrameStep": int(render_instance.frameStep), - - "author": context.data["user"], - # Add source to allow tracing back to the scene from - # which was submitted originally - "expectedFiles": exp_files, - } - if self.sync_workfile_version: - data["version"] = context.data["version"] - - # add additional data - data = self.add_additional_data(data) - render_instance_dict = attr.asdict(render_instance) - - instance = context.create_instance(render_instance.name) - instance.data["label"] = render_instance.label - instance.data.update(render_instance_dict) - instance.data.update(data) - - self.post_collecting_action() - - @abstractmethod - def get_instances(self, context): - """Get all renderable instances and their data. - - Args: - context (pyblish.api.Context): Context object. 
-
-        Returns:
-            list of :class:`RenderInstance`: All collected renderable instances
-                (like render layers, write nodes, etc.)
-
-        """
-        pass
-
-    @abstractmethod
-    def get_expected_files(self, render_instance):
-        """Get list of expected files.
-
-        Returns:
-            list: expected files. This can be either simple list of files with
-                their paths, or list of dictionaries, where key is name of AOV
-                for example and value is list of files for that AOV.
-
-        Example::
-
-            ['/path/to/file.001.exr', '/path/to/file.002.exr']
-
-            or as dictionary:
-
-            [
-                {
-                    "beauty": ['/path/to/beauty.001.exr', ...],
-                    "mask": ['/path/to/mask.001.exr']
-                }
-            ]
-
-        """
-        pass
-
-    def add_additional_data(self, data):
-        """Add additional data to collected instance.
-
-        This can be overridden by host implementation to add custom
-        additional data.
-
-        """
-        return data
-
-    def post_collecting_action(self):
-        """Execute some code after collection is done.
-
-        This is useful for example for restoring current render layer.
-
-        """
-        pass
+
+__all__ = (
+    "AbstractCollectRender",
+    "RenderInstance"
+)
diff --git a/openpype/lib/abstract_expected_files.py b/openpype/lib/abstract_expected_files.py
index f9f3c17ef5..f24d844fe5 100644
--- a/openpype/lib/abstract_expected_files.py
+++ b/openpype/lib/abstract_expected_files.py
@@ -1,53 +1,32 @@
 # -*- coding: utf-8 -*-
-"""Abstract ExpectedFile class definition."""
-from abc import ABCMeta, abstractmethod
-
-import six
+"""Content was moved to 'openpype.pipeline.publish.abstract_expected_files'.
+
+Please change your imports as soon as possible.
+
+File will probably be removed in OpenPype 3.14.*
+"""
+
+import warnings
+from openpype.pipeline.publish import ExpectedFiles
 
 
-@six.add_metaclass(ABCMeta)
-class ExpectedFiles:
-    """Class grouping functionality for all supported renderers.
-
-    Attributes:
-        multipart (bool): Flag if multipart exrs are used.
-
-    """
-
-    multipart = False
-
-    @abstractmethod
-    def get(self, render_instance):
-        """Get expected files for given renderer and render layer.
-
-        This method should return dictionary of all files we are expecting
-        to be rendered from the host. Usually `render_instance` corresponds
-        to *render layer*. Result can be either flat list with the file
-        paths or it can be list of dictionaries. Each key corresponds to
-        for example AOV name or channel, etc.
-
-        Example::
-
-            ['/path/to/file.001.exr', '/path/to/file.002.exr']
-
-            or as dictionary:
-
-            [
-                {
-                    "beauty": ['/path/to/beauty.001.exr', ...],
-                    "mask": ['/path/to/mask.001.exr']
-                }
-            ]
+class ExpectedFilesDeprecated(DeprecationWarning):
+    pass
 
-        Args:
-            render_instance (:class:`RenderInstance`): Data passed from
-                collector to determine files. This should be instance of
-                :class:`abstract_collect_render.RenderInstance`
+warnings.simplefilter("always", ExpectedFilesDeprecated)
+warnings.warn(
+    (
+        "Content of 'abstract_expected_files' was moved."
+        "\nUsing deprecated source of 'abstract_expected_files'. Content was"
+        " moved to 'openpype.pipeline.publish.abstract_expected_files'."
+        " Please change your imports as soon as possible."
+    ),
+    category=ExpectedFilesDeprecated,
+    stacklevel=4
+)
 
-        Returns:
-            list: Full paths to expected rendered files.
-            list of dict: Path to expected rendered files categorized by
-                AOVs, etc.
- """ - raise NotImplementedError() +__all__ = ( + "ExpectedFiles", +) diff --git a/openpype/lib/abstract_metaplugins.py b/openpype/lib/abstract_metaplugins.py index f8163956ad..346b5d86b3 100644 --- a/openpype/lib/abstract_metaplugins.py +++ b/openpype/lib/abstract_metaplugins.py @@ -1,10 +1,35 @@ -from abc import ABCMeta -from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin +"""Content was moved to 'openpype.pipeline.publish.publish_plugins'. + +Please change your imports as soon as possible. + +File will be probably removed in OpenPype 3.14.* +""" + +import warnings +from openpype.pipeline.publish import ( + AbstractMetaInstancePlugin, + AbstractMetaContextPlugin +) -class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin): +class MetaPluginsDeprecated(DeprecationWarning): pass -class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin): - pass +warnings.simplefilter("always", MetaPluginsDeprecated) +warnings.warn( + ( + "Content of 'abstract_metaplugins' was moved." + "\nUsing deprecated source of 'abstract_metaplugins'. Content was" + " moved to 'openpype.pipeline.publish.publish_plugins'." + " Please change your imports as soon as possible." + ), + category=MetaPluginsDeprecated, + stacklevel=4 +) + + +__all__ = ( + "AbstractMetaInstancePlugin", + "AbstractMetaContextPlugin", +) diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py index 3fbc05ee88..6d339f058f 100644 --- a/openpype/lib/anatomy.py +++ b/openpype/lib/anatomy.py @@ -1,1260 +1,38 @@ -import os -import re -import copy -import platform -import collections -import numbers +"""Code related to project Anatomy was moved +to 'openpype.pipeline.anatomy' please change your imports as soon as +possible. File will be probably removed in OpenPype 3.14.* +""" -from openpype.settings.lib import ( - get_default_anatomy_settings, - get_anatomy_settings -) -from .path_templates import ( - TemplateUnsolved, - TemplateResult, - TemplatesDict, - FormatObject, -) -from .log import PypeLogger - -log = PypeLogger().get_logger(__name__) - -try: - StringType = basestring -except NameError: - StringType = str +import warnings +import functools -class ProjectNotSet(Exception): - """Exception raised when is created Anatomy without project name.""" +class AnatomyDeprecatedWarning(DeprecationWarning): + pass -class RootCombinationError(Exception): - """This exception is raised when templates has combined root types.""" +def anatomy_deprecated(func): + """Mark functions as deprecated. - def __init__(self, roots): - joined_roots = ", ".join( - ["\"{}\"".format(_root) for _root in roots] - ) - # TODO better error message - msg = ( - "Combination of root with and" - " without root name in AnatomyTemplates. {}" - ).format(joined_roots) - - super(RootCombinationError, self).__init__(msg) - - -class Anatomy: - """Anatomy module helps to keep project settings. - - Wraps key project specifications, AnatomyTemplates and Roots. - - Args: - project_name (str): Project name to look on overrides. + It will result in a warning being emitted when the function is used. """ - root_key_regex = re.compile(r"{(root?[^}]+)}") - root_name_regex = re.compile(r"root\[([^]]+)\]") - - def __init__(self, project_name=None, site_name=None): - if not project_name: - project_name = os.environ.get("AVALON_PROJECT") - - if not project_name: - raise ProjectNotSet(( - "Implementation bug: Project name is not set. Anatomy requires" - " to load data for specific project." 
- )) - - self.project_name = project_name - - self._data = self._prepare_anatomy_data( - get_anatomy_settings(project_name, site_name) + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", AnatomyDeprecatedWarning) + warnings.warn( + ( + "Deprecated import of 'Anatomy'." + " Class was moved to 'openpype.pipeline.anatomy'." + " Please change your imports of Anatomy in codebase." + ), + category=AnatomyDeprecatedWarning ) - self._site_name = site_name - self._templates_obj = AnatomyTemplates(self) - self._roots_obj = Roots(self) + return func(*args, **kwargs) + return new_func - # Anatomy used as dictionary - # - implemented only getters returning copy - def __getitem__(self, key): - return copy.deepcopy(self._data[key]) - def get(self, key, default=None): - return copy.deepcopy(self._data).get(key, default) - - def keys(self): - return copy.deepcopy(self._data).keys() - - def values(self): - return copy.deepcopy(self._data).values() - - def items(self): - return copy.deepcopy(self._data).items() - - @staticmethod - def default_data(): - """Default project anatomy data. - - Always return fresh loaded data. May be used as data for new project. - - Not used inside Anatomy itself. - """ - return get_default_anatomy_settings(clear_metadata=False) - - @staticmethod - def _prepare_anatomy_data(anatomy_data): - """Prepare anatomy data for further processing. - - Method added to replace `{task}` with `{task[name]}` in templates. - """ - templates_data = anatomy_data.get("templates") - if templates_data: - # Replace `{task}` with `{task[name]}` in templates - value_queue = collections.deque() - value_queue.append(templates_data) - while value_queue: - item = value_queue.popleft() - if not isinstance(item, dict): - continue - - for key in tuple(item.keys()): - value = item[key] - if isinstance(value, dict): - value_queue.append(value) - - elif isinstance(value, StringType): - item[key] = value.replace("{task}", "{task[name]}") - return anatomy_data - - def reset(self): - """Reset values of cached data in templates and roots objects.""" - self._data = self._prepare_anatomy_data( - get_anatomy_settings(self.project_name, self._site_name) - ) - self.templates_obj.reset() - self.roots_obj.reset() - - @property - def templates(self): - """Wrap property `templates` of Anatomy's AnatomyTemplates instance.""" - return self._templates_obj.templates - - @property - def templates_obj(self): - """Return `AnatomyTemplates` object of current Anatomy instance.""" - return self._templates_obj - - def format(self, *args, **kwargs): - """Wrap `format` method of Anatomy's `templates_obj`.""" - return self._templates_obj.format(*args, **kwargs) - - def format_all(self, *args, **kwargs): - """Wrap `format_all` method of Anatomy's `templates_obj`.""" - return self._templates_obj.format_all(*args, **kwargs) - - @property - def roots(self): - """Wrap `roots` property of Anatomy's `roots_obj`.""" - return self._roots_obj.roots - - @property - def roots_obj(self): - """Return `Roots` object of current Anatomy instance.""" - return self._roots_obj - - def root_environments(self): - """Return OPENPYPE_ROOT_* environments for current project in dict.""" - return self._roots_obj.root_environments() - - def root_environmets_fill_data(self, template=None): - """Environment variable values in dictionary for rootless path. - - Args: - template (str): Template for environment variable key fill. - By default is set to `"${}"`. 
- """ - return self.roots_obj.root_environmets_fill_data(template) - - def find_root_template_from_path(self, *args, **kwargs): - """Wrapper for Roots `find_root_template_from_path`.""" - return self.roots_obj.find_root_template_from_path(*args, **kwargs) - - def path_remapper(self, *args, **kwargs): - """Wrapper for Roots `path_remapper`.""" - return self.roots_obj.path_remapper(*args, **kwargs) - - def all_root_paths(self): - """Wrapper for Roots `all_root_paths`.""" - return self.roots_obj.all_root_paths() - - def set_root_environments(self): - """Set OPENPYPE_ROOT_* environments for current project.""" - self._roots_obj.set_root_environments() - - def root_names(self): - """Return root names for current project.""" - return self.root_names_from_templates(self.templates) - - def _root_keys_from_templates(self, data): - """Extract root key from templates in data. - - Args: - data (dict): Data that may contain templates as string. - - Return: - set: Set of all root names from templates as strings. - - Output example: `{"root[work]", "root[publish]"}` - """ - - output = set() - if isinstance(data, dict): - for value in data.values(): - for root in self._root_keys_from_templates(value): - output.add(root) - - elif isinstance(data, str): - for group in re.findall(self.root_key_regex, data): - output.add(group) - - return output - - def root_value_for_template(self, template): - """Returns value of root key from template.""" - root_templates = [] - for group in re.findall(self.root_key_regex, template): - root_templates.append("{" + group + "}") - - if not root_templates: - return None - - return root_templates[0].format(**{"root": self.roots}) - - def root_names_from_templates(self, templates): - """Extract root names form anatomy templates. - - Returns None if values in templates contain only "{root}". - Empty list is returned if there is no "root" in templates. - Else returns all root names from templates in list. - - RootCombinationError is raised when templates contain both root types, - basic "{root}" and with root name specification "{root[work]}". - - Args: - templates (dict): Anatomy templates where roots are not filled. - - Return: - list/None: List of all root names from templates as strings when - multiroot setup is used, otherwise None is returned. - """ - roots = list(self._root_keys_from_templates(templates)) - # Return empty list if no roots found in templates - if not roots: - return roots - - # Raise exception when root keys have roots with and without root name. - # Invalid output example: ["root", "root[project]", "root[render]"] - if len(roots) > 1 and "root" in roots: - raise RootCombinationError(roots) - - # Return None if "root" without root name in templates - if len(roots) == 1 and roots[0] == "root": - return None - - names = set() - for root in roots: - for group in re.findall(self.root_name_regex, root): - names.add(group) - return list(names) - - def fill_root(self, template_path): - """Fill template path where is only "root" key unfilled. - - Args: - template_path (str): Path with "root" key in. - Example path: "{root}/projects/MyProject/Shot01/Lighting/..." - - Return: - str: formatted path - """ - # NOTE does not care if there are different keys than "root" - return template_path.format(**{"root": self.roots}) - - @classmethod - def fill_root_with_path(cls, rootless_path, root_path): - """Fill path without filled "root" key with passed path. 
- - This is helper to fill root with different directory path than anatomy - has defined no matter if is single or multiroot. - - Output path is same as input path if `rootless_path` does not contain - unfilled root key. - - Args: - rootless_path (str): Path without filled "root" key. Example: - "{root[work]}/MyProject/..." - root_path (str): What should replace root key in `rootless_path`. - - Returns: - str: Path with filled root. - """ - output = str(rootless_path) - for group in re.findall(cls.root_key_regex, rootless_path): - replacement = "{" + group + "}" - output = output.replace(replacement, root_path) - - return output - - def replace_root_with_env_key(self, filepath, template=None): - """Replace root of path with environment key. - - # Example: - ## Project with roots: - ``` - { - "nas": { - "windows": P:/projects", - ... - } - ... - } - ``` - - ## Entered filepath - "P:/projects/project/asset/task/animation_v001.ma" - - ## Entered template - "<{}>" - - ## Output - "/project/asset/task/animation_v001.ma" - - Args: - filepath (str): Full file path where root should be replaced. - template (str): Optional template for environment key. Must - have one index format key. - Default value if not entered: "${}" - - Returns: - str: Path where root is replaced with environment root key. - - Raise: - ValueError: When project's roots were not found in entered path. - """ - success, rootless_path = self.find_root_template_from_path(filepath) - if not success: - raise ValueError( - "{}: Project's roots were not found in path: {}".format( - self.project_name, filepath - ) - ) - - data = self.root_environmets_fill_data(template) - return rootless_path.format(**data) - - -class AnatomyTemplateUnsolved(TemplateUnsolved): - """Exception for unsolved template when strict is set to True.""" - - msg = "Anatomy template \"{0}\" is unsolved.{1}{2}" - - -class AnatomyTemplateResult(TemplateResult): - rootless = None - - def __new__(cls, result, rootless_path): - new_obj = super(AnatomyTemplateResult, cls).__new__( - cls, - str(result), - result.template, - result.solved, - result.used_values, - result.missing_keys, - result.invalid_types - ) - new_obj.rootless = rootless_path - return new_obj - - def validate(self): - if not self.solved: - raise AnatomyTemplateUnsolved( - self.template, - self.missing_keys, - self.invalid_types - ) - - -class AnatomyTemplates(TemplatesDict): - inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})") - inner_key_name_pattern = re.compile(r"\{@(.*?[^{}0]*)\}") - - def __init__(self, anatomy): - super(AnatomyTemplates, self).__init__() - self.anatomy = anatomy - self.loaded_project = None - - def __getitem__(self, key): - return self.templates[key] - - def get(self, key, default=None): - return self.templates.get(key, default) - - def reset(self): - self._raw_templates = None - self._templates = None - self._objected_templates = None - - @property - def project_name(self): - return self.anatomy.project_name - - @property - def roots(self): - return self.anatomy.roots - - @property - def templates(self): - self._validate_discovery() - return self._templates - - @property - def objected_templates(self): - self._validate_discovery() - return self._objected_templates - - def _validate_discovery(self): - if self.project_name != self.loaded_project: - self.reset() - - if self._templates is None: - self._discover() - self.loaded_project = self.project_name - - def _format_value(self, value, data): - if isinstance(value, RootItem): - return self._solve_dict(value, data) - - result = 
super(AnatomyTemplates, self)._format_value(value, data) - if isinstance(result, TemplateResult): - rootless_path = self._rootless_path(result, data) - result = AnatomyTemplateResult(result, rootless_path) - return result - - def set_templates(self, templates): - if not templates: - self.reset() - return - - self._raw_templates = copy.deepcopy(templates) - templates = copy.deepcopy(templates) - v_queue = collections.deque() - v_queue.append(templates) - while v_queue: - item = v_queue.popleft() - if not isinstance(item, dict): - continue - - for key in tuple(item.keys()): - value = item[key] - if isinstance(value, dict): - v_queue.append(value) - - elif ( - isinstance(value, StringType) - and "{task}" in value - ): - item[key] = value.replace("{task}", "{task[name]}") - - solved_templates = self.solve_template_inner_links(templates) - self._templates = solved_templates - self._objected_templates = self.create_ojected_templates( - solved_templates - ) - - def default_templates(self): - """Return default templates data with solved inner keys.""" - return self.solve_template_inner_links( - self.anatomy["templates"] - ) - - def _discover(self): - """ Loads anatomy templates from yaml. - Default templates are loaded if project is not set or project does - not have set it's own. - TODO: create templates if not exist. - - Returns: - TemplatesResultDict: Contain templates data for current project of - default templates. - """ - - if self.project_name is None: - # QUESTION create project specific if not found? - raise AssertionError(( - "Project \"{0}\" does not have his own templates." - " Trying to use default." - ).format(self.project_name)) - - self.set_templates(self.anatomy["templates"]) - - @classmethod - def replace_inner_keys(cls, matches, value, key_values, key): - """Replacement of inner keys in template values.""" - for match in matches: - anatomy_sub_keys = ( - cls.inner_key_name_pattern.findall(match) - ) - if key in anatomy_sub_keys: - raise ValueError(( - "Unsolvable recursion in inner keys, " - "key: \"{}\" is in his own value." - " Can't determine source, please check Anatomy templates." - ).format(key)) - - for anatomy_sub_key in anatomy_sub_keys: - replace_value = key_values.get(anatomy_sub_key) - if replace_value is None: - raise KeyError(( - "Anatomy templates can't be filled." - " Anatomy key `{0}` has" - " invalid inner key `{1}`." - ).format(key, anatomy_sub_key)) - - valid = isinstance(replace_value, (numbers.Number, StringType)) - if not valid: - raise ValueError(( - "Anatomy templates can't be filled." - " Anatomy key `{0}` has" - " invalid inner key `{1}`" - " with value `{2}`." - ).format(key, anatomy_sub_key, str(replace_value))) - - value = value.replace(match, str(replace_value)) - - return value - - @classmethod - def prepare_inner_keys(cls, key_values): - """Check values of inner keys. - - Check if inner key exist in template group and has valid value. - It is also required to avoid infinite loop with unsolvable recursion - when first inner key's value refers to second inner key's value where - first is used. 
- """ - keys_to_solve = set(key_values.keys()) - while True: - found = False - for key in tuple(keys_to_solve): - value = key_values[key] - - if isinstance(value, StringType): - matches = cls.inner_key_pattern.findall(value) - if not matches: - keys_to_solve.remove(key) - continue - - found = True - key_values[key] = cls.replace_inner_keys( - matches, value, key_values, key - ) - continue - - elif not isinstance(value, dict): - keys_to_solve.remove(key) - continue - - subdict_found = False - for _key, _value in tuple(value.items()): - matches = cls.inner_key_pattern.findall(_value) - if not matches: - continue - - subdict_found = True - found = True - key_values[key][_key] = cls.replace_inner_keys( - matches, _value, key_values, - "{}.{}".format(key, _key) - ) - - if not subdict_found: - keys_to_solve.remove(key) - - if not found: - break - - return key_values - - @classmethod - def solve_template_inner_links(cls, templates): - """Solve templates inner keys identified by "{@*}". - - Process is split into 2 parts. - First is collecting all global keys (keys in top hierarchy where value - is not dictionary). All global keys are set for all group keys (keys - in top hierarchy where value is dictionary). Value of a key is not - overridden in group if already contain value for the key. - - In second part all keys with "at" symbol in value are replaced with - value of the key afterward "at" symbol from the group. - - Args: - templates (dict): Raw templates data. - - Example: - templates:: - key_1: "value_1", - key_2: "{@key_1}/{filling_key}" - - group_1: - key_3: "value_3/{@key_2}" - - group_2: - key_2": "value_2" - key_4": "value_4/{@key_2}" - - output:: - key_1: "value_1" - key_2: "value_1/{filling_key}" - - group_1: { - key_1: "value_1" - key_2: "value_1/{filling_key}" - key_3: "value_3/value_1/{filling_key}" - - group_2: { - key_1: "value_1" - key_2: "value_2" - key_4: "value_3/value_2" - """ - default_key_values = templates.pop("defaults", {}) - for key, value in tuple(templates.items()): - if isinstance(value, dict): - continue - default_key_values[key] = templates.pop(key) - - # Pop "others" key before before expected keys are processed - other_templates = templates.pop("others") or {} - - keys_by_subkey = {} - for sub_key, sub_value in templates.items(): - key_values = {} - key_values.update(default_key_values) - key_values.update(sub_value) - keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) - - for sub_key, sub_value in other_templates.items(): - if sub_key in keys_by_subkey: - log.warning(( - "Key \"{}\" is duplicated in others. Skipping." 
- ).format(sub_key)) - continue - - key_values = {} - key_values.update(default_key_values) - key_values.update(sub_value) - keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) - - default_keys_by_subkeys = cls.prepare_inner_keys(default_key_values) - - for key, value in default_keys_by_subkeys.items(): - keys_by_subkey[key] = value - - return keys_by_subkey - - def _dict_to_subkeys_list(self, subdict, pre_keys=None): - if pre_keys is None: - pre_keys = [] - output = [] - for key in subdict: - value = subdict[key] - result = list(pre_keys) - result.append(key) - if isinstance(value, dict): - for item in self._dict_to_subkeys_list(value, result): - output.append(item) - else: - output.append(result) - return output - - def _keys_to_dicts(self, key_list, value): - if not key_list: - return None - if len(key_list) == 1: - return {key_list[0]: value} - return {key_list[0]: self._keys_to_dicts(key_list[1:], value)} - - def _rootless_path(self, result, final_data): - used_values = result.used_values - missing_keys = result.missing_keys - template = result.template - invalid_types = result.invalid_types - if ( - "root" not in used_values - or "root" in missing_keys - or "{root" not in template - ): - return - - for invalid_type in invalid_types: - if "root" in invalid_type: - return - - root_keys = self._dict_to_subkeys_list({"root": used_values["root"]}) - if not root_keys: - return - - output = str(result) - for used_root_keys in root_keys: - if not used_root_keys: - continue - - used_value = used_values - root_key = None - for key in used_root_keys: - used_value = used_value[key] - if root_key is None: - root_key = key - else: - root_key += "[{}]".format(key) - - root_key = "{" + root_key + "}" - output = output.replace(str(used_value), root_key) - - return output - - def format(self, data, strict=True): - copy_data = copy.deepcopy(data) - roots = self.roots - if roots: - copy_data["root"] = roots - result = super(AnatomyTemplates, self).format(copy_data) - result.strict = strict - return result - - def format_all(self, in_data, only_keys=True): - """ Solves templates based on entered data. - - Args: - data (dict): Containing keys to be filled into template. - - Returns: - TemplatesResultDict: Output `TemplateResult` have `strict` - attribute set to False so accessing unfilled keys in templates - won't raise any exceptions. - """ - return self.format(in_data, strict=False) - - -class RootItem(FormatObject): - """Represents one item or roots. - - Holds raw data of root item specification. Raw data contain value - for each platform, but current platform value is used when object - is used for formatting of template. - - Args: - root_raw_data (dict): Dictionary containing root values by platform - names. ["windows", "linux" and "darwin"] - name (str, optional): Root name which is representing. Used with - multi root setup otherwise None value is expected. - parent_keys (list, optional): All dictionary parent keys. Values of - `parent_keys` are used for get full key which RootItem is - representing. Used for replacing root value in path with - formattable key. e.g. parent_keys == ["work"] -> {root[work]} - parent (object, optional): It is expected to be `Roots` object. - Value of `parent` won't affect code logic much. 
- """ - - def __init__( - self, root_raw_data, name=None, parent_keys=None, parent=None - ): - lowered_platform_keys = {} - for key, value in root_raw_data.items(): - lowered_platform_keys[key.lower()] = value - self.raw_data = lowered_platform_keys - self.cleaned_data = self._clean_roots(lowered_platform_keys) - self.name = name - self.parent_keys = parent_keys or [] - self.parent = parent - - self.available_platforms = list(lowered_platform_keys.keys()) - self.value = lowered_platform_keys.get(platform.system().lower()) - self.clean_value = self.clean_root(self.value) - - def __format__(self, *args, **kwargs): - return self.value.__format__(*args, **kwargs) - - def __str__(self): - return str(self.value) - - def __repr__(self): - return self.__str__() - - def __getitem__(self, key): - if isinstance(key, numbers.Number): - return self.value[key] - - additional_info = "" - if self.parent and self.parent.project_name: - additional_info += " for project \"{}\"".format( - self.parent.project_name - ) - - raise AssertionError( - "Root key \"{}\" is missing{}.".format( - key, additional_info - ) - ) - - def full_key(self): - """Full key value for dictionary formatting in template. - - Returns: - str: Return full replacement key for formatting. This helps when - multiple roots are set. In that case e.g. `"root[work]"` is - returned. - """ - if not self.name: - return "root" - - joined_parent_keys = "".join( - ["[{}]".format(key) for key in self.parent_keys] - ) - return "root{}".format(joined_parent_keys) - - def clean_path(self, path): - """Just replace backslashes with forward slashes.""" - return str(path).replace("\\", "/") - - def clean_root(self, root): - """Makes sure root value does not end with slash.""" - if root: - root = self.clean_path(root) - while root.endswith("/"): - root = root[:-1] - return root - - def _clean_roots(self, raw_data): - """Clean all values of raw root item values.""" - cleaned = {} - for key, value in raw_data.items(): - cleaned[key] = self.clean_root(value) - return cleaned - - def path_remapper(self, path, dst_platform=None, src_platform=None): - """Remap path for specific platform. - - Args: - path (str): Source path which need to be remapped. - dst_platform (str, optional): Specify destination platform - for which remapping should happen. - src_platform (str, optional): Specify source platform. This is - recommended to not use and keep unset until you really want - to use specific platform. - roots (dict/RootItem/None, optional): It is possible to remap - path with different roots then instance where method was - called has. - - Returns: - str/None: When path does not contain known root then - None is returned else returns remapped path with "{root}" - or "{root[]}". 
- """ - cleaned_path = self.clean_path(path) - if dst_platform: - dst_root_clean = self.cleaned_data.get(dst_platform) - if not dst_root_clean: - key_part = "" - full_key = self.full_key() - if full_key != "root": - key_part += "\"{}\" ".format(full_key) - - log.warning( - "Root {}miss platform \"{}\" definition.".format( - key_part, dst_platform - ) - ) - return None - - if cleaned_path.startswith(dst_root_clean): - return cleaned_path - - if src_platform: - src_root_clean = self.cleaned_data.get(src_platform) - if src_root_clean is None: - log.warning( - "Root \"{}\" miss platform \"{}\" definition.".format( - self.full_key(), src_platform - ) - ) - return None - - if not cleaned_path.startswith(src_root_clean): - return None - - subpath = cleaned_path[len(src_root_clean):] - if dst_platform: - # `dst_root_clean` is used from upper condition - return dst_root_clean + subpath - return self.clean_value + subpath - - result, template = self.find_root_template_from_path(path) - if not result: - return None - - def parent_dict(keys, value): - if not keys: - return value - - key = keys.pop(0) - return {key: parent_dict(keys, value)} - - if dst_platform: - format_value = parent_dict(list(self.parent_keys), dst_root_clean) - else: - format_value = parent_dict(list(self.parent_keys), self.value) - - return template.format(**{"root": format_value}) - - def find_root_template_from_path(self, path): - """Replaces known root value with formattable key in path. - - All platform values are checked for this replacement. - - Args: - path (str): Path where root value should be found. - - Returns: - tuple: Tuple contain 2 values: `success` (bool) and `path` (str). - When success it True then path should contain replaced root - value with formattable key. - - Example: - When input path is:: - "C:/windows/path/root/projects/my_project/file.ext" - - And raw data of item looks like:: - { - "windows": "C:/windows/path/root", - "linux": "/mount/root" - } - - Output will be:: - (True, "{root}/projects/my_project/file.ext") - - If any of raw data value wouldn't match path's root output is:: - (False, "C:/windows/path/root/projects/my_project/file.ext") - """ - result = False - output = str(path) - - root_paths = list(self.cleaned_data.values()) - mod_path = self.clean_path(path) - for root_path in root_paths: - # Skip empty paths - if not root_path: - continue - - if mod_path.startswith(root_path): - result = True - replacement = "{" + self.full_key() + "}" - output = replacement + mod_path[len(root_path):] - break - - return (result, output) - - -class Roots: - """Object which should be used for formatting "root" key in templates. - - Args: - anatomy Anatomy: Anatomy object created for a specific project. - """ - - env_prefix = "OPENPYPE_PROJECT_ROOT" - roots_filename = "roots.json" - - def __init__(self, anatomy): - self.anatomy = anatomy - self.loaded_project = None - self._roots = None - - def __format__(self, *args, **kwargs): - return self.roots.__format__(*args, **kwargs) - - def __getitem__(self, key): - return self.roots[key] - - def reset(self): - """Reset current roots value.""" - self._roots = None - - def path_remapper( - self, path, dst_platform=None, src_platform=None, roots=None - ): - """Remap path for specific platform. - - Args: - path (str): Source path which need to be remapped. - dst_platform (str, optional): Specify destination platform - for which remapping should happen. - src_platform (str, optional): Specify source platform. 
Keeping it unset is - recommended unless you really need to force - a specific platform. - roots (dict/RootItem/None, optional): Allows remapping the path - with roots different than those of the instance the method - was called on. - - Returns: - str/None: None when the path does not contain a known root, - otherwise the remapped path with "{root}" - or "{root[]}". - """ - if roots is None: - roots = self.roots - - if roots is None: - raise ValueError("Roots are not set. Can't find path.") - - if "{root" in path: - path = path.format(**{"root": roots}) - # If `dst_platform` is not specified then return else continue. - if not dst_platform: - return path - - if isinstance(roots, RootItem): - return roots.path_remapper(path, dst_platform, src_platform) - - for _root in roots.values(): - result = self.path_remapper( - path, dst_platform, src_platform, _root - ) - if result is not None: - return result - - def find_root_template_from_path(self, path, roots=None): - """Find root value in entered path and replace it with formatting key. - - Args: - path (str): Source path where root will be searched. - roots (Roots/dict, optional): It is possible to use roots - different than those of the instance the method was - called on. - - Returns: - tuple: Bool representing success as the first value and the - path, with or without the root replaced by a formatting - key, as the second value. - - Raises: - ValueError: When roots are not entered and can't be loaded. - """ - if roots is None: - log.debug( - "Looking for matching root in path \"{}\".".format(path) - ) - roots = self.roots - - if roots is None: - raise ValueError("Roots are not set. Can't find path.") - - if isinstance(roots, RootItem): - return roots.find_root_template_from_path(path) - - for root_name, _root in roots.items(): - success, result = self.find_root_template_from_path(path, _root) - if success: - log.info("Found match in root \"{}\".".format(root_name)) - return success, result - - log.warning("No matching root was found in current setting.") - return (False, path) - - def set_root_environments(self): - """Set root environments for current project.""" - for key, value in self.root_environments().items(): - os.environ[key] = value - - def root_environments(self): - """Use root keys to create unique keys for environment variables. - - Concatenates the prefix "OPENPYPE_ROOT" with root keys to create - unique keys. - - Returns: - dict: Result is a `{(str): (str)}` dictionary where each key is - the unique key concatenated from root keys and each value is - the root value for the current platform.
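Both `path_remapper` implementations above reduce to one core step: strip the source platform's root prefix and prepend the destination platform's root. A minimal sketch of that step, using hypothetical root values rather than a real project configuration:

```python
def remap_root(path, roots_by_platform, src_platform, dst_platform):
    """Swap one platform's root prefix for another's.

    Returns None when the path does not start with the source root,
    mirroring the behaviour of the methods above. The plain prefix
    check is the same one the original code uses.
    """
    clean = str(path).replace("\\", "/")
    src_root = roots_by_platform[src_platform].rstrip("/")
    dst_root = roots_by_platform[dst_platform].rstrip("/")
    if not clean.startswith(src_root):
        return None
    return dst_root + clean[len(src_root):]


roots = {
    "windows": "P:/projects",
    "linux": "/mnt/share/projects",
}
print(remap_root("P:/projects/my_project/file.ext", roots, "windows", "linux"))
# /mnt/share/projects/my_project/file.ext
```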
- - Example: - With raw root values:: - "work": { - "windows": "P:/projects/work", - "linux": "/mnt/share/projects/work", - "darwin": "/darwin/path/work" - }, - "publish": { - "windows": "P:/projects/publish", - "linux": "/mnt/share/projects/publish", - "darwin": "/darwin/path/publish" - } - - Result on windows platform:: - { - "OPENPYPE_ROOT_WORK": "P:/projects/work", - "OPENPYPE_ROOT_PUBLISH": "P:/projects/publish" - } - - Short example when multiroot is not used:: - { - "OPENPYPE_ROOT": "P:/projects" - } - """ - return self._root_environments() - - def all_root_paths(self, roots=None): - """Return all paths for all roots of all platforms.""" - if roots is None: - roots = self.roots - - output = [] - if isinstance(roots, RootItem): - for value in roots.raw_data.values(): - output.append(value) - return output - - for _roots in roots.values(): - output.extend(self.all_root_paths(_roots)) - return output - - def _root_environments(self, keys=None, roots=None): - if not keys: - keys = [] - if roots is None: - roots = self.roots - - if isinstance(roots, RootItem): - key_items = [self.env_prefix] - for _key in keys: - key_items.append(_key.upper()) - - key = "_".join(key_items) - # Make sure key and value does not contain unicode - # - can happen in Python 2 hosts - return {str(key): str(roots.value)} - - output = {} - for _key, _value in roots.items(): - _keys = list(keys) - _keys.append(_key) - output.update(self._root_environments(_keys, _value)) - return output - - def root_environmets_fill_data(self, template=None): - """Environment variable values in dictionary for rootless path. - - Args: - template (str): Template for environment variable key fill. - By default is set to `"${}"`. - """ - if template is None: - template = "${}" - return self._root_environmets_fill_data(template) - - def _root_environmets_fill_data(self, template, keys=None, roots=None): - if keys is None and roots is None: - return { - "root": self._root_environmets_fill_data( - template, [], self.roots - ) - } - - if isinstance(roots, RootItem): - key_items = [Roots.env_prefix] - for _key in keys: - key_items.append(_key.upper()) - key = "_".join(key_items) - return template.format(key) - - output = {} - for key, value in roots.items(): - _keys = list(keys) - _keys.append(key) - output[key] = self._root_environmets_fill_data( - template, _keys, value - ) - return output - - @property - def project_name(self): - """Return project name which will be used for loading root values.""" - return self.anatomy.project_name - - @property - def roots(self): - """Property for filling "root" key in templates. - - This property returns roots for current project or default root values. - Warning: - Default roots value may cause issues when project use different - roots settings. That may happen when project use multiroot - templates but default roots miss their keys. - """ - if self.project_name != self.loaded_project: - self._roots = None - - if self._roots is None: - self._roots = self._discover() - self.loaded_project = self.project_name - return self._roots - - def _discover(self): - """ Loads current project's roots or default. - - Default roots are loaded if project override's does not contain roots. - - Returns: - `RootItem` or `dict` with multiple `RootItem`s when multiroot - setting is used. - """ - - return self._parse_dict(self.anatomy["roots"], parent=self) - - @staticmethod - def _parse_dict(data, key=None, parent_keys=None, parent=None): - """Parse roots raw data into RootItem or dictionary with RootItems. 
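The environment-key flattening shown in the `root_environments` example above can be condensed into a short sketch that uses a plain nested dict in place of `RootItem` objects. Note that the sketch follows the `env_prefix` attribute defined earlier ("OPENPYPE_PROJECT_ROOT"), while the docstring example shows the shorter "OPENPYPE_ROOT" prefix:

```python
ENV_PREFIX = "OPENPYPE_PROJECT_ROOT"  # mirrors Roots.env_prefix above


def root_environments(roots, keys=None):
    """Flatten nested root values into unique environment variable keys."""
    keys = keys or []
    if isinstance(roots, str):
        # Leaf value: join the prefix with upper-cased parent keys.
        return {"_".join([ENV_PREFIX] + [k.upper() for k in keys]): roots}
    output = {}
    for key, value in roots.items():
        output.update(root_environments(value, keys + [key]))
    return output


print(root_environments({
    "work": "P:/projects/work",
    "publish": "P:/projects/publish",
}))
# {'OPENPYPE_PROJECT_ROOT_WORK': 'P:/projects/work',
#  'OPENPYPE_PROJECT_ROOT_PUBLISH': 'P:/projects/publish'}
```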
- - Converting raw roots data to `RootItem` helps to handle platform keys. - This method is recursive to be able to handle multiroot setup and - is static to be able to load default roots without creating a new object. - - Args: - data (dict): Should contain raw roots data to be parsed. - key (str, optional): Current root key. Set by recursion. - parent_keys (list): Parent dictionary keys. Set by recursion. - parent (Roots, optional): Parent object set in `RootItem` - helps to keep RootItem instance updated with `Roots` object. - - Returns: - `RootItem` or `dict` with multiple `RootItem`s when multiroot - setting is used. - """ - if not parent_keys: - parent_keys = [] - is_last = False - for value in data.values(): - if isinstance(value, StringType): - is_last = True - break - - if is_last: - return RootItem(data, key, parent_keys, parent=parent) - - output = {} - for _key, value in data.items(): - _parent_keys = list(parent_keys) - _parent_keys.append(_key) - output[_key] = Roots._parse_dict(value, _key, _parent_keys, parent) - return output +@anatomy_deprecated +def Anatomy(*args, **kwargs): + from openpype.pipeline.anatomy import Anatomy + return Anatomy(*args, **kwargs) diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index a81bdeca0f..f46197e15f 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -11,6 +11,10 @@ from abc import ABCMeta, abstractmethod import six +from openpype.client import ( + get_project, + get_asset_by_name, +) from openpype.settings import ( get_system_settings, get_project_settings, @@ -20,10 +24,7 @@ from openpype.settings.constants import ( METADATA_KEYS, M_DYNAMIC_KEY_LABEL ) -from . import ( - PypeLogger, - Anatomy -) +from . import PypeLogger from .profiles_filtering import filter_profiles from .local_settings import get_openpype_username from .avalon_context import ( @@ -664,7 +665,11 @@ class ApplicationExecutable: if os.path.exists(plist_filepath): import plistlib - parsed_plist = plistlib.readPlist(plist_filepath) + if hasattr(plistlib, "load"): + with open(plist_filepath, "rb") as stream: + parsed_plist = plistlib.load(stream) + else: + parsed_plist = plistlib.readPlist(plist_filepath) executable_filename = parsed_plist.get("CFBundleExecutable") if executable_filename: @@ -1305,7 +1310,7 @@ def get_app_environments_for_context( dict: Environments for passed context and application. """ - from openpype.pipeline import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB, Anatomy # Avalon database connection dbcon = AvalonMongoDB() @@ -1313,11 +1318,8 @@ def get_app_environments_for_context( dbcon.install() # Project document - project_doc = dbcon.find_one({"type": "project"}) - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) if modules_manager is None: from openpype.modules import ModulesManager diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index a1f7c1e0f4..17658eef93 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -14,6 +14,7 @@ class AbstractAttrDefMeta(ABCMeta): Each object of `AbtractAttrDef` must have a defined 'key' attribute. """ + def __call__(self, *args, **kwargs): obj = super(AbstractAttrDefMeta, self).__call__(*args, **kwargs) init_class = getattr(obj, "__init__class__", None) @@ -45,6 +46,7 @@ class AbtractAttrDef: is_label_horizontal(bool): UI specific argument.
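The plistlib change above is a Python-version guard: `plistlib.readPlist` was deprecated in Python 3.4 and removed in 3.9, while `plistlib.load` is its replacement. The same guard as a standalone sketch:

```python
import plistlib


def read_plist(filepath):
    """Read a .plist file on both old and new Python versions."""
    if hasattr(plistlib, "load"):
        # Modern API (Python 3.4+): works for binary and XML plists.
        with open(filepath, "rb") as stream:
            return plistlib.load(stream)
    # Legacy API for interpreters that predate plistlib.load.
    return plistlib.readPlist(filepath)
```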
Specify if label is next to value input or ahead. """ + is_value_def = True def __init__( @@ -77,6 +79,7 @@ class AbtractAttrDef: Convert passed value to a valid type. Use default if value can't be converted. """ + pass @@ -113,6 +116,7 @@ class UnknownDef(AbtractAttrDef): This attribute can be used to keep existing data unchanged but does not have known definition of type. """ + def __init__(self, key, default=None, **kwargs): kwargs["default"] = default super(UnknownDef, self).__init__(key, **kwargs) @@ -204,6 +208,7 @@ class TextDef(AbtractAttrDef): placeholder(str): UI placeholder for attribute. default(str, None): Default value. Empty string used when not defined. """ + def __init__( self, key, multiline=None, regex=None, placeholder=None, default=None, **kwargs @@ -531,14 +536,15 @@ class FileDef(AbtractAttrDef): Args: single_item(bool): Allow only single path item. folders(bool): Allow folder paths. - extensions(list): Allow files with extensions. Empty list will + extensions(List[str]): Allow files with extensions. Empty list will allow all extensions and None will disable files completely. - default(str, list): Defautl value. + extensions_label(str): Custom label shown instead of extensions in UI. + default(str, List[str]): Default value. """ def __init__( self, key, single_item=True, folders=None, extensions=None, - allow_sequences=True, default=None, **kwargs + allow_sequences=True, extensions_label=None, default=None, **kwargs ): if folders is None and extensions is None: folders = True @@ -578,6 +584,7 @@ class FileDef(AbtractAttrDef): self.folders = folders self.extensions = set(extensions) self.allow_sequences = allow_sequences + self.extensions_label = extensions_label super(FileDef, self).__init__(key, default=default, **kwargs) def __eq__(self, other): diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index a03f066300..2944b2506e 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -7,14 +7,25 @@ import platform import logging import collections import functools +import warnings -from bson.objectid import ObjectId - +from openpype.client import ( + get_project, + get_assets, + get_asset_by_name, + get_subset_by_name, + get_subsets, + get_version_by_id, + get_last_versions, + get_last_version_by_subset_id, + get_representations, + get_representation_by_id, + get_workfile_info, +) from openpype.settings import ( get_project_settings, get_system_settings ) -from .anatomy import Anatomy from .profiles_filtering import filter_profiles from .events import emit_event from .path_templates import StringTemplate @@ -36,6 +47,51 @@ PROJECT_NAME_REGEX = re.compile( ) +class AvalonContextDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+ ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", AvalonContextDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=AvalonContextDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + def create_project( project_name, project_code, library_project=False, dbcon=None ): @@ -65,6 +121,11 @@ def create_project( from openpype.pipeline import AvalonMongoDB from openpype.pipeline.schema import validate + if get_project(project_name, fields=["name"]): + raise ValueError("Project with name \"{}\" already exists".format( + project_name + )) + if dbcon is None: dbcon = AvalonMongoDB() @@ -74,15 +135,6 @@ def create_project( ).format(project_name)) database = dbcon.database - project_doc = database[project_name].find_one( - {"type": "project"}, - {"name": 1} - ) - if project_doc: - raise ValueError("Project with name \"{}\" already exists".format( - project_name - )) - project_doc = { "type": "project", "name": project_name, @@ -105,7 +157,7 @@ def create_project( database[project_name].delete_one({"type": "project"}) raise - project_doc = database[project_name].find_one({"type": "project"}) + project_doc = get_project(project_name) try: # Validate created project document @@ -137,23 +189,23 @@ def is_latest(representation): Returns: bool: Whether the representation is of latest version. - """ - version = legacy_io.find_one({"_id": representation['parent']}) + project_name = legacy_io.active_project() + version = get_version_by_id( + project_name, + representation["parent"], + fields=["_id", "type", "parent"] + ) if version["type"] == "hero_version": return True # Get highest version under the parent - highest_version = legacy_io.find_one({ - "type": "version", - "parent": version["parent"] - }, sort=[("name", -1)], projection={"name": True}) + last_version = get_last_version_by_subset_id( + project_name, version["parent"], fields=["_id"] + ) - if version['name'] == highest_version['name']: - return True - else: - return False + return version["_id"] == last_version["_id"] @with_pipeline_io @@ -161,6 +213,7 @@ def any_outdated(): """Return whether the current scene has any outdated content""" from openpype.pipeline import registered_host + project_name = legacy_io.active_project() checked = set() host = registered_host() for container in host.ls(): @@ -168,12 +221,8 @@ def any_outdated(): if representation in checked: continue - representation_doc = legacy_io.find_one( - { - "_id": ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} + representation_doc = get_representation_by_id( + project_name, representation, fields=["parent"] ) if representation_doc and not is_latest(representation_doc): return True @@ -191,81 +240,29 @@ def any_outdated(): def get_asset(asset_name=None): """ Returning asset document from database by its name. - Doesn't count with duplicities on asset names! + Does not handle duplicate asset names!
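The `deprecated` helper introduced above supports both bare and parametrized use. A short usage sketch, assuming the decorator is importable from `openpype.lib.avalon_context`; the decorated names below are hypothetical:

```python
from openpype.lib.avalon_context import deprecated


@deprecated("openpype.client.get_asset_by_name")  # parametrized form
def legacy_get_asset(asset_name):
    """Old entry point kept only for backward compatibility."""


@deprecated  # bare form: emits the generic "check content" message
def legacy_helper():
    """Moved elsewhere without a single direct replacement."""


legacy_get_asset("hero_chr")  # warns with AvalonContextDeprecatedWarning
```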
- Args: - asset_name (str) + Args: + asset_name (str) - Returns: - (MongoDB document) + Returns: + (MongoDB document) """ + + project_name = legacy_io.active_project() if not asset_name: asset_name = legacy_io.Session["AVALON_ASSET"] - asset_document = legacy_io.find_one({ - "name": asset_name, - "type": "asset" - }) - + asset_document = get_asset_by_name(project_name, asset_name) if not asset_document: raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) return asset_document -@with_pipeline_io -def get_hierarchy(asset_name=None): - """ - Obtain asset hierarchy path string from mongo db - - Args: - asset_name (str) - - Returns: - (string): asset hierarchy path - - """ - if not asset_name: - asset_name = legacy_io.Session.get( - "AVALON_ASSET", - os.environ["AVALON_ASSET"] - ) - - asset_entity = legacy_io.find_one({ - "type": 'asset', - "name": asset_name - }) - - not_set = "PARENTS_NOT_SET" - entity_parents = asset_entity.get("data", {}).get("parents", not_set) - - # If entity already have parents then just return joined - if entity_parents != not_set: - return "/".join(entity_parents) - - # Else query parents through visualParents and store result to entity - hierarchy_items = [] - entity = asset_entity - while True: - parent_id = entity.get("data", {}).get("visualParent") - if not parent_id: - break - entity = legacy_io.find_one({"_id": parent_id}) - hierarchy_items.append(entity["name"]) - - # Add parents to entity data for next query - entity_data = asset_entity.get("data", {}) - entity_data["parents"] = hierarchy_items - legacy_io.update_many( - {"_id": asset_entity["_id"]}, - {"$set": {"data": entity_data}} - ) - - return "/".join(hierarchy_items) - - -def get_system_general_anatomy_data(): - system_settings = get_system_settings() +def get_system_general_anatomy_data(system_settings=None): + if not system_settings: + system_settings = get_system_settings() studio_name = system_settings["general"]["studio_name"] studio_code = system_settings["general"]["studio_code"] return { @@ -313,11 +310,13 @@ def get_linked_assets(asset_doc): Returns: (list) Asset documents of input links for passed asset doc. """ + link_ids = get_linked_asset_ids(asset_doc) if not link_ids: return [] - return list(legacy_io.find({"_id": {"$in": link_ids}})) + project_name = legacy_io.active_project() + return list(get_assets(project_name, link_ids)) @with_pipeline_io @@ -339,20 +338,14 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): dict: Last version document for entered . 
""" - if not dbcon: - log.debug("Using `legacy_io` for query.") - dbcon = legacy_io - # Make sure is installed - dbcon.install() + if not project_name: + if not dbcon: + log.debug("Using `legacy_io` for query.") + dbcon = legacy_io + # Make sure is installed + dbcon.install() - if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): - # `legacy_io` has only `_database` attribute - # but `AvalonMongoDB` has `database` - database = getattr(dbcon, "database", dbcon._database) - collection = database[project_name] - else: - project_name = dbcon.Session.get("AVALON_PROJECT") - collection = dbcon + project_name = dbcon.active_project() log.debug(( "Getting latest version for Project: \"{}\" Asset: \"{}\"" @@ -360,19 +353,15 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): ).format(project_name, asset_name, subset_name)) # Query asset document id by asset name - asset_doc = collection.find_one( - {"type": "asset", "name": asset_name}, - {"_id": True} - ) + asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) if not asset_doc: log.info( "Asset \"{}\" was not found in Database.".format(asset_name) ) return None - subset_doc = collection.find_one( - {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]}, - {"_id": True} + subset_doc = get_subset_by_name( + project_name, subset_name, asset_doc["_id"] ) if not subset_doc: log.info( @@ -380,9 +369,8 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): ) return None - version_doc = collection.find_one( - {"type": "version", "parent": subset_doc["_id"]}, - sort=[("name", -1)], + version_doc = get_last_version_by_subset_id( + project_name, subset_doc["_id"] ) if not version_doc: log.info( @@ -420,28 +408,17 @@ def get_workfile_template_key_from_context( ValueError: When both 'dbcon' and 'project_name' were not passed. """ - if not dbcon: - if not project_name: + if not project_name: + if not dbcon: raise ValueError(( "`get_workfile_template_key_from_context` requires to pass" " one of 'dbcon' or 'project_name' arguments." )) - from openpype.pipeline import AvalonMongoDB - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name + project_name = dbcon.active_project() - elif not project_name: - project_name = dbcon.Session["AVALON_PROJECT"] - - asset_doc = dbcon.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": 1 - } + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.tasks"] ) asset_tasks = asset_doc.get("data", {}).get("tasks") or {} task_info = asset_tasks.get(task_name) or {} @@ -593,6 +570,7 @@ def get_workdir_with_workdir_data( )) if not anatomy: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_name) if not template_key: @@ -604,7 +582,10 @@ def get_workdir_with_workdir_data( anatomy_filled = anatomy.format(workdir_data) # Output is TemplateResult object which contain useful data - return anatomy_filled[template_key]["folder"] + output = anatomy_filled[template_key]["folder"] + if output: + return output.normalized() + return output def get_workdir( @@ -634,7 +615,9 @@ def get_workdir( Returns: TemplateResult: Workdir path. 
""" + if not anatomy: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) workdir_data = get_workdir_data( @@ -661,15 +644,11 @@ def template_data_from_session(session=None): session = legacy_io.Session project_name = session["AVALON_PROJECT"] - project_doc = legacy_io.database[project_name].find_one({ - "type": "project" - }) - asset_doc = legacy_io.database[project_name].find_one({ - "type": "asset", - "name": session["AVALON_ASSET"] - }) + asset_name = session["AVALON_ASSET"] task_name = session["AVALON_TASK"] host_name = session["AVALON_APP"] + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) return get_workdir_data(project_doc, asset_doc, task_name, host_name) @@ -694,8 +673,8 @@ def compute_session_changes( Returns: dict: The required changes in the Session dictionary. - """ + changes = dict() # If no changes, return directly @@ -713,12 +692,9 @@ def compute_session_changes( if not asset_document or not asset_tasks: # Assume asset name - asset_document = legacy_io.find_one( - { - "name": asset, - "type": "asset" - }, - {"data.tasks": True} + project_name = session["AVALON_PROJECT"] + asset_document = get_asset_by_name( + project_name, asset, fields=["data.tasks"] ) assert asset_document, "Asset must exist" @@ -747,6 +723,8 @@ def compute_session_changes( @with_pipeline_io def get_workdir_from_session(session=None, template_key=None): + from openpype.pipeline import Anatomy + if session is None: session = legacy_io.Session project_name = session["AVALON_PROJECT"] @@ -762,7 +740,10 @@ def get_workdir_from_session(session=None, template_key=None): host_name, project_name=project_name ) - return anatomy_filled[template_key]["folder"] + path = anatomy_filled[template_key]["folder"] + if path: + path = os.path.normpath(path) + return path @with_pipeline_io @@ -810,6 +791,7 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): @with_pipeline_io +@deprecated("openpype.client.get_workfile_info") def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. @@ -826,16 +808,13 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): Returns: dict: Workfile document or None. """ + # Use legacy_io if dbcon is not entered if not dbcon: dbcon = legacy_io - return dbcon.find_one({ - "type": "workfile", - "parent": asset_id, - "task_name": task_name, - "filename": filename - }) + project_name = dbcon.active_project() + return get_workfile_info(project_name, asset_id, task_name, filename) @with_pipeline_io @@ -853,6 +832,8 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and `legacy_io` is used if not entered. 
""" + from openpype.pipeline import Anatomy + # Use legacy_io if dbcon is not entered if not dbcon: dbcon = legacy_io @@ -868,12 +849,13 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): doc_data = copy.deepcopy(doc_filter) # Prepare project for workdir data - project_doc = dbcon.find_one({"type": "project"}) + project_name = dbcon.active_project() + project_doc = get_project(project_name) workdir_data = get_workdir_data( project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] ) # Prepare anatomy - anatomy = Anatomy(project_doc["name"]) + anatomy = Anatomy(project_name) # Get workdir path (result is anatomy.TemplateResult) template_workdir = get_workdir_with_workdir_data( workdir_data, anatomy @@ -988,12 +970,11 @@ class BuildWorkfile: from openpype.pipeline import discover_loader_plugins # Get current asset name and entity + project_name = legacy_io.active_project() current_asset_name = legacy_io.Session["AVALON_ASSET"] - current_asset_entity = legacy_io.find_one({ - "type": "asset", - "name": current_asset_name - }) - + current_asset_entity = get_asset_by_name( + project_name, current_asset_name + ) # Skip if asset was not found if not current_asset_entity: print("Asset entity with name `{}` was not found".format( @@ -1498,7 +1479,7 @@ class BuildWorkfile: return loaded_containers @with_pipeline_io - def _collect_last_version_repres(self, asset_entities): + def _collect_last_version_repres(self, asset_docs): """Collect subsets, versions and representations for asset_entities. Args: @@ -1531,64 +1512,56 @@ class BuildWorkfile: ``` """ - if not asset_entities: - return {} + output = {} + if not asset_docs: + return output - asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} + asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs} - subsets = list(legacy_io.find({ - "type": "subset", - "parent": {"$in": list(asset_entity_by_ids.keys())} - })) + project_name = legacy_io.active_project() + subsets = list(get_subsets( + project_name, asset_ids=asset_docs_by_ids.keys() + )) subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - sorted_versions = list(legacy_io.find({ - "type": "version", - "parent": {"$in": list(subset_entity_by_ids.keys())} - }).sort("name", -1)) + last_version_by_subset_id = get_last_versions( + project_name, subset_entity_by_ids.keys() + ) + last_version_docs_by_id = { + version["_id"]: version + for version in last_version_by_subset_id.values() + } + repre_docs = get_representations( + project_name, version_ids=last_version_docs_by_id.keys() + ) - subset_id_with_latest_version = [] - last_versions_by_id = {} - for version in sorted_versions: - subset_id = version["parent"] - if subset_id in subset_id_with_latest_version: - continue - subset_id_with_latest_version.append(subset_id) - last_versions_by_id[version["_id"]] = version + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = last_version_docs_by_id[version_id] - repres = legacy_io.find({ - "type": "representation", - "parent": {"$in": list(last_versions_by_id.keys())} - }) + subset_id = version_doc["parent"] + subset_doc = subset_entity_by_ids[subset_id] - output = {} - for repre in repres: - version_id = repre["parent"] - version = last_versions_by_id[version_id] - - subset_id = version["parent"] - subset = subset_entity_by_ids[subset_id] - - asset_id = subset["parent"] - asset = asset_entity_by_ids[asset_id] + asset_id = subset_doc["parent"] + asset_doc = asset_docs_by_ids[asset_id] if asset_id not in 
output: output[asset_id] = { - "asset_entity": asset, + "asset_entity": asset_doc, "subsets": {} } if subset_id not in output[asset_id]["subsets"]: output[asset_id]["subsets"][subset_id] = { - "subset_entity": subset, + "subset_entity": subset_doc, "version": { - "version_entity": version, + "version_entity": version_doc, "repres": [] } } output[asset_id]["subsets"][subset_id]["version"]["repres"].append( - repre + repre_doc ) return output @@ -1673,6 +1646,7 @@ def _get_task_context_data_for_anatomy( """ if anatomy is None: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) asset_name = asset_doc["name"] @@ -1741,6 +1715,7 @@ def get_custom_workfile_template_by_context( """ if anatomy is None: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) # get project, asset, task anatomy context data @@ -1794,35 +1769,19 @@ def get_custom_workfile_template_by_string_context( context. (Existence of formatted path is not validated.) """ - if dbcon is None: - from openpype.pipeline import AvalonMongoDB + project_name = None + if anatomy is not None: + project_name = anatomy.project_name - dbcon = AvalonMongoDB() + if not project_name and dbcon is not None: + project_name = dbcon.active_project() - dbcon.install() + if not project_name: + raise ValueError("Can't determine project") - if dbcon.Session["AVALON_PROJECT"] != project_name: - dbcon.Session["AVALON_PROJECT"] = project_name - - project_doc = dbcon.find_one( - {"type": "project"}, - # All we need is "name" and "data.code" keys - { - "name": 1, - "data.code": 1 - } - ) - asset_doc = dbcon.find_one( - { - "type": "asset", - "name": asset_name - }, - # All we need is "name" and "data.tasks" keys - { - "name": 1, - "data.tasks": 1 - } - ) + project_doc = get_project(project_name, fields=["name", "data.code"]) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["name", "data.tasks"]) return get_custom_workfile_template_by_context( template_profiles, project_doc, asset_doc, task_name, anatomy diff --git a/openpype/lib/events.py b/openpype/lib/events.py index 7bec6ee30d..301d62e2a6 100644 --- a/openpype/lib/events.py +++ b/openpype/lib/events.py @@ -11,6 +11,10 @@ except Exception: from openpype.lib.python_2_comp import WeakMethod +class MissingEventSystem(Exception): + pass + + class EventCallback(object): """Callback registered to a topic. @@ -176,16 +180,20 @@ class Event(object): topic (str): Identifier of event. data (Any): Data specific for event. Dictionary is recommended. source (str): Identifier of source. + event_system (EventSystem): Event system in which the event can be + triggered. """ + _data = {} - def __init__(self, topic, data=None, source=None): + def __init__(self, topic, data=None, source=None, event_system=None): self._id = str(uuid4()) self._topic = topic if data is None: data = {} self._data = data self._source = source + self._event_system = event_system def __getitem__(self, key): return self._data[key] @@ -211,28 +219,118 @@ class Event(object): def emit(self): """Emit event and trigger callbacks.""" - StoredCallbacks.emit_event(self) + if self._event_system is None: + raise MissingEventSystem( + "Can't emit event {}. It has no event system set.".format( + str(repr(self)) + ) + ) + self._event_system.emit_event(self) -class StoredCallbacks: - _registered_callbacks = [] +class EventSystem(object): + """Encapsulate event handling into an object.
+ + System wraps registered callbacks and triggered events into a single + object, so it is possible to create multiple independent systems that + have their own topics and callbacks. + + + """ + + def __init__(self): + self._registered_callbacks = [] + + def add_callback(self, topic, callback): + """Register callback in event system. + + Args: + topic (str): Topic for EventCallback. + callback (Callable): Function or method that will be called + when topic is triggered. + + Returns: + EventCallback: Created callback object which can be used to + stop listening. + """ - @classmethod - def add_callback(cls, topic, callback): callback = EventCallback(topic, callback) - cls._registered_callbacks.append(callback) + self._registered_callbacks.append(callback) return callback - @classmethod - def emit_event(cls, event): + def create_event(self, topic, data, source): + """Create new event which is bound to the event system. + + Args: + topic (str): Event topic. + data (dict): Data related to event. + source (str): Source of event. + + Returns: + Event: Object of event. + """ + + return Event(topic, data, source, self) + + def emit(self, topic, data, source): + """Create event based on passed data and emit it. + + This is the easiest way to trigger an event in an event system. + + Args: + topic (str): Event topic. + data (dict): Data related to event. + source (str): Source of event. + + Returns: + Event: Created and emitted event. + """ + + event = self.create_event(topic, data, source) + event.emit() + return event + + def emit_event(self, event): + """Emit event object. + + Args: + event (Event): Prepared event with topic and data. + """ + invalid_callbacks = [] - for callback in cls._registered_callbacks: + for callback in self._registered_callbacks: callback.process_event(event) if not callback.is_ref_valid: invalid_callbacks.append(callback) for callback in invalid_callbacks: - cls._registered_callbacks.remove(callback) + self._registered_callbacks.remove(callback) + + +class GlobalEventSystem: + """Event system living in the global scope of the process. + + This is primarily used in host implementations to trigger events + related to DCC changes or changes of context in the host implementation. + """ + + _global_event_system = None + + @classmethod + def get_global_event_system(cls): + if cls._global_event_system is None: + cls._global_event_system = EventSystem() + return cls._global_event_system + + @classmethod + def add_callback(cls, topic, callback): + event_system = cls.get_global_event_system() + return event_system.add_callback(topic, callback) + + @classmethod + def emit(cls, topic, data, source): + event_system = cls.get_global_event_system() + return event_system.emit(topic, data, source) def register_event_callback(topic, callback): @@ -249,7 +347,8 @@ def register_event_callback(topic, callback): enable/disable listening to a topic or remove the callback from the topic completely. """ - return StoredCallbacks.add_callback(topic, callback) + + return GlobalEventSystem.add_callback(topic, callback) def emit_event(topic, data=None, source=None): @@ -263,6 +362,5 @@ def emit_event(topic, data=None, source=None): Returns: Event: Object of event that was emitted.
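Because the new `EventSystem` keeps callbacks per instance, isolated systems can coexist in one process. A usage sketch based on the API added above, assuming the registered callback receives the event object from `EventCallback.process_event`:

```python
from openpype.lib.events import EventSystem

system = EventSystem()


def on_project_created(event):
    # Event.__getitem__ (shown above) exposes the event data.
    print("created:", event["project_name"])


system.add_callback("openpype.project.created", on_project_created)
system.emit(
    "openpype.project.created",        # topic
    {"project_name": "demo_project"},  # data (the project name is hypothetical)
    "prepare_project_action",          # source
)
```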
""" - event = Event(topic, data, source) - event.emit() - return event + + return GlobalEventSystem.emit(topic, data, source) diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index 5c40aa4549..c1282016ef 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -409,6 +409,19 @@ class TemplateResult(str): self.invalid_types ) + def normalized(self): + """Convert to normalized path.""" + + cls = self.__class__ + return cls( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + class TemplatesResultDict(dict): """Holds and wrap TemplateResults for easy bug report.""" diff --git a/openpype/lib/path_tools.py b/openpype/lib/path_tools.py index caad20f4d6..4f28be3302 100644 --- a/openpype/lib/path_tools.py +++ b/openpype/lib/path_tools.py @@ -9,7 +9,6 @@ import platform from openpype.client import get_project from openpype.settings import get_project_settings -from .anatomy import Anatomy from .profiles_filtering import filter_profiles log = logging.getLogger(__name__) @@ -227,6 +226,7 @@ def fill_paths(path_list, anatomy): def create_project_folders(basic_paths, project_name): + from openpype.pipeline import Anatomy anatomy = Anatomy(project_name) concat_paths = concatenate_splitted_paths(basic_paths, anatomy) diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index bcbf06a0e8..1d3c1eec6b 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -6,10 +6,10 @@ import logging import re import json -from .profiles_filtering import filter_profiles - +from openpype.client import get_asset_by_id from openpype.settings import get_project_settings +from .profiles_filtering import filter_profiles log = logging.getLogger(__name__) @@ -135,24 +135,17 @@ def get_subset_name( This is legacy function should be replaced with `get_subset_name_with_asset_doc` where asset document is expected. """ - if dbcon is None: - from openpype.pipeline import AvalonMongoDB - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name + if project_name is None: + project_name = dbcon.project_name - dbcon.install() - - asset_doc = dbcon.find_one( - {"_id": asset_id}, - {"data.tasks": True} - ) or {} + asset_doc = get_asset_by_id(project_name, asset_id, fields=["data.tasks"]) return get_subset_name_with_asset_doc( family, variant, task_name, - asset_doc, + asset_doc or {}, project_name, host_name, default_template, diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index 396479c725..ff2f1d4b88 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -24,7 +24,10 @@ from bson.json_util import ( dumps, CANONICAL_JSON_OPTIONS ) - +from openpype.client import ( + get_project, + get_whole_project, +) from openpype.pipeline import AvalonMongoDB DOCUMENTS_FILE_NAME = "database" @@ -50,14 +53,12 @@ def pack_project(project_name, destination_dir=None): Args: project_name(str): Project that should be packaged. - destination_dir(str): Optinal path where zip will be stored. Project's + destination_dir(str): Optional path where zip will be stored. Project's root is used if not passed. 
""" print("Creating package of project \"{}\"".format(project_name)) # Validate existence of project - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) if not project_doc: raise ValueError("Project \"{}\" was not found in database".format( project_name @@ -118,7 +119,7 @@ def pack_project(project_name, destination_dir=None): temp_docs_json = s.name # Query all project documents and store them to temp json - docs = list(dbcon.find({})) + docs = list(get_whole_project(project_name)) data = dumps( docs, json_options=CANONICAL_JSON_OPTIONS ) @@ -147,7 +148,7 @@ def pack_project(project_name, destination_dir=None): # Cleanup os.remove(temp_docs_json) os.remove(temp_metadata_json) - dbcon.uninstall() + print("*** Packing finished ***") @@ -207,7 +208,7 @@ def unpack_project(path_to_zip, new_root=None): print("Using different root path {}".format(new_root)) root_path = new_root - project_doc = collection.find_one({"type": "project"}) + project_doc = get_project(project_name) roots = project_doc["config"]["roots"] key = tuple(roots.keys())[0] update_key = "config.roots.{}.{}".format(key, low_platform) diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py index 86de19b4be..20703ee308 100644 --- a/openpype/lib/usdlib.py +++ b/openpype/lib/usdlib.py @@ -8,10 +8,8 @@ except ImportError: # Allow to fall back on Multiverse 6.3.0+ pxr usd library from mvpxr import Usd, UsdGeom, Sdf, Kind -from openpype.pipeline import ( - registered_root, - legacy_io, -) +from openpype.client import get_project, get_asset_by_name +from openpype.pipeline import legacy_io, Anatomy log = logging.getLogger(__name__) @@ -128,7 +126,8 @@ def create_model(filename, asset, variant_subsets): """ - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -178,7 +177,8 @@ def create_shade(filename, asset, variant_subsets): """ - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -213,7 +213,8 @@ def create_shade_variation(filename, asset, model_variant, shade_variants): """ - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -313,21 +314,25 @@ def get_usd_master_path(asset, subset, representation): """ - project = legacy_io.find_one( - {"type": "project"}, projection={"config.template.publish": True} + project_name = legacy_io.active_project() + anatomy = Anatomy(project_name) + project_doc = get_project( + project_name, + fields=["name", "data.code"] ) - template = project["config"]["template"]["publish"] if isinstance(asset, dict) and "name" in asset: # Allow explicitly passing asset document asset_doc = asset else: - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + asset_doc = get_asset_by_name(project_name, asset, fields=["name"]) - path = template.format( - **{ - "root": registered_root(), - "project": legacy_io.Session["AVALON_PROJECT"], + formatted_result = anatomy.format( + { + "project": { + "name": project_name, + "code": project_doc.get("data", {}).get("code") + }, 
"asset": asset_doc["name"], "subset": subset, "representation": representation, @@ -335,6 +340,7 @@ def get_usd_master_path(asset, subset, representation): } ) + path = formatted_result["publish"]["path"] # Remove the version folder subset_folder = os.path.dirname(os.path.dirname(path)) master_folder = os.path.join(subset_folder, "master") diff --git a/openpype/modules/avalon_apps/rest_api.py b/openpype/modules/avalon_apps/rest_api.py index b35f5bf357..a52ce1b6df 100644 --- a/openpype/modules/avalon_apps/rest_api.py +++ b/openpype/modules/avalon_apps/rest_api.py @@ -5,7 +5,12 @@ from bson.objectid import ObjectId from aiohttp.web_response import Response -from openpype.pipeline import AvalonMongoDB +from openpype.client import ( + get_projects, + get_project, + get_assets, + get_asset_by_name, +) from openpype_modules.webserver.base_routes import RestApiEndpoint @@ -14,19 +19,13 @@ class _RestApiEndpoint(RestApiEndpoint): self.resource = resource super(_RestApiEndpoint, self).__init__() - @property - def dbcon(self): - return self.resource.dbcon - class AvalonProjectsEndpoint(_RestApiEndpoint): async def get(self) -> Response: - output = [] - for project_name in self.dbcon.database.collection_names(): - project_doc = self.dbcon.database[project_name].find_one({ - "type": "project" - }) - output.append(project_doc) + output = [ + project_doc + for project_doc in get_projects() + ] return Response( status=200, body=self.resource.encode(output), @@ -36,9 +35,7 @@ class AvalonProjectsEndpoint(_RestApiEndpoint): class AvalonProjectEndpoint(_RestApiEndpoint): async def get(self, project_name) -> Response: - project_doc = self.dbcon.database[project_name].find_one({ - "type": "project" - }) + project_doc = get_project(project_name) if project_doc: return Response( status=200, @@ -53,9 +50,7 @@ class AvalonProjectEndpoint(_RestApiEndpoint): class AvalonAssetsEndpoint(_RestApiEndpoint): async def get(self, project_name) -> Response: - asset_docs = list(self.dbcon.database[project_name].find({ - "type": "asset" - })) + asset_docs = list(get_assets(project_name)) return Response( status=200, body=self.resource.encode(asset_docs), @@ -65,10 +60,7 @@ class AvalonAssetsEndpoint(_RestApiEndpoint): class AvalonAssetEndpoint(_RestApiEndpoint): async def get(self, project_name, asset_name) -> Response: - asset_doc = self.dbcon.database[project_name].find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) if asset_doc: return Response( status=200, @@ -88,9 +80,6 @@ class AvalonRestApiResource: self.module = avalon_module self.server_manager = server_manager - self.dbcon = AvalonMongoDB() - self.dbcon.install() - self.prefix = "/avalon" self.endpoint_defs = ( diff --git a/openpype/modules/base.py b/openpype/modules/base.py index b9ccec13cc..1bd343fd07 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -49,6 +49,7 @@ class _ModuleClass(object): Object of this class can be stored to `sys.modules` and used for storing dynamically imported modules. 
""" + def __init__(self, name): # Call setattr on super class super(_ModuleClass, self).__setattr__("name", name) @@ -116,12 +117,13 @@ class _InterfacesClass(_ModuleClass): - this is because interfaces must be available even if are missing implementation """ + def __getattr__(self, attr_name): if attr_name not in self.__attributes__: if attr_name in ("__path__", "__file__"): return None - raise ImportError(( + raise AttributeError(( "cannot import name '{}' from 'openpype_interfaces'" ).format(attr_name)) diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/clockify/launcher_actions/ClockifyStart.py index 4669f98b01..7663aecc31 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/openpype/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,16 +1,9 @@ -from openpype.api import Logger -from openpype.pipeline import ( - legacy_io, - LauncherAction, -) +from openpype.client import get_asset_by_name +from openpype.pipeline import LauncherAction from openpype_modules.clockify.clockify_api import ClockifyAPI -log = Logger.get_logger(__name__) - - class ClockifyStart(LauncherAction): - name = "clockify_start_timer" label = "Clockify - Start Timer" icon = "clockify_icon" @@ -24,20 +17,19 @@ class ClockifyStart(LauncherAction): return False def process(self, session, **kwargs): - project_name = session['AVALON_PROJECT'] - asset_name = session['AVALON_ASSET'] - task_name = session['AVALON_TASK'] + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] description = asset_name - asset = legacy_io.find_one({ - 'type': 'asset', - 'name': asset_name - }) - if asset is not None: - desc_items = asset.get('data', {}).get('parents', []) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.parents"] + ) + if asset_doc is not None: + desc_items = asset_doc.get("data", {}).get("parents", []) desc_items.append(asset_name) desc_items.append(task_name) - description = '/'.join(desc_items) + description = "/".join(desc_items) project_id = self.clockapi.get_project_id(project_name) tag_ids = [] diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/openpype/modules/clockify/launcher_actions/ClockifySync.py index 356bbd0306..c346a1b4f6 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/openpype/modules/clockify/launcher_actions/ClockifySync.py @@ -1,11 +1,6 @@ +from openpype.client import get_projects, get_project from openpype_modules.clockify.clockify_api import ClockifyAPI -from openpype.api import Logger -from openpype.pipeline import ( - legacy_io, - LauncherAction, -) - -log = Logger.get_logger(__name__) +from openpype.pipeline import LauncherAction class ClockifySync(LauncherAction): @@ -22,39 +17,36 @@ class ClockifySync(LauncherAction): return self.have_permissions def process(self, session, **kwargs): - project_name = session.get('AVALON_PROJECT', None) + project_name = session.get("AVALON_PROJECT") or "" projects_to_sync = [] - if project_name.strip() == '' or project_name is None: - for project in legacy_io.projects(): - projects_to_sync.append(project) + if project_name.strip(): + projects_to_sync = [get_project(project_name)] else: - project = legacy_io.find_one({'type': 'project'}) - projects_to_sync.append(project) + projects_to_sync = get_projects() projects_info = {} for project in projects_to_sync: - task_types = project['config']['tasks'].keys() - projects_info[project['name']] = task_types + 
task_types = project["config"]["tasks"].keys() + projects_info[project["name"]] = task_types clockify_projects = self.clockapi.get_projects() for project_name, task_types in projects_info.items(): - if project_name not in clockify_projects: - response = self.clockapi.add_project(project_name) - if 'id' not in response: - self.log.error('Project {} can\'t be created'.format( - project_name - )) - continue - project_id = response['id'] - else: - project_id = clockify_projects[project_name] + if project_name in clockify_projects: + continue + + response = self.clockapi.add_project(project_name) + if "id" not in response: + self.log.error("Project {} can't be created".format( + project_name + )) + continue clockify_workspace_tags = self.clockapi.get_tags() for task_type in task_types: if task_type not in clockify_workspace_tags: response = self.clockapi.add_tag(task_type) - if 'id' not in response: + if "id" not in response: self.log.error('Task {} can\'t be created'.format( task_type )) diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 22902d79ea..3f54273a56 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -15,7 +15,7 @@ import attr import requests import pyblish.api -from openpype.lib.abstract_metaplugins import AbstractMetaInstancePlugin +from openpype.pipeline.publish import AbstractMetaInstancePlugin def requests_post(*args, **kwargs): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index f834ae7e92..fdf67b51bc 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -55,7 +55,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): scenename = os.path.basename(scene) # Get project code - project = legacy_io.find_one({"type": "project"}) + project = context.data["projectEntity"] code = project["data"].get("code", project["name"]) job_name = "{scene} [PUBLISH]".format(scene=scenename) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index dff80e62b9..145b6d795f 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -711,7 +711,9 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): new_payload["JobInfo"].update(tiles_data["JobInfo"]) new_payload["PluginInfo"].update(tiles_data["PluginInfo"]) - job_hash = hashlib.sha256("{}_{}".format(file_index, file)) + self.log.info("hashing {} - {}".format(file_index, file)) + job_hash = hashlib.sha256( + ("{}_{}".format(file_index, file)).encode("utf-8")) frame_jobs[frame] = job_hash.hexdigest() new_payload["JobInfo"]["ExtraInfo0"] = job_hash.hexdigest() new_payload["JobInfo"]["ExtraInfo1"] = file diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index b54b00d099..b098eaba8e 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -11,6 +11,7 @@ import clique import pyblish.api import openpype.api +from openpype.client import get_representations from openpype.pipeline import ( 
get_representation_path, legacy_io, @@ -18,15 +19,23 @@ from openpype.pipeline import ( from openpype.pipeline.farm.patterning import match_aov_pattern -def get_resources(version, extension=None): +def get_resources(project_name, version, extension=None): """Get the files from the specific version.""" - query = {"type": "representation", "parent": version["_id"]} + + # TODO this function seems weird + # - it's looking for a representation with one extension or the first + # (any) representation from a version? + # - not sure how this should work, maybe it does for specific use cases + # but probably can't be used for all resources from 2D workflows + extensions = None if extension: - query["name"] = extension - - representation = legacy_io.find_one(query) - assert representation, "This is a bug" + extensions = [extension] + repre_docs = list(get_representations( + project_name, version_ids=[version["_id"]], extensions=extensions + )) + assert repre_docs, "This is a bug" + representation = repre_docs[0] directory = get_representation_path(representation) print("Source: ", directory) resources = sorted( @@ -330,13 +339,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.log.info("Preparing to copy ...") start = instance.data.get("frameStart") end = instance.data.get("frameEnd") + project_name = legacy_io.active_project() # get latest version of subset # this will stop if subset wasn't published yet version = openpype.api.get_latest_version(instance.data.get("asset"), instance.data.get("subset")) # get its files based on extension - subset_resources = get_resources(version, representation.get("ext")) + subset_resources = get_resources( + project_name, version, representation.get("ext") + ) r_col, _ = clique.assemble(subset_resources) # if override remove all frames we are expecting to be rendered @@ -1045,7 +1057,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): get publish_path Args: - anatomy (pype.lib.anatomy.Anatomy): + anatomy (openpype.pipeline.anatomy.Anatomy): template_data (dict): pre-calculated collected data for process asset (string): asset name subset (string): subset name (actually group name of subset) diff --git a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py index 8a8e86e7b9..21382007a0 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py +++ b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py @@ -6,7 +6,10 @@ import collections import ftrack_api from openpype.lib import get_datetime_data -from openpype.api import get_project_settings +from openpype.settings.lib import ( + get_project_settings, + get_default_project_settings +) from openpype_modules.ftrack.lib import ServerAction @@ -79,6 +82,35 @@ class CreateDailyReviewSessionServerAction(ServerAction): ) return True + def _calculate_next_cycle_delta(self): + studio_default_settings = get_default_project_settings() + action_settings = ( + studio_default_settings + ["ftrack"] + [self.settings_frack_subkey] + [self.settings_key] + ) + cycle_hour_start = action_settings.get("cycle_hour_start") + if not cycle_hour_start: + h = m = s = 0 + else: + h, m, s = cycle_hour_start + + # Calculate seconds until the next trigger of review session + # creation - at the configured cycle hour, or midnight by default + # - the callback will then create another timer with a 1 day offset + now = datetime.datetime.now() + # Create object of today's expected trigger time +
expected_next_trigger = datetime.datetime( + now.year, now.month, now.day, h, m, s + ) + if expected_next_trigger > now: + seconds = (expected_next_trigger - now).total_seconds() + else: + expected_next_trigger += self._day_delta + seconds = (expected_next_trigger - now).total_seconds() + return seconds, expected_next_trigger + def register(self, *args, **kwargs): """Override register to be able to trigger the first timer.""" # Register server action as it would be normally @@ -86,22 +118,14 @@ *args, **kwargs ) - # Create threading timer which will trigger creation of report - # at the 00:00:01 of next day - # - callback will trigger another timer which will have 1 day offset - now = datetime.datetime.now() - # Create object of today morning - today_morning = datetime.datetime( - now.year, now.month, now.day, 0, 0, 1 - ) - # Add a day delta (to calculate next day date) - next_day_morning = today_morning + self._day_delta - # Calculate first delta in seconds for first threading timer - first_delta = (next_day_morning - now).total_seconds() + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + # Store cycle time which will be used to create next timer - self._last_cyle_time = next_day_morning + self._last_cyle_time = cycle_time # Create timer thread - self._cycle_timer = threading.Timer(first_delta, self._timer_callback) + self._cycle_timer = threading.Timer( + seconds_delta, self._timer_callback + ) self._cycle_timer.start() self._check_review_session() @@ -111,13 +135,12 @@ ): self._cycle_timer is not None and self._last_cyle_time is not None ): - now = datetime.datetime.now() - while self._last_cyle_time < now: - self._last_cyle_time = self._last_cyle_time + self._day_delta + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + self._last_cyle_time = cycle_time - delay = (self._last_cyle_time - now).total_seconds() - - self._cycle_timer = threading.Timer(delay, self._timer_callback) + self._cycle_timer = threading.Timer( + seconds_delta, self._timer_callback + ) self._cycle_timer.start() self._check_review_session() diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 361aa98d16..713a4d9aba 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -1,4 +1,5 @@ import json +import copy from openpype.client import get_project from openpype.api import ProjectSettings @@ -373,6 +374,10 @@ class PrepareProjectServer(ServerAction): project_name, project_code )) create_project(project_name, project_code) + self.trigger_event( + "openpype.project.created", + {"project_name": project_name} + ) project_settings = ProjectSettings(project_name) project_anatomy_settings = project_settings["project_anatomy"] @@ -400,6 +405,10 @@ class PrepareProjectServer(ServerAction): self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value)) session.commit() + event_data = copy.deepcopy(in_data) + event_data["project_name"] = project_name + self.trigger_event("openpype.project.prepared", event_data) + return True diff --git a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py index 58f79e8a2b..df9147bdf7 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py
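The `_calculate_next_cycle_delta` helper above generalizes the old hardcoded midnight schedule to a configurable wall-clock time. Its branching reduces to this standalone sketch (the settings lookup is omitted):

```python
import datetime

DAY_DELTA = datetime.timedelta(days=1)


def next_cycle_delta(hour=0, minute=0, second=0):
    """Seconds until the next daily trigger at the given time.

    If today's trigger time already passed, the trigger moves to the
    same time tomorrow - the same two branches as in the diff.
    """
    now = datetime.datetime.now()
    trigger = datetime.datetime(
        now.year, now.month, now.day, hour, minute, second
    )
    if trigger <= now:
        trigger += DAY_DELTA
    return (trigger - now).total_seconds(), trigger
```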
+++ b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py @@ -1,7 +1,8 @@ import time import sys import json -import traceback + +import ftrack_api from openpype_modules.ftrack.lib import ServerAction from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory @@ -180,6 +181,13 @@ class SyncToAvalonServer(ServerAction): "* Total time: {}".format(time_7 - time_start) ) + if self.entities_factory.project_created: + event = ftrack_api.event.base.Event( + topic="openpype.project.created", + data={"project_name": project_name} + ) + self.session.event_hub.publish(event) + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index 82b79e986b..88d252e8cf 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -2,11 +2,11 @@ import re import subprocess from openpype.client import get_asset_by_id, get_asset_by_name +from openpype.settings import get_project_settings +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseEvent from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from openpype.api import Anatomy, get_project_settings - class UserAssigmentEvent(BaseEvent): """ diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index 81f38e0c39..9806f83773 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -1,7 +1,7 @@ import os import collections import copy -from openpype.api import Anatomy +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py index ebea8872f9..df914de854 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py @@ -84,6 +84,11 @@ class CreateProjectFolders(BaseAction): create_project_folders(basic_paths, project_name) self.create_ftrack_entities(basic_paths, project_entity) + self.trigger_event( + "openpype.project.structure.created", + {"project_name": project_name} + ) + except Exception as exc: self.log.warning("Creating of structure crashed.", exc_info=True) session.rollback() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index 3400c509ab..79d04a7854 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -11,9 +11,8 @@ from openpype.client import ( get_versions, get_representations ) -from openpype.api import Anatomy from openpype.lib import StringTemplate, TemplateUnsolved -from openpype.pipeline import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon diff --git 
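The hunk above publishes a custom `openpype.project.created` topic through the ftrack event hub whenever `SyncEntitiesFactory` reports that the project document was newly created (the user-facing sync action later in this diff does the same). Publishing such a topic in isolation looks roughly like this; the session reads the server URL and API credentials from the `FTRACK_*` environment variables, and the project name is hypothetical:

```python
import ftrack_api

session = ftrack_api.Session()
event = ftrack_api.event.base.Event(
    topic="openpype.project.created",
    data={"project_name": "demo_project"},
)
session.event_hub.publish(event)
```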
a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index 4b799b092b..ad82af39a3 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -10,12 +10,13 @@ from openpype.client import ( get_versions, get_representations ) -from openpype.api import Anatomy, config +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype_modules.ftrack.lib.custom_attributes import ( query_custom_attributes ) +from openpype.lib import config from openpype.lib.delivery import ( path_from_representation, get_format_dict, diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index d30c41a749..d91649d7ba 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -11,13 +11,13 @@ from openpype.client import ( get_project, get_assets, ) -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.lib import ( get_workfile_template_key, get_workdir_data, - Anatomy, StringTemplate, ) +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index e9dc11de9f..e89595109e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -1,4 +1,5 @@ import json +import copy from openpype.client import get_project from openpype.api import ProjectSettings @@ -399,6 +400,10 @@ class PrepareProjectLocal(BaseAction): project_name, project_code )) create_project(project_name, project_code) + self.trigger_event( + "openpype.project.created", + {"project_name": project_name} + ) project_settings = ProjectSettings(project_name) project_anatomy_settings = project_settings["project_anatomy"] @@ -433,6 +438,10 @@ class PrepareProjectLocal(BaseAction): self.process_identifier() ) self.trigger_action(trigger_identifier, event) + + event_data = copy.deepcopy(in_data) + event_data["project_name"] = project_name + self.trigger_event("openpype.project.prepared", event_data) return True diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index 2480ea7f95..d05f0c47f6 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -11,10 +11,10 @@ from openpype.client import ( get_version_by_name, get_representation_by_name ) -from openpype.api import Anatomy from openpype.pipeline import ( get_representation_path, AvalonMongoDB, + Anatomy, ) from openpype_modules.ftrack.lib import BaseAction, statics_icon diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py index d655dddcaf..8748f426bd 100644 --- 
a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py @@ -14,8 +14,7 @@ from openpype.client import ( get_representations ) from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype.api import Anatomy -from openpype.pipeline import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY diff --git a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py index cd2f371f38..e52a061471 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py @@ -1,7 +1,8 @@ import time import sys import json -import traceback + +import ftrack_api from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory @@ -184,6 +185,13 @@ class SyncToAvalonLocal(BaseAction): "* Total time: {}".format(time_7 - time_start) ) + if self.entities_factory.project_created: + event = ftrack_api.event.base.Event( + topic="openpype.project.created", + data={"project_name": project_name} + ) + self.session.event_hub.publish(event) + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 68b5c62c53..f8883cefbd 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -443,6 +443,7 @@ class SyncEntitiesFactory: } self.create_list = [] + self.project_created = False self.unarchive_list = [] self.updates = collections.defaultdict(dict) @@ -2214,6 +2215,7 @@ class SyncEntitiesFactory: self._avalon_ents_by_name[project_item["name"]] = str(new_id) self.create_list.append(project_item) + self.project_created = True # store mongo id to ftrack entity entity = self.entities_dict[self.ft_project_id]["entity"] diff --git a/openpype/modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/ftrack/lib/ftrack_base_handler.py index 2130abc20c..c0fad6aadc 100644 --- a/openpype/modules/ftrack/lib/ftrack_base_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_base_handler.py @@ -535,7 +535,7 @@ class BaseHandler(object): ) def trigger_event( - self, topic, event_data={}, session=None, source=None, + self, topic, event_data=None, session=None, source=None, event=None, on_error="ignore" ): if session is None: @@ -543,6 +543,9 @@ class BaseHandler(object): if not source and event: source = event.get("source") + + if event_data is None: + event_data = {} # Create and trigger event event = ftrack_api.event.base.Event( topic=topic, diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py index 952b21546d..77a7ebdfcf 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py @@ -116,6 +116,7 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): "app_name": app_name, "app_label": app_label, "published_paths": "
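The `trigger_event` signature change above (`event_data={}` to `event_data=None`) removes a classic Python pitfall: a mutable default is created once at function definition and then shared by every call. The None-sentinel idiom the fix uses:

```python
def trigger_event(topic, event_data=None):
    # With `event_data={}` in the signature, every call without the
    # argument would mutate one shared dictionary.
    if event_data is None:
        event_data = {}
    event_data["topic"] = topic
    return event_data


# Each call now gets its own fresh dictionary.
assert trigger_event("a") is not trigger_event("b")
```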
".join(sorted(published_paths)), + "source": instance.data.get("source", '') } comment = template.format(**format_data) if not comment: diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py index 84c400bde9..d28ded06c7 100644 --- a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py @@ -32,11 +32,17 @@ class CollectKitsuEntities(pyblish.api.ContextPlugin): context.data["kitsu_project"] = kitsu_project self.log.debug("Collect kitsu project: {}".format(kitsu_project)) - kitsu_asset = gazu.asset.get_asset(zou_asset_data["id"]) - if not kitsu_asset: - raise AssertionError("Asset not found in kitsu!") - context.data["kitsu_asset"] = kitsu_asset - self.log.debug("Collect kitsu asset: {}".format(kitsu_asset)) + entity_type = zou_asset_data["type"] + if entity_type == "Shot": + kitsu_entity = gazu.shot.get_shot(zou_asset_data["id"]) + else: + kitsu_entity = gazu.asset.get_asset(zou_asset_data["id"]) + + if not kitsu_entity: + raise AssertionError(f"{entity_type} not found in kitsu!") + + context.data["kitsu_entity"] = kitsu_entity + self.log.debug(f"Collect kitsu {entity_type}: {kitsu_entity}") if zou_task_data: kitsu_task = gazu.task.get_task(zou_task_data["id"]) @@ -57,7 +63,7 @@ class CollectKitsuEntities(pyblish.api.ContextPlugin): ) kitsu_task = gazu.task.get_task_by_name( - kitsu_asset, kitsu_task_type + kitsu_entity, kitsu_task_type ) if not kitsu_task: raise AssertionError("Task not found in kitsu!") diff --git a/openpype/modules/kitsu/utils/sync_service.py b/openpype/modules/kitsu/utils/sync_service.py index 6c003942f8..441b95a7ec 100644 --- a/openpype/modules/kitsu/utils/sync_service.py +++ b/openpype/modules/kitsu/utils/sync_service.py @@ -2,11 +2,17 @@ import os import gazu +from openpype.client import ( + get_project, + get_assets, + get_asset_by_name +) from openpype.pipeline import AvalonMongoDB from .credentials import validate_credentials from .update_op_with_zou import ( create_op_asset, set_op_project, + get_kitsu_project_name, write_project_to_op, update_op_assets, ) @@ -119,17 +125,16 @@ class Listener: # Write into DB if update_project: - self.dbcon = self.dbcon.database[project_name] + self.dbcon.Session["AVALON_PROJECT"] = project_name self.dbcon.bulk_write([update_project]) def _delete_project(self, data): """Delete project.""" - project_doc = self.dbcon.find_one( - {"type": "project", "data.zou_id": data["project_id"]} - ) + + project_name = get_kitsu_project_name(data["project_id"]) # Delete project collection - self.dbcon.database[project_doc["name"]].drop() + self.dbcon.database[project_name].drop() # == Asset == @@ -150,7 +155,8 @@ class Listener: def _update_asset(self, data): """Update asset into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity asset = gazu.asset.get_asset(data["asset_id"]) @@ -159,16 +165,18 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[asset["project_id"]] = project_doc # Update - asset_doc_id, asset_update = update_op_assets( + update_op_result = 
update_op_assets( self.dbcon, project_doc, [asset], zou_ids_and_asset_docs - )[0] - self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) def _delete_asset(self, data): """Delete asset of OP DB.""" @@ -197,7 +205,8 @@ class Listener: def _update_episode(self, data): """Update episode into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity episode = gazu.shot.get_episode(data["episode_id"]) @@ -206,16 +215,18 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[episode["project_id"]] = project_doc # Update - asset_doc_id, asset_update = update_op_assets( + update_op_result = update_op_assets( self.dbcon, project_doc, [episode], zou_ids_and_asset_docs - )[0] - self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) def _delete_episode(self, data): """Delete shot of OP DB.""" @@ -245,7 +256,8 @@ class Listener: def _update_sequence(self, data): """Update sequence into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity sequence = gazu.shot.get_sequence(data["sequence_id"]) @@ -254,16 +266,18 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[sequence["project_id"]] = project_doc # Update - asset_doc_id, asset_update = update_op_assets( + update_op_result = update_op_assets( self.dbcon, project_doc, [sequence], zou_ids_and_asset_docs - )[0] - self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) def _delete_sequence(self, data): """Delete sequence of OP DB.""" @@ -293,7 +307,8 @@ class Listener: def _update_shot(self, data): """Update shot into OP DB.""" set_op_project(self.dbcon, data["project_id"]) - project_doc = self.dbcon.find_one({"type": "project"}) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) # Get gazu entity shot = gazu.shot.get_shot(data["shot_id"]) @@ -302,16 +317,18 @@ class Listener: # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in self.dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[shot["project_id"]] = project_doc # Update - asset_doc_id, asset_update = update_op_assets( + update_op_result = update_op_assets( self.dbcon, project_doc, [shot], zou_ids_and_asset_docs - )[0] - self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + 
) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) def _delete_shot(self, data): """Delete shot of OP DB.""" @@ -327,14 +344,15 @@ class Listener: """Create new task into OP DB.""" # Get project entity set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() # Get gazu entity task = gazu.task.get_task(data["task_id"]) # Find asset doc - asset_doc = self.dbcon.find_one( - {"type": "asset", "data.zou.id": task["entity"]["id"]} - ) + parent_name = task["entity"]["name"] + + asset_doc = get_asset_by_name(project_name, parent_name) # Update asset tasks with new one asset_tasks = asset_doc["data"].get("tasks") @@ -351,10 +369,11 @@ class Listener: def _delete_task(self, data): """Delete task of OP DB.""" - set_op_project(self.dbcon, data["project_id"]) + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() # Find asset doc - asset_docs = [doc for doc in self.dbcon.find({"type": "asset"})] + asset_docs = list(get_assets(project_name)) for doc in asset_docs: # Match task for name, task in doc["data"]["tasks"].items(): diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py index cd98c0d204..02c27382eb 100644 --- a/openpype/modules/kitsu/utils/update_op_with_zou.py +++ b/openpype/modules/kitsu/utils/update_op_with_zou.py @@ -10,6 +10,12 @@ from gazu.task import ( all_tasks_for_shot, ) +from openpype.client import ( + get_project, + get_assets, + get_asset_by_id, + get_asset_by_name, +) from openpype.pipeline import AvalonMongoDB from openpype.api import get_project_settings from openpype.lib import create_project @@ -33,6 +39,20 @@ def create_op_asset(gazu_entity: dict) -> dict: } +def get_kitsu_project_name(project_id: str) -> str: + """Get project name based on project id in kitsu. + + Args: + project_id (str): UUID of project in Kitsu. + + Returns: + str: Name of Kitsu project. + """ + + project = gazu.project.get_project(project_id) + return project["name"] + + def set_op_project(dbcon: AvalonMongoDB, project_id: str): """Set project context. 
@@ -40,9 +60,8 @@ def set_op_project(dbcon: AvalonMongoDB, project_id: str): dbcon (AvalonMongoDB): Connection to DB project_id (str): Project zou ID """ - project = gazu.project.get_project(project_id) - project_name = project["name"] - dbcon.Session["AVALON_PROJECT"] = project_name + + dbcon.Session["AVALON_PROJECT"] = get_kitsu_project_name(project_id) def update_op_assets( @@ -72,9 +91,7 @@ def update_op_assets( if not item_doc: # Create asset op_asset = create_op_asset(item) insert_result = dbcon.insert_one(op_asset) - item_doc = dbcon.find_one( - {"type": "asset", "_id": insert_result.inserted_id} - ) + item_doc = get_asset_by_id(project_name, insert_result.inserted_id) # Update asset item_data = deepcopy(item_doc["data"]) @@ -82,22 +99,37 @@ def update_op_assets( item_data["zou"] = item # == Asset settings == - # Frame in, fallback on 0 - frame_in = int(item_data.get("frame_in") or 0) + # Frame in, fallback to project's value or default value (1001) + # TODO: get default from settings/project_anatomy/attributes.json + try: + frame_in = int( + item_data.pop( + "frame_in", project_doc["data"].get("frameStart") + ) + ) + except (TypeError, ValueError): + frame_in = 1001 item_data["frameStart"] = frame_in - item_data.pop("frame_in", None) - # Frame out, fallback on frame_in + duration - frames_duration = int(item.get("nb_frames") or 1) - frame_out = ( - item_data["frame_out"] - if item_data.get("frame_out") - else frame_in + frames_duration - ) - item_data["frameEnd"] = int(frame_out) - item_data.pop("frame_out", None) - # Fps, fallback to project's value when entity fps is deleted - if not item_data.get("fps") and item_doc["data"].get("fps"): - item_data["fps"] = project_doc["data"]["fps"] + # Frames duration, fallback on 0 + try: + frames_duration = int(item_data.pop("nb_frames", 0)) + except (TypeError, ValueError): + frames_duration = 0 + # Frame out, fallback on frame_in + duration or project's value or 1001 + frame_out = item_data.pop("frame_out", None) + if not frame_out: + frame_out = frame_in + frames_duration + try: + frame_out = int(frame_out) + except (TypeError, ValueError): + frame_out = 1001 + item_data["frameEnd"] = frame_out + # Fps, fallback to project's value or default value (25.0) + try: + fps = float(item_data.get("fps", project_doc["data"].get("fps"))) + except (TypeError, ValueError): + fps = 25.0 + item_data["fps"] = fps # Tasks tasks_list = [] @@ -106,9 +138,8 @@ def update_op_assets( tasks_list = all_tasks_for_asset(item) elif item_type == "Shot": tasks_list = all_tasks_for_shot(item) - # TODO frame in and out item_data["tasks"] = { - t["task_type_name"]: {"type": t["task_type_name"]} + t["task_type_name"]: {"type": t["task_type_name"], "zou": t} for t in tasks_list } @@ -123,17 +154,23 @@ def update_op_assets( parent_zou_id = substitute_parent_item["parent_id"] else: parent_zou_id = ( - item.get("parent_id") + # For Asset, put under asset type directory + item.get("entity_type_id") + if item_type == "Asset" + else None + # Else, fallback on usual hierarchy + or item.get("parent_id") or item.get("episode_id") or item.get("source_id") - ) # TODO check consistency + ) - # Substitute Episode and Sequence by Shot - substitute_item_type = ( - "shots" - if item_type in ["Episode", "Sequence"] - else f"{item_type.lower()}s" - ) + # Substitute item type for general classification (assets or shots) + if item_type in ["Asset", "AssetType"]: + substitute_item_type = "assets" + elif item_type in ["Episode", "Sequence"]: + substitute_item_type = "shots" + else: + 
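The frame-range block above replaces hard failures on malformed Kitsu data with explicit fallbacks (1001 and a zero duration are the hardcoded defaults used by the diff, with a TODO to read them from anatomy settings). The resolution order in isolation:

```python
def resolve_frame_range(item_data, project_data):
    """frame_in/frame_out with project and hardcoded fallbacks."""
    try:
        frame_in = int(
            item_data.pop("frame_in", project_data.get("frameStart"))
        )
    except (TypeError, ValueError):
        frame_in = 1001
    try:
        duration = int(item_data.pop("nb_frames", 0))
    except (TypeError, ValueError):
        duration = 0
    frame_out = item_data.pop("frame_out", None) or frame_in + duration
    try:
        frame_out = int(frame_out)
    except (TypeError, ValueError):
        frame_out = 1001
    return frame_in, frame_out


print(resolve_frame_range({}, {"frameStart": None}))  # (1001, 1001)
```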
substitute_item_type = f"{item_type.lower()}s" entity_parent_folders = [ f for f in project_module_settings["entities_root"] @@ -147,15 +184,33 @@ asset_doc_ids[parent_zou_id]["_id"] if parent_zou_id else None ) if visual_parent_doc_id is None: - # Find root folder doc - root_folder_doc = dbcon.find_one( - { - "type": "asset", - "name": entity_parent_folders[-1], - "data.root_of": substitute_item_type, - }, - ["_id"], + # Find root folder docs + root_folder_docs = get_assets( + project_name, + asset_names=[entity_parent_folders[-1]], + fields=["_id", "data.root_of"], ) + # NOTE: Not sure why it's checking for entity type? + # OP3 does not support multiple assets with same names so type + # filtering is irrelevant. + # This way mimics previous implementation: + # ``` + # root_folder_doc = dbcon.find_one( + # { + # "type": "asset", + # "name": entity_parent_folders[-1], + # "data.root_of": substitute_item_type, + # }, + # ["_id"], + # ) + # ``` + root_folder_doc = None + for folder_doc in root_folder_docs: + root_of = folder_doc.get("data", {}).get("root_of") + if root_of == substitute_item_type: + root_folder_doc = folder_doc + break + if root_folder_doc: visual_parent_doc_id = root_folder_doc["_id"] @@ -170,7 +225,14 @@ # Get parent entity parent_entity = parent_doc["data"]["zou"] - parent_zou_id = parent_entity["parent_id"] + parent_zou_id = parent_entity.get("parent_id") + + if item_type in ["Shot", "Sequence"]: + # Name with parents hierarchy "({episode}_){sequence}_{shot}" + # to avoid duplicate name issues + item_name = "_".join(item_data["parents"] + [item_doc["name"]]) + else: + item_name = item_doc["name"] # Set root folders parents item_data["parents"] = entity_parent_folders + item_data["parents"] @@ -185,9 +247,9 @@ item_doc["_id"], { "$set": { - "name": item["name"], + "name": item_name, "data": item_data, - "parent": asset_doc_ids[item["project_id"]]["_id"], + "parent": project_doc["_id"], } }, ) @@ -208,7 +270,7 @@ UpdateOne: Update instance for the project """ project_name = project["name"] - project_doc = dbcon.database[project_name].find_one({"type": "project"}) + project_doc = get_project(project_name) if not project_doc: print(f"Creating project '{project_name}'") project_doc = create_project(project_name, project_name, dbcon=dbcon) @@ -229,9 +291,9 @@ project_data.update( { "code": project_code, - "fps": project["fps"], - "resolutionWidth": project["resolution"].split("x")[0], - "resolutionHeight": project["resolution"].split("x")[1], + "fps": float(project["fps"]), + "resolutionWidth": int(project["resolution"].split("x")[0]), + "resolutionHeight": int(project["resolution"].split("x")[1]), "zou_id": project["id"], } ) @@ -278,6 +340,11 @@ def sync_all_projects(login: str, password: str): def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): """Update OP project in DB with Zou data. + `root_of` is meant to sort entities by type for better readability of + the data tree. It puts all shot-like entities (Shot, Episode, Sequence) + and all asset entities under two different root folders, defined in + settings. + Args: dbcon (AvalonMongoDB): MongoDB connection project (dict): Project dict retrieved with gazu.
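Prefixing shot and sequence names with their parent hierarchy (above) avoids duplicate-name clashes: OP3 requires unique asset names per project, while Kitsu allows e.g. `sh010` under several sequences. With hypothetical values:

```python
parents = ["ep01", "sq01"]           # item_data["parents"]
item_name = "_".join(parents + ["sh010"])
print(item_name)                     # ep01_sq01_sh010
```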
@@ -292,12 +359,17 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): # Get all assets from zou all_assets = gazu.asset.all_assets_for_project(project) + all_asset_types = gazu.asset.all_asset_types_for_project(project) all_episodes = gazu.shot.all_episodes_for_project(project) all_seqs = gazu.shot.all_sequences_for_project(project) all_shots = gazu.shot.all_shots_for_project(project) all_entities = [ item - for item in all_assets + all_episodes + all_seqs + all_shots + for item in all_assets + + all_asset_types + + all_episodes + + all_seqs + + all_shots if naming_pattern.match(item["name"]) ] @@ -305,26 +377,44 @@ bulk_writes.append(write_project_to_op(project, dbcon)) # Try to find project document - dbcon.Session["AVALON_PROJECT"] = project["name"] - project_doc = dbcon.find_one({"type": "project"}) + project_name = project["name"] + dbcon.Session["AVALON_PROJECT"] = project_name + project_doc = get_project(project_name) # Query all assets of the local project zou_ids_and_asset_docs = { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou", {}).get("id") } zou_ids_and_asset_docs[project["id"]] = project_doc # Create entities root folders - project_module_settings = get_project_settings(project["name"])["kitsu"] + project_module_settings = get_project_settings(project_name)["kitsu"] for entity_type, root in project_module_settings["entities_root"].items(): parent_folders = root.split("/") direct_parent_doc = None for i, folder in enumerate(parent_folders, 1): - parent_doc = dbcon.find_one( - {"type": "asset", "name": folder, "data.root_of": entity_type} + parent_doc = get_asset_by_name( + project_name, folder, fields=["_id", "data.root_of"] ) + # NOTE: Not sure why it's checking for entity type? + # OP3 does not support multiple assets with same names so type + # filtering is irrelevant. + # Also all of the entities could be queried at once using + # 'get_assets'. 
+ # This way mimics previous implementation: + # ``` + # parent_doc = dbcon.find_one( + # {"type": "asset", "name": folder, "data.root_of": entity_type} + # ) + # ``` + if ( + parent_doc + and parent_doc.get("data", {}).get("root_of") != entity_type + ): + parent_doc = None + if not parent_doc: direct_parent_doc = dbcon.insert_one( { @@ -334,21 +424,20 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): "data": { "root_of": entity_type, "parents": parent_folders[:i], - "visualParent": direct_parent_doc, + "visualParent": direct_parent_doc.inserted_id + if direct_parent_doc + else None, "tasks": {}, }, } ) # Create - to_insert = [] - to_insert.extend( - [ - create_op_asset(item) - for item in all_entities - if item["id"] not in zou_ids_and_asset_docs.keys() - ] - ) + to_insert = [ + create_op_asset(item) + for item in all_entities + if item["id"] not in zou_ids_and_asset_docs.keys() + ] if to_insert: # Insert doc in DB dbcon.insert_many(to_insert) @@ -357,7 +446,7 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): zou_ids_and_asset_docs.update( { asset_doc["data"]["zou"]["id"]: asset_doc - for asset_doc in dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) if asset_doc["data"].get("zou") } ) diff --git a/openpype/modules/kitsu/utils/update_zou_with_op.py b/openpype/modules/kitsu/utils/update_zou_with_op.py index 81d421206f..57d7094e95 100644 --- a/openpype/modules/kitsu/utils/update_zou_with_op.py +++ b/openpype/modules/kitsu/utils/update_zou_with_op.py @@ -6,6 +6,7 @@ from typing import List import gazu from pymongo import UpdateOne +from openpype.client import get_project, get_assets from openpype.pipeline import AvalonMongoDB from openpype.api import get_project_settings from openpype.modules.kitsu.utils.credentials import validate_credentials @@ -53,9 +54,7 @@ def sync_zou_from_op_project( """ # Get project doc if not provided if not project_doc: - project_doc = dbcon.database[project_name].find_one( - {"type": "project"} - ) + project_doc = get_project(project_name) # Get all entities from zou print(f"Synchronizing {project_name}...") @@ -96,7 +95,7 @@ def sync_zou_from_op_project( dbcon.Session["AVALON_PROJECT"] = project_name asset_docs = { asset_doc["_id"]: asset_doc - for asset_doc in dbcon.find({"type": "asset"}) + for asset_doc in get_assets(project_name) } # Create new assets diff --git a/openpype/modules/log_viewer/tray/widgets.py b/openpype/modules/log_viewer/tray/widgets.py index ed08e62109..c7ac64ab70 100644 --- a/openpype/modules/log_viewer/tray/widgets.py +++ b/openpype/modules/log_viewer/tray/widgets.py @@ -1,3 +1,4 @@ +import html from Qt import QtCore, QtWidgets import qtawesome from .models import LogModel, LogsFilterProxy @@ -286,7 +287,7 @@ class OutputWidget(QtWidgets.QWidget): if level == "debug": line_f = ( " -" - " {{ {loggerName} }}: [" + " {{ {logger_name} }}: [" " {message}" " ]" ) @@ -299,7 +300,7 @@ class OutputWidget(QtWidgets.QWidget): elif level == "warning": line_f = ( "*** WRN:" - " >>> {{ {loggerName} }}: [" + " >>> {{ {logger_name} }}: [" " {message}" " ]" ) @@ -307,16 +308,25 @@ class OutputWidget(QtWidgets.QWidget): line_f = ( "!!! 
ERR:" " {timestamp}" - " >>> {{ {loggerName} }}: [" + " >>> {{ {logger_name} }}: [" " {message}" " ]" ) + logger_name = log["loggerName"] + timestamp = "" + if not show_timecode: + timestamp = log["timestamp"] + message = log["message"] exc = log.get("exception") if exc: - log["message"] = exc["message"] + message = exc["message"] - line = line_f.format(**log) + line = line_f.format( + message=html.escape(message), + logger_name=logger_name, + timestamp=timestamp + ) if show_timecode: timestamp = log["timestamp"] diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/sync_server/providers/local_drive.py index 68f604b39c..172cb338cf 100644 --- a/openpype/modules/sync_server/providers/local_drive.py +++ b/openpype/modules/sync_server/providers/local_drive.py @@ -4,10 +4,11 @@ import shutil import threading import time -from openpype.api import Logger, Anatomy +from openpype.api import Logger +from openpype.pipeline import Anatomy from .abstract_provider import AbstractProvider -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class LocalDriveHandler(AbstractProvider): diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index 698b296a52..4027561d22 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -9,14 +9,12 @@ from collections import deque, defaultdict from openpype.modules import OpenPypeModule from openpype_interfaces import ITrayModule -from openpype.api import ( - Anatomy, +from openpype.settings import ( get_project_settings, get_system_settings, - get_local_site_id ) -from openpype.lib import PypeLogger -from openpype.pipeline import AvalonMongoDB +from openpype.lib import PypeLogger, get_local_site_id +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype.settings.lib import ( get_default_anatomy_settings, get_anatomy_settings @@ -28,7 +26,7 @@ from .providers import lib from .utils import time_function, SyncStatus, SiteAlreadyPresentError -log = PypeLogger().get_logger("SyncServer") +log = PypeLogger.get_logger("SyncServer") class SyncServerModule(OpenPypeModule, ITrayModule): diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 3cf1614316..3453e4bc4c 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -2,13 +2,13 @@ import os import platform +from openpype.client import get_asset_by_name from openpype.modules import OpenPypeModule from openpype_interfaces import ( ITrayService, ILaunchHookPaths ) from openpype.lib.events import register_event_callback -from openpype.pipeline import AvalonMongoDB from .exceptions import InvalidContextError @@ -197,22 +197,13 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): " Project: \"{}\" Asset: \"{}\" Task: \"{}\"" ).format(str(project_name), str(asset_name), str(task_name))) - dbconn = AvalonMongoDB() - dbconn.install() - dbconn.Session["AVALON_PROJECT"] = project_name - - asset_doc = dbconn.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": True, - "data.parents": True - } + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["_id", "name", "data.tasks", "data.parents"] ) + if not asset_doc: - dbconn.uninstall() raise InvalidContextError(( "Asset \"{}\" not found in project \"{}\"" ).format(asset_name, project_name)) @@ -220,7 
+211,6 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): asset_data = asset_doc.get("data") or {} asset_tasks = asset_data.get("tasks") or {} if task_name not in asset_tasks: - dbconn.uninstall() raise InvalidContextError(( "Task \"{}\" not found on asset \"{}\" in project \"{}\"" ).format(task_name, asset_name, project_name)) @@ -238,9 +228,10 @@ hierarchy_items = asset_data.get("parents") or [] hierarchy_items.append(asset_name) - dbconn.uninstall() return { "project_name": project_name, + "asset_id": str(asset_doc["_id"]), + "asset_name": asset_doc["name"], "task_name": task_name, "task_type": task_type, "hierarchy": hierarchy_items diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index 2e441fbf27..2cf785d981 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -6,6 +6,7 @@ from .constants import ( from .mongodb import ( AvalonMongoDB, ) +from .anatomy import Anatomy from .create import ( BaseCreator, @@ -96,6 +97,9 @@ __all__ = ( # --- MongoDB --- "AvalonMongoDB", + # --- Anatomy --- + "Anatomy", + # --- Create --- "BaseCreator", "Creator", diff --git a/openpype/pipeline/anatomy.py b/openpype/pipeline/anatomy.py new file mode 100644 index 0000000000..08db4749b3 --- /dev/null +++ b/openpype/pipeline/anatomy.py @@ -0,0 +1,1270 @@ +import os +import re +import copy +import platform +import collections +import numbers + +import six + +from openpype.settings.lib import get_anatomy_settings +from openpype.lib.path_templates import ( + TemplateUnsolved, + TemplateResult, + TemplatesDict, + FormatObject, +) +from openpype.lib.log import PypeLogger + +log = PypeLogger.get_logger(__name__) + + +class ProjectNotSet(Exception): + """Exception raised when Anatomy is created without a project name.""" + + +class RootCombinationError(Exception): + """This exception is raised when templates have combined root types.""" + + def __init__(self, roots): + joined_roots = ", ".join( + ["\"{}\"".format(_root) for _root in roots] + ) + # TODO better error message + msg = ( + "Combination of root with and" + " without root name in AnatomyTemplates. {}" + ).format(joined_roots) + + super(RootCombinationError, self).__init__(msg) + + +class Anatomy: + """Anatomy module helps to keep project settings. + + Wraps key project specifications, AnatomyTemplates and Roots. + + Args: + project_name (str): Project name for which overrides are loaded. + """ + + root_key_regex = re.compile(r"{(root?[^}]+)}") + root_name_regex = re.compile(r"root\[([^]]+)\]") + + def __init__(self, project_name=None, site_name=None): + if not project_name: + project_name = os.environ.get("AVALON_PROJECT") + + if not project_name: + raise ProjectNotSet(( + "Implementation bug: Project name is not set. Anatomy requires" + " a project name to load data for a specific project."
+ )) + + self.project_name = project_name + + self._data = self._prepare_anatomy_data( + get_anatomy_settings(project_name, site_name) + ) + self._site_name = site_name + self._templates_obj = AnatomyTemplates(self) + self._roots_obj = Roots(self) + + # Anatomy used as dictionary + # - implemented only getters returning copy + def __getitem__(self, key): + return copy.deepcopy(self._data[key]) + + def get(self, key, default=None): + return copy.deepcopy(self._data).get(key, default) + + def keys(self): + return copy.deepcopy(self._data).keys() + + def values(self): + return copy.deepcopy(self._data).values() + + def items(self): + return copy.deepcopy(self._data).items() + + @staticmethod + def _prepare_anatomy_data(anatomy_data): + """Prepare anatomy data for further processing. + + Method added to replace `{task}` with `{task[name]}` in templates. + """ + templates_data = anatomy_data.get("templates") + if templates_data: + # Replace `{task}` with `{task[name]}` in templates + value_queue = collections.deque() + value_queue.append(templates_data) + while value_queue: + item = value_queue.popleft() + if not isinstance(item, dict): + continue + + for key in tuple(item.keys()): + value = item[key] + if isinstance(value, dict): + value_queue.append(value) + + elif isinstance(value, six.string_types): + item[key] = value.replace("{task}", "{task[name]}") + return anatomy_data + + def reset(self): + """Reset values of cached data in templates and roots objects.""" + self._data = self._prepare_anatomy_data( + get_anatomy_settings(self.project_name, self._site_name) + ) + self.templates_obj.reset() + self.roots_obj.reset() + + @property + def templates(self): + """Wrap property `templates` of Anatomy's AnatomyTemplates instance.""" + return self._templates_obj.templates + + @property + def templates_obj(self): + """Return `AnatomyTemplates` object of current Anatomy instance.""" + return self._templates_obj + + def format(self, *args, **kwargs): + """Wrap `format` method of Anatomy's `templates_obj`.""" + return self._templates_obj.format(*args, **kwargs) + + def format_all(self, *args, **kwargs): + """Wrap `format_all` method of Anatomy's `templates_obj`.""" + return self._templates_obj.format_all(*args, **kwargs) + + @property + def roots(self): + """Wrap `roots` property of Anatomy's `roots_obj`.""" + return self._roots_obj.roots + + @property + def roots_obj(self): + """Return `Roots` object of current Anatomy instance.""" + return self._roots_obj + + def root_environments(self): + """Return OPENPYPE_ROOT_* environments for current project in dict.""" + return self._roots_obj.root_environments() + + def root_environmets_fill_data(self, template=None): + """Environment variable values in dictionary for rootless path. + + Args: + template (str): Template for environment variable key fill. + By default is set to `"${}"`. 
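`_prepare_anatomy_data` above walks the nested templates dictionary breadth-first with a deque and rewrites `{task}` to `{task[name]}` in every string value. The traversal in isolation (the template data is made up):

```python
import collections


def replace_in_templates(templates, old="{task}", new="{task[name]}"):
    """In-place BFS over nested dicts, rewriting string values."""
    queue = collections.deque([templates])
    while queue:
        item = queue.popleft()
        if not isinstance(item, dict):
            continue
        for key, value in item.items():
            if isinstance(value, dict):
                queue.append(value)
            elif isinstance(value, str):
                item[key] = value.replace(old, new)


data = {"work": {"file": "{project[code]}_{task}_v{version}"}}
replace_in_templates(data)
print(data)  # {'work': {'file': '{project[code]}_{task[name]}_v{version}'}}
```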
+ """ + return self.roots_obj.root_environmets_fill_data(template) + + def find_root_template_from_path(self, *args, **kwargs): + """Wrapper for Roots `find_root_template_from_path`.""" + return self.roots_obj.find_root_template_from_path(*args, **kwargs) + + def path_remapper(self, *args, **kwargs): + """Wrapper for Roots `path_remapper`.""" + return self.roots_obj.path_remapper(*args, **kwargs) + + def all_root_paths(self): + """Wrapper for Roots `all_root_paths`.""" + return self.roots_obj.all_root_paths() + + def set_root_environments(self): + """Set OPENPYPE_ROOT_* environments for current project.""" + self._roots_obj.set_root_environments() + + def root_names(self): + """Return root names for current project.""" + return self.root_names_from_templates(self.templates) + + def _root_keys_from_templates(self, data): + """Extract root key from templates in data. + + Args: + data (dict): Data that may contain templates as string. + + Return: + set: Set of all root names from templates as strings. + + Output example: `{"root[work]", "root[publish]"}` + """ + + output = set() + if isinstance(data, dict): + for value in data.values(): + for root in self._root_keys_from_templates(value): + output.add(root) + + elif isinstance(data, str): + for group in re.findall(self.root_key_regex, data): + output.add(group) + + return output + + def root_value_for_template(self, template): + """Returns value of root key from template.""" + root_templates = [] + for group in re.findall(self.root_key_regex, template): + root_templates.append("{" + group + "}") + + if not root_templates: + return None + + return root_templates[0].format(**{"root": self.roots}) + + def root_names_from_templates(self, templates): + """Extract root names form anatomy templates. + + Returns None if values in templates contain only "{root}". + Empty list is returned if there is no "root" in templates. + Else returns all root names from templates in list. + + RootCombinationError is raised when templates contain both root types, + basic "{root}" and with root name specification "{root[work]}". + + Args: + templates (dict): Anatomy templates where roots are not filled. + + Return: + list/None: List of all root names from templates as strings when + multiroot setup is used, otherwise None is returned. + """ + roots = list(self._root_keys_from_templates(templates)) + # Return empty list if no roots found in templates + if not roots: + return roots + + # Raise exception when root keys have roots with and without root name. + # Invalid output example: ["root", "root[project]", "root[render]"] + if len(roots) > 1 and "root" in roots: + raise RootCombinationError(roots) + + # Return None if "root" without root name in templates + if len(roots) == 1 and roots[0] == "root": + return None + + names = set() + for root in roots: + for group in re.findall(self.root_name_regex, root): + names.add(group) + return list(names) + + def fill_root(self, template_path): + """Fill template path where is only "root" key unfilled. + + Args: + template_path (str): Path with "root" key in. + Example path: "{root}/projects/MyProject/Shot01/Lighting/..." + + Return: + str: formatted path + """ + # NOTE does not care if there are different keys than "root" + return template_path.format(**{"root": self.roots}) + + @classmethod + def fill_root_with_path(cls, rootless_path, root_path): + """Fill path without filled "root" key with passed path. 
+ + This is helper to fill root with different directory path than anatomy + has defined no matter if is single or multiroot. + + Output path is same as input path if `rootless_path` does not contain + unfilled root key. + + Args: + rootless_path (str): Path without filled "root" key. Example: + "{root[work]}/MyProject/..." + root_path (str): What should replace root key in `rootless_path`. + + Returns: + str: Path with filled root. + """ + output = str(rootless_path) + for group in re.findall(cls.root_key_regex, rootless_path): + replacement = "{" + group + "}" + output = output.replace(replacement, root_path) + + return output + + def replace_root_with_env_key(self, filepath, template=None): + """Replace root of path with environment key. + + # Example: + ## Project with roots: + ``` + { + "nas": { + "windows": P:/projects", + ... + } + ... + } + ``` + + ## Entered filepath + "P:/projects/project/asset/task/animation_v001.ma" + + ## Entered template + "<{}>" + + ## Output + "/project/asset/task/animation_v001.ma" + + Args: + filepath (str): Full file path where root should be replaced. + template (str): Optional template for environment key. Must + have one index format key. + Default value if not entered: "${}" + + Returns: + str: Path where root is replaced with environment root key. + + Raise: + ValueError: When project's roots were not found in entered path. + """ + success, rootless_path = self.find_root_template_from_path(filepath) + if not success: + raise ValueError( + "{}: Project's roots were not found in path: {}".format( + self.project_name, filepath + ) + ) + + data = self.root_environmets_fill_data(template) + return rootless_path.format(**data) + + +class AnatomyTemplateUnsolved(TemplateUnsolved): + """Exception for unsolved template when strict is set to True.""" + + msg = "Anatomy template \"{0}\" is unsolved.{1}{2}" + + +class AnatomyTemplateResult(TemplateResult): + rootless = None + + def __new__(cls, result, rootless_path): + new_obj = super(AnatomyTemplateResult, cls).__new__( + cls, + str(result), + result.template, + result.solved, + result.used_values, + result.missing_keys, + result.invalid_types + ) + new_obj.rootless = rootless_path + return new_obj + + def validate(self): + if not self.solved: + raise AnatomyTemplateUnsolved( + self.template, + self.missing_keys, + self.invalid_types + ) + + def copy(self): + tmp = TemplateResult( + str(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + + def normalized(self): + """Convert to normalized path.""" + + tmp = TemplateResult( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + + +class AnatomyTemplates(TemplatesDict): + inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})") + inner_key_name_pattern = re.compile(r"\{@(.*?[^{}0]*)\}") + + def __init__(self, anatomy): + super(AnatomyTemplates, self).__init__() + self.anatomy = anatomy + self.loaded_project = None + + def __getitem__(self, key): + return self.templates[key] + + def get(self, key, default=None): + return self.templates.get(key, default) + + def reset(self): + self._raw_templates = None + self._templates = None + self._objected_templates = None + + @property + def project_name(self): + return self.anatomy.project_name + + @property + def roots(self): + return self.anatomy.roots + + @property + def templates(self): + 
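`fill_root_with_path` above relies on `root_key_regex` so that single-root (`{root}`) and named-root (`{root[work]}`) templates are handled uniformly. A quick demonstration of the regex with hypothetical paths:

```python
import re

root_key_regex = re.compile(r"{(root?[^}]+)}")

rootless_path = "{root[work]}/MyProject/shots/sh010"
output = rootless_path
# Replace every root key, named or not, with a concrete directory.
for group in root_key_regex.findall(rootless_path):
    output = output.replace("{" + group + "}", "P:/projects")
print(output)  # P:/projects/MyProject/shots/sh010
```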
self._validate_discovery() + return self._templates + + @property + def objected_templates(self): + self._validate_discovery() + return self._objected_templates + + def _validate_discovery(self): + if self.project_name != self.loaded_project: + self.reset() + + if self._templates is None: + self._discover() + self.loaded_project = self.project_name + + def _format_value(self, value, data): + if isinstance(value, RootItem): + return self._solve_dict(value, data) + + result = super(AnatomyTemplates, self)._format_value(value, data) + if isinstance(result, TemplateResult): + rootless_path = self._rootless_path(result, data) + result = AnatomyTemplateResult(result, rootless_path) + return result + + def set_templates(self, templates): + if not templates: + self.reset() + return + + self._raw_templates = copy.deepcopy(templates) + templates = copy.deepcopy(templates) + v_queue = collections.deque() + v_queue.append(templates) + while v_queue: + item = v_queue.popleft() + if not isinstance(item, dict): + continue + + for key in tuple(item.keys()): + value = item[key] + if isinstance(value, dict): + v_queue.append(value) + + elif ( + isinstance(value, six.string_types) + and "{task}" in value + ): + item[key] = value.replace("{task}", "{task[name]}") + + solved_templates = self.solve_template_inner_links(templates) + self._templates = solved_templates + self._objected_templates = self.create_ojected_templates( + solved_templates + ) + + def default_templates(self): + """Return default templates data with solved inner keys.""" + return self.solve_template_inner_links( + self.anatomy["templates"] + ) + + def _discover(self): + """Load anatomy templates from project settings. + Default templates are loaded if the project is not set or does not + have its own. + TODO: create templates if they do not exist. + + Returns: + TemplatesResultDict: Templates data for the current project, or + the default templates. + """ + + if self.project_name is None: + # QUESTION create project specific if not found? + raise AssertionError(( + "Project \"{0}\" does not have its own templates." + " Trying to use default." + ).format(self.project_name)) + + self.set_templates(self.anatomy["templates"]) + + @classmethod + def replace_inner_keys(cls, matches, value, key_values, key): + """Replacement of inner keys in template values.""" + for match in matches: + anatomy_sub_keys = ( + cls.inner_key_name_pattern.findall(match) + ) + if key in anatomy_sub_keys: + raise ValueError(( + "Unsolvable recursion in inner keys, " + "key: \"{}\" is in its own value." + " Can't determine source, please check Anatomy templates." + ).format(key)) + + for anatomy_sub_key in anatomy_sub_keys: + replace_value = key_values.get(anatomy_sub_key) + if replace_value is None: + raise KeyError(( + "Anatomy templates can't be filled." + " Anatomy key `{0}` has" + " invalid inner key `{1}`." + ).format(key, anatomy_sub_key)) + + if not ( + isinstance(replace_value, numbers.Number) + or isinstance(replace_value, six.string_types) + ): + raise ValueError(( + "Anatomy templates can't be filled." + " Anatomy key `{0}` has" + " invalid inner key `{1}`" + " with value `{2}`." + ).format(key, anatomy_sub_key, str(replace_value))) + + value = value.replace(match, str(replace_value)) + + return value + + @classmethod + def prepare_inner_keys(cls, key_values): + """Check values of inner keys. + + Check that each inner key exists in the template group and has a valid value. 
+ It is also required to avoid infinite loop with unsolvable recursion + when first inner key's value refers to second inner key's value where + first is used. + """ + keys_to_solve = set(key_values.keys()) + while True: + found = False + for key in tuple(keys_to_solve): + value = key_values[key] + + if isinstance(value, six.string_types): + matches = cls.inner_key_pattern.findall(value) + if not matches: + keys_to_solve.remove(key) + continue + + found = True + key_values[key] = cls.replace_inner_keys( + matches, value, key_values, key + ) + continue + + elif not isinstance(value, dict): + keys_to_solve.remove(key) + continue + + subdict_found = False + for _key, _value in tuple(value.items()): + matches = cls.inner_key_pattern.findall(_value) + if not matches: + continue + + subdict_found = True + found = True + key_values[key][_key] = cls.replace_inner_keys( + matches, _value, key_values, + "{}.{}".format(key, _key) + ) + + if not subdict_found: + keys_to_solve.remove(key) + + if not found: + break + + return key_values + + @classmethod + def solve_template_inner_links(cls, templates): + """Solve templates inner keys identified by "{@*}". + + Process is split into 2 parts. + First is collecting all global keys (keys in top hierarchy where value + is not dictionary). All global keys are set for all group keys (keys + in top hierarchy where value is dictionary). Value of a key is not + overridden in group if already contain value for the key. + + In second part all keys with "at" symbol in value are replaced with + value of the key afterward "at" symbol from the group. + + Args: + templates (dict): Raw templates data. + + Example: + templates:: + key_1: "value_1", + key_2: "{@key_1}/{filling_key}" + + group_1: + key_3: "value_3/{@key_2}" + + group_2: + key_2": "value_2" + key_4": "value_4/{@key_2}" + + output:: + key_1: "value_1" + key_2: "value_1/{filling_key}" + + group_1: { + key_1: "value_1" + key_2: "value_1/{filling_key}" + key_3: "value_3/value_1/{filling_key}" + + group_2: { + key_1: "value_1" + key_2: "value_2" + key_4: "value_3/value_2" + """ + default_key_values = templates.pop("defaults", {}) + for key, value in tuple(templates.items()): + if isinstance(value, dict): + continue + default_key_values[key] = templates.pop(key) + + # Pop "others" key before before expected keys are processed + other_templates = templates.pop("others") or {} + + keys_by_subkey = {} + for sub_key, sub_value in templates.items(): + key_values = {} + key_values.update(default_key_values) + key_values.update(sub_value) + keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) + + for sub_key, sub_value in other_templates.items(): + if sub_key in keys_by_subkey: + log.warning(( + "Key \"{}\" is duplicated in others. Skipping." 
+ ).format(sub_key)) + continue + + key_values = {} + key_values.update(default_key_values) + key_values.update(sub_value) + keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) + + default_keys_by_subkeys = cls.prepare_inner_keys(default_key_values) + + for key, value in default_keys_by_subkeys.items(): + keys_by_subkey[key] = value + + return keys_by_subkey + + def _dict_to_subkeys_list(self, subdict, pre_keys=None): + if pre_keys is None: + pre_keys = [] + output = [] + for key in subdict: + value = subdict[key] + result = list(pre_keys) + result.append(key) + if isinstance(value, dict): + for item in self._dict_to_subkeys_list(value, result): + output.append(item) + else: + output.append(result) + return output + + def _keys_to_dicts(self, key_list, value): + if not key_list: + return None + if len(key_list) == 1: + return {key_list[0]: value} + return {key_list[0]: self._keys_to_dicts(key_list[1:], value)} + + def _rootless_path(self, result, final_data): + used_values = result.used_values + missing_keys = result.missing_keys + template = result.template + invalid_types = result.invalid_types + if ( + "root" not in used_values + or "root" in missing_keys + or "{root" not in template + ): + return + + for invalid_type in invalid_types: + if "root" in invalid_type: + return + + root_keys = self._dict_to_subkeys_list({"root": used_values["root"]}) + if not root_keys: + return + + output = str(result) + for used_root_keys in root_keys: + if not used_root_keys: + continue + + used_value = used_values + root_key = None + for key in used_root_keys: + used_value = used_value[key] + if root_key is None: + root_key = key + else: + root_key += "[{}]".format(key) + + root_key = "{" + root_key + "}" + output = output.replace(str(used_value), root_key) + + return output + + def format(self, data, strict=True): + copy_data = copy.deepcopy(data) + roots = self.roots + if roots: + copy_data["root"] = roots + result = super(AnatomyTemplates, self).format(copy_data) + result.strict = strict + return result + + def format_all(self, in_data, only_keys=True): + """ Solves templates based on entered data. + + Args: + data (dict): Containing keys to be filled into template. + + Returns: + TemplatesResultDict: Output `TemplateResult` have `strict` + attribute set to False so accessing unfilled keys in templates + won't raise any exceptions. + """ + return self.format(in_data, strict=False) + + +class RootItem(FormatObject): + """Represents one item or roots. + + Holds raw data of root item specification. Raw data contain value + for each platform, but current platform value is used when object + is used for formatting of template. + + Args: + root_raw_data (dict): Dictionary containing root values by platform + names. ["windows", "linux" and "darwin"] + name (str, optional): Root name which is representing. Used with + multi root setup otherwise None value is expected. + parent_keys (list, optional): All dictionary parent keys. Values of + `parent_keys` are used for get full key which RootItem is + representing. Used for replacing root value in path with + formattable key. e.g. parent_keys == ["work"] -> {root[work]} + parent (object, optional): It is expected to be `Roots` object. + Value of `parent` won't affect code logic much. 
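The `{@key}` resolution implemented by `prepare_inner_keys` and `solve_template_inner_links` above can be pictured as a fixpoint loop over a flat dictionary. A simplified sketch under that assumption (the real code also walks nested groups and raises `KeyError` for unknown inner keys):

```python
import re

INNER_KEY = re.compile(r"\{@([^{}]+)\}")


def solve_inner_keys(key_values):
    """Expand "{@other_key}" references until none remain."""
    changed = True
    while changed:
        changed = False
        for key, value in key_values.items():
            def _replace(match, current=key):
                inner = match.group(1)
                if inner == current:
                    raise ValueError(
                        "Key {!r} refers to itself".format(current))
                return str(key_values[inner])
            new_value = INNER_KEY.sub(_replace, value)
            if new_value != value:
                key_values[key] = new_value
                changed = True
    return key_values


print(solve_inner_keys({
    "key_1": "value_1",
    "key_2": "{@key_1}/{filling_key}",
    "key_3": "value_3/{@key_2}",
}))
# {'key_1': 'value_1', 'key_2': 'value_1/{filling_key}',
#  'key_3': 'value_3/value_1/{filling_key}'}
```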
+ """ + + def __init__( + self, root_raw_data, name=None, parent_keys=None, parent=None + ): + lowered_platform_keys = {} + for key, value in root_raw_data.items(): + lowered_platform_keys[key.lower()] = value + self.raw_data = lowered_platform_keys + self.cleaned_data = self._clean_roots(lowered_platform_keys) + self.name = name + self.parent_keys = parent_keys or [] + self.parent = parent + + self.available_platforms = list(lowered_platform_keys.keys()) + self.value = lowered_platform_keys.get(platform.system().lower()) + self.clean_value = self.clean_root(self.value) + + def __format__(self, *args, **kwargs): + return self.value.__format__(*args, **kwargs) + + def __str__(self): + return str(self.value) + + def __repr__(self): + return self.__str__() + + def __getitem__(self, key): + if isinstance(key, numbers.Number): + return self.value[key] + + additional_info = "" + if self.parent and self.parent.project_name: + additional_info += " for project \"{}\"".format( + self.parent.project_name + ) + + raise AssertionError( + "Root key \"{}\" is missing{}.".format( + key, additional_info + ) + ) + + def full_key(self): + """Full key value for dictionary formatting in template. + + Returns: + str: Return full replacement key for formatting. This helps when + multiple roots are set. In that case e.g. `"root[work]"` is + returned. + """ + if not self.name: + return "root" + + joined_parent_keys = "".join( + ["[{}]".format(key) for key in self.parent_keys] + ) + return "root{}".format(joined_parent_keys) + + def clean_path(self, path): + """Just replace backslashes with forward slashes.""" + return str(path).replace("\\", "/") + + def clean_root(self, root): + """Makes sure root value does not end with slash.""" + if root: + root = self.clean_path(root) + while root.endswith("/"): + root = root[:-1] + return root + + def _clean_roots(self, raw_data): + """Clean all values of raw root item values.""" + cleaned = {} + for key, value in raw_data.items(): + cleaned[key] = self.clean_root(value) + return cleaned + + def path_remapper(self, path, dst_platform=None, src_platform=None): + """Remap path for specific platform. + + Args: + path (str): Source path which need to be remapped. + dst_platform (str, optional): Specify destination platform + for which remapping should happen. + src_platform (str, optional): Specify source platform. This is + recommended to not use and keep unset until you really want + to use specific platform. + roots (dict/RootItem/None, optional): It is possible to remap + path with different roots then instance where method was + called has. + + Returns: + str/None: When path does not contain known root then + None is returned else returns remapped path with "{root}" + or "{root[]}". 
+ """ + cleaned_path = self.clean_path(path) + if dst_platform: + dst_root_clean = self.cleaned_data.get(dst_platform) + if not dst_root_clean: + key_part = "" + full_key = self.full_key() + if full_key != "root": + key_part += "\"{}\" ".format(full_key) + + log.warning( + "Root {}miss platform \"{}\" definition.".format( + key_part, dst_platform + ) + ) + return None + + if cleaned_path.startswith(dst_root_clean): + return cleaned_path + + if src_platform: + src_root_clean = self.cleaned_data.get(src_platform) + if src_root_clean is None: + log.warning( + "Root \"{}\" miss platform \"{}\" definition.".format( + self.full_key(), src_platform + ) + ) + return None + + if not cleaned_path.startswith(src_root_clean): + return None + + subpath = cleaned_path[len(src_root_clean):] + if dst_platform: + # `dst_root_clean` is used from upper condition + return dst_root_clean + subpath + return self.clean_value + subpath + + result, template = self.find_root_template_from_path(path) + if not result: + return None + + def parent_dict(keys, value): + if not keys: + return value + + key = keys.pop(0) + return {key: parent_dict(keys, value)} + + if dst_platform: + format_value = parent_dict(list(self.parent_keys), dst_root_clean) + else: + format_value = parent_dict(list(self.parent_keys), self.value) + + return template.format(**{"root": format_value}) + + def find_root_template_from_path(self, path): + """Replaces known root value with formattable key in path. + + All platform values are checked for this replacement. + + Args: + path (str): Path where root value should be found. + + Returns: + tuple: Tuple contain 2 values: `success` (bool) and `path` (str). + When success it True then path should contain replaced root + value with formattable key. + + Example: + When input path is:: + "C:/windows/path/root/projects/my_project/file.ext" + + And raw data of item looks like:: + { + "windows": "C:/windows/path/root", + "linux": "/mount/root" + } + + Output will be:: + (True, "{root}/projects/my_project/file.ext") + + If any of raw data value wouldn't match path's root output is:: + (False, "C:/windows/path/root/projects/my_project/file.ext") + """ + result = False + output = str(path) + + root_paths = list(self.cleaned_data.values()) + mod_path = self.clean_path(path) + for root_path in root_paths: + # Skip empty paths + if not root_path: + continue + + if mod_path.startswith(root_path): + result = True + replacement = "{" + self.full_key() + "}" + output = replacement + mod_path[len(root_path):] + break + + return (result, output) + + +class Roots: + """Object which should be used for formatting "root" key in templates. + + Args: + anatomy Anatomy: Anatomy object created for a specific project. + """ + + env_prefix = "OPENPYPE_PROJECT_ROOT" + roots_filename = "roots.json" + + def __init__(self, anatomy): + self.anatomy = anatomy + self.loaded_project = None + self._roots = None + + def __format__(self, *args, **kwargs): + return self.roots.__format__(*args, **kwargs) + + def __getitem__(self, key): + return self.roots[key] + + def reset(self): + """Reset current roots value.""" + self._roots = None + + def path_remapper( + self, path, dst_platform=None, src_platform=None, roots=None + ): + """Remap path for specific platform. + + Args: + path (str): Source path which need to be remapped. + dst_platform (str, optional): Specify destination platform + for which remapping should happen. + src_platform (str, optional): Specify source platform. 
This is + recommended to not use and keep unset until you really want + to use specific platform. + roots (dict/RootItem/None, optional): It is possible to remap + path with different roots then instance where method was + called has. + + Returns: + str/None: When path does not contain known root then + None is returned else returns remapped path with "{root}" + or "{root[]}". + """ + if roots is None: + roots = self.roots + + if roots is None: + raise ValueError("Roots are not set. Can't find path.") + + if "{root" in path: + path = path.format(**{"root": roots}) + # If `dst_platform` is not specified then return else continue. + if not dst_platform: + return path + + if isinstance(roots, RootItem): + return roots.path_remapper(path, dst_platform, src_platform) + + for _root in roots.values(): + result = self.path_remapper( + path, dst_platform, src_platform, _root + ) + if result is not None: + return result + + def find_root_template_from_path(self, path, roots=None): + """Find root value in entered path and replace it with formatting key. + + Args: + path (str): Source path where root will be searched. + roots (Roots/dict, optional): It is possible to use different + roots than instance where method was triggered has. + + Returns: + tuple: Output contains tuple with bool representing success as + first value and path with or without replaced root with + formatting key as second value. + + Raises: + ValueError: When roots are not entered and can't be loaded. + """ + if roots is None: + log.debug( + "Looking for matching root in path \"{}\".".format(path) + ) + roots = self.roots + + if roots is None: + raise ValueError("Roots are not set. Can't find path.") + + if isinstance(roots, RootItem): + return roots.find_root_template_from_path(path) + + for root_name, _root in roots.items(): + success, result = self.find_root_template_from_path(path, _root) + if success: + log.info("Found match in root \"{}\".".format(root_name)) + return success, result + + log.warning("No matching root was found in current setting.") + return (False, path) + + def set_root_environments(self): + """Set root environments for current project.""" + for key, value in self.root_environments().items(): + os.environ[key] = value + + def root_environments(self): + """Use root keys to create unique keys for environment variables. + + Concatenates prefix "OPENPYPE_ROOT" with root keys to create unique + keys. + + Returns: + dict: Result is `{(str): (str)}` dicitonary where key represents + unique key concatenated by keys and value is root value of + current platform root. 
+ + Example: + With raw root values:: + "work": { + "windows": "P:/projects/work", + "linux": "/mnt/share/projects/work", + "darwin": "/darwin/path/work" + }, + "publish": { + "windows": "P:/projects/publish", + "linux": "/mnt/share/projects/publish", + "darwin": "/darwin/path/publish" + } + + Result on windows platform:: + { + "OPENPYPE_ROOT_WORK": "P:/projects/work", + "OPENPYPE_ROOT_PUBLISH": "P:/projects/publish" + } + + Short example when multiroot is not used:: + { + "OPENPYPE_ROOT": "P:/projects" + } + """ + return self._root_environments() + + def all_root_paths(self, roots=None): + """Return all paths for all roots of all platforms.""" + if roots is None: + roots = self.roots + + output = [] + if isinstance(roots, RootItem): + for value in roots.raw_data.values(): + output.append(value) + return output + + for _roots in roots.values(): + output.extend(self.all_root_paths(_roots)) + return output + + def _root_environments(self, keys=None, roots=None): + if not keys: + keys = [] + if roots is None: + roots = self.roots + + if isinstance(roots, RootItem): + key_items = [self.env_prefix] + for _key in keys: + key_items.append(_key.upper()) + + key = "_".join(key_items) + # Make sure key and value does not contain unicode + # - can happen in Python 2 hosts + return {str(key): str(roots.value)} + + output = {} + for _key, _value in roots.items(): + _keys = list(keys) + _keys.append(_key) + output.update(self._root_environments(_keys, _value)) + return output + + def root_environmets_fill_data(self, template=None): + """Environment variable values in dictionary for rootless path. + + Args: + template (str): Template for environment variable key fill. + By default is set to `"${}"`. + """ + if template is None: + template = "${}" + return self._root_environmets_fill_data(template) + + def _root_environmets_fill_data(self, template, keys=None, roots=None): + if keys is None and roots is None: + return { + "root": self._root_environmets_fill_data( + template, [], self.roots + ) + } + + if isinstance(roots, RootItem): + key_items = [Roots.env_prefix] + for _key in keys: + key_items.append(_key.upper()) + key = "_".join(key_items) + return template.format(key) + + output = {} + for key, value in roots.items(): + _keys = list(keys) + _keys.append(key) + output[key] = self._root_environmets_fill_data( + template, _keys, value + ) + return output + + @property + def project_name(self): + """Return project name which will be used for loading root values.""" + return self.anatomy.project_name + + @property + def roots(self): + """Property for filling "root" key in templates. + + This property returns roots for current project or default root values. + Warning: + Default roots value may cause issues when project use different + roots settings. That may happen when project use multiroot + templates but default roots miss their keys. + """ + if self.project_name != self.loaded_project: + self._roots = None + + if self._roots is None: + self._roots = self._discover() + self.loaded_project = self.project_name + return self._roots + + def _discover(self): + """ Loads current project's roots or default. + + Default roots are loaded if project override's does not contain roots. + + Returns: + `RootItem` or `dict` with multiple `RootItem`s when multiroot + setting is used. + """ + + return self._parse_dict(self.anatomy["roots"], parent=self) + + @staticmethod + def _parse_dict(data, key=None, parent_keys=None, parent=None): + """Parse roots raw data into RootItem or dictionary with RootItems. 
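The recursion behind `root_environments` can be sketched without the class machinery. This assumes single-platform values were already picked, so `roots` is either a string or a nested dict of strings:

```python
ENV_PREFIX = "OPENPYPE_PROJECT_ROOT"


def root_environments(roots, keys=None):
    """Flatten nested root values into environment-style keys."""
    keys = keys or []
    if isinstance(roots, str):
        key = "_".join([ENV_PREFIX] + [k.upper() for k in keys])
        return {key: roots}
    output = {}
    for key, value in roots.items():
        output.update(root_environments(value, keys + [key]))
    return output


print(root_environments({
    "work": "P:/projects/work",
    "publish": "P:/projects/publish",
}))
# {'OPENPYPE_PROJECT_ROOT_WORK': 'P:/projects/work',
#  'OPENPYPE_PROJECT_ROOT_PUBLISH': 'P:/projects/publish'}
```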
+ + Converting raw roots data to `RootItem` helps to handle platform keys. + This method is recursive to be able handle multiroot setup and + is static to be able to load default roots without creating new object. + + Args: + data (dict): Should contain raw roots data to be parsed. + key (str, optional): Current root key. Set by recursion. + parent_keys (list): Parent dictionary keys. Set by recursion. + parent (Roots, optional): Parent object set in `RootItem` + helps to keep RootItem instance updated with `Roots` object. + + Returns: + `RootItem` or `dict` with multiple `RootItem`s when multiroot + setting is used. + """ + if not parent_keys: + parent_keys = [] + is_last = False + for value in data.values(): + if isinstance(value, six.string_types): + is_last = True + break + + if is_last: + return RootItem(data, key, parent_keys, parent=parent) + + output = {} + for _key, value in data.items(): + _parent_keys = list(parent_keys) + _parent_keys.append(_key) + output[_key] = Roots._parse_dict(value, _key, _parent_keys, parent) + return output diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index 4a147c230b..e719e46514 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -1,11 +1,9 @@ """Core pipeline functionality""" import os -import sys import json import types import logging -import inspect import platform import pyblish.api @@ -14,11 +12,8 @@ from pyblish.lib import MessageHandler import openpype from openpype.modules import load_modules, ModulesManager from openpype.settings import get_project_settings -from openpype.lib import ( - Anatomy, - filter_pyblish_plugins, -) - +from openpype.lib import filter_pyblish_plugins +from .anatomy import Anatomy from . import ( legacy_io, register_loader_plugin_path, @@ -235,73 +230,10 @@ def register_host(host): required, or browse the source code. """ - signatures = { - "ls": [] - } - _validate_signature(host, signatures) _registered_host["_"] = host -def _validate_signature(module, signatures): - # Required signatures for each member - - missing = list() - invalid = list() - success = True - - for member in signatures: - if not hasattr(module, member): - missing.append(member) - success = False - - else: - attr = getattr(module, member) - if sys.version_info.major >= 3: - signature = inspect.getfullargspec(attr)[0] - else: - signature = inspect.getargspec(attr)[0] - required_signature = signatures[member] - - assert isinstance(signature, list) - assert isinstance(required_signature, list) - - if not all(member in signature - for member in required_signature): - invalid.append({ - "member": member, - "signature": ", ".join(signature), - "required": ", ".join(required_signature) - }) - success = False - - if not success: - report = list() - - if missing: - report.append( - "Incomplete interface for module: '%s'\n" - "Missing: %s" % (module, ", ".join( - "'%s'" % member for member in missing)) - ) - - if invalid: - report.append( - "'%s': One or more members were found, but didn't " - "have the right argument signature." 
% module.__name__ - ) - - for member in invalid: - report.append( - " Found: {member}({signature})".format(**member) - ) - report.append( - " Expected: {member}({required})".format(**member) - ) - - raise ValueError("\n".join(report)) - - def registered_host(): """Return currently registered host""" return _registered_host["_"] diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 7931ea400a..9b55c3b21e 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -6,6 +6,7 @@ import inspect from uuid import uuid4 from contextlib import contextmanager +from openpype.host import INewPublisher from openpype.pipeline import legacy_io from openpype.pipeline.mongodb import ( AvalonMongoDB, @@ -28,6 +29,7 @@ UpdateData = collections.namedtuple("UpdateData", ["instance", "changes"]) class ImmutableKeyError(TypeError): """Accessed key is immutable so does not allow changes or removements.""" + def __init__(self, key, msg=None): self.immutable_key = key if not msg: @@ -39,6 +41,7 @@ class ImmutableKeyError(TypeError): class HostMissRequiredMethod(Exception): """Host does not have implemented required functions for creation.""" + def __init__(self, host, missing_methods): self.missing_methods = missing_methods self.host = host @@ -65,6 +68,7 @@ class InstanceMember: TODO: Implement and use! """ + def __init__(self, instance, name): self.instance = instance @@ -93,6 +97,7 @@ class AttributeValues: values(dict): Values after possible conversion. origin_data(dict): Values loaded from host before conversion. """ + def __init__(self, attr_defs, values, origin_data=None): from openpype.lib.attribute_definitions import UnknownDef @@ -173,6 +178,10 @@ class AttributeValues: output = {} for key in self._data: output[key] = self[key] + + for key, attr_def in self._attr_defs_by_key.items(): + if key not in output: + output[key] = attr_def.default return output @staticmethod @@ -195,6 +204,7 @@ class CreatorAttributeValues(AttributeValues): Args: instance (CreatedInstance): Instance for which are values hold. """ + def __init__(self, instance, *args, **kwargs): self.instance = instance super(CreatorAttributeValues, self).__init__(*args, **kwargs) @@ -210,6 +220,7 @@ class PublishAttributeValues(AttributeValues): publish_attributes(PublishAttributes): Wrapper for multiple publish attributes is used as parent object. """ + def __init__(self, publish_attributes, *args, **kwargs): self.publish_attributes = publish_attributes super(PublishAttributeValues, self).__init__(*args, **kwargs) @@ -231,6 +242,7 @@ class PublishAttributes: attr_plugins(list): List of publish plugins that may have defined attribute definitions. """ + def __init__(self, parent, origin_data, attr_plugins=None): self.parent = parent self._origin_data = copy.deepcopy(origin_data) @@ -269,6 +281,7 @@ class PublishAttributes: key(str): Plugin name. default: Default value if plugin was not found. 
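The `data_to_store` change in this hunk makes keys the user never touched fall back to their attribute definition defaults. A sketch of that merge with a hypothetical stand-in definition class (`_FakeDef` is illustrative, not the OpenPype attribute definition type):

```python
def data_to_store(data, attr_defs_by_key):
    """Explicit values win; untouched keys get the definition default."""
    output = dict(data)
    for key, attr_def in attr_defs_by_key.items():
        if key not in output:
            output[key] = attr_def.default
    return output


class _FakeDef:
    def __init__(self, default):
        self.default = default


print(data_to_store(
    {"review": False},
    {"review": _FakeDef(True), "farm": _FakeDef(False)},
))
# {'review': False, 'farm': False}
```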
""" + if key not in self._data: return default @@ -286,11 +299,13 @@ class PublishAttributes: def plugin_names_order(self): """Plugin names order by their 'order' attribute.""" + for name in self._plugin_names_order: yield name def data_to_store(self): """Convert attribute values to "data to store".""" + output = {} for key, attr_value in self._data.items(): output[key] = attr_value.data_to_store() @@ -298,6 +313,7 @@ class PublishAttributes: def changes(self): """Return changes per each key.""" + changes = {} for key, attr_val in self._data.items(): attr_changes = attr_val.changes() @@ -313,6 +329,7 @@ class PublishAttributes: def set_publish_plugins(self, attr_plugins): """Set publish plugins attribute definitions.""" + self._plugin_names_order = [] self._missing_plugins = [] self.attr_plugins = attr_plugins or [] @@ -364,6 +381,7 @@ class CreatedInstance: `openpype.pipeline.registered_host`. new(bool): Is instance new. """ + # Keys that can't be changed or removed from data after loading using # creator. # - 'creator_attributes' and 'publish_attributes' can change values of @@ -495,6 +513,20 @@ class CreatedInstance: def subset_name(self): return self._data["subset"] + @property + def label(self): + label = self._data.get("label") + if not label: + label = self.subset_name + return label + + @property + def group_label(self): + label = self._data.get("group") + if label: + return label + return self.creator.get_group_label() + @property def creator_identifier(self): return self.creator.identifier @@ -551,6 +583,7 @@ class CreatedInstance: @property def id(self): """Instance identifier.""" + return self._data["instance_id"] @property @@ -559,10 +592,12 @@ class CreatedInstance: Access to data is needed to modify values. """ + return self def changes(self): """Calculate and return changes.""" + changes = {} new_keys = set() for key, new_value in self._data.items(): @@ -651,12 +686,6 @@ class CreateContext: discover_publish_plugins(bool): Discover publish plugins during reset phase. """ - # Methods required in host implementaion to be able create instances - # or change context data. - required_methods = ( - "get_context_data", - "update_context_data" - ) def __init__( self, host, dbcon=None, headless=False, reset=True, @@ -707,6 +736,7 @@ class CreateContext: self.manual_creators = {} self.publish_discover_result = None + self.publish_plugins_mismatch_targets = [] self.publish_plugins = [] self.plugins_with_defs = [] self._attr_plugins_by_family = {} @@ -738,10 +768,10 @@ class CreateContext: Args: host(ModuleType): Host implementaion. 
""" - missing = set() - for attr_name in cls.required_methods: - if not hasattr(host, attr_name): - missing.add(attr_name) + + missing = set( + INewPublisher.get_missing_publish_methods(host) + ) return missing @property @@ -753,6 +783,10 @@ class CreateContext: def host_name(self): return os.environ["AVALON_APP"] + @property + def project_name(self): + return self.dbcon.active_project() + @property def log(self): """Dynamic access to logger.""" @@ -825,6 +859,7 @@ class CreateContext: discover_result = DiscoverResult() plugins_with_defs = [] plugins_by_targets = [] + plugins_mismatch_targets = [] if discover_publish_plugins: discover_result = publish_plugins_discover() publish_plugins = discover_result.plugins @@ -834,19 +869,26 @@ class CreateContext: plugins_by_targets = pyblish.logic.plugins_by_targets( publish_plugins, list(targets) ) + # Collect plugins that can have attribute definitions for plugin in publish_plugins: if OpenPypePyblishPluginMixin in inspect.getmro(plugin): plugins_with_defs.append(plugin) + plugins_mismatch_targets = [ + plugin + for plugin in publish_plugins + if plugin not in plugins_by_targets + ] + + self.publish_plugins_mismatch_targets = plugins_mismatch_targets self.publish_discover_result = discover_result self.publish_plugins = plugins_by_targets self.plugins_with_defs = plugins_with_defs # Prepare settings - project_name = self.dbcon.Session["AVALON_PROJECT"] system_settings = get_system_settings() - project_settings = get_project_settings(project_name) + project_settings = get_project_settings(self.project_name) # Discover and prepare creators creators = {} @@ -878,9 +920,9 @@ class CreateContext: continue creator = creator_class( - self, - system_settings, project_settings, + system_settings, + self, self.headless ) creators[creator_identifier] = creator diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 8006d4f4f8..52c76db5ef 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -1,5 +1,4 @@ import copy -import logging from abc import ( ABCMeta, @@ -47,6 +46,9 @@ class BaseCreator: # Label shown in UI label = None + group_label = None + # Cached group label after first call 'get_group_label' + _cached_group_label = None # Variable to store logger _log = None @@ -70,7 +72,7 @@ class BaseCreator: host_name = None def __init__( - self, create_context, system_settings, project_settings, headless=False + self, project_settings, system_settings, create_context, headless=False ): # Reference to CreateContext self.create_context = create_context @@ -85,15 +87,54 @@ class BaseCreator: Default implementation returns plugin's family. """ + return self.family @abstractproperty def family(self): """Family that plugin represents.""" + pass + @property + def project_name(self): + """Family that plugin represents.""" + + return self.create_context.project_name + + @property + def host(self): + return self.create_context.host + + def get_group_label(self): + """Group label under which are instances grouped in UI. + + Default implementation use attributes in this order: + - 'group_label' -> 'label' -> 'identifier' + Keep in mind that 'identifier' use 'family' by default. + + Returns: + str: Group label that can be used for grouping of instances in UI. + Group label can be overriden by instance itself. 
+ """ + + if self._cached_group_label is None: + label = self.identifier + if self.group_label: + label = self.group_label + elif self.label: + label = self.label + self._cached_group_label = label + return self._cached_group_label + @property def log(self): + """Logger of the plugin. + + Returns: + logging.Logger: Logger with name of the plugin. + """ + if self._log is None: from openpype.api import Logger @@ -101,10 +142,30 @@ class BaseCreator: return self._log def _add_instance_to_context(self, instance): - """Helper method to ad d""" + """Helper method to add instance to create context. + + Instances should be stored to DCC workfile metadata to be able reload + them and also stored to CreateContext in which is creator plugin + existing at the moment to be able use it without refresh of + CreateContext. + + Args: + instance (CreatedInstance): New created instance. + """ + self.create_context.creator_adds_instance(instance) def _remove_instance_from_context(self, instance): + """Helper method to remove instance from create context. + + Instances must be removed from DCC workfile metadat aand from create + context in which plugin is existing at the moment of removement to + propagate the change without restarting create context. + + Args: + instance (CreatedInstance): Instance which should be removed. + """ + self.create_context.creator_removed_instance(instance) @abstractmethod @@ -115,6 +176,7 @@ class BaseCreator: - must expect all data that were passed to init in previous implementation """ + pass @abstractmethod @@ -141,6 +203,7 @@ class BaseCreator: self._add_instance_to_context(instance) ``` """ + pass @abstractmethod @@ -148,9 +211,10 @@ class BaseCreator: """Store changes of existing instances so they can be recollected. Args: - update_list(list): Gets list of tuples. Each item + update_list(List[UpdateData]): Gets list of tuples. Each item contain changed instance and it's changes. """ + pass @abstractmethod @@ -161,9 +225,10 @@ class BaseCreator: 'True' if did so. Args: - instance(list): Instance objects which should be + instance(List[CreatedInstance]): Instance objects which should be removed. """ + pass def get_icon(self): @@ -171,6 +236,7 @@ class BaseCreator: Can return path to image file or awesome icon name. """ + return self.icon def get_dynamic_data( @@ -181,6 +247,7 @@ class BaseCreator: These may be get dynamically created based on current context of workfile. """ + return {} def get_subset_name( @@ -205,6 +272,7 @@ class BaseCreator: project_name(str): Project name. host_name(str): Which host creates subset. """ + dynamic_data = self.get_dynamic_data( variant, task_name, asset_doc, project_name, host_name ) @@ -231,9 +299,10 @@ class BaseCreator: keys/values when plugin attributes change. Returns: - list: Attribute definitions that can be tweaked for + List[AbtractAttrDef]: Attribute definitions that can be tweaked for created instance. """ + return self.instance_attr_defs @@ -291,6 +360,7 @@ class Creator(BaseCreator): Returns: str: Short description of family. """ + return self.description def get_detail_description(self): @@ -301,6 +371,7 @@ class Creator(BaseCreator): Returns: str: Detailed description of family for artist. """ + return self.detailed_description def get_default_variants(self): @@ -312,8 +383,9 @@ class Creator(BaseCreator): By default returns `default_variants` value. Returns: - list: Whisper variants for user input. + List[str]: Whisper variants for user input. 
""" + return copy.deepcopy(self.default_variants) def get_default_variant(self): @@ -332,11 +404,13 @@ class Creator(BaseCreator): """Plugin attribute definitions needed for creation. Attribute definitions of plugin that define how creation will work. Values of these definitions are passed to `create` method. - NOTE: - Convert method should be implemented which should care about updating - keys/values when plugin attributes change. + + Note: + Convert method should be implemented which should care about + updating keys/values when plugin attributes change. + Returns: - list: Attribute definitions that can be tweaked for + List[AbtractAttrDef]: Attribute definitions that can be tweaked for created instance. """ return self.pre_create_attr_defs diff --git a/openpype/pipeline/legacy_io.py b/openpype/pipeline/legacy_io.py index 9359e3057b..bde2b24c2a 100644 --- a/openpype/pipeline/legacy_io.py +++ b/openpype/pipeline/legacy_io.py @@ -18,9 +18,13 @@ _database = database = None log = logging.getLogger(__name__) +def is_installed(): + return module._is_installed + + def install(): """Establish a persistent connection to the database""" - if module._is_installed: + if is_installed(): return session = session_data_from_environment(context_keys=True) @@ -55,7 +59,7 @@ def uninstall(): def requires_install(func): @functools.wraps(func) def decorated(*args, **kwargs): - if not module._is_installed: + if not is_installed(): install() return func(*args, **kwargs) return decorated diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 99e5d11f82..2c213aff6f 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -6,13 +6,24 @@ import logging import inspect import numbers -import six -from bson.objectid import ObjectId - -from openpype.lib import Anatomy +from openpype.client import ( + get_project, + get_assets, + get_subsets, + get_versions, + get_version_by_id, + get_last_version_by_subset_id, + get_hero_version_by_subset_id, + get_version_by_name, + get_representations, + get_representation_by_id, + get_representation_by_name, + get_representation_parents +) from openpype.pipeline import ( schema, legacy_io, + Anatomy, ) log = logging.getLogger(__name__) @@ -52,13 +63,10 @@ def get_repres_contexts(representation_ids, dbcon=None): Returns: dict: The full representation context by representation id. - keys are repre_id, value is dictionary with full: - asset_doc - version_doc - subset_doc - repre_doc - + keys are repre_id, value is dictionary with full documents of + asset, subset, version and representation. 
""" + if not dbcon: dbcon = legacy_io @@ -66,26 +74,18 @@ def get_repres_contexts(representation_ids, dbcon=None): if not representation_ids: return contexts - _representation_ids = [] - for repre_id in representation_ids: - if isinstance(repre_id, six.string_types): - repre_id = ObjectId(repre_id) - _representation_ids.append(repre_id) + project_name = dbcon.active_project() + repre_docs = get_representations(project_name, representation_ids) - repre_docs = dbcon.find({ - "type": "representation", - "_id": {"$in": _representation_ids} - }) repre_docs_by_id = {} version_ids = set() for repre_doc in repre_docs: version_ids.add(repre_doc["parent"]) repre_docs_by_id[repre_doc["_id"]] = repre_doc - version_docs = dbcon.find({ - "type": {"$in": ["version", "hero_version"]}, - "_id": {"$in": list(version_ids)} - }) + version_docs = get_versions( + project_name, version_ids, hero=True + ) version_docs_by_id = {} hero_version_docs = [] @@ -99,10 +99,7 @@ def get_repres_contexts(representation_ids, dbcon=None): subset_ids.add(version_doc["parent"]) if versions_for_hero: - _version_docs = dbcon.find({ - "type": "version", - "_id": {"$in": list(versions_for_hero)} - }) + _version_docs = get_versions(project_name, versions_for_hero) _version_data_by_id = { version_doc["_id"]: version_doc["data"] for version_doc in _version_docs @@ -114,26 +111,20 @@ def get_repres_contexts(representation_ids, dbcon=None): version_data = copy.deepcopy(_version_data_by_id[version_id]) version_docs_by_id[hero_version_id]["data"] = version_data - subset_docs = dbcon.find({ - "type": "subset", - "_id": {"$in": list(subset_ids)} - }) + subset_docs = get_subsets(project_name, subset_ids) subset_docs_by_id = {} asset_ids = set() for subset_doc in subset_docs: subset_docs_by_id[subset_doc["_id"]] = subset_doc asset_ids.add(subset_doc["parent"]) - asset_docs = dbcon.find({ - "type": "asset", - "_id": {"$in": list(asset_ids)} - }) + asset_docs = get_assets(project_name, asset_ids) asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) for repre_id, repre_doc in repre_docs_by_id.items(): version_doc = version_docs_by_id[repre_doc["parent"]] @@ -173,32 +164,21 @@ def get_subset_contexts(subset_ids, dbcon=None): if not subset_ids: return contexts - _subset_ids = set() - for subset_id in subset_ids: - if isinstance(subset_id, six.string_types): - subset_id = ObjectId(subset_id) - _subset_ids.add(subset_id) - - subset_docs = dbcon.find({ - "type": "subset", - "_id": {"$in": list(_subset_ids)} - }) + project_name = dbcon.active_project() + subset_docs = get_subsets(project_name, subset_ids) subset_docs_by_id = {} asset_ids = set() for subset_doc in subset_docs: subset_docs_by_id[subset_doc["_id"]] = subset_doc asset_ids.add(subset_doc["parent"]) - asset_docs = dbcon.find({ - "type": "asset", - "_id": {"$in": list(asset_ids)} - }) + asset_docs = get_assets(project_name, asset_ids) asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) for subset_id, subset_doc in subset_docs_by_id.items(): asset_doc = asset_docs_by_id[subset_doc["parent"]] @@ -224,16 +204,19 @@ def get_representation_context(representation): Returns: dict: The full representation context. 
- """ assert representation is not None, "This is a bug" - if isinstance(representation, (six.string_types, ObjectId)): - representation = legacy_io.find_one( - {"_id": ObjectId(str(representation))}) + project_name = legacy_io.active_project() + if not isinstance(representation, dict): + representation = get_representation_by_id( + project_name, representation + ) - version, subset, asset, project = legacy_io.parenthood(representation) + version, subset, asset, project = get_representation_parents( + project_name, representation + ) assert all([representation, version, subset, asset, project]), ( "This is a bug" @@ -405,42 +388,36 @@ def update_container(container, version=-1): """Update a container""" # Compute the different version from 'representation' - current_representation = legacy_io.find_one({ - "_id": ObjectId(container["representation"]) - }) + project_name = legacy_io.active_project() + current_representation = get_representation_by_id( + project_name, container["representation"] + ) assert current_representation is not None, "This is a bug" - current_version, subset, asset, project = legacy_io.parenthood( - current_representation) - + current_version = get_version_by_id( + project_name, current_representation["parent"], fields=["parent"] + ) if version == -1: - new_version = legacy_io.find_one({ - "type": "version", - "parent": subset["_id"] - }, sort=[("name", -1)]) + new_version = get_last_version_by_subset_id( + project_name, current_version["parent"], fields=["_id"] + ) + + elif isinstance(version, HeroVersionType): + new_version = get_hero_version_by_subset_id( + project_name, current_version["parent"], fields=["_id"] + ) + else: - if isinstance(version, HeroVersionType): - version_query = { - "parent": subset["_id"], - "type": "hero_version" - } - else: - version_query = { - "parent": subset["_id"], - "type": "version", - "name": version - } - new_version = legacy_io.find_one(version_query) + new_version = get_version_by_name( + project_name, version, current_version["parent"], fields=["_id"] + ) assert new_version is not None, "This is a bug" - new_representation = legacy_io.find_one({ - "type": "representation", - "parent": new_version["_id"], - "name": current_representation["name"] - }) - + new_representation = get_representation_by_name( + project_name, current_representation["name"], new_version["_id"] + ) assert new_representation is not None, "Representation wasn't found" path = get_representation_path(new_representation) @@ -482,10 +459,10 @@ def switch_container(container, representation, loader_plugin=None): )) # Get the new representation to switch to - new_representation = legacy_io.find_one({ - "type": "representation", - "_id": representation["_id"], - }) + project_name = legacy_io.active_project() + new_representation = get_representation_by_id( + project_name, representation["_id"] + ) new_context = get_representation_context(new_representation) if not is_compatible_loader(loader_plugin, new_context): diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py index af5d7c4a91..aa7fe0bdbf 100644 --- a/openpype/pipeline/publish/__init__.py +++ b/openpype/pipeline/publish/__init__.py @@ -1,4 +1,7 @@ from .publish_plugins import ( + AbstractMetaInstancePlugin, + AbstractMetaContextPlugin, + PublishValidationError, PublishXmlValidationError, KnownPublishError, @@ -13,8 +16,17 @@ from .lib import ( load_help_content_from_filepath, ) +from .abstract_expected_files import ExpectedFiles +from .abstract_collect_render import ( + 
    RenderInstance,
+    AbstractCollectRender,
+)
+
 __all__ = (
+    "AbstractMetaInstancePlugin",
+    "AbstractMetaContextPlugin",
+
     "PublishValidationError",
     "PublishXmlValidationError",
     "KnownPublishError",
@@ -25,4 +37,9 @@ __all__ = (
     "publish_plugins_discover",
     "load_help_content_from_plugin",
     "load_help_content_from_filepath",
+
+    "ExpectedFiles",
+
+    "RenderInstance",
+    "AbstractCollectRender",
 )
diff --git a/openpype/pipeline/publish/abstract_collect_render.py b/openpype/pipeline/publish/abstract_collect_render.py
new file mode 100644
index 0000000000..2e537227c3
--- /dev/null
+++ b/openpype/pipeline/publish/abstract_collect_render.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+"""Collect render template.
+
+TODO: use @dataclass when the time comes.
+
+"""
+from abc import abstractmethod
+
+import attr
+import six
+
+import pyblish.api
+
+from openpype.pipeline import legacy_io
+from .publish_plugins import AbstractMetaContextPlugin
+
+
+@attr.s
+class RenderInstance(object):
+    """Data collected by collectors.
+
+    This data class is later passed to collected instances.
+    Those attributes are required later on.
+
+    """
+
+    # metadata
+    version = attr.ib()  # instance version
+    time = attr.ib()  # time of instance creation (get_formatted_current_time)
+    source = attr.ib()  # path to source scene file
+    label = attr.ib()  # label to show in GUI
+    subset = attr.ib()  # subset name
+    task = attr.ib()  # task name
+    asset = attr.ib()  # asset name (AVALON_ASSET)
+    attachTo = attr.ib()  # subset name to attach render to
+    setMembers = attr.ib()  # list of nodes/members producing render output
+    publish = attr.ib()  # bool, True to publish instance
+    name = attr.ib()  # instance name
+
+    # format settings
+    resolutionWidth = attr.ib()  # resolution width (1920)
+    resolutionHeight = attr.ib()  # resolution height (1080)
+    pixelAspect = attr.ib()  # pixel aspect (1.0)
+
+    # time settings
+    frameStart = attr.ib()  # start frame
+    frameEnd = attr.ib()  # end frame
+    frameStep = attr.ib()  # frame step
+
+    handleStart = attr.ib(default=None)  # start handle
+    handleEnd = attr.ib(default=None)  # end handle
+
+    # for software (like Harmony) where frame range cannot be set by DB,
+    # handles need to be propagated if they exist
+    ignoreFrameHandleCheck = attr.ib(default=False)
+
+    # --------------------
+    # With default values
+    # metadata
+    renderer = attr.ib(default="")  # renderer - can be used in Deadline
+    review = attr.ib(default=False)  # generate review from instance (bool)
+    priority = attr.ib(default=50)  # job priority on farm
+
+    family = attr.ib(default="renderlayer")
+    families = attr.ib(default=["renderlayer"])  # list of families
+
+    # format settings
+    multipartExr = attr.ib(default=False)  # flag for multipart exrs
+    convertToScanline = attr.ib(default=False)  # flag for exr conversion
+
+    tileRendering = attr.ib(default=False)  # bool: treat render as tiles
+    tilesX = attr.ib(default=0)  # number of tiles in X
+    tilesY = attr.ib(default=0)  # number of tiles in Y
+
+    # submit_publish_job
+    toBeRenderedOn = attr.ib(default=None)
+    deadlineSubmissionJob = attr.ib(default=None)
+    anatomyData = attr.ib(default=None)
+    outputDir = attr.ib(default=None)
+    context = attr.ib(default=None)
+
+    @frameStart.validator
+    def check_frame_start(self, _, value):
+        """Validate that frame start is not larger than frame end."""
+        if value > self.frameEnd:
+            raise ValueError("frameStart must be smaller "
+                             "than or equal to frameEnd")
+
+    @frameEnd.validator
+    def check_frame_end(self, _, value):
+        """Validate that frame end is not less than frame start."""
+        if value < self.frameStart:
+            raise ValueError("frameEnd must be larger "
+                             "than or equal to frameStart")
+
+    @tilesX.validator
+    def check_tiles_x(self, _, value):
+        """Validate that tile count in X isn't less than 1."""
+        if not self.tileRendering:
+            return
+        if value < 1:
+            raise ValueError("tile X size cannot be less than 1")
+
+        if value == 1 and self.tilesY == 1:
+            raise ValueError("both tile X and Y sizes are set to 1")
+
+    @tilesY.validator
+    def check_tiles_y(self, _, value):
+        """Validate that tile count in Y isn't less than 1."""
+        if not self.tileRendering:
+            return
+        if value < 1:
+            raise ValueError("tile Y size cannot be less than 1")
+
+        if value == 1 and self.tilesX == 1:
+            raise ValueError("both tile X and Y sizes are set to 1")
+
+
+@six.add_metaclass(AbstractMetaContextPlugin)
+class AbstractCollectRender(pyblish.api.ContextPlugin):
+    """Gather all publishable render layers from renderSetup."""
+
+    order = pyblish.api.CollectorOrder + 0.01
+    label = "Collect Render"
+    sync_workfile_version = False
+
+    def __init__(self, *args, **kwargs):
+        """Constructor."""
+        super(AbstractCollectRender, self).__init__(*args, **kwargs)
+        self._file_path = None
+        self._asset = legacy_io.Session["AVALON_ASSET"]
+        self._context = None
+
+    def process(self, context):
+        """Entry point to collector."""
+        self._context = context
+        for instance in context:
+            # make sure workfile instance publishing is enabled
+            try:
+                if "workfile" in instance.data["families"]:
+                    instance.data["publish"] = True
+                # TODO merge renderFarm and render.farm
+                if ("renderFarm" in instance.data["families"] or
+                        "render.farm" in instance.data["families"]):
+                    instance.data["remove"] = True
+            except KeyError:
+                # be tolerant if 'families' is missing.
+                pass
+
+        self._file_path = context.data["currentFile"].replace("\\", "/")
+
+        render_instances = self.get_instances(context)
+        for render_instance in render_instances:
+            exp_files = self.get_expected_files(render_instance)
+            assert exp_files, "no file names were generated, this is a bug"
+
+            # if we want to attach render to subset, check if we have AOVs
+            # in expectedFiles.
If so, raise error as we cannot attach AOV + # (considered to be subset on its own) to another subset + if render_instance.attachTo: + assert isinstance(exp_files, list), ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported" + ) + + frame_start_render = int(render_instance.frameStart) + frame_end_render = int(render_instance.frameEnd) + if (render_instance.ignoreFrameHandleCheck or + int(context.data['frameStartHandle']) == frame_start_render + and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501 + + handle_start = context.data['handleStart'] + handle_end = context.data['handleEnd'] + frame_start = context.data['frameStart'] + frame_end = context.data['frameEnd'] + frame_start_handle = context.data['frameStartHandle'] + frame_end_handle = context.data['frameEndHandle'] + else: + handle_start = 0 + handle_end = 0 + frame_start = frame_start_render + frame_end = frame_end_render + frame_start_handle = frame_start_render + frame_end_handle = frame_end_render + + data = { + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartHandle": frame_start_handle, + "frameEndHandle": frame_end_handle, + "byFrameStep": int(render_instance.frameStep), + + "author": context.data["user"], + # Add source to allow tracing back to the scene from + # which was submitted originally + "expectedFiles": exp_files, + } + if self.sync_workfile_version: + data["version"] = context.data["version"] + + # add additional data + data = self.add_additional_data(data) + render_instance_dict = attr.asdict(render_instance) + + instance = context.create_instance(render_instance.name) + instance.data["label"] = render_instance.label + instance.data.update(render_instance_dict) + instance.data.update(data) + + self.post_collecting_action() + + @abstractmethod + def get_instances(self, context): + """Get all renderable instances and their data. + + Args: + context (pyblish.api.Context): Context object. + + Returns: + list of :class:`RenderInstance`: All collected renderable instances + (like render layers, write nodes, etc.) + + """ + pass + + @abstractmethod + def get_expected_files(self, render_instance): + """Get list of expected files. + + Returns: + list: expected files. This can be either simple list of files with + their paths, or list of dictionaries, where key is name of AOV + for example and value is list of files for that AOV. + + Example:: + + ['/path/to/file.001.exr', '/path/to/file.002.exr'] + + or as dictionary: + + [ + { + "beauty": ['/path/to/beauty.001.exr', ...], + "mask": ['/path/to/mask.001.exr'] + } + ] + + """ + pass + + def add_additional_data(self, data): + """Add additional data to collected instance. + + This can be overridden by host implementation to add custom + additional data. + + """ + return data + + def post_collecting_action(self): + """Execute some code after collection is done. + + This is useful for example for restoring current render layer. + + """ + pass diff --git a/openpype/pipeline/publish/abstract_expected_files.py b/openpype/pipeline/publish/abstract_expected_files.py new file mode 100644 index 0000000000..f9f3c17ef5 --- /dev/null +++ b/openpype/pipeline/publish/abstract_expected_files.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +"""Abstract ExpectedFile class definition.""" +from abc import ABCMeta, abstractmethod +import six + + +@six.add_metaclass(ABCMeta) +class ExpectedFiles: + """Class grouping functionality for all supported renderers. 
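The frame-range branch in `process` keeps database handles only when the rendered range already includes them (or the check is explicitly ignored); otherwise the instance is treated as handle-less. The same decision, distilled into a standalone function (a sketch, not the plugin itself):

```python
def resolve_frame_range(render_start, render_end, context,
                        ignore_check=False):
    """Decide which frame/handle values a render instance should carry."""
    if ignore_check or (
        context["frameStartHandle"] == render_start
        and context["frameEndHandle"] == render_end
    ):
        return {
            "handleStart": context["handleStart"],
            "handleEnd": context["handleEnd"],
            "frameStart": context["frameStart"],
            "frameEnd": context["frameEnd"],
        }
    # Rendered range does not match the DB range -> drop handles.
    return {
        "handleStart": 0,
        "handleEnd": 0,
        "frameStart": render_start,
        "frameEnd": render_end,
    }


ctx = {"frameStart": 1001, "frameEnd": 1100, "handleStart": 5,
       "handleEnd": 5, "frameStartHandle": 996, "frameEndHandle": 1105}
print(resolve_frame_range(996, 1105, ctx))   # keeps handles 5/5
print(resolve_frame_range(1001, 1100, ctx))  # falls back to 0/0
```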
+ + Attributes: + multipart (bool): Flag if multipart exrs are used. + + """ + + multipart = False + + @abstractmethod + def get(self, render_instance): + """Get expected files for given renderer and render layer. + + This method should return dictionary of all files we are expecting + to be rendered from the host. Usually `render_instance` corresponds + to *render layer*. Result can be either flat list with the file + paths or it can be list of dictionaries. Each key corresponds to + for example AOV name or channel, etc. + + Example:: + + ['/path/to/file.001.exr', '/path/to/file.002.exr'] + + or as dictionary: + + [ + { + "beauty": ['/path/to/beauty.001.exr', ...], + "mask": ['/path/to/mask.001.exr'] + } + ] + + + Args: + render_instance (:class:`RenderInstance`): Data passed from + collector to determine files. This should be instance of + :class:`abstract_collect_render.RenderInstance` + + Returns: + list: Full paths to expected rendered files. + list of dict: Path to expected rendered files categorized by + AOVs, etc. + + """ + raise NotImplementedError() diff --git a/openpype/pipeline/publish/publish_plugins.py b/openpype/pipeline/publish/publish_plugins.py index 2402a005c2..71a2c675b6 100644 --- a/openpype/pipeline/publish/publish_plugins.py +++ b/openpype/pipeline/publish/publish_plugins.py @@ -1,7 +1,17 @@ +from abc import ABCMeta +from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin from openpype.lib import BoolDef from .lib import load_help_content_from_plugin +class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin): + pass + + +class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin): + pass + + class PublishValidationError(Exception): """Validation error happened during publishing. @@ -16,6 +26,7 @@ class PublishValidationError(Exception): description(str): Detailed description of an error. It is possible to use Markdown syntax. """ + def __init__(self, message, title=None, description=None, detail=None): self.message = message self.title = title or "< Missing title >" @@ -49,6 +60,7 @@ class KnownPublishError(Exception): Message will be shown in UI for artist. """ + pass @@ -92,6 +104,7 @@ class OpenPypePyblishPluginMixin: Returns: list: Attribute definitions for plugin. """ + return [] @classmethod @@ -116,6 +129,7 @@ class OpenPypePyblishPluginMixin: Args: data(dict): Data from instance or context. 
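A renderer integration only has to implement `get` on the abstract `ExpectedFiles` class. A hypothetical minimal subclass returning a flat frame list; the base class is re-declared here so the snippet runs standalone:

```python
from abc import ABCMeta, abstractmethod

import six  # kept for Python 2/3 parity with the surrounding code


@six.add_metaclass(ABCMeta)
class ExpectedFiles:
    multipart = False

    @abstractmethod
    def get(self, render_instance):
        raise NotImplementedError()


class SimpleExpectedFiles(ExpectedFiles):
    """Hypothetical renderer support with a flat list of frames."""

    def get(self, render_instance):
        return [
            "/path/to/file.{:03d}.exr".format(frame)
            for frame in range(render_instance["frameStart"],
                               render_instance["frameEnd"] + 1)
        ]


print(SimpleExpectedFiles().get({"frameStart": 1, "frameEnd": 3}))
# ['/path/to/file.001.exr', '/path/to/file.002.exr',
#  '/path/to/file.003.exr']
```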
""" + return ( data .get("publish_attributes", {}) diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index c3e9e9fa0a..6e0b464cc1 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -4,14 +4,14 @@ import uuid import clique from pymongo import UpdateOne -import ftrack_api import qargparse from Qt import QtWidgets, QtCore +from openpype.client import get_versions, get_representations from openpype import style -from openpype.pipeline import load, AvalonMongoDB +from openpype.pipeline import load, AvalonMongoDB, Anatomy from openpype.lib import StringTemplate -from openpype.api import Anatomy +from openpype.modules import ModulesManager class DeleteOldVersions(load.SubsetLoaderPlugin): @@ -198,18 +198,10 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): def get_data(self, context, versions_count): subset = context["subset"] asset = context["asset"] - anatomy = Anatomy(context["project"]["name"]) + project_name = context["project"]["name"] + anatomy = Anatomy(project_name) - self.dbcon = AvalonMongoDB() - self.dbcon.Session["AVALON_PROJECT"] = context["project"]["name"] - self.dbcon.install() - - versions = list( - self.dbcon.find({ - "type": "version", - "parent": {"$in": [subset["_id"]]} - }) - ) + versions = list(get_versions(project_name, subset_ids=[subset["_id"]])) versions_by_parent = collections.defaultdict(list) for ent in versions: @@ -268,10 +260,9 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): print(msg) return - repres = list(self.dbcon.find({ - "type": "representation", - "parent": {"$in": version_ids} - })) + repres = list(get_representations( + project_name, version_ids=version_ids + )) self.log.debug( "Collected representations to remove ({})".format(len(repres)) @@ -330,7 +321,7 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): return data - def main(self, data, remove_publish_folder): + def main(self, project_name, data, remove_publish_folder): # Size of files. size = 0 if not data: @@ -367,30 +358,70 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): )) if mongo_changes_bulk: - self.dbcon.bulk_write(mongo_changes_bulk) + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + dbcon.install() + dbcon.bulk_write(mongo_changes_bulk) + dbcon.uninstall() - self.dbcon.uninstall() + self._ftrack_delete_versions(data) + + return size + + def _ftrack_delete_versions(self, data): + """Delete version on ftrack. + + Handling of ftrack logic in this plugin is not ideal. But in OP3 it is + almost impossible to solve the issue other way. + + Note: + Asset versions on ftrack are not deleted but marked as + "not published" which cause that they're invisible. + + Args: + data (dict): Data sent to subset loader with full context. + """ + + # First check for ftrack id on asset document + # - skip if ther is none + asset_ftrack_id = data["asset"]["data"].get("ftrackId") + if not asset_ftrack_id: + self.log.info(( + "Asset does not have filled ftrack id. Skipped delete" + " of ftrack version." 
+ )) + return + + # Check if ftrack module is enabled + modules_manager = ModulesManager() + ftrack_module = modules_manager.modules_by_name.get("ftrack") + if not ftrack_module or not ftrack_module.enabled: + return + + import ftrack_api + + session = ftrack_api.Session() + subset_name = data["subset"]["name"] + versions = { + '"{}"'.format(version_doc["name"]) + for version_doc in data["versions"] + } + asset_versions = session.query( + ( + "select id, is_published from AssetVersion where" + " asset.parent.id is \"{}\"" + " and asset.name is \"{}\"" + " and version in ({})" + ).format( + asset_ftrack_id, + subset_name, + ",".join(versions) + ) + ).all() # Set attribute `is_published` to `False` on ftrack AssetVersions - session = ftrack_api.Session() - query = ( - "AssetVersion where asset.parent.id is \"{}\"" - " and asset.name is \"{}\"" - " and version is \"{}\"" - ) - for v in data["versions"]: - try: - ftrack_version = session.query( - query.format( - data["asset"]["data"]["ftrackId"], - data["subset"]["name"], - v["name"] - ) - ).one() - except ftrack_api.exception.NoResultFoundError: - continue - - ftrack_version["is_published"] = False + for asset_version in asset_versions: + asset_version["is_published"] = False try: session.commit() @@ -403,8 +434,6 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): self.log.error(msg) self.message(msg) - return size - def load(self, contexts, name=None, namespace=None, options=None): try: size = 0 @@ -423,7 +452,8 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): if not data: continue - size += self.main(data, remove_publish_folder) + project_name = context["project"]["name"] + size += self.main(project_name, data, remove_publish_folder) print("Progressing {}/{}".format(count + 1, len(contexts))) msg = "Total size of files: " + self.sizeof_fmt(size) @@ -449,7 +479,7 @@ class CalculateOldVersions(DeleteOldVersions): ) ] - def main(self, data, remove_publish_folder): + def main(self, project_name, data, remove_publish_folder): size = 0 if not data: diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 7df07e3f64..7585ea4c59 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -3,8 +3,9 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui -from openpype.pipeline import load, AvalonMongoDB -from openpype.api import Anatomy, config +from openpype.client import get_representations +from openpype.lib import config +from openpype.pipeline import load, Anatomy from openpype import resources, style from openpype.lib.delivery import ( @@ -68,17 +69,13 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): self.setStyleSheet(style.load_stylesheet()) - project = contexts[0]["project"]["name"] - self.anatomy = Anatomy(project) + project_name = contexts[0]["project"]["name"] + self.anatomy = Anatomy(project_name) self._representations = None self.log = log self.currently_uploaded = 0 - self.dbcon = AvalonMongoDB() - self.dbcon.Session["AVALON_PROJECT"] = project - self.dbcon.install() - - self._set_representations(contexts) + self._set_representations(project_name, contexts) dropdown = QtWidgets.QComboBox() self.templates = self._get_templates(self.anatomy) @@ -238,13 +235,12 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): return templates - def _set_representations(self, contexts): + def _set_representations(self, project_name, contexts): version_ids = [context["version"]["_id"] for context in contexts] - repres = list(self.dbcon.find({ - "type": 
"representation", - "parent": {"$in": version_ids} - })) + repres = list(get_representations( + project_name, version_ids=version_ids + )) self._representations = repres diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index 6a6ea170b5..f67d3373d9 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -27,6 +27,11 @@ import collections import pyblish.api +from openpype.client import ( + get_assets, + get_subsets, + get_last_versions +) from openpype.pipeline import legacy_io @@ -44,13 +49,15 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): def process(self, context): self.log.info("Collecting anatomy data for all instances.") - self.fill_missing_asset_docs(context) - self.fill_latest_versions(context) + project_name = legacy_io.active_project() + self.fill_missing_asset_docs(context, project_name) + self.fill_instance_data_from_asset(context) + self.fill_latest_versions(context, project_name) self.fill_anatomy_data(context) self.log.info("Anatomy Data collection finished.") - def fill_missing_asset_docs(self, context): + def fill_missing_asset_docs(self, context, project_name): self.log.debug("Qeurying asset documents for instances.") context_asset_doc = context.data.get("assetEntity") @@ -84,10 +91,8 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): self.log.debug("Querying asset documents with names: {}".format( ", ".join(["\"{}\"".format(name) for name in asset_names]) )) - asset_docs = legacy_io.find({ - "type": "asset", - "name": {"$in": asset_names} - }) + + asset_docs = get_assets(project_name, asset_names=asset_names) asset_docs_by_name = { asset_doc["name"]: asset_doc for asset_doc in asset_docs @@ -111,7 +116,24 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): "Not found asset documents with names \"{}\"." ).format(joined_asset_names)) - def fill_latest_versions(self, context): + def fill_instance_data_from_asset(self, context): + for instance in context: + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + continue + + asset_data = asset_doc["data"] + for key in ( + "fps", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + ): + if key not in instance.data and key in asset_data: + instance.data[key] = asset_data[key] + + def fill_latest_versions(self, context, project_name): """Try to find latest version for each instance's subset. Key "latestVersion" is always set to latest version or `None`. 
@@ -126,7 +148,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
         self.log.debug("Qeurying latest versions for instances.")
 
         hierarchy = {}
-        subset_filters = []
+        names_by_asset_ids = collections.defaultdict(set)
         for instance in context:
             # Make sure `"latestVersion"` key is set
             latest_version = instance.data.get("latestVersion")
@@ -147,67 +169,33 @@
             if subset_name not in hierarchy[asset_id]:
                 hierarchy[asset_id][subset_name] = []
             hierarchy[asset_id][subset_name].append(instance)
-            subset_filters.append({
-                "parent": asset_id,
-                "name": subset_name
-            })
+            names_by_asset_ids[asset_id].add(subset_name)
 
         subset_docs = []
-        if subset_filters:
-            subset_docs = list(legacy_io.find({
-                "type": "subset",
-                "$or": subset_filters
-            }))
+        if names_by_asset_ids:
+            subset_docs = list(get_subsets(
+                project_name, names_by_asset_ids=names_by_asset_ids
+            ))
 
         subset_ids = [
             subset_doc["_id"]
             for subset_doc in subset_docs
         ]
 
-        last_version_by_subset_id = self._query_last_versions(subset_ids)
+        last_version_docs_by_subset_id = get_last_versions(
+            project_name, subset_ids, fields=["name"]
+        )
         for subset_doc in subset_docs:
             subset_id = subset_doc["_id"]
-            last_version = last_version_by_subset_id.get(subset_id)
-            if last_version is None:
+            last_version_doc = last_version_docs_by_subset_id.get(subset_id)
+            if last_version_doc is None:
                 continue
 
             asset_id = subset_doc["parent"]
             subset_name = subset_doc["name"]
             _instances = hierarchy[asset_id][subset_name]
             for _instance in _instances:
-                _instance.data["latestVersion"] = last_version
-
-    def _query_last_versions(self, subset_ids):
-        """Retrieve all latest versions for entered subset_ids.
-
-        Args:
-            subset_ids (list): List of subset ids with type `ObjectId`.
-
-        Returns:
-            dict: Key is subset id and value is last version name.
- """ - _pipeline = [ - # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, - # Sorting versions all together - {"$sort": {"name": 1}}, - # Group them by "parent", but only take the last - {"$group": { - "_id": "$parent", - "_version_id": {"$last": "$_id"}, - "name": {"$last": "$name"} - }} - ] - - last_version_by_subset_id = {} - for doc in legacy_io.aggregate(_pipeline): - subset_id = doc["_id"] - last_version_by_subset_id[subset_id] = doc["name"] - - return last_version_by_subset_id + _instance.data["latestVersion"] = last_version_doc["name"] def fill_anatomy_data(self, context): self.log.debug("Storing anatomy data to instance data.") diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/openpype/plugins/publish/collect_anatomy_object.py index 2c87918728..b1415098b6 100644 --- a/openpype/plugins/publish/collect_anatomy_object.py +++ b/openpype/plugins/publish/collect_anatomy_object.py @@ -4,11 +4,11 @@ Requires: os.environ -> AVALON_PROJECT Provides: - context -> anatomy (pype.api.Anatomy) + context -> anatomy (openpype.pipeline.anatomy.Anatomy) """ import os -from openpype.api import Anatomy import pyblish.api +from openpype.pipeline import Anatomy class CollectAnatomyObject(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_avalon_entities.py index 3e7843407f..6cd0d136e8 100644 --- a/openpype/plugins/publish/collect_avalon_entities.py +++ b/openpype/plugins/publish/collect_avalon_entities.py @@ -10,6 +10,7 @@ Provides: import pyblish.api +from openpype.client import get_project, get_asset_by_name from openpype.pipeline import legacy_io @@ -25,10 +26,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): asset_name = legacy_io.Session["AVALON_ASSET"] task_name = legacy_io.Session["AVALON_TASK"] - project_entity = legacy_io.find_one({ - "type": "project", - "name": project_name - }) + project_entity = get_project(project_name) assert project_entity, ( "Project '{0}' was not found." ).format(project_name) @@ -39,11 +37,8 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): if not asset_name: self.log.info("Context is not set. 
Can't collect global data.") return - asset_entity = legacy_io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project_entity["_id"] - }) + + asset_entity = get_asset_by_name(project_name, asset_name) assert asset_entity, ( "No asset found by the name '{0}' in project '{1}'" ).format(asset_name, project_name) diff --git a/openpype/plugins/publish/collect_cleanup_keys.py b/openpype/plugins/publish/collect_cleanup_keys.py index 635b038387..b9cd1a9fc9 100644 --- a/openpype/plugins/publish/collect_cleanup_keys.py +++ b/openpype/plugins/publish/collect_cleanup_keys.py @@ -14,7 +14,7 @@ class CollectCleanupKeys(pyblish.api.ContextPlugin): """Prepare keys for 'ExplicitCleanUp' plugin.""" label = "Collect Cleanup Keys" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 def process(self, context): context.data["cleanupFullPaths"] = [] diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index f6ead98809..d2be633cbe 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -47,12 +47,11 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): "label": subset, "name": subset, "family": in_data["family"], - "families": instance_families + "families": instance_families, + "representations": [] }) for key, value in in_data.items(): if key not in instance.data: instance.data[key] = value self.log.info("collected instance: {}".format(instance.data)) self.log.info("parsing data: {}".format(in_data)) - - instance.data["representations"] = list() diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py index bb34e3ce31..5ff2b46e3b 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -1,7 +1,6 @@ -from bson.objectid import ObjectId - import pyblish.api +from openpype.client import get_representations from openpype.pipeline import ( registered_host, legacy_io, @@ -39,23 +38,29 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): return loaded_versions = [] - _containers = list(host.ls()) - _repr_ids = [ObjectId(c["representation"]) for c in _containers] - repre_docs = legacy_io.find( - {"_id": {"$in": _repr_ids}}, - projection={"_id": 1, "parent": 1} + containers = list(host.ls()) + repre_ids = { + container["representation"] + for container in containers + } + + project_name = legacy_io.active_project() + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["_id", "parent"] ) - version_by_repr = { - str(doc["_id"]): doc["parent"] + repre_doc_by_str_id = { + str(doc["_id"]): doc for doc in repre_docs } # QUESTION should we add same representation id when loaded multiple # times? - for con in _containers: + for con in containers: repre_id = con["representation"] - version_id = version_by_repr.get(repre_id) - if version_id is None: + repre_doc = repre_doc_by_str_id.get(repre_id) + if repre_doc is None: self.log.warning(( "Skipping container," " did not find representation document. 
{}" @@ -66,8 +71,8 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): # may have more then one representation that are same version version = { "subsetName": con["name"], - "representation": ObjectId(repre_id), - "version": version_id, + "representation": repre_doc["_id"], + "version": repre_doc["parent"], } loaded_versions.append(version) diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index 1f7ce839ed..8d447ba595 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -1,5 +1,11 @@ from copy import deepcopy import pyblish.api +from openpype.client import ( + get_project, + get_asset_by_id, + get_asset_by_name, + get_archived_assets +) from openpype.pipeline import legacy_io @@ -19,14 +25,14 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if not legacy_io.Session: legacy_io.install() + project_name = legacy_io.active_project() hierarchy_context = self._get_active_assets(context) self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) self.project = None - self.import_to_avalon(hierarchy_context) + self.import_to_avalon(project_name, hierarchy_context) - - def import_to_avalon(self, input_data, parent=None): + def import_to_avalon(self, project_name, input_data, parent=None): for name in input_data: self.log.info("input_data[name]: {}".format(input_data[name])) entity_data = input_data[name] @@ -62,7 +68,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): update_data = True # Process project if entity_type.lower() == "project": - entity = legacy_io.find_one({"type": "project"}) + entity = get_project(project_name) # TODO: should be in validator? assert (entity is not None), "Did not find project in DB" @@ -79,7 +85,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): ) # Else process assset else: - entity = legacy_io.find_one({"type": "asset", "name": name}) + entity = get_asset_by_name(project_name, name) if entity: # Do not override data, only update cur_entity_data = entity.get("data") or {} @@ -103,10 +109,10 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): # Skip updating data update_data = False - archived_entities = legacy_io.find({ - "type": "archived_asset", - "name": name - }) + archived_entities = get_archived_assets( + project_name, + asset_names=[name] + ) unarchive_entity = None for archived_entity in archived_entities: archived_parents = ( @@ -120,7 +126,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if unarchive_entity is None: # Create entity if doesn"t exist - entity = self.create_avalon_asset(name, data) + entity = self.create_avalon_asset( + project_name, name, data + ) else: # Unarchive if entity was archived entity = self.unarchive_entity(unarchive_entity, data) @@ -133,7 +141,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): ) if "childs" in entity_data: - self.import_to_avalon(entity_data["childs"], entity) + self.import_to_avalon( + project_name, entity_data["childs"], entity + ) def unarchive_entity(self, entity, data): # Unarchived asset should not use same data @@ -151,7 +161,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): ) return new_entity - def create_avalon_asset(self, name, data): + def create_avalon_asset(self, project_name, name, data): item = { "schema": "openpype:asset-3.0", "name": name, @@ -162,7 +172,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): 
self.log.debug("Creating asset: {}".format(item)) entity_id = legacy_io.insert_one(item).inserted_id - return legacy_io.find_one({"_id": entity_id}) + return get_asset_by_id(project_name, entity_id) def _get_active_assets(self, context): """ Returns only asset dictionary. diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index b6e5fee1fe..1b6e2a1d61 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -447,7 +447,22 @@ class ExtractReview(pyblish.api.InstancePlugin): input_is_sequence = self.input_is_sequence(repre) input_allow_bg = False + first_sequence_frame = None if input_is_sequence and repre["files"]: + # Calculate first frame that should be used + cols, _ = clique.assemble(repre["files"]) + input_frames = list(sorted(cols[0].indexes)) + first_sequence_frame = input_frames[0] + # WARNING: This is an issue as we don't know if first frame + # is with or without handles! + # - handle start is added but how do not know if we should + output_duration = (output_frame_end - output_frame_start) + 1 + if ( + without_handles + and len(input_frames) - handle_start >= output_duration + ): + first_sequence_frame += handle_start + ext = os.path.splitext(repre["files"][0])[1].replace(".", "") if ext in self.alpha_exts: input_allow_bg = True @@ -467,6 +482,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "resolution_height": instance.data.get("resolutionHeight"), "origin_repre": repre, "input_is_sequence": input_is_sequence, + "first_sequence_frame": first_sequence_frame, "input_allow_bg": input_allow_bg, "with_audio": with_audio, "without_handles": without_handles, @@ -545,9 +561,9 @@ class ExtractReview(pyblish.api.InstancePlugin): if temp_data["input_is_sequence"]: # Set start frame of input sequence (just frame in filename) # - definition of input filepath - ffmpeg_input_args.append( - "-start_number {}".format(temp_data["output_frame_start"]) - ) + ffmpeg_input_args.extend([ + "-start_number", str(temp_data["first_sequence_frame"]) + ]) # TODO add fps mapping `{fps: fraction}` ? 
# - e.g.: { diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_thumbnail.py similarity index 89% rename from openpype/plugins/publish/extract_jpeg_exr.py rename to openpype/plugins/publish/extract_thumbnail.py index a467728e77..7933595b89 100644 --- a/openpype/plugins/publish/extract_jpeg_exr.py +++ b/openpype/plugins/publish/extract_thumbnail.py @@ -19,10 +19,10 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): label = "Extract Thumbnail" order = pyblish.api.ExtractorOrder families = [ - "imagesequence", "render", "render2d", + "imagesequence", "render", "render2d", "prerender", "source", "plate", "take" ] - hosts = ["shell", "fusion", "resolve"] + hosts = ["shell", "fusion", "resolve", "traypublisher"] enabled = False # presetable attribute @@ -46,6 +46,10 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): self.log.info("Skipping - no review set on instance.") return + if self._already_has_thumbnail(instance): + self.log.info("Thumbnail representation already present.") + return + filtered_repres = self._get_filtered_repres(instance) for repre in filtered_repres: repre_files = repre["files"] @@ -71,18 +75,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): if not is_oiio_supported(): thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path) # noqa else: - # Check if the file can be read by OIIO - oiio_tool_path = get_oiio_tools_path() - args = [ - oiio_tool_path, "--info", "-i", full_output_path - ] - returncode = execute(args, silent=True) # If the input can read by OIIO then use OIIO method for # conversion otherwise use ffmpeg - if returncode == 0: - self.log.info("Input can be read by OIIO, converting with oiiotool now.") # noqa - thumbnail_created = self.create_thumbnail_oiio(full_input_path, full_output_path) # noqa - else: + self.log.info("Trying to convert with OIIO") # noqa + thumbnail_created = self.create_thumbnail_oiio(full_input_path, full_output_path) # noqa + + if not thumbnail_created: self.log.info("Converting with FFMPEG because input can't be read by OIIO.") # noqa thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path) # noqa @@ -108,6 +106,14 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): # There is no need to create more then one thumbnail break + def _already_has_thumbnail(self, instance): + for repre in instance.data.get("representations", []): + self.log.info("repre {}".format(repre)) + if repre["name"] == "thumbnail": + return True + + return False + def _get_filtered_repres(self, instance): filtered_repres = [] src_repres = instance.data.get("representations") or [] diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index a706b653c4..5f97a9bd41 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -8,6 +8,12 @@ from bson.objectid import ObjectId from pymongo import InsertOne, ReplaceOne import pyblish.api +from openpype.client import ( + get_version_by_id, + get_hero_version_by_subset_id, + get_archived_representations, + get_representations, +) from openpype.lib import ( create_hard_link, filter_profiles @@ -85,9 +91,13 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): hero_template )) - self.integrate_instance(instance, template_key, hero_template) + self.integrate_instance( + instance, project_name, template_key, hero_template + ) - def integrate_instance(self, instance, template_key, 
hero_template): + def integrate_instance( + self, instance, project_name, template_key, hero_template + ): anatomy = instance.context.data["anatomy"] published_repres = instance.data["published_representations"] hero_publish_dir = self.get_publish_dir(instance, template_key) @@ -118,8 +128,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): "Published version entity was not sent in representation data." " Querying entity from database." )) - src_version_entity = ( - self.version_from_representations(published_repres) + src_version_entity = self.version_from_representations( + project_name, published_repres ) if not src_version_entity: @@ -170,8 +180,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): other_file_paths_mapping.append((file_path, dst_filepath)) # Current version - old_version, old_repres = ( - self.current_hero_ents(src_version_entity) + old_version, old_repres = self.current_hero_ents( + project_name, src_version_entity ) old_repres_by_name = { @@ -223,11 +233,11 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if old_repres_by_name: old_repres_to_delete = old_repres_by_name - archived_repres = list(legacy_io.find({ + archived_repres = list(get_archived_representations( + project_name, # Check what is type of archived representation - "type": "archived_repsentation", - "parent": new_version_id - })) + version_ids=[new_version_id] + )) archived_repres_by_name = {} for repre in archived_repres: repre_name_low = repre["name"].lower() @@ -586,25 +596,23 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): shutil.copy(src_path, dst_path) - def version_from_representations(self, repres): + def version_from_representations(self, project_name, repres): for repre in repres: - version = legacy_io.find_one({"_id": repre["parent"]}) + version = get_version_by_id(project_name, repre["parent"]) if version: return version - def current_hero_ents(self, version): - hero_version = legacy_io.find_one({ - "parent": version["parent"], - "type": "hero_version" - }) + def current_hero_ents(self, project_name, version): + hero_version = get_hero_version_by_subset_id( + project_name, version["parent"] + ) if not hero_version: return (None, []) - hero_repres = list(legacy_io.find({ - "parent": hero_version["_id"], - "type": "representation" - })) + hero_repres = list(get_representations( + project_name, version_ids=[hero_version["_id"]] + )) return (hero_version, hero_repres) def _update_path(self, anatomy, path, src_file, dst_file): diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index 4c14c17dae..f870220421 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -16,6 +16,15 @@ from pymongo import DeleteOne, InsertOne import pyblish.api import openpype.api +from openpype.client import ( + get_asset_by_name, + get_subset_by_id, + get_subset_by_name, + get_version_by_id, + get_version_by_name, + get_representations, + get_archived_representations, +) from openpype.lib.profiles_filtering import filter_profiles from openpype.lib import ( prepare_template_data, @@ -201,6 +210,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): context = instance.context project_entity = instance.data["projectEntity"] + project_name = project_entity["name"] context_asset_name = None context_asset_doc = context.data.get("assetEntity") @@ -210,11 +220,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] asset_entity = 
instance.data.get("assetEntity") if not asset_entity or asset_entity["name"] != context_asset_name: - asset_entity = legacy_io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project_entity["_id"] - }) + asset_entity = get_asset_by_name(project_name, asset_name) assert asset_entity, ( "No asset found by the name \"{0}\" in project \"{1}\"" ).format(asset_name, project_entity["name"]) @@ -270,7 +276,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "Establishing staging directory @ {0}".format(stagingdir) ) - subset = self.get_subset(asset_entity, instance) + subset = self.get_subset(project_name, asset_entity, instance) instance.data["subsetEntity"] = subset version_number = instance.data["version"] @@ -297,11 +303,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): for _repre in repres ] - existing_version = legacy_io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': version_number - }) + existing_version = get_version_by_name( + project_name, version_number, subset["_id"] + ) if existing_version is None: version_id = legacy_io.insert_one(version).inserted_id @@ -322,10 +326,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_id = existing_version['_id'] # Find representations of existing version and archive them - current_repres = list(legacy_io.find({ - "type": "representation", - "parent": version_id - })) + current_repres = list(get_representations( + project_name, version_ids=[version_id] + )) bulk_writes = [] for repre in current_repres: if append_repres: @@ -345,18 +348,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # bulk updates if bulk_writes: - project_name = legacy_io.Session["AVALON_PROJECT"] legacy_io.database[project_name].bulk_write( bulk_writes ) - version = legacy_io.find_one({"_id": version_id}) + version = get_version_by_id(project_name, version_id) instance.data["versionEntity"] = version - existing_repres = list(legacy_io.find({ - "parent": version_id, - "type": "archived_representation" - })) + existing_repres = list(get_archived_representations( + project_name, + version_ids=[version_id] + )) instance.data['version'] = version['name'] @@ -792,13 +794,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): create_hard_link(src, dst) - def get_subset(self, asset, instance): + def get_subset(self, project_name, asset, instance): subset_name = instance.data["subset"] - subset = legacy_io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": subset_name - }) + subset = get_subset_by_name(project_name, subset_name, asset["_id"]) if subset is None: self.log.info("Subset '%s' not found, creating ..." % subset_name) @@ -825,7 +823,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "parent": asset["_id"] }).inserted_id - subset = legacy_io.find_one({"_id": _id}) + subset = get_subset_by_id(project_name, _id) # QUESTION Why is changing of group and updating it's # families in 'get_subset'? 
diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index 5d6fc561ea..fd50858a91 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -8,6 +8,7 @@ import six import pyblish.api from bson.objectid import ObjectId +from openpype.client import get_version_by_id from openpype.pipeline import legacy_io @@ -70,7 +71,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): thumbnail_template = anatomy.templates["publish"]["thumbnail"] - version = legacy_io.find_one({"_id": thumb_repre["parent"]}) + version = get_version_by_id(project_name, thumb_repre["parent"]) if not version: raise AssertionError( "There does not exist version with id {}".format( diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index f9cdaebf0c..702e87b58d 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -3,6 +3,7 @@ from pprint import pformat import pyblish.api from openpype.pipeline import legacy_io +from openpype.client import get_assets class ValidateEditorialAssetName(pyblish.api.ContextPlugin): @@ -29,8 +30,10 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): if not legacy_io.Session: legacy_io.install() - db_assets = list(legacy_io.find( - {"type": "asset"}, {"name": 1, "data.parents": 1})) + project_name = legacy_io.active_project() + db_assets = list(get_assets( + project_name, fields=["name", "data.parents"] + )) self.log.debug("__ db_assets: {}".format(db_assets)) asset_db_docs = { diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index 90c582a319..124eacbe39 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -7,7 +7,7 @@ import time from openpype.lib import PypeLogger from openpype.api import get_app_environments_for_context -from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info +from openpype.lib.plugin_tools import get_batch_asset_task_info from openpype.lib.remote_publish import ( get_webpublish_conn, start_webpublish_log, diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index a7836b9c1f..bfdc58d9ee 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -98,7 +98,7 @@ ], "reel_group_name": "OpenPype_Reels", "reel_name": "Loaded", - "clip_name_template": "{asset}_{subset}_{output}" + "clip_name_template": "{asset}_{subset}<_{output}>" }, "LoadClipBatch": { "enabled": true, @@ -121,7 +121,7 @@ "exr16fpdwaa" ], "reel_name": "OP_LoadedReel", - "clip_name_template": "{asset}_{subset}_{output}" + "clip_name_template": "{asset}_{subset}<_{output}>" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 831c34835e..70cda68cb4 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -124,6 +124,11 @@ "Project Manager" ], "cycle_enabled": false, + "cycle_hour_start": [ + 0, + 0, + 0 + ], "review_session_template": "{yy}{mm}{dd}" } }, @@ -268,6 +273,49 @@ } ] }, + { + "hosts": [ + "traypublisher" + ], + "families": [], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] + }, + { + 
"hosts": [ + "traypublisher" + ], + "families": [ + "matchmove", + "shot" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [] + }, + { + "hosts": [ + "traypublisher" + ], + "families": [ + "plate" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [ + { + "families": [ + "clip", + "review" + ], + "add_ftrack_family": true + } + ] + }, { "hosts": [ "maya" diff --git a/openpype/settings/defaults/project_settings/harmony.json b/openpype/settings/defaults/project_settings/harmony.json index 1508b02e1b..d843bc8e70 100644 --- a/openpype/settings/defaults/project_settings/harmony.json +++ b/openpype/settings/defaults/project_settings/harmony.json @@ -1,4 +1,20 @@ { + "load": { + "ImageSequenceLoader": { + "family": [ + "shot", + "render", + "image", + "plate", + "reference" + ], + "representations": [ + "jpeg", + "png", + "jpg" + ] + } + }, "publish": { "CollectPalettes": { "allowed_tasks": [ diff --git a/openpype/settings/defaults/project_settings/hiero.json b/openpype/settings/defaults/project_settings/hiero.json index 1dff3aac51..e9e7199330 100644 --- a/openpype/settings/defaults/project_settings/hiero.json +++ b/openpype/settings/defaults/project_settings/hiero.json @@ -51,5 +51,17 @@ ] } }, - "filters": {} + "filters": {}, + "scriptsmenu": { + "name": "OpenPype Tools", + "definition": [ + { + "type": "action", + "sourcetype": "python", + "title": "OpenPype Docs", + "command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_hiero')", + "tooltip": "Open the OpenPype Hiero user doc page" + } + ] + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index cdd3a62d00..c96acbff6d 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -204,10 +204,16 @@ "ValidateFrameRange": { "enabled": true, "optional": true, - "active": true + "active": true, + "exclude_families": [ + "model", + "rig", + "staticMesh" + ] }, "ValidateShaderName": { "enabled": false, + "optional": true, "regex": "(?P.*)_(.*)_SHD" }, "ValidateShadingEngine": { @@ -221,6 +227,7 @@ }, "ValidateLoadedPlugin": { "enabled": false, + "optional": true, "whitelist_native_plugins": false, "authorized_plugins": [] }, @@ -235,6 +242,7 @@ }, "ValidateUnrealStaticMeshName": { "enabled": true, + "optional": true, "validate_mesh": false, "validate_collision": true }, @@ -251,6 +259,81 @@ "redshift_render_attributes": [], "renderman_render_attributes": [] }, + "ValidateCurrentRenderLayerIsRenderable": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderImageRule": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderSingleCamera": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderLayerAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateStepSize": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayDistributedRendering": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayReferencedAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayTranslatorEnabled": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayProxy": { + "enabled": true, 
+ "optional": false, + "active": true + }, + "ValidateVrayProxyMembers": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRenderScriptCallbacks": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigCacheState": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigInputShapesInInstance": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigSettings": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateModelName": { "enabled": false, "database": true, @@ -269,6 +352,7 @@ }, "ValidateTransformNamingSuffix": { "enabled": true, + "optional": true, "SUFFIX_NAMING_TABLE": { "mesh": [ "_GEO", @@ -292,7 +376,7 @@ "ALLOW_IF_NOT_IN_SUFFIX_TABLE": true }, "ValidateColorSets": { - "enabled": false, + "enabled": true, "optional": true, "active": true }, @@ -336,6 +420,16 @@ "optional": true, "active": true }, + "ValidateMeshNoNegativeScale": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateMeshNonZeroEdgeLength": { + "enabled": true, + "optional": true, + "active": true + }, "ValidateMeshNormalsUnlocked": { "enabled": false, "optional": true, @@ -358,22 +452,22 @@ }, "ValidateNoNamespace": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNoNullTransforms": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNoUnknownNodes": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNodeNoGhosting": { "enabled": false, - "optional": true, + "optional": false, "active": true }, "ValidateShapeDefaultNames": { @@ -401,6 +495,21 @@ "optional": true, "active": true }, + "ValidateNoVRayMesh": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealMeshTriangulated": { + "enabled": false, + "optional": true, + "active": true + }, + "ValidateAlembicVisibleOnly": { + "enabled": true, + "optional": false, + "active": true + }, "ExtractAlembic": { "enabled": true, "families": [ @@ -424,8 +533,34 @@ "optional": true, "active": true }, + "ValidateAnimationContent": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateOutRelatedNodeIds": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRigControllersArnoldAttributes": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkeletalMeshHierarchy": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkinclusterDeformerSet": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateRigOutSetNodeIds": { "enabled": true, + "optional": false, "allow_history_only": false }, "ValidateCameraAttributes": { @@ -438,14 +573,44 @@ "optional": true, "active": true }, + "ValidateAssemblyNamespaces": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateAssemblyModelTransforms": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateAssRelativePaths": { "enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerContent": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerFrameRanges": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealUpAxis": { + "enabled": false, "optional": true, "active": true }, "ValidateCameraContents": { "enabled": true, - "optional": true, + "optional": 
false, "validate_shapes": true }, "ExtractPlayblast": { @@ -496,11 +661,29 @@ "override_viewport_options": true, "displayLights": "default", "textureMaxResolution": 1024, - "multiSample": 4, + "renderDepthOfField": true, "shadows": true, "textures": true, "twoSidedLighting": true, - "ssaoEnable": true, + "lineAAEnable": true, + "multiSample": 8, + "ssaoEnable": false, + "ssaoAmount": 1, + "ssaoRadius": 16, + "ssaoFilterRadius": 16, + "ssaoSamples": 16, + "fogging": false, + "hwFogFalloff": "0", + "hwFogDensity": 0.0, + "hwFogStart": 0, + "hwFogEnd": 100, + "hwFogAlpha": 0, + "hwFogColorR": 1.0, + "hwFogColorG": 1.0, + "hwFogColorB": 1.0, + "motionBlurEnable": false, + "motionBlurSampleCount": 8, + "motionBlurShutterOpenFraction": 0.2, "cameras": false, "clipGhosts": false, "controlVertices": false, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 6c45e2a9c1..3e29122074 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -287,7 +287,11 @@ "LoadClip": { "enabled": true, "_representations": [], - "node_name_template": "{class_name}_{ext}" + "node_name_template": "{class_name}_{ext}", + "options_defaults": { + "start_at_workfile": true, + "add_retime": true + } } }, "workfile_builder": { diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json index 0b54cfd39e..8bf3e3b306 100644 --- a/openpype/settings/defaults/project_settings/traypublisher.json +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -8,9 +8,10 @@ "default_variants": [ "Main" ], - "description": "Publish workfile backup", - "detailed_description": "", - "allow_sequences": true, + "description": "Backup of a working scene", + "detailed_description": "Workfiles are full scenes from any application that are directly edited by artists. They represent a state of work on a task at a given point and are usually not directly referenced into other scenes.", + "allow_sequences": false, + "allow_multiple_items": false, "extensions": [ ".ma", ".mb", @@ -30,6 +31,216 @@ ".psb", ".aep" ] + }, + { + "family": "model", + "identifier": "", + "label": "Model", + "icon": "fa.cubes", + "default_variants": [ + "Main", + "Proxy", + "Sculpt" + ], + "description": "Clean models", + "detailed_description": "Models should only contain geometry data, without any extras like cameras, locators or bones.\n\nKeep in mind that models published from tray publisher are not validated for correctness. ", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".ma", + ".mb", + ".obj", + ".abc", + ".fbx", + ".bgeo", + ".bgeogz", + ".bgeosc", + ".usd", + ".blend" + ] + }, + { + "family": "pointcache", + "identifier": "", + "label": "Pointcache", + "icon": "fa.gears", + "default_variants": [ + "Main" + ], + "description": "Geometry Caches", + "detailed_description": "Alembic or bgeo cache of animated data", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".bgeo", + ".bgeogz", + ".bgeosc" + ] + }, + { + "family": "plate", + "identifier": "", + "label": "Plate", + "icon": "mdi.camera-image", + "default_variants": [ + "Main", + "BG", + "Animatic", + "Reference", + "Offline" + ], + "description": "Footage Plates", + "detailed_description": "Any type of image seqeuence coming from outside of the studio. 
Usually camera footage, but could also be animatics used for reference.", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + }, + { + "family": "render", + "identifier": "", + "label": "Render", + "icon": "mdi.folder-multiple-image", + "default_variants": [], + "description": "Rendered images or video", + "detailed_description": "Sequence or single file renders", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".jpeg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + }, + { + "family": "camera", + "identifier": "", + "label": "Camera", + "icon": "fa.video-camera", + "default_variants": [], + "description": "3d Camera", + "detailed_description": "Ideally this should be only camera itself with baked animation, however, it can technically also include helper geometry.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".ma", + ".hip", + ".blend", + ".fbx", + ".usd" + ] + }, + { + "family": "image", + "identifier": "", + "label": "Image", + "icon": "fa.image", + "default_variants": [ + "Reference", + "Texture", + "Concept", + "Background" + ], + "description": "Single image", + "detailed_description": "Any image data can be published as image family. References, textures, concept art, matte paints. This is a fallback 2d family for everything that doesn't fit more specific family.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".jpg", + ".jpeg", + ".dpx", + ".bmp", + ".tif", + ".tiff", + ".png", + ".psb", + ".psd" + ] + }, + { + "family": "vdb", + "identifier": "", + "label": "VDB Volumes", + "icon": "fa.cloud", + "default_variants": [], + "description": "Sparse volumetric data", + "detailed_description": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".vdb" + ] + }, + { + "family": "matchmove", + "identifier": "", + "label": "Matchmove", + "icon": "fa.empire", + "default_variants": [ + "Camera", + "Object", + "Mocap" + ], + "description": "Matchmoving script", + "detailed_description": "Script exported from matchmoving application to be later processed into a tracked camera with additional data", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [] + }, + { + "family": "rig", + "identifier": "", + "label": "Rig", + "icon": "fa.wheelchair", + "default_variants": [], + "description": "CG rig file", + "detailed_description": "CG rigged character or prop. 
Rig should be clean of any extra data and directly loadable into its respective application",
+            "allow_sequences": false,
+            "allow_multiple_items": false,
+            "extensions": [
+                ".ma",
+                ".blend",
+                ".hip",
+                ".hda"
+            ]
+        },
+        {
+            "family": "simpleUnrealTexture",
+            "identifier": "",
+            "label": "Simple UE texture",
+            "icon": "fa.image",
+            "default_variants": [],
+            "description": "Simple Unreal Engine texture",
+            "detailed_description": "Texture files with Unreal Engine naming conventions",
+            "allow_sequences": false,
+            "allow_multiple_items": true,
+            "extensions": []
         }
-    ]
+    ],
+    "BatchMovieCreator": {
+        "default_variants": ["Main"],
+        "default_tasks": ["Compositing"],
+        "extensions": [
+            ".mov"
+        ]
+    }
 }
\ No newline at end of file
diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json
index a06947ba77..909ffc1ee4 100644
--- a/openpype/settings/defaults/system_settings/general.json
+++ b/openpype/settings/defaults/system_settings/general.json
@@ -2,11 +2,7 @@
     "studio_name": "Studio name",
     "studio_code": "stu",
     "admin_password": "",
-    "environment": {
-        "__environment_keys__": {
-            "global": []
-        }
-    },
+    "environment": {},
     "log_to_server": true,
     "disk_mapping": {
         "windows": [],
diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py
index 3b3dd47e61..defe4aa1f0 100644
--- a/openpype/settings/entities/enum_entity.py
+++ b/openpype/settings/entities/enum_entity.py
@@ -167,7 +167,8 @@ class HostsEnumEntity(BaseEnumEntity):
             "tvpaint",
             "unreal",
             "standalonepublisher",
-            "webpublisher",
+            "traypublisher",
+            "webpublisher"
         ]
 
     def _item_initialization(self):
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
index f8f9d5093d..e008fd85ee 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
@@ -410,7 +410,41 @@
             {
                 "type": "boolean",
                 "key": "cycle_enabled",
-                "label": "Create daily review session"
+                "label": "Run automatically every day"
+            },
+            {
+                "type": "separator"
+            },
+            {
+                "type": "list-strict",
+                "key": "cycle_hour_start",
+                "label": "Create daily review session at",
+                "tooltip": "This may take effect on the next day",
+                "object_types": [
+                    {
+                        "label": "H:",
+                        "type": "number",
+                        "minimum": 0,
+                        "maximum": 23,
+                        "decimal": 0
+                    }, {
+                        "label": "M:",
+                        "type": "number",
+                        "minimum": 0,
+                        "maximum": 59,
+                        "decimal": 0
+                    }, {
+                        "label": "S:",
+                        "type": "number",
+                        "minimum": 0,
+                        "maximum": 59,
+                        "decimal": 0
+                    }
+                ]
+            },
+            {
+                "type": "label",
+                "label": "This can't be overridden per project and any change will take effect on the next day or on restart of the event server."
             },
             {
                 "type": "separator"
@@ -822,7 +856,7 @@
             },
             {
                 "type": "label",
-                "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label and published_paths."
+                "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label, published_paths and source."
}, { "type": "text", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json index c049ce3084..311f742f81 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json @@ -5,6 +5,34 @@ "label": "Harmony", "is_file": true, "children": [ + { + "type": "dict", + "collapsible": true, + "key": "load", + "label": "Loader plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "ImageSequenceLoader", + "label": "Load Image Sequence", + "children": [ + { + "type": "list", + "key": "family", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json index f717eff7dd..3108d2197e 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json @@ -206,6 +206,10 @@ { "type": "schema", "name": "schema_publish_gui_filter" + }, + { + "type": "schema", + "name": "schema_scriptsmenu" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json index 55c1b7b7d7..8f0f864dc2 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -67,6 +67,11 @@ "label": "Allow sequences", "type": "boolean" }, + { + "key": "allow_multiple_items", + "label": "Allow multiple items", + "type": "boolean" + }, { "type": "list", "key": "extensions", @@ -78,6 +83,44 @@ } ] } + }, + { + "type": "dict", + "collapsible": true, + "key": "BatchMovieCreator", + "label": "Batch Movie Creator", + "collapsible_key": true, + "children": [ + { + "type": "label", + "label": "Allows to publish multiple video files in one go.
Name of matching asset is parsed from file names ('asset.mov', 'asset_v001.mov', 'my_asset_to_publish.mov')" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "default_tasks", + "label": "Default tasks", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "extensions", + "label": "Extensions", + "use_label_wrap": true, + "collapsible_key": true, + "collapsed": false, + "object_type": "text" + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json index d6b81c8687..7a40f349cc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json @@ -202,12 +202,15 @@ "decimal": 0 }, { - "type": "number", - "key": "multiSample", - "label": "Anti Aliasing Samples", - "decimal": 0, - "minimum": 0, - "maximum": 32 + "type": "splitter" + }, + { + "type":"boolean", + "key": "renderDepthOfField", + "label": "Depth of Field" + }, + { + "type": "splitter" }, { "type": "boolean", @@ -224,11 +227,145 @@ "key": "twoSidedLighting", "label": "Two Sided Lighting" }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "lineAAEnable", + "label": "Enable Anti-Aliasing" + }, + { + "type": "number", + "key": "multiSample", + "label": "Anti Aliasing Samples", + "decimal": 0, + "minimum": 0, + "maximum": 32 + }, + { + "type": "splitter" + }, { "type": "boolean", "key": "ssaoEnable", "label": "Screen Space Ambient Occlusion" }, + { + "type": "number", + "key": "ssaoAmount", + "label": "SSAO Amount" + }, + { + "type": "number", + "key": "ssaoRadius", + "label": "SSAO Radius" + }, + { + "type": "number", + "key": "ssaoFilterRadius", + "label": "SSAO Filter Radius", + "decimal": 0, + "minimum": 1, + "maximum": 32 + }, + { + "type": "number", + "key": "ssaoSamples", + "label": "SSAO Samples", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "fogging", + "label": "Enable Hardware Fog" + }, + { + "type": "enum", + "key": "hwFogFalloff", + "label": "Hardware Falloff", + "enum_items": [ + { "0": "Linear"}, + { "1": "Exponential"}, + { "2": "Exponential Squared"} + ] + }, + { + "type": "number", + "key": "hwFogDensity", + "label": "Fog Density", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogStart", + "label": "Fog Start" + }, + { + "type": "number", + "key": "hwFogEnd", + "label": "Fog End" + }, + { + "type": "number", + "key": "hwFogAlpha", + "label": "Fog Alpha" + }, + { + "type": "number", + "key": "hwFogColorR", + "label": "Fog Color R", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorG", + "label": "Fog Color G", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorB", + "label": "Fog Color B", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "motionBlurEnable", + "label": "Enable Motion Blur" + }, + { + "type": "number", + "key": "motionBlurSampleCount", + "label": "Motion Blur Sample Count", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, + { + "type": "number", + "key": "motionBlurShutterOpenFraction", + "label": "Shutter Open Fraction", + 
"decimal": 3, + "minimum": 0.01, + "maximum": 32 + }, { "type": "splitter" }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 41b681d893..53247f6bd4 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -62,13 +62,36 @@ } ] }, - { - "type": "schema_template", - "name": "template_publish_plugin", - "template_data": [ + { + "type": "dict", + "collapsible": true, + "key": "ValidateFrameRange", + "label": "Validate Frame Range", + "checkbox_key": "enabled", + "children": [ { - "key": "ValidateFrameRange", - "label": "Validate Frame Range" + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "splitter" + }, + { + "key": "exclude_families", + "label": "Families", + "type": "list", + "object_type": "text" } ] }, @@ -84,6 +107,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Shader name regex can use named capture group asset to validate against current asset name.

Example:
^.*(?P=<asset>.+)_SHD

" @@ -136,6 +164,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "whitelist_native_plugins", @@ -223,6 +256,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "validate_mesh", @@ -309,6 +347,72 @@ } ] }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateCurrentRenderLayerIsRenderable", + "label": "Validate Current Render Layer Has Renderable Camera" + }, + { + "key": "ValidateRenderImageRule", + "label": "Validate Images File Rule (Workspace)" + }, + { + "key": "ValidateRenderNoDefaultCameras", + "label": "Validate No Default Cameras Renderable" + }, + { + "key": "ValidateRenderSingleCamera", + "label": "Validate Render Single Camera" + }, + { + "key": "ValidateRenderLayerAOVs", + "label": "Validate Render Passes / AOVs Are Registered" + }, + { + "key": "ValidateStepSize", + "label": "Validate Step Size" + }, + { + "key": "ValidateVRayDistributedRendering", + "label": "VRay Distributed Rendering" + }, + { + "key": "ValidateVrayReferencedAOVs", + "label": "VRay Referenced AOVs" + }, + { + "key": "ValidateVRayTranslatorEnabled", + "label": "VRay Translator Settings" + }, + { + "key": "ValidateVrayProxy", + "label": "VRay Proxy Settings" + }, + { + "key": "ValidateVrayProxyMembers", + "label": "VRay Proxy Members" + }, + { + "key": "ValidateYetiRenderScriptCallbacks", + "label": "Yeti Render Script Callbacks" + }, + { + "key": "ValidateYetiRigCacheState", + "label": "Yeti Rig Cache State" + }, + { + "key": "ValidateYetiRigInputShapesInInstance", + "label": "Yeti Rig Input Shapes In Instance" + }, + { + "key": "ValidateYetiRigSettings", + "label": "Yeti Rig Settings" + } + ] + }, { "type": "collapsible-wrap", "label": "Model", @@ -393,6 +497,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Validates transform suffix based on the type of its children shapes." 
@@ -449,6 +558,14 @@ "key": "ValidateMeshNonManifold", "label": "ValidateMeshNonManifold" }, + { + "key": "ValidateMeshNoNegativeScale", + "label": "Validate Mesh No Negative Scale" + }, + { + "key": "ValidateMeshNonZeroEdgeLength", + "label": "Validate Mesh Edge Length Non Zero" + }, { "key": "ValidateMeshNormalsUnlocked", "label": "ValidateMeshNormalsUnlocked" @@ -502,6 +619,18 @@ { "key": "ValidateUniqueNames", "label": "ValidateUniqueNames" + }, + { + "key": "ValidateNoVRayMesh", + "label": "Validate No V-Ray Proxies (VRayMesh)" + }, + { + "key": "ValidateUnrealMeshTriangulated", + "label": "Validate if Mesh is Triangulated" + }, + { + "key": "ValidateAlembicVisibleOnly", + "label": "Validate Alembic visible node" } ] }, @@ -550,6 +679,26 @@ { "key": "ValidateRigControllers", "label": "Validate Rig Controllers" + }, + { + "key": "ValidateAnimationContent", + "label": "Validate Animation Content" + }, + { + "key": "ValidateOutRelatedNodeIds", + "label": "Validate Animation Out Set Related Node Ids" + }, + { + "key": "ValidateRigControllersArnoldAttributes", + "label": "Validate Rig Controllers (Arnold Attributes)" + }, + { + "key": "ValidateSkeletalMeshHierarchy", + "label": "Validate Skeletal Mesh Top Node" + }, + { + "key": "ValidateSkinclusterDeformerSet", + "label": "Validate Skincluster Deformer Relationships" } ] }, @@ -566,6 +715,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "allow_history_only", @@ -588,9 +742,33 @@ "key": "ValidateAssemblyName", "label": "Validate Assembly Name" }, + { + "key": "ValidateAssemblyNamespaces", + "label": "Validate Assembly Namespaces" + }, + { + "key": "ValidateAssemblyModelTransforms", + "label": "Validate Assembly Model Transforms" + }, { "key": "ValidateAssRelativePaths", "label": "ValidateAssRelativePaths" + }, + { + "key": "ValidateInstancerContent", + "label": "Validate Instancer Content" + }, + { + "key": "ValidateInstancerFrameRanges", + "label": "Validate Instancer Cache Frame Ranges" + }, + { + "key": "ValidateNoDefaultCameras", + "label": "Validate No Default Cameras" + }, + { + "key": "ValidateUnrealUpAxis", + "label": "Validate Unreal Up-Axis check" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json index 5bd8337e4c..805424c632 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json @@ -11,10 +11,52 @@ { "key": "LoadImage", "label": "Image Loader" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "LoadClip", + "label": "Clip Loader", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" }, { - "key": "LoadClip", - "label": "Clip Loader" + "type": "list", + "key": "_representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "text", + "key": "node_name_template", + "label": "Node name template" + }, + { + "type": "splitter" + }, + { + "type": "dict", + "collapsible": false, + "key": "options_defaults", + "label": "Loader option defaults", + "children": [ + { + "type": "boolean", + "key": "start_at_workfile", + "label": "Start at worfile beggining" + }, + { + "type": "boolean", + "key": "add_retime", + "label": "Add retime" + } + ] } ] } diff --git 
a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index 94080e550d..ccccc76a08 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -21,7 +21,6 @@ def test_backward_compatibility(printer): from openpype.lib import is_latest from openpype.lib import any_outdated from openpype.lib import get_asset - from openpype.lib import get_hierarchy from openpype.lib import get_linked_assets from openpype.lib import get_latest_version from openpype.lib import get_ffprobe_streams diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index 1f6d8b9fa2..13e18b3757 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -17,8 +17,7 @@ from openpype.client import ( get_thumbnail_id_from_source, get_thumbnail, ) -from openpype.api import Anatomy -from openpype.pipeline import HeroVersionType +from openpype.pipeline import HeroVersionType, Anatomy from openpype.pipeline.thumbnail import get_thumbnail_binary from openpype.pipeline.load import ( discover_loader_plugins, diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 915fb7f32e..b48bb61386 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -154,15 +154,20 @@ class PublishReport: self._all_instances_by_id = {} self._current_context = None - def reset(self, context, publish_discover_result=None): + def reset(self, context, create_context): """Reset report and clear all data.""" - self._publish_discover_result = publish_discover_result + + self._publish_discover_result = create_context.publish_discover_result self._plugin_data = [] self._plugin_data_with_plugin = [] self._current_plugin_data = {} self._all_instances_by_id = {} self._current_context = context + for plugin in create_context.publish_plugins_mismatch_targets: + plugin_data = self._add_plugin_data_item(plugin) + plugin_data["skipped"] = True + def add_plugin_iter(self, plugin, context): """Add report about single iteration of plugin.""" for instance in context: @@ -205,6 +210,7 @@ class PublishReport: "name": plugin.__name__, "label": label, "order": plugin.order, + "targets": list(plugin.targets), "instances_data": [], "actions_data": [], "skipped": False, @@ -569,6 +575,8 @@ class PublisherController: # Stop publishing self.stop_publish() + self.save_changes() + # Reset avalon context self.create_context.reset_avalon_context() @@ -777,10 +785,7 @@ class PublisherController: # - pop the key after first collector using it would be safest option? 
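
To make the `PublishReport.reset` change above easier to follow, here is a hedged, standalone sketch of the idea: plugins whose pyblish targets do not match the current publish targets are added to the report up front and flagged as skipped. Names are simplified stand-ins, not the actual controller API:

```python
class DummyPlugin(object):
    # Simplified stand-in for a pyblish plugin class.
    order = 100
    targets = ["farm"]


def make_plugin_item(plugin):
    # Mirrors the report item structure used above, including "targets".
    return {
        "name": plugin.__name__,
        "order": plugin.order,
        "targets": list(plugin.targets),
        "instances_data": [],
        "actions_data": [],
        "skipped": False,
    }


def reset_report(mismatched_plugins):
    plugin_data = []
    for plugin in mismatched_plugins:
        item = make_plugin_item(plugin)
        # Mismatched targets mean the plugin will never run this session.
        item["skipped"] = True
        plugin_data.append(item)
    return plugin_data


print(reset_report([DummyPlugin]))
```
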
self._publish_context.data["create_context"] = self.create_context - self._publish_report.reset( - self._publish_context, - self.create_context.publish_discover_result - ) + self._publish_report.reset(self._publish_context, self.create_context) self._publish_validation_errors = [] self._publish_current_plugin_validation_errors = None self._publish_error = None diff --git a/openpype/tools/publisher/publish_report_viewer/model.py b/openpype/tools/publisher/publish_report_viewer/model.py index a88129a358..bd03376c55 100644 --- a/openpype/tools/publisher/publish_report_viewer/model.py +++ b/openpype/tools/publisher/publish_report_viewer/model.py @@ -1,4 +1,5 @@ import uuid +import html from Qt import QtCore, QtGui import pyblish.api @@ -45,7 +46,8 @@ class InstancesModel(QtGui.QStandardItemModel): all_removed = True for instance_item in instance_items: item = QtGui.QStandardItem(instance_item.label) - item.setData(instance_item.label, ITEM_LABEL_ROLE) + instance_label = html.escape(instance_item.label) + item.setData(instance_label, ITEM_LABEL_ROLE) item.setData(instance_item.errored, ITEM_ERRORED_ROLE) item.setData(instance_item.id, ITEM_ID_ROLE) item.setData(instance_item.removed, INSTANCE_REMOVED_ROLE) diff --git a/openpype/tools/publisher/publish_report_viewer/report_items.py b/openpype/tools/publisher/publish_report_viewer/report_items.py index b47d14da25..8a01569723 100644 --- a/openpype/tools/publisher/publish_report_viewer/report_items.py +++ b/openpype/tools/publisher/publish_report_viewer/report_items.py @@ -83,10 +83,8 @@ class PublishReport: logs = [] plugins_items_by_id = {} - plugins_id_order = [] for plugin_data in data["plugins_data"]: item = PluginItem(plugin_data) - plugins_id_order.append(item.id) plugins_items_by_id[item.id] = item for instance_data_item in plugin_data["instances_data"]: instance_id = instance_data_item["id"] @@ -95,6 +93,14 @@ class PublishReport: copy.deepcopy(log_item_data), item.id, instance_id ) logs.append(log_item) + sorted_plugins = sorted( + plugins_items_by_id.values(), + key=lambda item: item.order + ) + plugins_id_order = [ + plugin_item.id + for plugin_item in sorted_plugins + ] logs_by_instance_id = collections.defaultdict(list) for log_item in logs: diff --git a/openpype/tools/publisher/publish_report_viewer/widgets.py b/openpype/tools/publisher/publish_report_viewer/widgets.py index fd226ea0e4..61eb814a56 100644 --- a/openpype/tools/publisher/publish_report_viewer/widgets.py +++ b/openpype/tools/publisher/publish_report_viewer/widgets.py @@ -1,3 +1,4 @@ +from math import ceil from Qt import QtWidgets, QtCore, QtGui from openpype.widgets.nice_checkbox import NiceCheckbox @@ -137,13 +138,75 @@ class PluginLoadReportWidget(QtWidgets.QWidget): self._model.set_report(report) +class ZoomPlainText(QtWidgets.QPlainTextEdit): + def __init__(self, *args, **kwargs): + super(ZoomPlainText, self).__init__(*args, **kwargs) + + anim_timer = QtCore.QTimer() + anim_timer.setInterval(20) + + anim_timer.timeout.connect(self._scaling_callback) + + self._anim_timer = anim_timer + self._zoom_enabled = False + self._scheduled_scalings = 0 + self._point_size = None + + def wheelEvent(self, event): + if not self._zoom_enabled: + super(ZoomPlainText, self).wheelEvent(event) + return + + degrees = float(event.delta()) / 8 + steps = int(ceil(degrees / 5)) + self._scheduled_scalings += steps + if (self._scheduled_scalings * steps < 0): + self._scheduled_scalings = steps + + self._anim_timer.start() + + def _scaling_callback(self): + if self._scheduled_scalings == 0: + 
self._anim_timer.stop() + return + + factor = 1.0 + (self._scheduled_scalings / 300) + font = self.font() + if self._point_size is None: + self._point_size = font.pointSizeF() + + self._point_size *= factor + if self._point_size < 1: + self._point_size = 1.0 + + font.setPointSizeF(self._point_size) + # Using 'self.setFont(font)' would not be propagated when stylesheets + # are applied on this widget + self.setStyleSheet("font-size: {}pt".format(font.pointSize())) + + if self._scheduled_scalings > 0: + self._scheduled_scalings -= 1 + else: + self._scheduled_scalings += 1 + + def keyPressEvent(self, event): + if event.key() == QtCore.Qt.Key_Control: + self._zoom_enabled = True + super(ZoomPlainText, self).keyPressEvent(event) + + def keyReleaseEvent(self, event): + if event.key() == QtCore.Qt.Key_Control: + self._zoom_enabled = False + super(ZoomPlainText, self).keyReleaseEvent(event) + + class DetailsWidget(QtWidgets.QWidget): def __init__(self, parent): super(DetailsWidget, self).__init__(parent) - output_widget = QtWidgets.QPlainTextEdit(self) - output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) + output_widget = ZoomPlainText(self) output_widget.setObjectName("PublishLogConsole") + output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py index 086cd5c59c..bd591138f4 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/openpype/tools/publisher/widgets/card_view_widgets.py @@ -22,6 +22,7 @@ Only one item can be selected at a time. import re import collections +import html from Qt import QtWidgets, QtCore @@ -98,6 +99,7 @@ class GroupWidget(QtWidgets.QWidget): instances(list): List of instances in CreateContext. """ + # Store instances by id and by subset name instances_by_id = {} instances_by_subset_name = collections.defaultdict(list) @@ -142,6 +144,7 @@ class GroupWidget(QtWidgets.QWidget): class CardWidget(BaseClickableFrame): """Clickable card used as bigger button.""" + selected = QtCore.Signal(str, str) # Group identifier of card # - this must be set because if send when mouse is released with card id @@ -178,6 +181,7 @@ class ContextCardWidget(CardWidget): Is not visually under group widget and is always at the top of card view. 
""" + def __init__(self, parent): super(ContextCardWidget, self).__init__(parent) @@ -204,13 +208,14 @@ class ContextCardWidget(CardWidget): class InstanceCardWidget(CardWidget): """Card widget representing instance.""" + active_changed = QtCore.Signal() def __init__(self, instance, group_icon, parent): super(InstanceCardWidget, self).__init__(parent) self._id = instance.id - self._group_identifier = instance.creator_label + self._group_identifier = instance.group_label self._group_icon = group_icon self.instance = instance @@ -303,13 +308,14 @@ class InstanceCardWidget(CardWidget): self._last_variant = variant self._last_subset_name = subset_name # Make `variant` bold - found_parts = set(re.findall(variant, subset_name, re.IGNORECASE)) + label = html.escape(self.instance.label) + found_parts = set(re.findall(variant, label, re.IGNORECASE)) if found_parts: for part in found_parts: replacement = "{}".format(part) - subset_name = subset_name.replace(part, replacement) + label = label.replace(part, replacement) - self._label_widget.setText(subset_name) + self._label_widget.setText(label) # HTML text will cause that label start catch mouse clicks # - disabling with changing interaction flag self._label_widget.setTextInteractionFlags( @@ -435,7 +441,7 @@ class InstanceCardView(AbstractInstanceView): instances_by_group = collections.defaultdict(list) identifiers_by_group = collections.defaultdict(set) for instance in self.controller.instances: - group_name = instance.creator_label + group_name = instance.group_label instances_by_group[group_name].append(instance) identifiers_by_group[group_name].add( instance.creator_identifier diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py index 3a68835dc7..d4740b2493 100644 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ b/openpype/tools/publisher/widgets/create_dialog.py @@ -342,7 +342,9 @@ class CreateDialog(QtWidgets.QDialog): creators_view = QtWidgets.QListView(self) creators_model = QtGui.QStandardItemModel() - creators_view.setModel(creators_model) + creators_sort_model = QtCore.QSortFilterProxyModel() + creators_sort_model.setSourceModel(creators_model) + creators_view.setModel(creators_sort_model) variant_widget = VariantInputsWidget(self) @@ -465,7 +467,7 @@ class CreateDialog(QtWidgets.QDialog): desc_width_anim_timer = QtCore.QTimer() desc_width_anim_timer.setInterval(10) - prereq_timer.timeout.connect(self._on_prereq_timer) + prereq_timer.timeout.connect(self._invalidate_prereq) desc_width_anim_timer.timeout.connect(self._on_desc_animation) @@ -513,9 +515,10 @@ class CreateDialog(QtWidgets.QDialog): self.variant_hints_group = variant_hints_group self._creators_header_widget = creators_header_widget - self.creators_model = creators_model - self.creators_view = creators_view - self.create_btn = create_btn + self._creators_model = creators_model + self._creators_sort_model = creators_sort_model + self._creators_view = creators_view + self._create_btn = create_btn self._creator_short_desc_widget = creator_short_desc_widget self._pre_create_widget = pre_create_widget @@ -573,7 +576,10 @@ class CreateDialog(QtWidgets.QDialog): def _set_context_enabled(self, enabled): self._assets_widget.set_enabled(enabled) self._tasks_widget.set_enabled(enabled) + check_prereq = self._context_widget.isEnabled() != enabled self._context_widget.setEnabled(enabled) + if check_prereq: + self._invalidate_prereq() def refresh(self): # Get context before refresh to keep selection of asset and @@ 
-600,23 +606,28 @@ class CreateDialog(QtWidgets.QDialog): self._tasks_widget.set_asset_name(asset_name) self._tasks_widget.select_task_name(task_name) - self._invalidate_prereq() + self._invalidate_prereq_deffered() - def _invalidate_prereq(self): + def _invalidate_prereq_deffered(self): self._prereq_timer.start() def _on_asset_filter_height_change(self, height): self._creators_header_widget.setMinimumHeight(height) self._creators_header_widget.setMaximumHeight(height) - def _on_prereq_timer(self): + def _invalidate_prereq(self): prereq_available = True creator_btn_tooltips = [] - if self.creators_model.rowCount() < 1: + + available_creators = self._creators_model.rowCount() > 0 + if available_creators != self._creators_view.isEnabled(): + self._creators_view.setEnabled(available_creators) + + if not available_creators: prereq_available = False creator_btn_tooltips.append("Creator is not selected") - if self._asset_doc is None: + if self._context_change_is_enabled() and self._asset_doc is None: # QUESTION how to handle invalid asset? prereq_available = False creator_btn_tooltips.append("Context is not selected") @@ -624,15 +635,15 @@ class CreateDialog(QtWidgets.QDialog): if prereq_available != self._prereq_available: self._prereq_available = prereq_available - self.create_btn.setEnabled(prereq_available) - self.creators_view.setEnabled(prereq_available) + self._create_btn.setEnabled(prereq_available) + self.variant_input.setEnabled(prereq_available) self.variant_hints_btn.setEnabled(prereq_available) tooltip = "" if creator_btn_tooltips: tooltip = "\n".join(creator_btn_tooltips) - self.create_btn.setToolTip(tooltip) + self._create_btn.setToolTip(tooltip) self._on_variant_change() @@ -670,8 +681,8 @@ class CreateDialog(QtWidgets.QDialog): # Refresh creators and add their families to list existing_items = {} old_creators = set() - for row in range(self.creators_model.rowCount()): - item = self.creators_model.item(row, 0) + for row in range(self._creators_model.rowCount()): + item = self._creators_model.item(row, 0) identifier = item.data(CREATOR_IDENTIFIER_ROLE) existing_items[identifier] = item old_creators.add(identifier) @@ -688,7 +699,7 @@ class CreateDialog(QtWidgets.QDialog): item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable ) - self.creators_model.appendRow(item) + self._creators_model.appendRow(item) label = creator.label or identifier item.setData(label, QtCore.Qt.DisplayRole) @@ -698,16 +709,17 @@ class CreateDialog(QtWidgets.QDialog): # Remove families that are no more available for identifier in (old_creators - new_creators): item = existing_items[identifier] - self.creators_model.takeRow(item.row()) + self._creators_model.takeRow(item.row()) - if self.creators_model.rowCount() < 1: + if self._creators_model.rowCount() < 1: return + self._creators_sort_model.sort(0) # Make sure there is a selection - indexes = self.creators_view.selectedIndexes() + indexes = self._creators_view.selectedIndexes() if not indexes: - index = self.creators_model.index(0, 0) - self.creators_view.setCurrentIndex(index) + index = self._creators_sort_model.index(0, 0) + self._creators_view.setCurrentIndex(index) else: index = indexes[0] @@ -726,11 +738,11 @@ class CreateDialog(QtWidgets.QDialog): asset_name = self._assets_widget.get_selected_asset_name() self._tasks_widget.set_asset_name(asset_name) if self._context_change_is_enabled(): - self._invalidate_prereq() + self._invalidate_prereq_deffered() def _on_task_change(self): if self._context_change_is_enabled(): - 
self._invalidate_prereq() + self._invalidate_prereq_deffered() def _on_current_session_context_request(self): self._assets_widget.set_current_session_asset() @@ -1010,13 +1022,18 @@ class CreateDialog(QtWidgets.QDialog): if variant_value is None: variant_value = self.variant_input.text() - self.create_btn.setEnabled(True) if not self._compiled_name_pattern.match(variant_value): - self.create_btn.setEnabled(False) + self._create_btn.setEnabled(False) self._set_variant_state_property("invalid") self.subset_name_input.setText("< Invalid variant >") return + if not self._context_change_is_enabled(): + self._create_btn.setEnabled(True) + self._set_variant_state_property("") + self.subset_name_input.setText("< Valid variant >") + return + project_name = self.controller.project_name task_name = self._get_task_name() @@ -1027,13 +1044,14 @@ class CreateDialog(QtWidgets.QDialog): variant_value, task_name, asset_doc, project_name ) except TaskNotSetError: - self.create_btn.setEnabled(False) + self._create_btn.setEnabled(False) self._set_variant_state_property("invalid") self.subset_name_input.setText("< Missing task >") return self.subset_name_input.setText(subset_name) + self._create_btn.setEnabled(True) self._validate_subset_name(subset_name, variant_value) def _validate_subset_name(self, subset_name, variant_value): @@ -1088,8 +1106,8 @@ class CreateDialog(QtWidgets.QDialog): self._set_variant_state_property(property_value) variant_is_valid = variant_value.strip() != "" - if variant_is_valid != self.create_btn.isEnabled(): - self.create_btn.setEnabled(variant_is_valid) + if variant_is_valid != self._create_btn.isEnabled(): + self._create_btn.setEnabled(variant_is_valid) def _set_variant_state_property(self, state): current_value = self.variant_input.property("state") @@ -1134,21 +1152,27 @@ class CreateDialog(QtWidgets.QDialog): self._update_help_btn() def _on_create(self): - indexes = self.creators_view.selectedIndexes() + indexes = self._creators_view.selectedIndexes() if not indexes or len(indexes) > 1: return - if not self.create_btn.isEnabled(): + if not self._create_btn.isEnabled(): return index = indexes[0] creator_label = index.data(QtCore.Qt.DisplayRole) creator_identifier = index.data(CREATOR_IDENTIFIER_ROLE) family = index.data(FAMILY_ROLE) - subset_name = self.subset_name_input.text() variant = self.variant_input.text() - asset_name = self._get_asset_name() - task_name = self._get_task_name() + # Care about subset name only if context change is enabled + subset_name = None + asset_name = None + task_name = None + if self._context_change_is_enabled(): + subset_name = self.subset_name_input.text() + asset_name = self._get_asset_name() + task_name = self._get_task_name() + pre_create_data = self._pre_create_widget.current_value() # Where to define these data? # - what data show be stored? 
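
The `_invalidate_prereq_deffered` rename above implements a small debounce: rapid asset/task changes only restart a timer, and the heavier validation runs once after the changes settle. A minimal sketch of that pattern, assuming a single-shot 100 ms timer (interval and class name are illustrative):

```python
from Qt import QtCore


class PrereqValidator(QtCore.QObject):
    """Debounced prerequisite validation."""

    def __init__(self, parent=None):
        super(PrereqValidator, self).__init__(parent)
        timer = QtCore.QTimer(self)
        timer.setInterval(100)
        timer.setSingleShot(True)
        timer.timeout.connect(self._invalidate_prereq)
        self._prereq_timer = timer

    def invalidate_deferred(self):
        # Restarting the timer coalesces bursts of changes into one call.
        self._prereq_timer.start()

    def _invalidate_prereq(self):
        print("validating creator/context prerequisites")
```
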
diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index 6bddaf66c8..3e4fd5b72d 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -23,6 +23,7 @@ selection can be enabled disabled using checkbox or keyboard key presses: ``` """ import collections +import html from Qt import QtWidgets, QtCore, QtGui @@ -113,7 +114,9 @@ class InstanceListItemWidget(QtWidgets.QWidget): self.instance = instance - subset_name_label = QtWidgets.QLabel(instance["subset"], self) + instance_label = html.escape(instance.label) + + subset_name_label = QtWidgets.QLabel(instance_label, self) subset_name_label.setObjectName("ListViewSubsetName") active_checkbox = NiceCheckbox(parent=self) @@ -132,7 +135,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): active_checkbox.stateChanged.connect(self._on_active_change) - self._subset_name_label = subset_name_label + self._instance_label_widget = subset_name_label self._active_checkbox = active_checkbox self._has_valid_context = None @@ -146,8 +149,8 @@ class InstanceListItemWidget(QtWidgets.QWidget): state = "" if not valid: state = "invalid" - self._subset_name_label.setProperty("state", state) - self._subset_name_label.style().polish(self._subset_name_label) + self._instance_label_widget.setProperty("state", state) + self._instance_label_widget.style().polish(self._instance_label_widget) def is_active(self): """Instance is activated.""" @@ -176,9 +179,9 @@ class InstanceListItemWidget(QtWidgets.QWidget): def update_instance_values(self): """Update instance data propagated to widgets.""" # Check subset name - subset_name = self.instance["subset"] - if subset_name != self._subset_name_label.text(): - self._subset_name_label.setText(subset_name) + label = self.instance.label + if label != self._instance_label_widget.text(): + self._instance_label_widget.setText(html.escape(label)) # Check active state self.set_active(self.instance["active"]) # Check valid states @@ -519,7 +522,7 @@ class InstanceListView(AbstractInstanceView): instances_by_group_name = collections.defaultdict(list) group_names = set() for instance in self.controller.instances: - group_label = instance.creator_label + group_label = instance.group_label group_names.add(group_label) instances_by_group_name[group_label].append(instance) diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py index 7096b9fb50..5a5f8c4c37 100644 --- a/openpype/tools/publisher/widgets/widgets.py +++ b/openpype/tools/publisher/widgets/widgets.py @@ -1225,6 +1225,7 @@ class CreatorAttrsWidget(QtWidgets.QWidget): different creators. If creator have same (similar) definitions their widgets are merged into one (different label does not count). 
""" + def __init__(self, controller, parent): super(CreatorAttrsWidget, self).__init__(parent) @@ -1275,6 +1276,7 @@ class CreatorAttrsWidget(QtWidgets.QWidget): content_layout = QtWidgets.QGridLayout(content_widget) content_layout.setColumnStretch(0, 0) content_layout.setColumnStretch(1, 1) + content_layout.setAlignment(QtCore.Qt.AlignTop) row = 0 for attr_def, attr_instances, values in result: diff --git a/openpype/tools/sceneinventory/model.py b/openpype/tools/sceneinventory/model.py index 0bb9c4a658..63fbe04c5c 100644 --- a/openpype/tools/sceneinventory/model.py +++ b/openpype/tools/sceneinventory/model.py @@ -6,6 +6,7 @@ from collections import defaultdict from Qt import QtCore, QtGui import qtawesome +from openpype.host import ILoadHost from openpype.client import ( get_asset_by_id, get_subset_by_id, @@ -193,7 +194,10 @@ class InventoryModel(TreeModel): host = registered_host() if not items: # for debugging or testing, injecting items from outside - items = host.ls() + if isinstance(host, ILoadHost): + items = host.get_containers() + else: + items = host.ls() self.clear() diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py index 764f42f1a3..f42027d9e2 100644 --- a/openpype/tools/settings/settings/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -854,6 +854,9 @@ class ProjectWidget(SettingsCategoryWidget): project_list_widget.version_change_requested.connect( self._on_source_version_change ) + project_list_widget.extract_to_file_requested.connect( + self._on_extract_to_file + ) self.project_list_widget = project_list_widget diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index 45c21d5685..88d923c16a 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -1008,6 +1008,7 @@ class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel): class ProjectListWidget(QtWidgets.QWidget): project_changed = QtCore.Signal() version_change_requested = QtCore.Signal(str) + extract_to_file_requested = QtCore.Signal() def __init__(self, parent, only_active=False): self._parent = parent @@ -1099,7 +1100,12 @@ class ProjectListWidget(QtWidgets.QWidget): self.version_change_requested ) submenu.addAction(action) + + extract_action = QtWidgets.QAction("Extract to file", menu) + extract_action.triggered.connect(self.extract_to_file_requested) + menu.addMenu(submenu) + menu.addAction(extract_action) menu.exec_(QtGui.QCursor.pos()) def on_item_clicked(self, new_index): diff --git a/openpype/tools/texture_copy/app.py b/openpype/tools/texture_copy/app.py index 746a72b3ec..a695bb8c4d 100644 --- a/openpype/tools/texture_copy/app.py +++ b/openpype/tools/texture_copy/app.py @@ -6,8 +6,7 @@ import speedcopy from openpype.client import get_project, get_asset_by_name from openpype.lib import Terminal -from openpype.api import Anatomy -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, Anatomy t = Terminal() diff --git a/openpype/tools/traypublisher/window.py b/openpype/tools/traypublisher/window.py index 5934c4aa8a..cc33287091 100644 --- a/openpype/tools/traypublisher/window.py +++ b/openpype/tools/traypublisher/window.py @@ -12,9 +12,7 @@ from openpype.pipeline import ( install_host, AvalonMongoDB, ) -from openpype.hosts.traypublisher import ( - api as traypublisher -) +from openpype.hosts.traypublisher.api import TrayPublisherHost from openpype.tools.publisher import PublisherWindow from 
openpype.tools.utils.constants import PROJECT_NAME_ROLE from openpype.tools.utils.models import ( @@ -111,9 +109,13 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): if project_name: self._set_project(project_name) + @property + def host(self): + return self._publisher_window.controller.host + def _set_project(self, project_name): self._project_name = project_name - traypublisher.set_project_name(project_name) + self.host.set_project_name(project_name) self.setVisible(False) self.project_selected.emit(project_name) @@ -190,7 +192,8 @@ class TrayPublishWindow(PublisherWindow): def main(): - install_host(traypublisher) + host = TrayPublisherHost() + install_host(host) app = QtWidgets.QApplication([]) window = TrayPublishWindow() window.show() diff --git a/openpype/tools/utils/__init__.py b/openpype/tools/utils/__init__.py index 0f367510bd..5ccc1b40b3 100644 --- a/openpype/tools/utils/__init__.py +++ b/openpype/tools/utils/__init__.py @@ -1,4 +1,5 @@ from .widgets import ( + CustomTextComboBox, PlaceholderLineEdit, BaseClickableFrame, ClickableFrame, @@ -28,6 +29,7 @@ from .overlay_messages import ( __all__ = ( + "CustomTextComboBox", "PlaceholderLineEdit", "BaseClickableFrame", "ClickableFrame", diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index 9dbbe25fda..52d15a59f7 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -6,7 +6,7 @@ use singleton approach with global functions (using helper anyway). import os import pyblish.api - +from openpype.host import IWorkfileHost, ILoadHost from openpype.pipeline import ( registered_host, legacy_io, @@ -49,49 +49,34 @@ class HostToolsHelper: def get_workfiles_tool(self, parent): """Create, cache and return workfiles tool window.""" if self._workfiles_tool is None: - from openpype.tools.workfiles.app import ( - Window, validate_host_requirements - ) + from openpype.tools.workfiles.app import Window + # Host validation host = registered_host() - validate_host_requirements(host) + IWorkfileHost.validate_workfile_methods(host) workfiles_window = Window(parent=parent) self._workfiles_tool = workfiles_window return self._workfiles_tool - def show_workfiles(self, parent=None, use_context=None, save=None): + def show_workfiles( + self, parent=None, use_context=None, save=None, on_top=None + ): """Workfiles tool for changing context and saving workfiles.""" - if use_context is None: - use_context = True - - if save is None: - save = True with qt_app_context(): workfiles_tool = self.get_workfiles_tool(parent) - workfiles_tool.set_save_enabled(save) - - if not workfiles_tool.isVisible(): - workfiles_tool.show() - - if use_context: - context = { - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"] - } - workfiles_tool.set_context(context) - - # Pull window to the front. 
- workfiles_tool.raise_() - workfiles_tool.activateWindow() + workfiles_tool.ensure_visible(use_context, save, on_top) def get_loader_tool(self, parent): """Create, cache and return loader tool window.""" if self._loader_tool is None: from openpype.tools.loader import LoaderWindow + host = registered_host() + ILoadHost.validate_load_methods(host) + loader_window = LoaderWindow(parent=parent or self._parent) self._loader_tool = loader_window @@ -164,6 +149,9 @@ class HostToolsHelper: if self._scene_inventory_tool is None: from openpype.tools.sceneinventory import SceneInventoryWindow + host = registered_host() + ILoadHost.validate_load_methods(host) + scene_inventory_window = SceneInventoryWindow( parent=parent or self._parent ) @@ -390,9 +378,9 @@ def show_tool_by_name(tool_name, parent=None, *args, **kwargs): _SingletonPoint.show_tool_by_name(tool_name, parent, *args, **kwargs) -def show_workfiles(parent=None, use_context=None, save=None): +def show_workfiles(*args, **kwargs): _SingletonPoint.show_tool_by_name( - "workfiles", parent, use_context=use_context, save=save + "workfiles", *args, **kwargs ) diff --git a/openpype/tools/utils/widgets.py b/openpype/tools/utils/widgets.py index d5ae909be8..df0d349822 100644 --- a/openpype/tools/utils/widgets.py +++ b/openpype/tools/utils/widgets.py @@ -11,6 +11,28 @@ from openpype.style import ( log = logging.getLogger(__name__) +class CustomTextComboBox(QtWidgets.QComboBox): + """Combobox which can have different text showed.""" + + def __init__(self, *args, **kwargs): + self._custom_text = None + super(CustomTextComboBox, self).__init__(*args, **kwargs) + + def set_custom_text(self, text=None): + if self._custom_text != text: + self._custom_text = text + self.repaint() + + def paintEvent(self, event): + painter = QtWidgets.QStylePainter(self) + option = QtWidgets.QStyleOptionComboBox() + self.initStyleOption(option) + if self._custom_text is not None: + option.currentText = self._custom_text + painter.drawComplexControl(QtWidgets.QStyle.CC_ComboBox, option) + painter.drawControl(QtWidgets.QStyle.CE_ComboBoxLabel, option) + + class PlaceholderLineEdit(QtWidgets.QLineEdit): """Set placeholder color of QLineEdit in Qt 5.12 and higher.""" def __init__(self, *args, **kwargs): diff --git a/openpype/tools/workfiles/__init__.py b/openpype/tools/workfiles/__init__.py index 5fbc71797d..205fd44838 100644 --- a/openpype/tools/workfiles/__init__.py +++ b/openpype/tools/workfiles/__init__.py @@ -1,12 +1,10 @@ from .window import Window from .app import ( show, - validate_host_requirements, ) __all__ = [ "Window", "show", - "validate_host_requirements", ] diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py index 352847ede8..2d0d551faf 100644 --- a/openpype/tools/workfiles/app.py +++ b/openpype/tools/workfiles/app.py @@ -1,6 +1,7 @@ import sys import logging +from openpype.host import IWorkfileHost from openpype.pipeline import ( registered_host, legacy_io, @@ -14,31 +15,6 @@ module = sys.modules[__name__] module.window = None -def validate_host_requirements(host): - if host is None: - raise RuntimeError("No registered host.") - - # Verify the host has implemented the api for Work Files - required = [ - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "work_root", - "file_extensions", - ] - missing = [] - for name in required: - if not hasattr(host, name): - missing.append(name) - if missing: - raise RuntimeError( - "Host is missing required Work Files interfaces: " - "%s (host: %s)" % (", ".join(missing), host) 
- ) - return True - - def show(root=None, debug=False, parent=None, use_context=True, save=True): """Show Work Files GUI""" # todo: remove `root` argument to show() @@ -50,7 +26,7 @@ def show(root=None, debug=False, parent=None, use_context=True, save=True): pass host = registered_host() - validate_host_requirements(host) + IWorkfileHost.validate_workfile_methods(host) if debug: legacy_io.Session["AVALON_ASSET"] = "Mock" diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py index a7e54471dc..34692b7102 100644 --- a/openpype/tools/workfiles/files_widget.py +++ b/openpype/tools/workfiles/files_widget.py @@ -6,12 +6,12 @@ import copy import Qt from Qt import QtWidgets, QtCore +from openpype.host import IWorkfileHost from openpype.client import get_asset_by_id from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils.delegates import PrettyTimeDelegate from openpype.lib import ( emit_event, - Anatomy, get_workfile_template_key, create_workdir_extra_folders, ) @@ -22,6 +22,7 @@ from openpype.lib.avalon_context import ( from openpype.pipeline import ( registered_host, legacy_io, + Anatomy, ) from .model import ( WorkAreaFilesModel, @@ -125,7 +126,7 @@ class FilesWidget(QtWidgets.QWidget): filter_layout.addWidget(published_checkbox, 0) # Create the Files models - extensions = set(self.host.file_extensions()) + extensions = set(self._get_host_extensions()) views_widget = QtWidgets.QWidget(self) # --- Workarea view --- @@ -452,7 +453,12 @@ class FilesWidget(QtWidgets.QWidget): def open_file(self, filepath): host = self.host - if host.has_unsaved_changes(): + if isinstance(host, IWorkfileHost): + has_unsaved_changes = host.workfile_has_unsaved_changes() + else: + has_unsaved_changes = host.has_unsaved_changes() + + if has_unsaved_changes: result = self.save_changes_prompt() if result is None: # Cancel operation @@ -460,7 +466,10 @@ class FilesWidget(QtWidgets.QWidget): # Save first if has changes if result: - current_file = host.current_file() + if isinstance(host, IWorkfileHost): + current_file = host.get_current_workfile() + else: + current_file = host.current_file() if not current_file: # If the user requested to save the current scene # we can't actually automatically do so if the current @@ -471,7 +480,10 @@ class FilesWidget(QtWidgets.QWidget): return # Save current scene, continue to open file - host.save_file(current_file) + if isinstance(host, IWorkfileHost): + host.save_workfile(current_file) + else: + host.save_file(current_file) event_data_before = self._get_event_context_data() event_data_before["filepath"] = filepath @@ -482,7 +494,10 @@ class FilesWidget(QtWidgets.QWidget): source="workfiles.tool" ) self._enter_session() - host.open_file(filepath) + if isinstance(host, IWorkfileHost): + host.open_workfile(filepath) + else: + host.open_file(filepath) emit_event( "workfile.open.after", event_data_after, @@ -524,7 +539,7 @@ class FilesWidget(QtWidgets.QWidget): filepath = self._get_selected_filepath() extensions = [os.path.splitext(filepath)[1]] else: - extensions = self.host.file_extensions() + extensions = self._get_host_extensions() window = SaveAsDialog( parent=self, @@ -572,9 +587,14 @@ class FilesWidget(QtWidgets.QWidget): self.open_file(path) + def _get_host_extensions(self): + if isinstance(self.host, IWorkfileHost): + return self.host.get_workfile_extensions() + return self.host.file_extensions() + def on_browse_pressed(self): ext_filter = "Work File (*{0})".format( - " *".join(self.host.file_extensions()) + " 
*".join(self._get_host_extensions()) ) kwargs = { "caption": "Work Files", @@ -632,10 +652,16 @@ class FilesWidget(QtWidgets.QWidget): self._enter_session() if not self.published_enabled: - self.host.save_file(filepath) + if isinstance(self.host, IWorkfileHost): + self.host.save_workfile(filepath) + else: + self.host.save_file(filepath) else: shutil.copy(src_path, filepath) - self.host.open_file(filepath) + if isinstance(self.host, IWorkfileHost): + self.host.open_workfile(filepath) + else: + self.host.open_file(filepath) # Create extra folders create_workdir_extra_folders( diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py index 9f4cea2f8a..0b0d67e589 100644 --- a/openpype/tools/workfiles/window.py +++ b/openpype/tools/workfiles/window.py @@ -1,11 +1,15 @@ import os import datetime -from Qt import QtCore, QtWidgets +from Qt import QtCore, QtWidgets, QtGui -from openpype.client import get_asset_by_id, get_asset_by_name +from openpype.client import ( + get_asset_by_id, + get_asset_by_name, + get_workfile_info, +) from openpype import style +from openpype import resources from openpype.lib import ( - get_workfile_doc, create_workfile_doc, save_workfile_data_to_doc, ) @@ -139,21 +143,19 @@ class SidePanelWidget(QtWidgets.QWidget): return self._workfile_doc, data -class Window(QtWidgets.QMainWindow): +class Window(QtWidgets.QWidget): """Work Files Window""" title = "Work Files" def __init__(self, parent=None): super(Window, self).__init__(parent=parent) self.setWindowTitle(self.title) - window_flags = QtCore.Qt.Window | QtCore.Qt.WindowCloseButtonHint - if not parent: - window_flags |= QtCore.Qt.WindowStaysOnTopHint - self.setWindowFlags(window_flags) + icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + self.setWindowIcon(icon) + self.setWindowFlags(self.windowFlags() | QtCore.Qt.Window) # Create pages widget and set it as central widget pages_widget = QtWidgets.QStackedWidget(self) - self.setCentralWidget(pages_widget) home_page_widget = QtWidgets.QWidget(pages_widget) home_body_widget = QtWidgets.QWidget(home_page_widget) @@ -188,6 +190,9 @@ class Window(QtWidgets.QMainWindow): # the files widget has a filter field which tasks does not. 
tasks_widget.setContentsMargins(0, 32, 0, 0) + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.addWidget(pages_widget, 1) + # Set context after asset widget is refreshed # - to do so it is necessary to wait until refresh is done set_context_timer = QtCore.QTimer() @@ -224,6 +229,49 @@ class Window(QtWidgets.QMainWindow): self._first_show = True self._context_to_set = None + def ensure_visible( + self, use_context=None, save=None, on_top=None + ): + if save is None: + save = True + + self.set_save_enabled(save) + + if self.isVisible(): + use_context = False + elif use_context is None: + use_context = True + + if on_top is None and self._first_show: + on_top = self.parent() is None + + window_flags = self.windowFlags() + new_window_flags = window_flags + if on_top is True: + new_window_flags = window_flags | QtCore.Qt.WindowStaysOnTopHint + elif on_top is False: + new_window_flags = window_flags & ~QtCore.Qt.WindowStaysOnTopHint + + if new_window_flags != window_flags: + # Note this is not propagated after initialization of widget in + # some Qt builds + self.setWindowFlags(new_window_flags) + self.show() + + elif not self.isVisible(): + self.show() + + if use_context is None or use_context is True: + context = { + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] + } + self.set_context(context) + + # Pull window to the front. + self.raise_() + self.activateWindow() + @property def project_name(self): return legacy_io.Session["AVALON_PROJECT"] @@ -255,8 +303,9 @@ class Window(QtWidgets.QMainWindow): workfile_doc = None if asset_id and task_name and filepath: filename = os.path.split(filepath)[1] - workfile_doc = get_workfile_doc( - asset_id, task_name, filename, legacy_io + project_name = legacy_io.active_project() + workfile_doc = get_workfile_info( + project_name, asset_id, task_name, filename ) self.side_panel.set_context( asset_id, task_name, filepath, workfile_doc @@ -289,8 +338,9 @@ class Window(QtWidgets.QMainWindow): return filename = os.path.split(filepath)[1] - return get_workfile_doc( - asset_id, task_name, filename, legacy_io + project_name = legacy_io.active_project() + return get_workfile_info( + project_name, asset_id, task_name, filename ) def _create_workfile_doc(self, filepath, force=False): @@ -326,6 +376,7 @@ class Window(QtWidgets.QMainWindow): if self.assets_widget.refreshing: return + self._set_context_timer.stop() self._context_to_set, context = None, self._context_to_set if "asset" in context: asset_doc = get_asset_by_name( diff --git a/openpype/vendor/python/common/capture.py b/openpype/vendor/python/common/capture.py index 6b4c40a6e8..71b86a5f1a 100644 --- a/openpype/vendor/python/common/capture.py +++ b/openpype/vendor/python/common/capture.py @@ -380,7 +380,8 @@ Viewport2Options = { "transparencyAlgorithm": 1, "transparencyQuality": 0.33, "useMaximumHardwareLights": True, - "vertexAnimationCache": 0 + "vertexAnimationCache": 0, + "renderDepthOfField": 0 } @@ -402,7 +403,7 @@ def apply_view(panel, **options): camera_options = options.get("camera_options", {}) _iteritems = getattr(camera_options, "iteritems", camera_options.items) for key, value in _iteritems: - cmds.setAttr("{0}.{1}".format(camera, key), value) + _safe_setAttr("{0}.{1}".format(camera, key), value) # Viewport options viewport_options = options.get("viewport_options", {}) @@ -416,7 +417,7 @@ def apply_view(panel, **options): ) for key, value in _iteritems(): attr = "hardwareRenderingGlobals.{0}".format(key) - cmds.setAttr(attr, value) + 
_safe_setAttr(attr, value) def parse_active_panel(): @@ -550,10 +551,10 @@ def apply_scene(**options): cmds.playbackOptions(maxTime=options["end_frame"]) if "width" in options: - cmds.setAttr("defaultResolution.width", options["width"]) + _safe_setAttr("defaultResolution.width", options["width"]) if "height" in options: - cmds.setAttr("defaultResolution.height", options["height"]) + _safe_setAttr("defaultResolution.height", options["height"]) if "compression" in options: cmds.optionVar( @@ -664,7 +665,7 @@ def _applied_camera_options(options, panel): _iteritems = getattr(options, "iteritems", options.items) for opt, value in _iteritems(): - cmds.setAttr(camera + "." + opt, value) + _safe_setAttr(camera + "." + opt, value) try: yield @@ -672,7 +673,7 @@ def _applied_camera_options(options, panel): if old_options: _iteritems = getattr(old_options, "iteritems", old_options.items) for opt, value in _iteritems(): - cmds.setAttr(camera + "." + opt, value) + _safe_setAttr(camera + "." + opt, value) @contextlib.contextmanager @@ -759,7 +760,7 @@ def _applied_viewport2_options(options): # Apply settings _iteritems = getattr(options, "iteritems", options.items) for opt, value in _iteritems(): - cmds.setAttr("hardwareRenderingGlobals." + opt, value) + _safe_setAttr("hardwareRenderingGlobals." + opt, value) try: yield @@ -767,7 +768,7 @@ def _applied_viewport2_options(options): # Restore previous settings _iteritems = getattr(original, "iteritems", original.items) for opt, value in _iteritems(): - cmds.setAttr("hardwareRenderingGlobals." + opt, value) + _safe_setAttr("hardwareRenderingGlobals." + opt, value) @contextlib.contextmanager @@ -801,14 +802,14 @@ def _maintain_camera(panel, camera): else: state = dict((camera, cmds.getAttr(camera + ".rnd")) for camera in cmds.ls(type="camera")) - cmds.setAttr(camera + ".rnd", True) + _safe_setAttr(camera + ".rnd", True) try: yield finally: _iteritems = getattr(state, "iteritems", state.items) for camera, renderable in _iteritems(): - cmds.setAttr(camera + ".rnd", renderable) + _safe_setAttr(camera + ".rnd", renderable) @contextlib.contextmanager @@ -845,6 +846,18 @@ def _in_standalone(): return not hasattr(cmds, "about") or cmds.about(batch=True) +def _safe_setAttr(*args, **kwargs): + """Wrapper to handle failures when attribute is locked. + + Temporary hotfix until better approach (store value, unlock, set new, + return old, lock again) is implemented. 
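
The docstring above hints at a fuller fix than swallowing the error. A hedged sketch of that unlock-set-relock approach, as a hypothetical helper that is not part of this patch:

```python
import contextlib

from maya import cmds


@contextlib.contextmanager
def _unlocked_attr(attr):
    """Temporarily unlock an attribute, restoring the lock afterwards."""
    was_locked = cmds.getAttr(attr, lock=True)
    if was_locked:
        cmds.setAttr(attr, lock=False)
    try:
        yield
    finally:
        if was_locked:
            cmds.setAttr(attr, lock=True)


def _forced_setAttr(attr, value):
    # Hypothetical alternative to _safe_setAttr: unlock, set, re-lock.
    with _unlocked_attr(attr):
        cmds.setAttr(attr, value)
```
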
+ """ + try: + cmds.setAttr(*args, **kwargs) + except RuntimeError: + print("Cannot setAttr {}!".format(args)) + + # -------------------------------- # # Apply version specific settings diff --git a/openpype/version.py b/openpype/version.py index 65e439ef33..dd5ad97449 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.12.0" +__version__ = "3.12.2-nightly.2" diff --git a/openpype/widgets/attribute_defs/files_widget.py b/openpype/widgets/attribute_defs/files_widget.py index 698a91a1a5..d29aa1b607 100644 --- a/openpype/widgets/attribute_defs/files_widget.py +++ b/openpype/widgets/attribute_defs/files_widget.py @@ -1,6 +1,7 @@ import os import collections import uuid +import json from Qt import QtWidgets, QtCore, QtGui @@ -26,6 +27,27 @@ IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7 EXT_ROLE = QtCore.Qt.UserRole + 8 +def convert_bytes_to_json(bytes_value): + if isinstance(bytes_value, QtCore.QByteArray): + # Raw data are already QByteArray and we don't have to load them + encoded_data = bytes_value + else: + encoded_data = QtCore.QByteArray.fromRawData(bytes_value) + stream = QtCore.QDataStream(encoded_data, QtCore.QIODevice.ReadOnly) + text = stream.readQString() + try: + return json.loads(text) + except Exception: + return None + + +def convert_data_to_bytes(data): + bytes_value = QtCore.QByteArray() + stream = QtCore.QDataStream(bytes_value, QtCore.QIODevice.WriteOnly) + stream.writeQString(json.dumps(data)) + return bytes_value + + class SupportLabel(QtWidgets.QLabel): pass @@ -33,7 +55,7 @@ class SupportLabel(QtWidgets.QLabel): class DropEmpty(QtWidgets.QWidget): _empty_extensions = "Any file" - def __init__(self, single_item, allow_sequences, parent): + def __init__(self, single_item, allow_sequences, extensions_label, parent): super(DropEmpty, self).__init__(parent) drop_label_widget = QtWidgets.QLabel("Drag & Drop files here", self) @@ -61,7 +83,19 @@ class DropEmpty(QtWidgets.QWidget): widget.setAlignment(QtCore.Qt.AlignCenter) widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + update_size_timer = QtCore.QTimer() + update_size_timer.setInterval(10) + update_size_timer.setSingleShot(True) + + update_size_timer.timeout.connect(self._on_update_size_timer) + + self._update_size_timer = update_size_timer + + if extensions_label and not extensions_label.startswith(" "): + extensions_label = " " + extensions_label + self._single_item = single_item + self._extensions_label = extensions_label self._allow_sequences = allow_sequences self._allowed_extensions = set() self._allow_folders = None @@ -114,22 +148,51 @@ class DropEmpty(QtWidgets.QWidget): items_label = "Single " if len(allowed_items) == 1: - allowed_items_label = allowed_items[0] + extensions_label = allowed_items[0] elif len(allowed_items) == 2: - allowed_items_label = " or ".join(allowed_items) + extensions_label = " or ".join(allowed_items) else: last_item = allowed_items.pop(-1) new_last_item = " or ".join(last_item, allowed_items.pop(-1)) allowed_items.append(new_last_item) - allowed_items_label = ", ".join(allowed_items) + extensions_label = ", ".join(allowed_items) + + allowed_items_label = extensions_label items_label += allowed_items_label + label_tooltip = None if self._allowed_extensions: items_label += " of\n{}".format( ", ".join(sorted(self._allowed_extensions)) ) + if self._extensions_label: + label_tooltip = items_label + items_label = self._extensions_label + + if self._items_label_widget.text() == items_label: + 
return + + self._items_label_widget.setToolTip(label_tooltip) self._items_label_widget.setText(items_label) + self._update_size_timer.start() + + def resizeEvent(self, event): + super(DropEmpty, self).resizeEvent(event) + self._update_size_timer.start() + + def _on_update_size_timer(self): + """Recalculate height of label with extensions. + + Dynamic QLabel with word wrap does not handle properly it's sizeHint + calculations on show. This way it is recalculated. It is good practice + to trigger this method with small offset using '_update_size_timer'. + """ + + width = self._items_label_widget.width() + height = self._items_label_widget.heightForWidth(width) + self._items_label_widget.setMinimumHeight(height) + self._items_label_widget.updateGeometry() def paintEvent(self, event): super(DropEmpty, self).paintEvent(event) @@ -162,6 +225,7 @@ class FilesModel(QtGui.QStandardItemModel): def __init__(self, single_item, allow_sequences): super(FilesModel, self).__init__() + self._id = str(uuid.uuid4()) self._single_item = single_item self._multivalue = False self._allow_sequences = allow_sequences @@ -171,6 +235,10 @@ class FilesModel(QtGui.QStandardItemModel): self._filenames_by_dirpath = collections.defaultdict(set) self._items_by_dirpath = collections.defaultdict(list) + @property + def id(self): + return self._id + def set_multivalue(self, multivalue): """Disable filtering.""" @@ -245,6 +313,66 @@ class FilesModel(QtGui.QStandardItemModel): return item_id, item + def mimeData(self, indexes): + item_ids = [ + index.data(ITEM_ID_ROLE) + for index in indexes + ] + + item_ids_data = convert_data_to_bytes(item_ids) + mime_data = super(FilesModel, self).mimeData(indexes) + mime_data.setData("files_widget/internal_move", item_ids_data) + + file_items = [] + for item_id in item_ids: + file_item = self.get_file_item_by_id(item_id) + if file_item: + file_items.append(file_item.to_dict()) + + full_item_data = convert_data_to_bytes({ + "items": file_items, + "id": self._id + }) + mime_data.setData("files_widget/full_data", full_item_data) + return mime_data + + def dropMimeData(self, mime_data, action, row, col, index): + item_ids = convert_bytes_to_json( + mime_data.data("files_widget/internal_move") + ) + if item_ids is None: + return False + + # Find matching item after which will be items moved + # - store item before moved items are removed + root = self.invisibleRootItem() + if row >= 0: + src_item = self.item(row) + else: + src_item_id = index.data(ITEM_ID_ROLE) + src_item = self._items_by_id.get(src_item_id) + + # Take out items that should be moved + items = [] + for item_id in item_ids: + item = self._items_by_id.get(item_id) + if item: + self.takeRow(item.row()) + items.append(item) + + # Skip if there are not items that can be moved + if not items: + return False + + # Calculate row where items should be inserted + if src_item: + src_row = src_item.row() + else: + src_row = root.rowCount() + + root.insertRow(src_row, items) + return True + class FilesProxyModel(QtCore.QSortFilterProxyModel): def __init__(self, *args, **kwargs): @@ -428,6 +556,9 @@ class FilesView(QtWidgets.QListView): QtWidgets.QAbstractItemView.ExtendedSelection ) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setAcceptDrops(True) + self.setDragEnabled(True) + self.setDragDropMode(self.InternalMove) remove_btn = InViewButton(self) pix_enabled = paint_image_with_color( @@ -529,11 +660,13 @@ class FilesView(QtWidgets.QListView): class FilesWidget(QtWidgets.QFrame): value_changed = QtCore.Signal() - def 
__init__(self, single_item, allow_sequences, parent): + def __init__(self, single_item, allow_sequences, extensions_label, parent): super(FilesWidget, self).__init__(parent) self.setAcceptDrops(True) - empty_widget = DropEmpty(single_item, allow_sequences, self) + empty_widget = DropEmpty( + single_item, allow_sequences, extensions_label, self + ) files_model = FilesModel(single_item, allow_sequences) files_proxy_model = FilesProxyModel() @@ -553,6 +686,7 @@ class FilesWidget(QtWidgets.QFrame): files_view.context_menu_requested.connect( self._on_context_menu_requested ) + self._in_set_value = False self._single_item = single_item self._multivalue = False @@ -637,8 +771,6 @@ class FilesWidget(QtWidgets.QFrame): ) self._widgets_by_id[item_id] = widget - self._files_proxy_model.sort(0) - if not self._in_set_value: self.value_changed.emit() @@ -743,12 +875,22 @@ class FilesWidget(QtWidgets.QFrame): event.setDropAction(QtCore.Qt.CopyAction) event.accept() + full_data_value = mime_data.data("files_widget/full_data") + if self._handle_full_data_drag(full_data_value): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + def dragLeaveEvent(self, event): event.accept() def dropEvent(self, event): + if self._multivalue: + return + mime_data = event.mimeData() - if not self._multivalue and mime_data.hasUrls(): + if mime_data.hasUrls(): + event.accept() + # event.setDropAction(QtCore.Qt.CopyAction) filepaths = [] for url in mime_data.urls(): filepath = url.toLocalFile() @@ -759,7 +901,58 @@ class FilesWidget(QtWidgets.QFrame): filepaths = self._files_proxy_model.filter_valid_files(filepaths) if filepaths: self._add_filepaths(filepaths) - event.accept() + + if self._handle_full_data_drop( + mime_data.data("files_widget/full_data") + ): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + super(FilesWidget, self).dropEvent(event) + + def _handle_full_data_drag(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + return True + + def _handle_full_data_drop(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + + for item in full_data["items"]: + filepaths = [ + os.path.join(item["directory"], filename) + for filename in item["filenames"] + ] + filepaths = self._files_proxy_model.filter_valid_files(filepaths) + if filepaths: + self._add_filepaths(filepaths) + + if self._copy_modifiers_enabled(): + return False + return True + + def _copy_modifiers_enabled(self): + if ( + QtWidgets.QApplication.keyboardModifiers() + & QtCore.Qt.ControlModifier + ): + return True + return False def _add_filepaths(self, filepaths): self._files_model.add_filepaths(filepaths) diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/widgets/attribute_defs/widgets.py index b6493b80a8..60ae952553 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/widgets/attribute_defs/widgets.py @@ -15,6 +15,7 @@ from openpype.lib.attribute_definitions import ( UISeparatorDef, UILabelDef ) +from openpype.tools.utils import CustomTextComboBox from openpype.widgets.nice_checkbox import NiceCheckbox from .files_widget import FilesWidget @@ -369,8 +370,12 @@ class BoolAttrWidget(_BaseAttrDefWidget): class EnumAttrWidget(_BaseAttrDefWidget): + def __init__(self, *args, **kwargs): + self._multivalue = False + 
super(EnumAttrWidget, self).__init__(*args, **kwargs) + def _ui_init(self): - input_widget = QtWidgets.QComboBox(self) + input_widget = CustomTextComboBox(self) combo_delegate = QtWidgets.QStyledItemDelegate(input_widget) input_widget.setItemDelegate(combo_delegate) @@ -394,6 +399,9 @@ class EnumAttrWidget(_BaseAttrDefWidget): def _on_value_change(self): new_value = self.current_value() + if self._multivalue: + self._multivalue = False + self._input_widget.set_custom_text(None) self.value_changed.emit(new_value, self.attr_def.id) def current_value(self): @@ -401,14 +409,23 @@ class EnumAttrWidget(_BaseAttrDefWidget): return self._input_widget.itemData(idx) def set_value(self, value, multivalue=False): + if multivalue: + set_value = set(value) + if len(set_value) == 1: + multivalue = False + value = tuple(set_value)[0] + if not multivalue: idx = self._input_widget.findData(value) cur_idx = self._input_widget.currentIndex() if idx != cur_idx and idx >= 0: self._input_widget.setCurrentIndex(idx) - else: - self._input_widget.lineEdit().setText("Multiselection") + custom_text = None + if multivalue: + custom_text = "< Multiselection >" + self._input_widget.set_custom_text(custom_text) + self._multivalue = multivalue class UnknownAttrWidget(_BaseAttrDefWidget): @@ -443,7 +460,10 @@ class UnknownAttrWidget(_BaseAttrDefWidget): class FileAttrWidget(_BaseAttrDefWidget): def _ui_init(self): input_widget = FilesWidget( - self.attr_def.single_item, self.attr_def.allow_sequences, self + self.attr_def.single_item, + self.attr_def.allow_sequences, + self.attr_def.extensions_label, + self ) if self.attr_def.tooltip: diff --git a/pyproject.toml b/pyproject.toml index 306c7206fa..76a6628b23 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.12.0" # OpenPype +version = "3.12.2-nightly.2" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" @@ -136,7 +136,7 @@ hash = "b9950f5d2fa3720b52b8be55bacf5f56d33f9e029d38ee86534995f3d8d253d2" [openpype.thirdparty.oiio.linux] url = "https://distribute.openpype.io/thirdparty/oiio_tools-2.2.20-linux-centos7.tgz" -hash = "be1abf8a50e9da5913298447421af0a17829d83ed6252ae1d40da7fa36a78787" +hash = "3894dec7e4e521463891a869586850e8605f5fd604858b674c87323bf33e273d" [openpype.thirdparty.oiio.darwin] url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-darwin.tgz" diff --git a/tools/build.ps1 b/tools/build.ps1 index ff28544954..195b2dc75e 100644 --- a/tools/build.ps1 +++ b/tools/build.ps1 @@ -28,6 +28,13 @@ if($arguments -eq "--no-submodule-update") { $disable_submodule_update=$true } +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + function Start-Progress { param([ScriptBlock]$code) $scroll = "/-\|/-\|" @@ -72,14 +79,18 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. 
$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" - Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray - Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White + Write-Color -Text " Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White Exit-WithCode 1 } } +function Install-Poetry() { + Write-Color -Text ">>> ", "Installing Poetry ... " -Color Green, Gray + $env:POETRY_HOME="$openpype_root\.poetry" + (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/install-poetry.py -UseBasicParsing).Content | python - +} + $art = @" . . .. . .. @@ -103,10 +114,6 @@ Write-Host $art -ForegroundColor DarkGreen # Enable if PS 7.x is needed. # Show-PSWarning -$current_dir = Get-Location -$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent -$openpype_root = (Get-Item $script_dir).parent.FullName - $env:_INSIDE_OPENPYPE_TOOL = "1" if (-not (Test-Path 'env:POETRY_HOME')) { @@ -119,8 +126,7 @@ $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" $result = [regex]::Matches($version_file, '__version__ = "(?\d+\.\d+.\d+.*)"') $openpype_version = $result[0].Groups['version'].Value if (-not $openpype_version) { - Write-Host "!!! " -ForegroundColor yellow -NoNewline - Write-Host "Cannot determine OpenPype version." + Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray Exit-WithCode 1 } @@ -129,75 +135,62 @@ if (-not (Test-Path -PathType Container -Path "$($openpype_root)\build")) { New-Item -ItemType Directory -Force -Path "$($openpype_root)\build" } -Write-Host "--- " -NoNewline -ForegroundColor yellow -Write-Host "Cleaning build directory ..." +Write-Color -Text "--- ", "Cleaning build directory ..." -Color Yellow, Gray try { Remove-Item -Recurse -Force "$($openpype_root)\build\*" } catch { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "Cannot clean build directory, possibly because process is using it." - Write-Host $_.Exception.Message + Write-Color -Text "!!! ", "Cannot clean build directory, possibly because process is using it." -Color Red, Gray + Write-Color -Text $_.Exception.Message -Color Red Exit-WithCode 1 } if (-not $disable_submodule_update) { - Write-Host ">>> " -NoNewLine -ForegroundColor green - Write-Host "Making sure submodules are up-to-date ..." - git submodule update --init --recursive + Write-Color -Text ">>> ", "Making sure submodules are up-to-date ..." -Color Green, Gray + & git submodule update --init --recursive } else { - Write-Host "*** " -NoNewLine -ForegroundColor yellow - Write-Host "Not updating submodules ..." + Write-Color -Text "*** ", "Not updating submodules ..." -Color Green, Gray } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "OpenPype [ " -NoNewline -ForegroundColor white -Write-host $openpype_version -NoNewline -ForegroundColor green -Write-Host " ]" -ForegroundColor white +Write-Color -Text ">>> ", "OpenPype [ ", $openpype_version, " ]" -Color Green, White, Cyan, White -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... 
" -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." + Write-Color -Text "NOT FOUND" -Color Yellow + Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." -Color Yellow, Gray & "$openpype_root\tools\create_env.ps1" } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Cleaning cache files ... " -NoNewline +Write-Color -Text ">>> ", "Cleaning cache files ... " -Color Green, Gray -NoNewline Get-ChildItem $openpype_root -Filter "*.pyc" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force Get-ChildItem $openpype_root -Filter "*.pyo" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force Get-ChildItem $openpype_root -Filter "__pycache__" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force -Recurse -Write-Host "OK" -ForegroundColor green +Write-Color -Text "OK" -Color green -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Building OpenPype ..." +Write-Color -Text ">>> ", "Building OpenPype ..." -Color Green, White $startTime = [int][double]::Parse((Get-Date -UFormat %s)) $out = & "$($env:POETRY_HOME)\bin\poetry" run python setup.py build 2>&1 Set-Content -Path "$($openpype_root)\build\build.log" -Value $out if ($LASTEXITCODE -ne 0) { - Write-Host "------------------------------------------" -ForegroundColor Red + Write-Color -Text "------------------------------------------" -Color Red Get-Content "$($openpype_root)\build\build.log" - Write-Host "------------------------------------------" -ForegroundColor Red - Write-Host "!!! " -NoNewLine -ForegroundColor Red - Write-Host "Build failed. Check the log: " -NoNewline - Write-Host ".\build\build.log" -ForegroundColor Yellow + Write-Color -Text "------------------------------------------" -Color Yellow + Write-Color -Text "!!! ", "Build failed. Check the log: ", ".\build\build.log" -Color Red, Yellow, White Exit-WithCode $LASTEXITCODE } Set-Content -Path "$($openpype_root)\build\build.log" -Value $out & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\tools\build_dependencies.py" -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "restoring current directory" +Write-Color -Text ">>> ", "Restoring current directory" -Color Green, Gray Set-Location -Path $current_dir $endTime = [int][double]::Parse((Get-Date -UFormat %s)) -Write-Host "*** " -NoNewline -ForegroundColor Cyan -Write-Host "All done in $($endTime - $startTime) secs. You will find OpenPype and build log in " -NoNewLine -Write-Host "'.\build'" -NoNewline -ForegroundColor Green -Write-Host " directory." +try +{ + New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype build complete!", "All done in $( $endTime - $startTime ) secs. You will find OpenPype and build log in build directory." +} catch {} +Write-Color -Text "*** ", "All done in ", $($endTime - $startTime), " secs. You will find OpenPype and build log in ", "'.\build'", " directory." 
-Color Green, Gray, White, Gray, White, Gray diff --git a/tools/build_win_installer.ps1 b/tools/build_win_installer.ps1 index 49fa803742..b9d1ca2d3f 100644 --- a/tools/build_win_installer.ps1 +++ b/tools/build_win_installer.ps1 @@ -11,6 +11,12 @@ PS> .\build_win_installer.ps1 #> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" function Start-Progress { param([ScriptBlock]$code) @@ -44,7 +50,6 @@ function Start-Progress { #> } - function Exit-WithCode($exitcode) { # Only exit this host process if it's a child of another PowerShell parent process... $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId @@ -56,10 +61,8 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" - Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray - Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Write-Color -Text "!!! ", "You are using an old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White + Write-Color -Text " Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White Exit-WithCode 1 } } @@ -87,9 +90,6 @@ Write-Host $art -ForegroundColor DarkGreen # Enable if PS 7.x is needed. # Show-PSWarning -$current_dir = Get-Location -$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent -$openpype_root = (Get-Item $script_dir).parent.FullName Set-Location -Path $openpype_root @@ -97,16 +97,15 @@ $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" $result = [regex]::Matches($version_file, '__version__ = "(?\d+\.\d+.\d+.*)"') $openpype_version = $result[0].Groups['version'].Value if (-not $openpype_version) { - Write-Host "!!! " -ForegroundColor yellow -NoNewline - Write-Host "Cannot determine OpenPype version." + Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray Exit-WithCode 1 } + $env:BUILD_VERSION = $openpype_version iscc -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Detecting host Python ... " -NoNewline +Write-Color ">>> ", "Detecting host Python ... " -Color Green, White -NoNewline $python = "python" if (Get-Command "pyenv" -ErrorAction SilentlyContinue) { $pyenv_python = & pyenv which python @@ -115,7 +114,7 @@ if (Get-Command "pyenv" -ErrorAction SilentlyContinue) { } } if (-not (Get-Command $python -ErrorAction SilentlyContinue)) { - Write-Host "!!! Python not detected" -ForegroundColor red + Write-Color "!!! ", "Python not detected" -Color Red, Yellow Set-Location -Path $current_dir Exit-WithCode 1 } @@ -128,7 +127,7 @@ $p = & $python -c $version_command $env:PYTHON_VERSION = $p $m = $p -match '(\d+)\.(\d+)' if(-not $m) { - Write-Host "!!! Cannot determine version" -ForegroundColor red + Write-Color "!!! ", "Cannot determine version" -Color Red, Yellow Set-Location -Path $current_dir Exit-WithCode 1 } @@ -145,8 +144,7 @@ if (($matches[1] -lt 3) -or ($matches[2] -lt 7)) { Write-Host "OK [ $p ]" -ForegroundColor green } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Creating OpenPype installer ... " -ForegroundColor white +Write-Color -Text ">>> ", "Creating OpenPype installer ... " -Color Green, White $build_dir_command = @" import sys @@ -155,24 +153,25 @@ print('exe.{}-{}'.format(get_platform(), sys.version[0:3])) "@ $build_dir = & $python -c $build_dir_command -Write-Host "Build directory ... ${build_dir}" -ForegroundColor white +Write-Color -Text "--- ", "Build directory ", "${build_dir}" -Color Green, Gray, White $env:BUILD_DIR = $build_dir -if (Get-Command iscc -errorAction SilentlyContinue -ErrorVariable ProcessError) -{ - iscc "$openpype_root\inno_setup.iss" -}else { - Write-Host "!!! Cannot find Inno Setup command" -ForegroundColor red - Write-Host "!!! You can download it at https://jrsoftware.org/" -ForegroundColor red +if (-not (Get-Command iscc -errorAction SilentlyContinue -ErrorVariable ProcessError)) { + Write-Color -Text "!!! ", "Cannot find Inno Setup command" -Color Red, Yellow + Write-Color -Text "!!! ", "You can download it at https://jrsoftware.org/" -Color Red, White Exit-WithCode 1 } +& iscc "$openpype_root\inno_setup.iss" -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "restoring current directory" +if ($LASTEXITCODE -ne 0) { + Write-Color -Text "!!! ", "Creating installer failed." -Color Red, Yellow + Exit-WithCode 1 +} + +Write-Color -Text ">>> ", "Restoring current directory" -Color Green, Gray Set-Location -Path $current_dir - -Write-Host "*** " -NoNewline -ForegroundColor Cyan -Write-Host "All done. You will find OpenPype installer in " -NoNewLine -Write-Host "'.\build'" -NoNewline -ForegroundColor Green -Write-Host " directory." +try { + New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype build complete!", "All done. You will find OpenPype installer in '.\build' directory." +} catch {} +Write-Color -Text "*** ", "All done. You will find OpenPype installer in ", "'.\build'", " directory." -Color Green, Gray, White, Gray diff --git a/tools/create_env.ps1 b/tools/create_env.ps1 index c307ba2031..3f956e5c6a 100644 --- a/tools/create_env.ps1 +++ b/tools/create_env.ps1 @@ -24,6 +24,15 @@ if($arguments -eq "--verbose") { $poetry_verbosity="-vvv" } +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +& git submodule update --init --recursive +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + + function Exit-WithCode($exitcode) { # Only exit this host process if it's a child of another PowerShell parent process... $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId @@ -36,30 +45,26 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. 
$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" - Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray - Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White + Write-Color -Text " Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White Exit-WithCode 1 } } function Install-Poetry() { - Write-Host ">>> " -NoNewline -ForegroundColor Green - Write-Host "Installing Poetry ... " + Write-Color -Text ">>> ", "Installing Poetry ... " -Color Green, Gray $python = "python" if (Get-Command "pyenv" -ErrorAction SilentlyContinue) { if (-not (Test-Path -PathType Leaf -Path "$($openpype_root)\.python-version")) { $result = & pyenv global if ($result -eq "no global version configured") { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "Using pyenv but having no local or global version of Python set." + Write-Color -Text "!!! ", "Using pyenv but having no local or global version of Python set." -Color Red, Yellow Exit-WithCode 1 } } $python = & pyenv which python - + } $env:POETRY_HOME="$openpype_root\.poetry" @@ -68,8 +73,7 @@ function Install-Poetry() { function Test-Python() { - Write-Host ">>> " -NoNewline -ForegroundColor green - Write-Host "Detecting host Python ... " -NoNewline + Write-Color -Text ">>> ", "Detecting host Python ... " -Color Green, Gray -NoNewline $python = "python" if (Get-Command "pyenv" -ErrorAction SilentlyContinue) { $pyenv_python = & pyenv which python @@ -97,22 +101,17 @@ print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1])) } # We are supporting python 3.7 only if (($matches[1] -lt 3) -or ($matches[2] -lt 7)) { - Write-Host "FAILED Version [ $p ] is old and unsupported" -ForegroundColor red + Write-Color -Text "FAILED ", "Version ", "[", $p ,"]", "is old and unsupported" -Color Red, Yellow, Cyan, White, Cyan, Yellow Set-Location -Path $current_dir Exit-WithCode 1 } elseif (($matches[1] -eq 3) -and ($matches[2] -gt 7)) { - Write-Host "WARNING Version [ $p ] is unsupported, use at your own risk." -ForegroundColor yellow - Write-Host "*** " -NoNewline -ForegroundColor yellow - Write-Host "OpenPype supports only Python 3.7" -ForegroundColor white + Write-Color -Text "WARNING Version ", "[", $p, "]", " is unsupported, use at your own risk." -Color Yellow, Cyan, White, Cyan, Yellow + Write-Color -Text "*** ", "OpenPype supports only Python 3.7" -Color Yellow, White } else { - Write-Host "OK [ $p ]" -ForegroundColor green + Write-Color "OK ", "[", $p, "]" -Color Green, Cyan, White, Cyan } } -$current_dir = Get-Location -$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent -$openpype_root = (Get-Item $script_dir).parent.FullName - if (-not (Test-Path 'env:POETRY_HOME')) { $env:POETRY_HOME = "$openpype_root\.poetry" } @@ -150,41 +149,39 @@ $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" $result = [regex]::Matches($version_file, '__version__ = "(?\d+\.\d+.\d+.*)"') $openpype_version = $result[0].Groups['version'].Value if (-not $openpype_version) { - Write-Host "!!! " -ForegroundColor yellow -NoNewline - Write-Host "Cannot determine OpenPype version." + Write-Color -Text "!!! ", "Cannot determine OpenPype version." 
-Color Red, Yellow Set-Location -Path $current_dir Exit-WithCode 1 } -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Found OpenPype version " -NoNewline -Write-Host "[ $($openpype_version) ]" -ForegroundColor Green +Write-Color -Text ">>> ", "Found OpenPype version ", "[ ", $($openpype_version), " ]" -Color Green, Gray, Cyan, White, Cyan Test-Python -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Color -Text "NOT FOUND" -Color Yellow Install-Poetry - Write-Host "INSTALLED" -ForegroundColor Cyan + Write-Color -Text "INSTALLED" -Color Cyan } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } if (-not (Test-Path -PathType Leaf -Path "$($openpype_root)\poetry.lock")) { - Write-Host ">>> " -NoNewline -ForegroundColor green - Write-Host "Installing virtual environment and creating lock." + Write-Color -Text ">>> ", "Installing virtual environment and creating lock." -Color Green, Gray } else { - Write-Host ">>> " -NoNewline -ForegroundColor green - Write-Host "Installing virtual environment from lock." + Write-Color -Text ">>> ", "Installing virtual environment from lock." -Color Green, Gray } +$startTime = [int][double]::Parse((Get-Date -UFormat %s)) & "$env:POETRY_HOME\bin\poetry" install --no-root $poetry_verbosity --ansi if ($LASTEXITCODE -ne 0) { - Write-Host "!!! " -ForegroundColor yellow -NoNewline - Write-Host "Poetry command failed." + Write-Color -Text "!!! ", "Poetry command failed." -Color Red, Yellow Set-Location -Path $current_dir Exit-WithCode 1 } +$endTime = [int][double]::Parse((Get-Date -UFormat %s)) Set-Location -Path $current_dir -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Virtual environment created." +try +{ + New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype", "Virtual environment created.", "All done in $( $endTime - $startTime ) secs." +} catch {} +Write-Color -Text ">>> ", "Virtual environment created." -Color Green, White diff --git a/tools/create_zip.ps1 b/tools/create_zip.ps1 index e33445d1fa..7b852b7c54 100644 --- a/tools/create_zip.ps1 +++ b/tools/create_zip.ps1 @@ -19,6 +19,13 @@ PS> .\create_zip.ps1 --path C:\OpenPype #> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + function Exit-WithCode($exitcode) { # Only exit this host process if it's a child of another PowerShell parent process... $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId @@ -31,18 +38,12 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. 
$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" - Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray - Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White + Write-Color -Text " Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White Exit-WithCode 1 } } -$current_dir = Get-Location -$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent -$openpype_root = (Get-Item $script_dir).parent.FullName - $env:_INSIDE_OPENPYPE_TOOL = "1" if (-not (Test-Path 'env:POETRY_HOME')) { @@ -78,31 +79,25 @@ $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" $result = [regex]::Matches($version_file, '__version__ = "(?\d+\.\d+.\d+.*)"') $openpype_version = $result[0].Groups['version'].Value if (-not $openpype_version) { - Write-Host "!!! " -ForegroundColor yellow -NoNewline - Write-Host "Cannot determine OpenPype version." + Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray Exit-WithCode 1 } -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." + Write-Color -Text "NOT FOUND" -Color Yellow + Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." -Color Yellow, Gray & "$openpype_root\tools\create_env.ps1" } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Cleaning cache files ... " -NoNewline +Write-Color -Text ">>> ", "Cleaning cache files ... " -Color Green, Gray -NoNewline Get-ChildItem $openpype_root -Filter "__pycache__" -Force -Recurse| Where-Object {( $_.FullName -inotmatch '\\build\\' ) -and ( $_.FullName -inotmatch '\\.venv' )} | Remove-Item -Force -Recurse Get-ChildItem $openpype_root -Filter "*.pyc" -Force -Recurse | Where-Object {( $_.FullName -inotmatch '\\build\\' ) -and ( $_.FullName -inotmatch '\\.venv' )} | Remove-Item -Force Get-ChildItem $openpype_root -Filter "*.pyo" -Force -Recurse | Where-Object {( $_.FullName -inotmatch '\\build\\' ) -and ( $_.FullName -inotmatch '\\.venv' )} | Remove-Item -Force -Write-Host "OK" -ForegroundColor green -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Generating zip from current sources ..." +Write-Color -Text ">>> ", "Generating zip from current sources ..." 
-Color Green, Gray $env:PYTHONPATH="$($openpype_root);$($env:PYTHONPATH)" $env:OPENPYPE_ROOT="$($openpype_root)" & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\tools\create_zip.py" $ARGS diff --git a/tools/fetch_thirdparty_libs.ps1 b/tools/fetch_thirdparty_libs.ps1 index 16f7b70e7a..4df007ad67 100644 --- a/tools/fetch_thirdparty_libs.ps1 +++ b/tools/fetch_thirdparty_libs.ps1 @@ -15,6 +15,9 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $env:_INSIDE_OPENPYPE_TOOL = "1" if (-not (Test-Path 'env:POETRY_HOME')) { @@ -23,16 +26,19 @@ if (-not (Test-Path 'env:POETRY_HOME')) { Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." + Write-Color -Text "NOT FOUND" -Color Yellow + Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." -Color Yellow, Gray & "$openpype_root\tools\create_env.ps1" } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } - +$startTime = [int][double]::Parse((Get-Date -UFormat %s)) & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\tools\fetch_thirdparty_libs.py" +$endTime = [int][double]::Parse((Get-Date -UFormat %s)) Set-Location -Path $current_dir +try +{ + New-BurntToastNotification -AppLogo "$openpype_root/openpype/resources/icons/openpype_icon.png" -Text "OpenPype", "Dependencies downloaded", "All done in $( $endTime - $startTime ) secs." +} catch {} \ No newline at end of file diff --git a/tools/make_docs.ps1 b/tools/make_docs.ps1 index 45a11171ae..43ecd0c09c 100644 --- a/tools/make_docs.ps1 +++ b/tools/make_docs.ps1 @@ -44,27 +44,30 @@ $art = @" "@ +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + Write-Host $art -ForegroundColor DarkGreen -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." - & "$openpype_root\tools\create_env.ps1" + Write-Color -Text "NOT FOUND" -Color Yellow + Install-Poetry + Write-Color -Text "INSTALLED" -Color Cyan } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } -Write-Host "This will not overwrite existing source rst files, only scan and add new." +Write-Color -Text "... ", "This will not overwrite existing source rst files, only scan and add new." 
-Color Yellow, Gray Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Running apidoc ..." +Write-Color -Text ">>> ", "Running apidoc ..." -Color Green, Gray & "$env:POETRY_HOME\bin\poetry" run sphinx-apidoc -M -e -d 10 --ext-intersphinx --ext-todo --ext-coverage --ext-viewcode -o "$($openpype_root)\docs\source" igniter & "$env:POETRY_HOME\bin\poetry" run sphinx-apidoc.exe -M -e -d 10 --ext-intersphinx --ext-todo --ext-coverage --ext-viewcode -o "$($openpype_root)\docs\source" openpype vendor, openpype\vendor -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Building html ..." +Write-Color -Text ">>> ", "Building html ..." -Color Green, Gray & "$env:POETRY_HOME\bin\poetry" run python "$($openpype_root)\setup.py" build_sphinx Set-Location -Path $current_dir diff --git a/tools/modules/powershell/BurntToast b/tools/modules/powershell/BurntToast new file mode 160000 index 0000000000..f58c9a26d6 --- /dev/null +++ b/tools/modules/powershell/BurntToast @@ -0,0 +1 @@ +Subproject commit f58c9a26d6ede30ecc7998e92b26974887e945fe diff --git a/tools/modules/powershell/PSWriteColor b/tools/modules/powershell/PSWriteColor new file mode 160000 index 0000000000..12eda384eb --- /dev/null +++ b/tools/modules/powershell/PSWriteColor @@ -0,0 +1 @@ +Subproject commit 12eda384ebd7a7954e15855e312215c009c97114 diff --git a/tools/pack_project.ps1 b/tools/pack_project.ps1 new file mode 100644 index 0000000000..856247f7ca --- /dev/null +++ b/tools/pack_project.ps1 @@ -0,0 +1,39 @@ +<# +.SYNOPSIS + Helper script for packing an OpenPype project. + +.DESCRIPTION + Once you are happy with the project and want to preserve it for future work, just change the project name on line 38 and copy the file into .\OpenPype\tools. Then use the command from .EXAMPLE. + +.EXAMPLE + +PS> .\tools\run_pack_project.ps1 + +#> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + +Set-Location -Path $openpype_root + +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... " -NoNewline +if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry to create virtual env first ..." + & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green +} + +& "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\start.py" pack-project --project $ARGS +Set-Location -Path $current_dir \ No newline at end of file diff --git a/tools/run_mongo.ps1 b/tools/run_mongo.ps1 index f6fa37207d..c64ff75969 100644 --- a/tools/run_mongo.ps1 +++ b/tools/run_mongo.ps1 @@ -11,6 +11,13 @@ PS> .\run_mongo.ps1 #> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $art = @" . . .. . .. 
@@ -43,8 +50,7 @@ function Exit-WithCode($exitcode) { function Find-Mongo ($preferred_version) { $defaultPath = "C:\Program Files\MongoDB\Server" - Write-Host ">>> " -NoNewLine -ForegroundColor Green - Write-Host "Detecting MongoDB ... " -NoNewline + Write-Color -Text ">>> ", "Detecting MongoDB ... " -Color Green, Gray -NoNewline if (-not (Get-Command "mongod" -ErrorAction SilentlyContinue)) { if(Test-Path "$($defaultPath)\*\bin\mongod.exe" -PathType Leaf) { # we have mongo server installed on standard Windows location @@ -52,17 +58,14 @@ function Find-Mongo ($preferred_version) { # $preferred_version. $mongoVersions = Get-ChildItem -Directory 'C:\Program Files\MongoDB\Server' | Sort-Object -Property {$_.Name -as [int]} if(Test-Path "$($mongoVersions[-1])\bin\mongod.exe" -PathType Leaf) { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green $use_version = $mongoVersions[-1] foreach ($v in $mongoVersions) { - Write-Host " - found [ " -NoNewline - Write-Host $v -NoNewLine -ForegroundColor Cyan - Write-Host " ]" -NoNewLine - + Write-Color -Text " - found [ ", $v, " ]" -Color Cyan, White, Cyan -NoNewLine $version = Split-Path $v -Leaf if ($preferred_version -eq $version) { - Write-Host " *" -ForegroundColor Green + Write-Color -Text " *" -Color Green $use_version = $v } else { Write-Host "" @@ -71,27 +74,20 @@ function Find-Mongo ($preferred_version) { $env:PATH = "$($env:PATH);$($use_version)\bin\" - Write-Host " - auto-added from [ " -NoNewline - Write-Host "$($use_version)\bin\mongod.exe" -NoNewLine -ForegroundColor Cyan - Write-Host " ]" + Write-Color -Text " - auto-added from [ ", "$($use_version)\bin\mongod.exe", " ]" -Color Cyan, White, Cyan return "$($use_version)\bin\mongod.exe" } else { - Write-Host "FAILED " -NoNewLine -ForegroundColor Red - Write-Host "MongoDB not detected" -ForegroundColor Yellow - Write-Host "Tried to find it on standard location " -NoNewline -ForegroundColor Gray - Write-Host " [ " -NoNewline -ForegroundColor Cyan - Write-Host "$($mongoVersions[-1])\bin\mongod.exe" -NoNewline -ForegroundColor White - Write-Host " ] " -NoNewLine -ForegroundColor Cyan - Write-Host "but failed." -ForegroundColor Gray + Write-Color -Text "FAILED " -Color Red -NoNewLine + Write-Color -Text "MongoDB not detected" -Color Yellow + Write-Color -Text "Tried to find it on standard location ", "[ ", "$($mongoVersions[-1])\bin\mongod.exe", " ]", " but failed." 
-Color Gray, Cyan, White, Cyan, Gray -NoNewline Exit-WithCode 1 } } else { - Write-Host "FAILED " -NoNewLine -ForegroundColor Red - Write-Host "MongoDB not detected in PATH" -ForegroundColor Yellow + Write-Color -Text "FAILED ", "MongoDB not detected in PATH" -Color Red, Yellow Exit-WithCode 1 } } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green return Get-Command "mongod" -ErrorAction SilentlyContinue } <# @@ -104,9 +100,6 @@ function Find-Mongo ($preferred_version) { #> } -$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent -$openpype_root = (Get-Item $script_dir).parent.FullName - # mongodb port $port = 2707 @@ -116,15 +109,7 @@ $dbpath = (Get-Item $openpype_root).parent.FullName + "\mongo_db_data" $preferred_version = "5.0" $mongoPath = Find-Mongo $preferred_version -Write-Host ">>> " -NoNewLine -ForegroundColor Green -Write-Host "Using DB path: " -NoNewLine -Write-Host " [ " -NoNewline -ForegroundColor Cyan -Write-Host "$($dbpath)" -NoNewline -ForegroundColor White -Write-Host " ] "-ForegroundColor Cyan -Write-Host ">>> " -NoNewLine -ForegroundColor Green -Write-Host "Port: " -NoNewLine -Write-Host " [ " -NoNewline -ForegroundColor Cyan -Write-Host "$($port)" -NoNewline -ForegroundColor White -Write-Host " ] " -ForegroundColor Cyan -Start-Process -FilePath $mongopath "--dbpath $($dbpath) --port $($port)" -PassThru | Out-Null +Write-Color -Text ">>> ", "Using DB path: ", "[ ", "$($dbpath)", " ]" -Color Green, Gray, Cyan, White, Cyan +Write-Color -Text ">>> ", "Port: ", "[ ", "$($port)", " ]" -Color Green, Gray, Cyan, White, Cyan +Start-Process -FilePath $mongopath "--dbpath $($dbpath) --port $($port)" -PassThru | Out-Null diff --git a/tools/run_project_manager.ps1 b/tools/run_project_manager.ps1 index a9cfbb1e7b..c1813e4ed9 100644 --- a/tools/run_project_manager.ps1 +++ b/tools/run_project_manager.ps1 @@ -35,6 +35,9 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $env:_INSIDE_OPENPYPE_TOOL = "1" # make sure Poetry is in PATH @@ -45,15 +48,13 @@ $env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." 
- & "$openpype_root\tools\create_env.ps1" + Write-Color -Text "NOT FOUND" -Color Yellow + Install-Poetry + Write-Color -Text "INSTALLED" -Color Cyan } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } & "$env:POETRY_HOME\bin\poetry" run python "$($openpype_root)\start.py" projectmanager diff --git a/tools/run_settings.ps1 b/tools/run_settings.ps1 index 1c0aa6e8f3..c74ae1ea3a 100644 --- a/tools/run_settings.ps1 +++ b/tools/run_settings.ps1 @@ -15,6 +15,9 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $env:_INSIDE_OPENPYPE_TOOL = "1" # make sure Poetry is in PATH @@ -25,15 +28,13 @@ $env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." - & "$openpype_root\tools\create_env.ps1" + Write-Color -Text "NOT FOUND" -Color Yellow + Install-Poetry + Write-Color -Text "INSTALLED" -Color Cyan } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } & "$env:POETRY_HOME\bin\poetry" run python "$($openpype_root)\start.py" settings --dev diff --git a/tools/run_tests.ps1 b/tools/run_tests.ps1 index e631cb72df..4fa598c413 100644 --- a/tools/run_tests.ps1 +++ b/tools/run_tests.ps1 @@ -11,6 +11,13 @@ PS> .\run_test.ps1 #> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + function Exit-WithCode($exitcode) { # Only exit this host process if it's a child of another PowerShell parent process... $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId @@ -22,10 +29,8 @@ function Exit-WithCode($exitcode) { function Show-PSWarning() { if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Host "!!! " -NoNewline -ForegroundColor Red - Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" - Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray - Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Write-Color -Text "!!! ", "You are using old version of PowerShell - ", "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" -Color Red, Yellow, White + Write-Color -Text " Please update to at least 7.0 - ", "https://github.com/PowerShell/PowerShell/releases" -Color Yellow, White Exit-WithCode 1 } } @@ -53,10 +58,6 @@ Write-Host $art -ForegroundColor DarkGreen # Enable if PS 7.x is needed. 
# Show-PSWarning -$current_dir = Get-Location -$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent -$openpype_root = (Get-Item $script_dir).parent.FullName - $env:_INSIDE_OPENPYPE_TOOL = "1" if (-not (Test-Path 'env:POETRY_HOME')) { @@ -69,46 +70,32 @@ $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" $result = [regex]::Matches($version_file, '__version__ = "(?\d+\.\d+.\d+.*)"') $openpype_version = $result[0].Groups['version'].Value if (-not $openpype_version) { - Write-Host "!!! " -ForegroundColor yellow -NoNewline - Write-Host "Cannot determine OpenPype version." + Write-Color -Text "!!! ", "Cannot determine OpenPype version." -Color Yellow, Gray Exit-WithCode 1 } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "OpenPype [ " -NoNewline -ForegroundColor white -Write-host $openpype_version -NoNewline -ForegroundColor green -Write-Host " ] ..." -ForegroundColor white +Write-Color -Text ">>> ", "OpenPype [ ", $openpype_version, " ]" -Color Green, White, Cyan, White -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." + Write-Color -Text "NOT FOUND" -Color Yellow + Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." -Color Yellow, Gray & "$openpype_root\tools\create_env.ps1" } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Cleaning cache files ... " -NoNewline +Write-Color -Text ">>> ", "Cleaning cache files ... " -Color Green, Gray -NoNewline Get-ChildItem $openpype_root -Filter "*.pyc" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force +Get-ChildItem $openpype_root -Filter "*.pyo" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force Get-ChildItem $openpype_root -Filter "__pycache__" -Force -Recurse | Where-Object { $_.FullName -inotmatch 'build' } | Remove-Item -Force -Recurse -Write-Host "OK" -ForegroundColor green +Write-Color -Text "OK" -Color green -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Testing OpenPype ..." +Write-Color -Text ">>> ", "Testing OpenPype ..." 
-Color Green, White $original_pythonpath = $env:PYTHONPATH $env:PYTHONPATH="$($openpype_root);$($env:PYTHONPATH)" & "$env:POETRY_HOME\bin\poetry" run pytest -x --capture=sys --print -W ignore::DeprecationWarning "$($openpype_root)/tests" $env:PYTHONPATH = $original_pythonpath -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "restoring current directory" +Write-Color -Text ">>> ", "Restoring current directory" -Color Green, Gray Set-Location -Path $current_dir - - - - - - diff --git a/tools/run_tray.ps1 b/tools/run_tray.ps1 index 872c1524a6..40157c4e81 100644 --- a/tools/run_tray.ps1 +++ b/tools/run_tray.ps1 @@ -14,6 +14,9 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# Install PSWriteColor to support colorized output to terminal +$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell" + $env:_INSIDE_OPENPYPE_TOOL = "1" # make sure Poetry is in PATH @@ -24,15 +27,13 @@ $env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" Set-Location -Path $openpype_root -Write-Host ">>> " -NoNewline -ForegroundColor Green -Write-Host "Reading Poetry ... " -NoNewline +Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { - Write-Host "NOT FOUND" -ForegroundColor Yellow - Write-Host "*** " -NoNewline -ForegroundColor Yellow - Write-Host "We need to install Poetry create virtual env first ..." + Write-Color -Text "NOT FOUND" -Color Yellow + Write-Color -Text "*** ", "We need to install Poetry to create virtual env first ..." -Color Yellow, Gray + & "$openpype_root\tools\create_env.ps1" } else { - Write-Host "OK" -ForegroundColor Green + Write-Color -Text "OK" -Color Green } & "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\start.py" tray --debug diff --git a/tools/unpack_project.ps1 b/tools/unpack_project.ps1 new file mode 100644 index 0000000000..e7b9e87a7f --- /dev/null +++ b/tools/unpack_project.ps1 @@ -0,0 +1,39 @@ +<# +.SYNOPSIS + Helper script for unpacking an OpenPype project. + +.DESCRIPTION + Make sure you have dropped the project from your db and removed the project data in case you had them previously. Then on line 38 change the path to where the zip with the project is - usually we keep it here https://drive.google.com/drive/u/0/folders/0AKE4mxImOsAGUk9PVA . Copy the file into .\OpenPype\tools. Then use the command from .EXAMPLE. + +.EXAMPLE + +PS> .\tools\run_unpack_project.ps1 + +#> +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + +Set-Location -Path $openpype_root + +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... " -NoNewline +if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry to create virtual env first ..." + & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green +} + +& "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\start.py" unpack-project --zipfile $ARGS +Set-Location -Path $current_dir \ No newline at end of file diff --git a/website/docs/admin_hosts_hiero.md b/website/docs/admin_hosts_hiero.md new file mode 100644 index 0000000000..b75d8dee7d --- /dev/null +++ b/website/docs/admin_hosts_hiero.md @@ -0,0 +1,9 @@ +--- +id: admin_hosts_hiero +title: Hiero +sidebar_label: Hiero +--- + +## Custom Menu +You can add your custom tools menu into Hiero by extending definitions in **Hiero -> Scripts Menu Definition**. +![Custom menu definition](assets/hiero-admin_scriptsmenu.png) diff --git a/website/docs/admin_openpype_commands.md b/website/docs/admin_openpype_commands.md index 53b4799d6e..53fc12410f 100644 --- a/website/docs/admin_openpype_commands.md +++ b/website/docs/admin_openpype_commands.md @@ -45,6 +45,7 @@ For more information [see here](admin_use.md#run-openpype). | publish | Pype takes JSON from provided path and use it to publish data in it. | [📑](#publish-arguments) | | extractenvironments | Extract environment variables for entered context to a json file. | [📑](#extractenvironments-arguments) | | run | Execute given python script within OpenPype environment. | [📑](#run-arguments) | +| interactive | Start a Python-like interactive console session. | | | projectmanager | Launch Project Manager UI | [📑](#projectmanager-arguments) | | settings | Open Settings UI | [📑](#settings-arguments) | | standalonepublisher | Open Standalone Publisher UI | [📑](#standalonepublisher-arguments) | diff --git a/website/docs/admin_use.md b/website/docs/admin_use.md index f84905c486..1c4ae9e01c 100644 --- a/website/docs/admin_use.md +++ b/website/docs/admin_use.md @@ -105,6 +105,10 @@ save it in secure way to your systems keyring - on Windows it is **Credential Ma This can be also set beforehand with environment variable `OPENPYPE_MONGO`. If set it takes precedence over the one set in keyring. +:::tip Minimal permissions for DB user +- `readWrite` role to `openpype` and `avalon` databases +- `find` permission on `openpype`, `avalon` and `local` +::: + #### Check for OpenPype version path When connection to MongoDB is made, OpenPype will get various settings from there - one among them is directory location where OpenPype versions are stored. If this directory exists OpenPype tries to diff --git a/website/docs/assets/hiero-admin_scriptsmenu.png b/website/docs/assets/hiero-admin_scriptsmenu.png new file mode 100644 index 0000000000..6de136a434 Binary files /dev/null and b/website/docs/assets/hiero-admin_scriptsmenu.png differ diff --git a/website/docs/dev_host_implementation.md b/website/docs/dev_host_implementation.md new file mode 100644 index 0000000000..3702483ad1 --- /dev/null +++ b/website/docs/dev_host_implementation.md @@ -0,0 +1,89 @@ +--- +id: dev_host_implementation +title: Host implementation +sidebar_label: Host implementation +toc_max_heading_level: 4 +--- + +A host is an integration of a DCC, but in most cases it also has logic that needs to be handled before the DCC is launched. Based on the abilities (or purpose) of the DCC, the integration can then support different pipeline workflows. + +## Pipeline workflows +The workflows available in OpenPype are Workfiles, Load and Create-Publish. Each of them may require some functionality from the integration (e.g. calling the host API to achieve certain functionality). We'll go through them later. 
+ +## How to implement and manage host +At this moment there is no fully unified way a host should be implemented, but we're working on it. A host should have "public face" code that can be used outside of the DCC, and in-DCC integration code. The main reason is that in-DCC code can have specific dependencies on python modules that are not available outside of its process. Hosts are located in the `openpype/hosts/{host name}` folder. Current code (in many places) expects that the host name has an equivalent folder there, so each subfolder should be named after the host it represents. + +### Recommended folder structure +```python +openpype/hosts/{host name} +│ +│ # Content of DCC integration - with in-DCC imports +├─ api +│ ├─ __init__.py +│ └─ [DCC integration files] +│ +│ # Plugins related to host - dynamically imported (can contain in-DCC imports) +├─ plugins +│ ├─ create +│ │ └─ [create plugin files] +│ ├─ load +│ │ └─ [load plugin files] +│ └─ publish +│ └─ [publish plugin files] +│ +│ # Launch hooks - used to modify how application is launched +├─ hooks +│ └─ [some pre/post launch hooks] +│ +│ # Code initializing host integration in-DCC (DCC specific - example from Maya) +├─ startup +│ └─ userSetup.py +│ +│ # Public interface +├─ __init__.py +└─ [other public code] +``` + +### Launch Hooks +Launch hooks are not directly connected to the host implementation, but they can be used to modify the launch of the process, which may be crucial for the implementation. Launch hooks are plugins called when a DCC is launched; they are processed in sequence before and after the launch. Pre-launch hooks can change how the DCC process is launched, e.g. change subprocess flags, modify environments or modify launch arguments. If a pre-launch hook crashes, the application is not launched at all. Post-launch hooks are triggered after the launch of the subprocess. They can be used to change statuses in your project tracker, start a timer, etc. Crashed post-launch hooks have no effect on the rest of the post-launch hooks or on the launched process. Hooks can be filtered by platform, host and application, and their order is defined by an integer value. Hooks inside a host are automatically loaded (one reason why the folder name should match the host name) or can be defined from modules. Hook execution shares the same launch context, where data used across multiple hooks can be stored (please be very specific in stored keys, e.g. 'project' vs. 'project_name'). For more detailed information look into `openpype/lib/applications.py`. + +### Public interface +The public face is at this moment related to launching the DCC. Currently the only option is to modify environment variables before launch by implementing the function `add_implementation_envs` (it must be available in `openpype/hosts/{host name}/__init__.py`). The function is called after pre-launch hooks, as the last step before the subprocess launch, to be able to set environment variables crucial for a proper integration. It is also a good place for functions that are used both in pre-launch hooks and in the in-DCC integration. Future plans are to be able to get workfile extensions from here as well. Right now workfile extensions are hardcoded in `openpype/pipeline/constants.py` under `HOST_WORKFILE_EXTENSIONS`; we would like to handle hosts as addons similar to OpenPype modules, and improve more things that are now hardcoded. +
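+A minimal sketch of such a function (the exact signature and the environment key used here are illustrative assumptions - check an existing host's `__init__.py` for the real shape):
+
+```python
+# openpype/hosts/{host name}/__init__.py
+def add_implementation_envs(env, app):
+    """Modify environment variables as the last step before DCC launch.
+
+    Args:
+        env (dict): Environment variables passed to the launched process.
+        app: Application object of the launched DCC.
+    """
+
+    # Hypothetical variable that the in-DCC integration would read on startup
+    env["OPENPYPE_MYHOST_STARTUP"] = "1"
+```
+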
+### Integration +We've prepared the base class `HostBase` in `openpype/host/host.py` to define minimum requirements and provide some default method implementations. The minimum requirement for a host is the `name` attribute; such a host would not be able to do much, but it is valid. To extend functionality we've prepared interfaces that help to identify what a host is capable of and whether it is possible to use certain tools with it. For those cases we defined interfaces for each workflow. The `IWorkfileHost` interface adds the requirement to implement workfile related methods, which makes the host usable in combination with the Workfiles tool. The `ILoadHost` interface adds requirements to be able to load, update, switch or remove referenced representations, which adds support for the Loader and Scene Inventory tools. The `INewPublisher` interface is required to be able to use the host with the new OpenPype publish workflow. This is what must or can be implemented to allow certain functionality. `HostBase` will get more responsibility, which will be taken over from global variables in the future. This process won't happen at once, but will be slow, to keep backwards compatibility for some time. + +#### Example +```python +from openpype.host import HostBase, IWorkfileHost, ILoadHost + + +class MayaHost(HostBase, IWorkfileHost, ILoadHost): + def open_workfile(self, filepath): + ... + + def save_current_workfile(self, filepath=None): + ... + + def get_current_workfile(self): + ... + ... +``` + +### Install integration +We have prepared a host class; now where and how do we initialize its object? This part is DCC specific. In DCCs like Maya, with embedded python and Qt, we take advantage of being able to initialize an object of the class directly in the DCC process on start; the same happens in Nuke, Hiero and Houdini. For DCCs like Photoshop or Harmony, an OpenPype (python) process is launched next to the DCC which handles the host initialization and the communication with the DCC process (e.g. using sockets). The created host object must be installed and registered to the global scope of OpenPype, which means that at this moment one process can handle only one host at a time. + +#### Install example (Maya startup file) +```python +from openpype.pipeline import install_host +from openpype.hosts.maya.api import MayaHost + + +host = MayaHost() +install_host(host) +``` + +Function `install_host` takes care of installing global plugins and callbacks and of registering the host. Host registration means that the object is kept in memory and is accessible using `get_registered_host()`. + +### Using UI tools +Most functionality in DCCs is provided to artists through UI tools. We're trying to keep the UIs consistent, so we use the same set of tools in each host; all or most of them are Qt based. There is a `HostToolsHelper` in `openpype/tools/utils/host_tools.py` which unifies showing of the default tools; they can be shown at almost any point. Some of them validate whether the host is capable of using them (Workfiles, Loader and Scene Inventory), which is related to [pipeline workflows](#pipeline-workflows). `HostToolsHelper` provides an API to show the tools, but the host integration must take care of giving artists the ability to show them. Most DCCs have some extendable menu bar where it is possible to add custom actions, which is the preferred approach to expose the tools to artists (see the sketch below).
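+
+For illustration, a menu action in the DCC could show a tool through `HostToolsHelper` (a minimal sketch; the method name and the parent window lookup are assumptions - check `openpype/tools/utils/host_tools.py` for the real API):
+
+```python
+from openpype.tools.utils.host_tools import HostToolsHelper
+
+tools_helper = HostToolsHelper()
+
+
+def on_workfiles_action():
+    # Parent under the DCC main window when possible (host specific lookup,
+    # left as None in this sketch)
+    main_window = None
+    tools_helper.show_workfiles(parent=main_window)
+```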
diff --git a/website/docs/dev_publishing.md b/website/docs/dev_publishing.md
index 8ee3b7e85f..f11a2c3047 100644
--- a/website/docs/dev_publishing.md
+++ b/website/docs/dev_publishing.md
@@ -66,7 +66,7 @@ Another optional function is **get_current_context**.
 This function is handy in
 Main responsibility of create plugin is to create, update, collect and remove instance metadata and propagate changes to create context. Has access to **CreateContext** (`self.create_context`) that discovered the plugin so has also access to other creators and instances. Create plugins have a lot of responsibility so it is recommended to implement common code per host.
 
 #### *BaseCreator*
-Base implementation of creator plugin. It is not recommended to use this class as base for production plugins but rather use one of **AutoCreator** and **Creator** variants.
+Base implementation of creator plugin. It is not recommended to use this class as a base for production plugins but rather use one of the **HiddenCreator**, **AutoCreator** and **Creator** variants.
 
 **Abstractions**
 - **`family`** (class attr) - Tells what kind of instance will be created.
@@ -92,7 +92,7 @@ def collect_instances(self):
         self._add_instance_to_context(instance)
 ```
 
-- **`create`** (method) - Create a new object of **CreatedInstance** store its metadata to the workfile and add the instance into the created context. Failed Creating should raise **CreatorError** if an error happens that artists can fix or give them some useful information. Triggers and implementation differs for **Creator** and **AutoCreator**.
+- **`create`** (method) - Create a new object of **CreatedInstance**, store its metadata to the workfile and add the instance into the create context. Creation should raise **CreatorError** if an error happens that artists can fix, to give them some useful information. Triggers and implementation differ for **Creator**, **HiddenCreator** and **AutoCreator**.
 - **`update_instances`** (method) - Update data of instances. Receives tuple with **instance** and **changes**.
 
 ```python
@@ -172,11 +172,11 @@ class RenderLayerCreator(Creator):
     icon = "fa5.building"
 ```
 
-- **`get_instance_attr_defs`** (method) - Attribute definitions of instance. Creator can define attribute values with default values for each instance. These attributes may affect how instances will be instance processed during publishing. Attribute defiitions can be used from `openpype.pipeline.lib.attribute_definitions` (NOTE: Will be moved to `openpype.lib.attribute_definitions` soon). Attribute definitions define basic types of values for different cases e.g. boolean, number, string, enumerator, etc. Default implementation returns **instance_attr_defs**.
+- **`get_instance_attr_defs`** (method) - Attribute definitions of an instance. A creator can define attributes with default values for each instance. These attributes may affect how instances are processed during publishing. Attribute definitions can be used from `openpype.lib.attribute_definitions`. Attribute definitions cover basic types of values for different cases, e.g. boolean, number, string, enumerator, etc. Default implementation returns **instance_attr_defs**.
 - **`instance_attr_defs`** (attr) - Attribute for default implementation of **get_instance_attr_defs**.
 
 ```python
-from openpype.pipeline import attribute_definitions
+from openpype.lib import attribute_definitions
 
 
 class RenderLayerCreator(Creator):
@@ -199,6 +199,20 @@ class RenderLayerCreator(Creator):
 
 - **`get_dynamic_data`** (method) - Can be used to extend data for subset templates which may be required in some cases.
 
+#### *HiddenCreator*
+Creator which is not shown in the UI, so an artist can't trigger it directly, but it is available to other creators.
+It is primarily meant for cases when a single creation should produce different types of instances - for example during editorial publishing, where the input is a single edl file but 2 or more kinds of instances should be created, each with a different family, attributes and abilities. The arguments for creation are limited to `instance_data` and `source_data`. The data in `instance_data` should follow what is sent to other creators, and `source_data` can be used to send custom data defined by the main creator. It is expected that a `HiddenCreator` has a specific main or "parent" creator.
+
+```python
+def create(self, instance_data, source_data):
+    variant = instance_data["variant"]
+    task_name = instance_data["task"]
+    asset_name = instance_data["asset"]
+    # `get_asset_by_name` is one of the query functions from `openpype.client`
+    asset_doc = get_asset_by_name(self.project_name, asset_name)
+    subset_name = self.get_subset_name(
+        variant, task_name, asset_doc, self.project_name, self.host_name)
+```
+
 #### *AutoCreator*
 Creator that is triggered on reset of create context. Can be used for families that are expected to be created automatically without artist interaction (e.g. **workfile**). Method `create` is triggered after collecting all creators.
@@ -234,14 +248,14 @@ def create(self):
     # - variant can be filled from settings
     variant = self._variant_name
     # Only place where we can look for current context
-    project_name = io.Session["AVALON_PROJECT"]
-    asset_name = io.Session["AVALON_ASSET"]
-    task_name = io.Session["AVALON_TASK"]
-    host_name = io.Session["AVALON_APP"]
+    project_name = self.project_name
+    asset_name = legacy_io.Session["AVALON_ASSET"]
+    task_name = legacy_io.Session["AVALON_TASK"]
+    host_name = legacy_io.Session["AVALON_APP"]
 
     # Create new instance if does not exist yet
     if existing_instance is None:
-        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        asset_doc = get_asset_by_name(project_name, asset_name)
         subset_name = self.get_subset_name(
             variant, task_name, asset_doc, project_name, host_name
         )
@@ -264,7 +278,7 @@ def create(self):
         existing_instance["asset"] != asset_name
         or existing_instance["task"] != task_name
     ):
-        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        asset_doc = get_asset_by_name(project_name, asset_name)
         subset_name = self.get_subset_name(
             variant, task_name, asset_doc, project_name, host_name
         )
@@ -297,7 +311,8 @@ class BulkRenderCreator(Creator):
 
 - **`pre_create_attr_defs`** (attr) - Attribute for default implementation of **get_pre_create_attr_defs**.
 
 ```python
-from openpype.pipeline import Creator, attribute_definitions
+from openpype.lib import attribute_definitions
+from openpype.pipeline.create import Creator
 
 
 class CreateRender(Creator):
@@ -470,10 +485,8 @@ Possible attribute definitions can be found in `openpype/pipeline/lib/attribute_
 ```python
 import pyblish.api
 
-from openpype.pipeline import (
-    OpenPypePyblishPluginMixin,
-    attribute_definitions,
-)
+from openpype.lib import attribute_definitions
+from openpype.pipeline import OpenPypePyblishPluginMixin
 
 
 # Example context plugin
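As an aside to the `HiddenCreator` addition above, a hypothetical "parent" creator could delegate to a hidden one roughly like this - the creator names, the `source_data` payload and the assumption that `create_context.creators` maps identifiers to creator objects are all invented for this sketch, not taken from the patch:

```python
from openpype.pipeline.create import Creator, HiddenCreator


class EditorialShotCreator(HiddenCreator):
    """Hidden creator producing shot instances; never listed in the UI."""

    identifier = "editorial.shot"
    family = "shot"

    def create(self, instance_data, source_data):
        # Build a CreatedInstance from `instance_data` and the custom
        # `source_data` sent by the main creator (omitted in this sketch).
        ...


class EditorialCreator(Creator):
    """Main creator that fans one EDL input out into hidden creations."""

    identifier = "editorial"
    family = "editorial"
    label = "Editorial"

    def create(self, subset_name, instance_data, pre_create_data):
        # Assumed lookup: creators discovered by the shared CreateContext
        shot_creator = self.create_context.creators[
            EditorialShotCreator.identifier
        ]
        # `source_data` is fully defined by this "parent" creator
        for clip_name in ("sh010", "sh020"):
            shot_creator.create(dict(instance_data), {"clip_name": clip_name})
```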
diff --git a/website/sidebars.js b/website/sidebars.js
index d4fec9fba2..9d60a5811c 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -100,6 +100,7 @@ module.exports = {
         label: "Integrations",
         items: [
           "admin_hosts_blender",
+          "admin_hosts_hiero",
           "admin_hosts_maya",
           "admin_hosts_nuke",
           "admin_hosts_resolve",
@@ -155,6 +156,7 @@ module.exports = {
         type: "category",
         label: "Hosts integrations",
         items: [
+          "dev_host_implementation",
           "dev_publishing"
         ]
       }